max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
clothing_exchange/exchange_app/models.py | GALesniak/clothing_exchange | 0 | 6614651 | from django.db import models
from accounts.models import CustomUser
# Create your models here.
TYPEofINSTITUTION = (
(1, 'FUNACJA'),
(2, 'ORGANIZACJA POZARZĄDOWA'),
(3, 'ZBIÓRKA LOKALNA')
)
class Category(models.Model):
name = models.CharField(max_length=255, blank=False, null=False)
class Institution(models.Model):
name = models.CharField(max_length=255, blank=False, null=False)
description = models.CharField(max_length=255, blank=True, null=True)
type = models.IntegerField(choices=TYPEofINSTITUTION, default=1)
categories = models.ManyToManyField(Category)
class Donation(models.Model):
quantity = models.PositiveIntegerField()
categories = models.ManyToManyField(Category)
institution = models.ForeignKey(Institution, on_delete=models.CASCADE)
address_1 = models.CharField(max_length=255)
address_2 = models.PositiveIntegerField()
phonenumber = models.IntegerField()
city = models.CharField(max_length=255)
zip_code = models.CharField(max_length=6)
pick_up_date = models.DateTimeField(null=True)
pick_up_time = models.DateTimeField(null=True)
pick_up_comment = models.CharField(max_length=255)
user = models.ForeignKey(CustomUser, null=True, on_delete=models.SET_NULL)
| from django.db import models
from accounts.models import CustomUser
# Create your models here.
TYPEofINSTITUTION = (
(1, 'FUNACJA'),
(2, 'ORGANIZACJA POZARZĄDOWA'),
(3, 'ZBIÓRKA LOKALNA')
)
class Category(models.Model):
name = models.CharField(max_length=255, blank=False, null=False)
class Institution(models.Model):
name = models.CharField(max_length=255, blank=False, null=False)
description = models.CharField(max_length=255, blank=True, null=True)
type = models.IntegerField(choices=TYPEofINSTITUTION, default=1)
categories = models.ManyToManyField(Category)
class Donation(models.Model):
quantity = models.PositiveIntegerField()
categories = models.ManyToManyField(Category)
institution = models.ForeignKey(Institution, on_delete=models.CASCADE)
address_1 = models.CharField(max_length=255)
address_2 = models.PositiveIntegerField()
phonenumber = models.IntegerField()
city = models.CharField(max_length=255)
zip_code = models.CharField(max_length=6)
pick_up_date = models.DateTimeField(null=True)
pick_up_time = models.DateTimeField(null=True)
pick_up_comment = models.CharField(max_length=255)
user = models.ForeignKey(CustomUser, null=True, on_delete=models.SET_NULL)
| en | 0.963489 | # Create your models here. | 2.213235 | 2 |
PaperwithCode/3.P-tuning/construct_query_label_demo.py | techthiyanes/nlp-notebook | 136 | 6614652 | <reponame>techthiyanes/nlp-notebook
# -*- coding: utf-8 -*-
import torch
from torch.nn.utils.rnn import pad_sequence
cls_token_id = 102
sep_token_id = 103
mask_token_id = 2
pseudo_token_id = 1
unk_token_id = 3
template = (2,2,2)
x_h_1 = 90
x_h_2 = 80
x_t_1 = 100
x_t_2 = 200
batch_size = 2
queries = [torch.LongTensor([cls_token_id,pseudo_token_id,pseudo_token_id,mask_token_id,pseudo_token_id,pseudo_token_id,x_h_1,pseudo_token_id,pseudo_token_id,sep_token_id]),
torch.LongTensor([cls_token_id,pseudo_token_id,pseudo_token_id,mask_token_id,pseudo_token_id,pseudo_token_id,x_h_2,pseudo_token_id,pseudo_token_id,sep_token_id])]
#print(queries)
queries = pad_sequence(queries, True, padding_value=0).long()
print(queries)
queries_for_embedding = queries.clone()
queries_for_embedding[(queries == pseudo_token_id)] = unk_token_id
print(queries_for_embedding)
#raw_embeds = embeddings(queries_for_embedding)
print('-------------------------------------------')
print((queries == pseudo_token_id))
print((queries == pseudo_token_id).nonzero())
print((queries == pseudo_token_id).nonzero().reshape((batch_size, sum(template), 2)))
blocked_indices = (queries == 1).nonzero().reshape((batch_size, sum(template), 2))[:, :, 1]
print(blocked_indices)
#根据每个BATCH中为pseudo_token_id的索引,使用prompt_encoder的结果进行替代
#replace_embeds = prompt_encoder()
#for bidx in range(bz):
# for i in range(self.prompt_encoder.spell_length):
# raw_embeds[bidx, blocked_indices[bidx, i], :] = replace_embeds[i, :]
print('-------------------------------------------')
print((queries == mask_token_id))
print((queries == mask_token_id).nonzero())
print((queries == mask_token_id).nonzero().reshape(batch_size, -1))
print((queries == mask_token_id).nonzero().reshape(batch_size, -1)[:, 1])
label_mask = (queries == mask_token_id).nonzero().reshape(batch_size, -1)[:, 1].unsqueeze(1)
print(label_mask)
labels = torch.empty_like(queries).fill_(-100).long()
print(labels)
label_ids = torch.LongTensor([x_t_1, x_t_2]).reshape((batch_size, -1))
print(label_ids)
labels = labels.scatter_(1, label_mask, label_ids)
print(labels) | # -*- coding: utf-8 -*-
import torch
from torch.nn.utils.rnn import pad_sequence
cls_token_id = 102
sep_token_id = 103
mask_token_id = 2
pseudo_token_id = 1
unk_token_id = 3
template = (2,2,2)
x_h_1 = 90
x_h_2 = 80
x_t_1 = 100
x_t_2 = 200
batch_size = 2
queries = [torch.LongTensor([cls_token_id,pseudo_token_id,pseudo_token_id,mask_token_id,pseudo_token_id,pseudo_token_id,x_h_1,pseudo_token_id,pseudo_token_id,sep_token_id]),
torch.LongTensor([cls_token_id,pseudo_token_id,pseudo_token_id,mask_token_id,pseudo_token_id,pseudo_token_id,x_h_2,pseudo_token_id,pseudo_token_id,sep_token_id])]
#print(queries)
queries = pad_sequence(queries, True, padding_value=0).long()
print(queries)
queries_for_embedding = queries.clone()
queries_for_embedding[(queries == pseudo_token_id)] = unk_token_id
print(queries_for_embedding)
#raw_embeds = embeddings(queries_for_embedding)
print('-------------------------------------------')
print((queries == pseudo_token_id))
print((queries == pseudo_token_id).nonzero())
print((queries == pseudo_token_id).nonzero().reshape((batch_size, sum(template), 2)))
blocked_indices = (queries == 1).nonzero().reshape((batch_size, sum(template), 2))[:, :, 1]
print(blocked_indices)
#根据每个BATCH中为pseudo_token_id的索引,使用prompt_encoder的结果进行替代
#replace_embeds = prompt_encoder()
#for bidx in range(bz):
# for i in range(self.prompt_encoder.spell_length):
# raw_embeds[bidx, blocked_indices[bidx, i], :] = replace_embeds[i, :]
print('-------------------------------------------')
print((queries == mask_token_id))
print((queries == mask_token_id).nonzero())
print((queries == mask_token_id).nonzero().reshape(batch_size, -1))
print((queries == mask_token_id).nonzero().reshape(batch_size, -1)[:, 1])
label_mask = (queries == mask_token_id).nonzero().reshape(batch_size, -1)[:, 1].unsqueeze(1)
print(label_mask)
labels = torch.empty_like(queries).fill_(-100).long()
print(labels)
label_ids = torch.LongTensor([x_t_1, x_t_2]).reshape((batch_size, -1))
print(label_ids)
labels = labels.scatter_(1, label_mask, label_ids)
print(labels) | en | 0.257679 | # -*- coding: utf-8 -*- #print(queries) #raw_embeds = embeddings(queries_for_embedding) #根据每个BATCH中为pseudo_token_id的索引,使用prompt_encoder的结果进行替代 #replace_embeds = prompt_encoder() #for bidx in range(bz): # for i in range(self.prompt_encoder.spell_length): # raw_embeds[bidx, blocked_indices[bidx, i], :] = replace_embeds[i, :] | 2.357526 | 2 |
misc/py/dex_binary_object.py | apaszke/dex-lang | 1 | 6614653 | <filename>misc/py/dex_binary_object.py
# Copyright 2019 Google LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
import itertools as it
from collections import namedtuple
import numpy as np
TabType = namedtuple('TabType', ['index_set', 'element_type'])
preheader_length = 81
preheader_start = "-- dex-object-file-v0.0.1 num-header-bytes "
def dump(obj, f):
ty = get_dex_ty(obj)
buffers = flatten_to_buffers(obj)
ty_str = "type: {}\n".format(pprint_ty(ty))
sizes_str = "bufferSizes: [{}]\n".format(", ".join([str(get_buffer_size(x))
for x in buffers]))
header_size = preheader_length + len(ty_str) + len(sizes_str)
pre_header_str = make_preheader(header_size)
header = pre_header_str + ty_str + sizes_str
assert header_size == len(header)
f.write(header)
f.flush()
for b in buffers:
buf_bytes = b.tobytes()
assert len(buf_bytes) == get_buffer_size(b), \
"{} {} != {}".format(b, len(buf_bytes), get_buffer_size(b))
f.buffer.write(buf_bytes)
f.flush()
def get_dex_ty(obj):
if isinstance(obj, tuple):
return tuple(get_dex_ty(x) for x in obj)
elif isinstance(obj, np.ndarray):
base_ty = dtype_to_dex_ty(obj.dtype)
return make_tab_type(base_ty, obj.shape)
elif isinstance(obj, float):
return float
elif isinstance(obj, bool):
return bool
elif isinstance(obj, int):
return int
else:
raise Exception("No corresponding Dex type for {}".format(type(obj)))
def flatten_to_buffers(obj):
if isinstance(obj, tuple):
return tuple(it.chain(*(flatten_to_buffers(x) for x in obj)))
elif isinstance(obj, np.ndarray):
flat_array = obj.ravel()
if obj.dtype == np.bool:
return [np.asarray(flat_array, dtype=np.int64)]
else:
return [flat_array]
elif isinstance(obj, float):
return [np.array(obj, dtype=np.float64)]
elif isinstance(obj, bool):
return [np.array(obj, dtype=np.int64)]
elif isinstance(obj, int):
return [np.array(obj, dtype=np.int64)]
else:
raise Exception("No corresponding Dex type for {}".format(type(obj)))
def dtype_to_dex_ty(dtype):
if dtype == np.float64:
return float
elif dtype == np.int64:
return int
elif dtype == np.bool:
return bool
else:
raise Exception("Unrecognized dtype: " + str(dtype))
def make_tab_type(base_ty, shape):
shape = tuple(shape)
if shape == ():
return base_ty
else:
(n, *rest) = shape
return TabType(n, make_tab_type(base_ty, rest))
def get_buffer_size(array):
return array.size * 8
def pprint_ty(ty):
if isinstance(ty, TabType):
return "{}=>{}".format(str(ty.index_set), pprint_ty(ty.element_type))
elif isinstance(ty, tuple):
return "({})".format(", ".join(map(pprint_ty, ty)))
if ty is int:
return "Int"
elif ty is float:
return "Real"
elif ty is bool:
return "Bool"
else:
raise Exception("Can't print type: {}".format(ty))
def make_preheader(n):
preheader_prefix = preheader_start + str(n) + " "
padding = '-' * (preheader_length - len(preheader_prefix) - 1) + "\n"
return preheader_prefix + padding
| <filename>misc/py/dex_binary_object.py
# Copyright 2019 Google LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
import itertools as it
from collections import namedtuple
import numpy as np
TabType = namedtuple('TabType', ['index_set', 'element_type'])
preheader_length = 81
preheader_start = "-- dex-object-file-v0.0.1 num-header-bytes "
def dump(obj, f):
ty = get_dex_ty(obj)
buffers = flatten_to_buffers(obj)
ty_str = "type: {}\n".format(pprint_ty(ty))
sizes_str = "bufferSizes: [{}]\n".format(", ".join([str(get_buffer_size(x))
for x in buffers]))
header_size = preheader_length + len(ty_str) + len(sizes_str)
pre_header_str = make_preheader(header_size)
header = pre_header_str + ty_str + sizes_str
assert header_size == len(header)
f.write(header)
f.flush()
for b in buffers:
buf_bytes = b.tobytes()
assert len(buf_bytes) == get_buffer_size(b), \
"{} {} != {}".format(b, len(buf_bytes), get_buffer_size(b))
f.buffer.write(buf_bytes)
f.flush()
def get_dex_ty(obj):
if isinstance(obj, tuple):
return tuple(get_dex_ty(x) for x in obj)
elif isinstance(obj, np.ndarray):
base_ty = dtype_to_dex_ty(obj.dtype)
return make_tab_type(base_ty, obj.shape)
elif isinstance(obj, float):
return float
elif isinstance(obj, bool):
return bool
elif isinstance(obj, int):
return int
else:
raise Exception("No corresponding Dex type for {}".format(type(obj)))
def flatten_to_buffers(obj):
if isinstance(obj, tuple):
return tuple(it.chain(*(flatten_to_buffers(x) for x in obj)))
elif isinstance(obj, np.ndarray):
flat_array = obj.ravel()
if obj.dtype == np.bool:
return [np.asarray(flat_array, dtype=np.int64)]
else:
return [flat_array]
elif isinstance(obj, float):
return [np.array(obj, dtype=np.float64)]
elif isinstance(obj, bool):
return [np.array(obj, dtype=np.int64)]
elif isinstance(obj, int):
return [np.array(obj, dtype=np.int64)]
else:
raise Exception("No corresponding Dex type for {}".format(type(obj)))
def dtype_to_dex_ty(dtype):
if dtype == np.float64:
return float
elif dtype == np.int64:
return int
elif dtype == np.bool:
return bool
else:
raise Exception("Unrecognized dtype: " + str(dtype))
def make_tab_type(base_ty, shape):
shape = tuple(shape)
if shape == ():
return base_ty
else:
(n, *rest) = shape
return TabType(n, make_tab_type(base_ty, rest))
def get_buffer_size(array):
return array.size * 8
def pprint_ty(ty):
if isinstance(ty, TabType):
return "{}=>{}".format(str(ty.index_set), pprint_ty(ty.element_type))
elif isinstance(ty, tuple):
return "({})".format(", ".join(map(pprint_ty, ty)))
if ty is int:
return "Int"
elif ty is float:
return "Real"
elif ty is bool:
return "Bool"
else:
raise Exception("Can't print type: {}".format(ty))
def make_preheader(n):
preheader_prefix = preheader_start + str(n) + " "
padding = '-' * (preheader_length - len(preheader_prefix) - 1) + "\n"
return preheader_prefix + padding
| en | 0.88846 | # Copyright 2019 Google LLC # # Use of this source code is governed by a BSD-style # license that can be found in the LICENSE file or at # https://developers.google.com/open-source/licenses/bsd | 2.168482 | 2 |
crazyimports/sqlite/__init__.py | fossabot/crazy-imports | 0 | 6614654 | from .loader import SQLite3
| from .loader import SQLite3
| none | 1 | 1.155359 | 1 | |
Telas_Usuario/tela_de_login.py | daniel20159050454/Biblioteca | 0 | 6614655 | from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Tela_Login(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(800, 600)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(410, 150, 63, 23))
font = QtGui.QFont()
font.setPointSize(15)
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName("label")
self.layoutWidget = QtWidgets.QWidget(self.centralwidget)
self.layoutWidget.setGeometry(QtCore.QRect(311, 280, 231, 31))
self.layoutWidget.setObjectName("layoutWidget")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.layoutWidget)
self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label_3 = QtWidgets.QLabel(self.layoutWidget)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
self.horizontalLayout_2.addWidget(self.label_3)
self.senha = QtWidgets.QLineEdit(self.layoutWidget)
self.senha.setEchoMode(QtWidgets.QLineEdit.Password)
self.senha.setObjectName("senha")
self.horizontalLayout_2.addWidget(self.senha)
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(320, 230, 51, 19))
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_2.setFont(font)
self.label_2.setObjectName("label_2")
self.email_login = QtWidgets.QLineEdit(self.centralwidget)
self.email_login.setGeometry(QtCore.QRect(371, 230, 171, 25))
self.email_login.setObjectName("email_login")
self.entrar = QtWidgets.QPushButton(self.centralwidget)
self.entrar.setGeometry(QtCore.QRect(390, 350, 121, 41))
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.entrar.setFont(font)
self.entrar.setObjectName("entrar")
self.cadastrarse = QtWidgets.QPushButton(self.centralwidget)
self.cadastrarse.setGeometry(QtCore.QRect(358, 400, 181, 25))
self.cadastrarse.setObjectName("cadastrarse")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 22))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.label.setText(_translate("MainWindow", "LOGIN"))
self.label_3.setText(_translate("MainWindow", "Senha:"))
self.label_2.setText(_translate("MainWindow", "Email"))
self.entrar.setText(_translate("MainWindow", "Entrar"))
self.cadastrarse.setText(_translate("MainWindow", "Cadastre-se"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_Tela_Login()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Tela_Login(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(800, 600)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(410, 150, 63, 23))
font = QtGui.QFont()
font.setPointSize(15)
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName("label")
self.layoutWidget = QtWidgets.QWidget(self.centralwidget)
self.layoutWidget.setGeometry(QtCore.QRect(311, 280, 231, 31))
self.layoutWidget.setObjectName("layoutWidget")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.layoutWidget)
self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label_3 = QtWidgets.QLabel(self.layoutWidget)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
self.horizontalLayout_2.addWidget(self.label_3)
self.senha = QtWidgets.QLineEdit(self.layoutWidget)
self.senha.setEchoMode(QtWidgets.QLineEdit.Password)
self.senha.setObjectName("senha")
self.horizontalLayout_2.addWidget(self.senha)
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(320, 230, 51, 19))
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_2.setFont(font)
self.label_2.setObjectName("label_2")
self.email_login = QtWidgets.QLineEdit(self.centralwidget)
self.email_login.setGeometry(QtCore.QRect(371, 230, 171, 25))
self.email_login.setObjectName("email_login")
self.entrar = QtWidgets.QPushButton(self.centralwidget)
self.entrar.setGeometry(QtCore.QRect(390, 350, 121, 41))
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.entrar.setFont(font)
self.entrar.setObjectName("entrar")
self.cadastrarse = QtWidgets.QPushButton(self.centralwidget)
self.cadastrarse.setGeometry(QtCore.QRect(358, 400, 181, 25))
self.cadastrarse.setObjectName("cadastrarse")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 22))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.label.setText(_translate("MainWindow", "LOGIN"))
self.label_3.setText(_translate("MainWindow", "Senha:"))
self.label_2.setText(_translate("MainWindow", "Email"))
self.entrar.setText(_translate("MainWindow", "Entrar"))
self.cadastrarse.setText(_translate("MainWindow", "Cadastre-se"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_Tela_Login()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| none | 1 | 2.475942 | 2 | |
tries/trie.py | neerajp99/algorithms | 1 | 6614656 | """
Implementation of Trie Data Structure
"""
class TrieNode:
""" A node in the trie data structure """
def __init__(self, char):
self.char = char
# Check if the node is the end of the word and is not connected to further nodes as False initially
self.is_end = False
# Counter element to check the number of times elements are inserted into the node
self.counter = 0
# Dictionary to keep the details of all the connected nodes, empty dict initially
self.children = dict()
""" Main Trie Data Structure Class """
class Trie:
""" Constructor method """
def __init__(self):
""" Initialise the root node, as the trie has at least the root node.
The root node does not store any character.
"""
self.root = TrieNode("")
def insert(self, word):
""" Insert a word into the trie data structure """
node = self.root
# Loop over each character in the word
for char in word:
if char in node.children:
node = node.children[char]
else:
# Create a new node
new_node = TrieNode(char)
# Link the new node to the current character of the parent map
node.children[char] = new_node
node = new_node
# Mark the node as the end node
node.is_end = True
# Increment the node counter
node.counter += 1
def search(self, word):
self.output = []
node = self.root
# Iterate over
for char in word:
if char in node.children:
node = node.children[char]
else:
return []
# DFS traversal
self.dfs(node, word[:-1])
# Sort the results in reverse order and return
return sorted(self.output, key=lambda word: word[1], reverse=True)
def dfs(self, node, prefix):
"""
Depth First Search traversal
"""
if node.is_end:
self.output.append((prefix + node.char, node.counter))
for child in node.children.values():
self.dfs(child, prefix + node.char)
x = Trie()
x.insert('hello')
x.insert('hellos')
x.insert('hells')
x.insert('hallo')
x.insert('hello')
x.insert('huku')
print(x.search('he'))
| """
Implementation of Trie Data Structure
"""
class TrieNode:
""" A node in the trie data structure """
def __init__(self, char):
self.char = char
# Check if the node is the end of the word and is not connected to further nodes as False initially
self.is_end = False
# Counter element to check the number of times elements are inserted into the node
self.counter = 0
# Dictionary to keep the details of all the connected nodes, empty dict initially
self.children = dict()
""" Main Trie Data Structure Class """
class Trie:
""" Constructor method """
def __init__(self):
""" Initialise the root node, as the trie has at least the root node.
The root node does not store any character.
"""
self.root = TrieNode("")
def insert(self, word):
""" Insert a word into the trie data structure """
node = self.root
# Loop over each character in the word
for char in word:
if char in node.children:
node = node.children[char]
else:
# Create a new node
new_node = TrieNode(char)
# Link the new node to the current character of the parent map
node.children[char] = new_node
node = new_node
# Mark the node as the end node
node.is_end = True
# Increment the node counter
node.counter += 1
def search(self, word):
self.output = []
node = self.root
# Iterate over
for char in word:
if char in node.children:
node = node.children[char]
else:
return []
# DFS traversal
self.dfs(node, word[:-1])
# Sort the results in reverse order and return
return sorted(self.output, key=lambda word: word[1], reverse=True)
def dfs(self, node, prefix):
"""
Depth First Search traversal
"""
if node.is_end:
self.output.append((prefix + node.char, node.counter))
for child in node.children.values():
self.dfs(child, prefix + node.char)
x = Trie()
x.insert('hello')
x.insert('hellos')
x.insert('hells')
x.insert('hallo')
x.insert('hello')
x.insert('huku')
print(x.search('he'))
| en | 0.851736 | Implementation of Trie Data Structure A node in the trie data structure # Check if the node is the end of the word and is not connected to further nodes as False initially # Counter element to check the number of times elements are inserted into the node # Dictionary to keep the details of all the connected nodes, empty dict initially Main Trie Data Structure Class Constructor method Initialise the root node, as the trie has at least the root node. The root node does not store any character. Insert a word into the trie data structure # Loop over each character in the word # Create a new node # Link the new node to the current character of the parent map # Mark the node as the end node # Increment the node counter # Iterate over # DFS traversal # Sort the results in reverse order and return Depth First Search traversal | 4.105047 | 4 |
LearningFlask_Class/hello.py | Victa2015/probable-memory | 0 | 6614657 | from flask import Flask
from primeNumbers import prime
app = Flask(__name__)
@app.route('/')
def hundred_primes():
primN = prime()
return str(primN.get_primes(100))
return "hello"
if __name__ == "__main__":
app.run(debug=True)
| from flask import Flask
from primeNumbers import prime
app = Flask(__name__)
@app.route('/')
def hundred_primes():
primN = prime()
return str(primN.get_primes(100))
return "hello"
if __name__ == "__main__":
app.run(debug=True)
| none | 1 | 2.533693 | 3 | |
_notebooks/simpleRegressionModule.py | SolanaO/cybernated_stories | 1 | 6614658 | <reponame>SolanaO/cybernated_stories
import numpy as np
import pandas as pd
from scipy.stats import norm
import math
def sample_set(npts, min_pred, max_pred, beta_0, beta_1, standev):
'''This function will generate npts x values linearly distributed in the interval [min_pred,max_pred],
and corresponding y values according to the equation Y = beta_0 + beta_1 x + epsilon. Here
epsilon is a random variable, normally distributed with mean 0 and standard deviation standev.
The output will consist of 3 np.arrays, x_vals, y_vals and the array of pairs
of observations = [[x_1, y_1], ...]
Notation: npts = number of predictors, integer
min_pred, max_pred = smallest, largest value for the predictor series
beta_0, beta_1 = slope and intercept of the regression line
standev = standard deviation of the normal distribution of epsilon'''
# generate predictors, notice this is an np.array
x_vals = np.linspace(min_pred, max_pred, npts)
#x_vals = x_vals.round(2)
# generate responses, also an np.array
y_vals = beta_0 + beta_1 * x_vals + standev * np.random.randn(npts)
#y_vals = y_vals.round(2)
# form the array of pairs (x_i, y_i)
xy_pairs = np.stack((x_vals, y_vals), axis=-1)
# return x_vals and their corresponding y_vals, as well as the pairs
return x_vals, y_vals, xy_pairs
class SimpleLinearRegression:
"""
Compute parameters and other relevant quantities for
simple linear regression.
"""
def __init__(self, predictors, responses):
self.predictors = predictors
self.responses = responses
def mean_values(self):
"""
Find the means of predictors(x) and responses(y).
"""
self.x_bar = np.mean(self.predictors)
self.y_bar = np.mean(self.responses)
return self.x_bar, self.y_bar
def sum_squares(self):
"""
Compute the expressions Sxx, Sxy and Syy, SSM
"""
self.Sxx = sum((self.predictors - self.x_bar)**2)
self.Sxy = sum((self.predictors - self.x_bar)*(self.responses-self.y_bar))
self.Syy = sum((self.responses - self.y_bar)**2)
return self.Sxx, self.Sxy, self.Syy
def parameters(self):
"""
Compute the estimators of the coefficients: slope and intercept.
"""
self.hat_beta_1 = self.Sxy/self.Sxx
self.hat_beta_0 = self.y_bar - self.Sxy * self.x_bar / self.Sxx
return self.hat_beta_0, self.hat_beta_1
def res_sq_error(self):
"""
Compute the squared residual standard error.
"""
self.rses = (self.Syy - self.hat_beta_1 * self.Sxy)/(len(self.predictors)-2)
return self.rses, math.sqrt(self.rses)
def stat_values(self):
self.Rsquared = (self.Sxy **2) / (self.Sxx * self.Syy)
self.ssm = sum((self.hat_beta_0 + self.hat_beta_1 * self.predictors - self.y_bar)**2)
self.Fvalue = self.ssm / self.rses
return self.Rsquared, self.Fvalue
def variances(self):
"""
Compute estimators of the variances for slope and intercept.
"""
self.var_beta_0 = self.rses * (1/len(self.predictors) + (self.x_bar**2)/self.Sxx)
self.var_beta_1 = self.rses/self.Sxx
return self.var_beta_0, self.var_beta_1, math.sqrt(self.var_beta_0), math.sqrt(self.var_beta_1)
def cov_parameters(self):
"""
Compute the covariance between two parameters: slope and intercept.
"""
self.cov_hat_beta_12 = - self.x_bar**2 * self.rses/self.Sxx
return self.cov_hat_beta_12
def confidence_int_params(self, t_val):
# the endpoints for the confidence interval for hat_beta_0
self.beta_0l = self.var_beta_0 - t_val * math.sqrt(self.rses) * math.sqrt(self.var_beta_0)
self.beta_0r = self.var_beta_0 + t_val * math.sqrt(self.rses) * math.sqrt(self.var_beta_0)
# the endpoints for the confidence interval for hat_beta_1
self.beta_1l = self.var_beta_1 - t_val * math.sqrt(self.rses) * math.sqrt(self.var_beta_1)
self.beta_1r = self.var_beta_1 + t_val * math.sqrt(self.rses) * math.sqrt(self.var_beta_1)
return self.beta_0l, self.beta_0r, self.beta_1l, self.beta_1r
def confidence_int_ey(self, t_val):
"""
Compute confidence intervals for E(Y) for a series of observations (x_i, y_i)
and save the endpoints in two lists: left endpoints and the right endpoints.
"""
left_ci_points = []
right_ci_points = []
for predictor in self.predictors:
root_expression = 1/len(self.predictors) + ((predictor - self.x_bar)**2)/self.Sxx
conf_interval_ey_left = self.hat_beta_0 + \
self.hat_beta_1 * predictor - t_val * math.sqrt(self.rses * root_expression)
conf_interval_ey_right = self.hat_beta_0 + \
self.hat_beta_1 * predictor + t_val * math.sqrt(self.rses * root_expression)
left_ci_points.append(conf_interval_ey_left)
right_ci_points.append(conf_interval_ey_right)
return left_ci_points, right_ci_points
def prediction_int(self, t_val):
"""
Compute prediction intervals for a sequence of observations (x_i, y_i)
and save them in two lists: left endpoints and right endpoints.
"""
left_pred_points = []
right_pred_points = []
for predictor in self.predictors:
root_expression = 1 + 1/len(self.predictors) + ((predictor - self.x_bar)**2)/self.Sxx
pred_interval_y_left = self.hat_beta_0 + \
self.hat_beta_1 * predictor - t_val * math.sqrt(self.rses * root_expression)
pred_interval_y_right = self.hat_beta_0 + \
self.hat_beta_1 * predictor + t_val * math.sqrt(self.rses * root_expression)
left_pred_points.append(pred_interval_y_left)
right_pred_points.append(pred_interval_y_right)
return left_pred_points, right_pred_points
# set the random seed to assure reproductibility
np.random.seed(1717)
# initialize the object that creates the 9 points dataset
small_data = sample_set(9, 0, 2, 4, 3, 1.08)
# create an instance of the class that evaluates the regression quantities
lin = SimpleLinearRegression(small_data[0],small_data[1])
# compute mean values of x_i, y_i
means = lin.mean_values()
# compute Sxx, Sxy, Syy
s_sums = lin.sum_squares()
# compute slope and intercept estimators
param = lin.parameters()
# compute S^2 and S, the square of the rse and the rse
errors = lin.res_sq_error()
# compute the variances and standard errors for the slope and intercept estimators
var_err = lin.variances()
# compute the covariance between the slope and the intercept
lin.cov_parameters()
# compute Rsquared and F-value
stats = lin.stat_values()
# choose the significance level and the critical value for alpha = 0.01
t_critical = 3.499
# compute the confidence intervals for slope and intercept estimators
conf_int_params = lin.confidence_int_params(t_critical)
# compute the confidence intervals for E(Y) for points in data
confidence_intervals = lin.confidence_int_ey(t_critical)
# compute the prediction intervals for the points in data
prediction_intervals = lin.prediction_int(t_critical)
| import numpy as np
import pandas as pd
from scipy.stats import norm
import math
def sample_set(npts, min_pred, max_pred, beta_0, beta_1, standev):
'''This function will generate npts x values linearly distributed in the interval [min_pred,max_pred],
and corresponding y values according to the equation Y = beta_0 + beta_1 x + epsilon. Here
epsilon is a random variable, normally distributed with mean 0 and standard deviation standev.
The output will consist of 3 np.arrays, x_vals, y_vals and the array of pairs
of observations = [[x_1, y_1], ...]
Notation: npts = number of predictors, integer
min_pred, max_pred = smallest, largest value for the predictor series
beta_0, beta_1 = slope and intercept of the regression line
standev = standard deviation of the normal distribution of epsilon'''
# generate predictors, notice this is an np.array
x_vals = np.linspace(min_pred, max_pred, npts)
#x_vals = x_vals.round(2)
# generate responses, also an np.array
y_vals = beta_0 + beta_1 * x_vals + standev * np.random.randn(npts)
#y_vals = y_vals.round(2)
# form the array of pairs (x_i, y_i)
xy_pairs = np.stack((x_vals, y_vals), axis=-1)
# return x_vals and their corresponding y_vals, as well as the pairs
return x_vals, y_vals, xy_pairs
class SimpleLinearRegression:
"""
Compute parameters and other relevant quantities for
simple linear regression.
"""
def __init__(self, predictors, responses):
self.predictors = predictors
self.responses = responses
def mean_values(self):
"""
Find the means of predictors(x) and responses(y).
"""
self.x_bar = np.mean(self.predictors)
self.y_bar = np.mean(self.responses)
return self.x_bar, self.y_bar
def sum_squares(self):
"""
Compute the expressions Sxx, Sxy and Syy, SSM
"""
self.Sxx = sum((self.predictors - self.x_bar)**2)
self.Sxy = sum((self.predictors - self.x_bar)*(self.responses-self.y_bar))
self.Syy = sum((self.responses - self.y_bar)**2)
return self.Sxx, self.Sxy, self.Syy
def parameters(self):
"""
Compute the estimators of the coefficients: slope and intercept.
"""
self.hat_beta_1 = self.Sxy/self.Sxx
self.hat_beta_0 = self.y_bar - self.Sxy * self.x_bar / self.Sxx
return self.hat_beta_0, self.hat_beta_1
def res_sq_error(self):
"""
Compute the squared residual standard error.
"""
self.rses = (self.Syy - self.hat_beta_1 * self.Sxy)/(len(self.predictors)-2)
return self.rses, math.sqrt(self.rses)
def stat_values(self):
self.Rsquared = (self.Sxy **2) / (self.Sxx * self.Syy)
self.ssm = sum((self.hat_beta_0 + self.hat_beta_1 * self.predictors - self.y_bar)**2)
self.Fvalue = self.ssm / self.rses
return self.Rsquared, self.Fvalue
def variances(self):
"""
Compute estimators of the variances for slope and intercept.
"""
self.var_beta_0 = self.rses * (1/len(self.predictors) + (self.x_bar**2)/self.Sxx)
self.var_beta_1 = self.rses/self.Sxx
return self.var_beta_0, self.var_beta_1, math.sqrt(self.var_beta_0), math.sqrt(self.var_beta_1)
def cov_parameters(self):
"""
Compute the covariance between two parameters: slope and intercept.
"""
self.cov_hat_beta_12 = - self.x_bar**2 * self.rses/self.Sxx
return self.cov_hat_beta_12
def confidence_int_params(self, t_val):
# the endpoints for the confidence interval for hat_beta_0
self.beta_0l = self.var_beta_0 - t_val * math.sqrt(self.rses) * math.sqrt(self.var_beta_0)
self.beta_0r = self.var_beta_0 + t_val * math.sqrt(self.rses) * math.sqrt(self.var_beta_0)
# the endpoints for the confidence interval for hat_beta_1
self.beta_1l = self.var_beta_1 - t_val * math.sqrt(self.rses) * math.sqrt(self.var_beta_1)
self.beta_1r = self.var_beta_1 + t_val * math.sqrt(self.rses) * math.sqrt(self.var_beta_1)
return self.beta_0l, self.beta_0r, self.beta_1l, self.beta_1r
def confidence_int_ey(self, t_val):
"""
Compute confidence intervals for E(Y) for a series of observations (x_i, y_i)
and save the endpoints in two lists: left endpoints and the right endpoints.
"""
left_ci_points = []
right_ci_points = []
for predictor in self.predictors:
root_expression = 1/len(self.predictors) + ((predictor - self.x_bar)**2)/self.Sxx
conf_interval_ey_left = self.hat_beta_0 + \
self.hat_beta_1 * predictor - t_val * math.sqrt(self.rses * root_expression)
conf_interval_ey_right = self.hat_beta_0 + \
self.hat_beta_1 * predictor + t_val * math.sqrt(self.rses * root_expression)
left_ci_points.append(conf_interval_ey_left)
right_ci_points.append(conf_interval_ey_right)
return left_ci_points, right_ci_points
def prediction_int(self, t_val):
"""
Compute prediction intervals for a sequence of observations (x_i, y_i)
and save them in two lists: left endpoints and right endpoints.
"""
left_pred_points = []
right_pred_points = []
for predictor in self.predictors:
root_expression = 1 + 1/len(self.predictors) + ((predictor - self.x_bar)**2)/self.Sxx
pred_interval_y_left = self.hat_beta_0 + \
self.hat_beta_1 * predictor - t_val * math.sqrt(self.rses * root_expression)
pred_interval_y_right = self.hat_beta_0 + \
self.hat_beta_1 * predictor + t_val * math.sqrt(self.rses * root_expression)
left_pred_points.append(pred_interval_y_left)
right_pred_points.append(pred_interval_y_right)
return left_pred_points, right_pred_points
# set the random seed to assure reproductibility
np.random.seed(1717)
# initialize the object that creates the 9 points dataset
small_data = sample_set(9, 0, 2, 4, 3, 1.08)
# create an instance of the class that evaluates the regression quantities
lin = SimpleLinearRegression(small_data[0],small_data[1])
# compute mean values of x_i, y_i
means = lin.mean_values()
# compute Sxx, Sxy, Syy
s_sums = lin.sum_squares()
# compute slope and intercept estimators
param = lin.parameters()
# compute S^2 and S, the square of the rse and the rse
errors = lin.res_sq_error()
# compute the variances and standard errors for the slope and intercept estimators
var_err = lin.variances()
# compute the covariance between the slope and the intercept
lin.cov_parameters()
# compute Rsquared and F-value
stats = lin.stat_values()
# choose the significance level and the critical value for alpha = 0.01
t_critical = 3.499
# compute the confidence intervals for slope and intercept estimators
conf_int_params = lin.confidence_int_params(t_critical)
# compute the confidence intervals for E(Y) for points in data
confidence_intervals = lin.confidence_int_ey(t_critical)
# compute the prediction intervals for the points in data
prediction_intervals = lin.prediction_int(t_critical) | en | 0.762367 | This function will generate npts x values linearly distributed in the interval [min_pred,max_pred], and corresponding y values according to the equation Y = beta_0 + beta_1 x + epsilon. Here epsilon is a random variable, normally distributed with mean 0 and standard deviation standev. The output will consist of 3 np.arrays, x_vals, y_vals and the array of pairs of observations = [[x_1, y_1], ...] Notation: npts = number of predictors, integer min_pred, max_pred = smallest, largest value for the predictor series beta_0, beta_1 = slope and intercept of the regression line standev = standard deviation of the normal distribution of epsilon # generate predictors, notice this is an np.array #x_vals = x_vals.round(2) # generate responses, also an np.array #y_vals = y_vals.round(2) # form the array of pairs (x_i, y_i) # return x_vals and their corresponding y_vals, as well as the pairs Compute parameters and other relevant quantities for simple linear regression. Find the means of predictors(x) and responses(y). Compute the expressions Sxx, Sxy and Syy, SSM Compute the estimators of the coefficients: slope and intercept. Compute the squared residual standard error. Compute estimators of the variances for slope and intercept. Compute the covariance between two parameters: slope and intercept. # the endpoints for the confidence interval for hat_beta_0 # the endpoints for the confidence interval for hat_beta_1 Compute confidence intervals for E(Y) for a series of observations (x_i, y_i) and save the endpoints in two lists: left endpoints and the right endpoints. Compute prediction intervals for a sequence of observations (x_i, y_i) and save them in two lists: left endpoints and right endpoints. 
# set the random seed to assure reproductibility # initialize the object that creates the 9 points dataset # create an instance of the class that evaluates the regression quantities # compute mean values of x_i, y_i # compute Sxx, Sxy, Syy # compute slope and intercept estimators # compute S^2 and S, the square of the rse and the rse # compute the variances and standard errors for the slope and intercept estimators # compute the covariance between the slope and the intercept # compute Rsquared and F-value # choose the significance level and the critical value for alpha = 0.01 # compute the confidence intervals for slope and intercept estimators # compute the confidence intervals for E(Y) for points in data # compute the prediction intervals for the points in data | 3.69547 | 4 |
python/scikitlearn/survivaltest/scripts/kaggleinspired.py | jdurbin/sandbox | 0 | 6614659 | #!/usr/bin/env python
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import train_test_split
pd.set_option('display.width', 1000)
# Read data
expression = pd.read_csv("../data/vijver2002.tab",delimiter="\t")
expression = expression.transpose()
print expression
print "Expression Shape:",expression.shape
print "Expression[0]:\n",expression.iloc[0] # This is the text heading
print "Expression[1]:\n",expression.iloc[1] # This is the first numeric row
print "Expression[295]:\n",expression.iloc[295] # This is the last row
print expression.values # This includes the first row of names
# Read metadata
metadata = pd.read_csv("../data/vijver2002.clinical.t.tab",delimiter="\t")
print metadata.head(10)
print "Metadata shape:",metadata.shape # 295 x 16
# numpy array way to combine columns, output is numpy array
#survival = np.c_[metadata['ID'],metadata['TIMEsurvival']]
survival = pd.DataFrame(metadata,columns = ['ID','TIMEsurvival'])
print survival # dataframe
print "Survival shape:",survival.shape
print "expression values: ",expression.values[1:,:] # cut out column headings
print "survival.values: ",survival.values[:,1:] # cut out row labels
# Split data into test and train datasets
exp_train,exp_test,surv_train,surv_test = train_test_split(expression.values[1:,:],
survival.values[:,1:],
train_size=0.8)
print "EXP TRAIN TYPE:",type(exp_train)
print "EXP TRAIN SHAPE:",exp_train.shape # (236,9803)
#print exp_test.shape # (59,9803)
print "EXP TRAIN: \n",exp_train
print "SURV TRAIN SHAPE: ",surv_train.shape #(236,1)
print "SURV TRAIN RAVEL SHAPE: ",surv_train.ravel().shape #(236,)
print "SURV TRAIN TYPE: ",type(surv_train) # numpy.ndarray
print "SURV TRAIN: \n",surv_train
model = RandomForestClassifier(n_estimators = 100)
model = model.fit(exp_train,surv_train.ravel())
output = model.predict(exp_test)
print "OUTPUT:\n",output
print "OUTPUT TYPE:",type(output) # numpy.ndarray
print "OUTPUT SHAPE:",output.shape
print "surv_test:\n",surv_test
# So this outputs some kind of numeric value. I don't know where it comes from in a
# RandomForest. Perhaps it treated it as a multi-value prediction... let's see if the numbers
# in the output are in the input...
# output size: 59
# intersection size: 49
print "INTERSCTION of OUTPUT and surv_train:\n",np.intersect1d(output,surv_train)
print "INTERSECTION shape:\n",np.intersect1d(output,surv_train).shape
# So, I think it's pretty clea that it's just a multi-class classifier using these real numbers
# as 59 different output classes. | #!/usr/bin/env python
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import train_test_split
pd.set_option('display.width', 1000)
# Read data
expression = pd.read_csv("../data/vijver2002.tab",delimiter="\t")
expression = expression.transpose()
print expression
print "Expression Shape:",expression.shape
print "Expression[0]:\n",expression.iloc[0] # This is the text heading
print "Expression[1]:\n",expression.iloc[1] # This is the first numeric row
print "Expression[295]:\n",expression.iloc[295] # This is the last row
print expression.values # This includes the first row of names
# Read metadata
metadata = pd.read_csv("../data/vijver2002.clinical.t.tab",delimiter="\t")
print metadata.head(10)
print "Metadata shape:",metadata.shape # 295 x 16
# numpy array way to combine columns, output is numpy array
#survival = np.c_[metadata['ID'],metadata['TIMEsurvival']]
survival = pd.DataFrame(metadata,columns = ['ID','TIMEsurvival'])
print survival # dataframe
print "Survival shape:",survival.shape
print "expression values: ",expression.values[1:,:] # cut out column headings
print "survival.values: ",survival.values[:,1:] # cut out row labels
# Split data into test and train datasets
exp_train,exp_test,surv_train,surv_test = train_test_split(expression.values[1:,:],
survival.values[:,1:],
train_size=0.8)
print "EXP TRAIN TYPE:",type(exp_train)
print "EXP TRAIN SHAPE:",exp_train.shape # (236,9803)
#print exp_test.shape # (59,9803)
print "EXP TRAIN: \n",exp_train
print "SURV TRAIN SHAPE: ",surv_train.shape #(236,1)
print "SURV TRAIN RAVEL SHAPE: ",surv_train.ravel().shape #(236,)
print "SURV TRAIN TYPE: ",type(surv_train) # numpy.ndarray
print "SURV TRAIN: \n",surv_train
model = RandomForestClassifier(n_estimators = 100)
model = model.fit(exp_train,surv_train.ravel())
output = model.predict(exp_test)
print "OUTPUT:\n",output
print "OUTPUT TYPE:",type(output) # numpy.ndarray
print "OUTPUT SHAPE:",output.shape
print "surv_test:\n",surv_test
# So this outputs some kind of numeric value. I don't know where it comes from in a
# RandomForest. Perhaps it treated it as a multi-value prediction... let's see if the numbers
# in the output are in the input...
# output size: 59
# intersection size: 49
print "INTERSCTION of OUTPUT and surv_train:\n",np.intersect1d(output,surv_train)
print "INTERSECTION shape:\n",np.intersect1d(output,surv_train).shape
# So, I think it's pretty clea that it's just a multi-class classifier using these real numbers
# as 59 different output classes. | en | 0.794451 | #!/usr/bin/env python # Read data # This is the text heading # This is the first numeric row # This is the last row # This includes the first row of names # Read metadata # 295 x 16 # numpy array way to combine columns, output is numpy array #survival = np.c_[metadata['ID'],metadata['TIMEsurvival']] # dataframe # cut out column headings # cut out row labels # Split data into test and train datasets # (236,9803) #print exp_test.shape # (59,9803) #(236,1) #(236,) # numpy.ndarray # numpy.ndarray # So this outputs some kind of numeric value. I don't know where it comes from in a # RandomForest. Perhaps it treated it as a multi-value prediction... let's see if the numbers # in the output are in the input... # output size: 59 # intersection size: 49 # So, I think it's pretty clea that it's just a multi-class classifier using these real numbers # as 59 different output classes. | 2.939229 | 3 |
Contents/Code/agents/ave.py | Xavier-Lam/JAV.bundle | 3 | 6614660 | <filename>Contents/Code/agents/ave.py
# coding=utf-8
import datetime
import re
from bs4 import BeautifulSoup
import requests
from .base import Base
class AVE(Base):
name = "AVEntertainments"
def get_results(self, media):
rv = []
movie_id = self.get_local_id(media)
if movie_id:
if movie_id.lower().startswith("red-"):
movie_id = movie_id.lower().replace("red-", "red")
rv.extend(self.get_results_by_keyword(movie_id))
else:
vol_ids = self.get_volumn_id(media)
if vol_ids:
for vol_id in vol_ids:
rv.extend(self.get_results_by_keyword(vol_id))
rv.extend(self.get_results_by_keyword(media.name))
return rv
def get_results_by_keyword(self, keyword):
url = "https://www.aventertainments.com/search_Products.aspx"
params = {
"languageId": "2",
"dept_id": "29",
"keyword": keyword,
"searchby": "keyword"
}
resp = requests.get(url, params=params)
resp.raise_for_status()
html = resp.content.decode("utf-8")
soup = BeautifulSoup(html, "html.parser")
wrap = soup.find("div", "shop-product-wrap")
products = wrap.findAll("div", "grid-view-product")
rv = []
for product in products:
title_ele = product.find("p", "product-title").find("a")
url = title_ele["href"]
match = re.search("product_id=(\d+)", url)
rv.append({
"id": self.name + "." + match.group(1),
"name": title_ele.text.strip(),
"lang": self.lang,
"score": 100,
"thumb": product.find("div", "single-slider-product__image").find("img")["src"]
})
return rv
def is_match(self, media):
meta_id = getattr(media, "metadata_id", "")
if meta_id:
return meta_id.startswith(self.name + ".")
else:
return bool(self.get_local_id(media)
or self.get_volumn_id(media))
def get_id(self, media, data=None):
if data:
return self.find_ele(data, "商品番号").text.strip()
return self.get_local_id(media)
def get_local_id(self, media):
pattern = r"(?:^|\s|\[|\(|\.|\\|\/)([a-z\d]+[-][a-z\d]+)(?:$|\s|\]|\)|\.)"
if hasattr(media, "name"):
match = re.search(pattern, media.name, re.I)
if match:
return match.group(1)
filename = media.items[0].parts[0].file.lower()
match = re.search(pattern, filename)
if match:
return match.group(1)
def get_volumn_id(self, media):
filename = media.items[0].parts[0].file.lower()
pattern = r"vol\s*\.?\s*(\d+)"
match = re.search(pattern, filename)
rv = []
if match:
vol = int(match.group(1))
rv.append("Vol." + str(vol))
if vol < 100:
rv.append("Vol.0" + str(vol))
return rv
def get_title_sort(self, media, data):
return self.get_title(media, data)
def get_studio(self, media, data):
return self.find_ele(data, "スタジオ").text.strip()
def crawl(self, media):
url = "https://www.aventertainments.com/product_lists.aspx"
resp = requests.get(url, params={
"product_id": media.metadata_id.split(".")[1],
"languageID": 2,
"dept_id": "29"
})
resp.raise_for_status()
html = resp.content.decode("utf-8")
return BeautifulSoup(html, "html.parser")
def get_original_title(self, media, data):
return "[{0}] {1}".format(
self.get_id(media, data),
data.find("div", "section-title").find("h3").text.strip()
)
def get_originally_available_at(self, media, data):
ele = self.find_ele(data, "発売日")
if ele:
dt_str = ele.text.strip()
match = re.search("\d+/\d+/\d+", dt_str)
try:
if match:
return datetime.datetime.strptime(match.group(0), "%m/%d/%Y")
except ValueError:
pass
def get_roles(self, media, data):
ele = self.find_ele(data, "主演女優")
if ele:
return [
item.text.strip()
for item in ele.findAll("a")
]
return []
def get_duration(self, media, data):
ele = self.find_ele(data, "収録時間")
if ele:
match = re.search("\d+", ele.text)
if match:
return int(match.group(0))*60*1000
def get_collections(self, media, data):
rv = []
studio = self.get_studio(media, data)
if studio:
rv.append(studio)
series = self.find_ele(data, "シリーズ")
if series:
rv.append(series.text.strip())
return rv
def get_genres(self, media, data):
ele = self.find_ele(data, "カテゴリ")
if ele:
return [ele.text.strip() for ele in ele.findAll("a")]
return []
def get_summary(self, media, data):
ele = data.find("div", "product-description")
if ele:
return ele.text.strip()
def get_posters(self, media, data):
thumbs = self.get_thumbs(media, data)
return [
thumb.replace("bigcover", "jacket_images")
for thumb in thumbs
]
def get_thumbs(self, media, data):
ele = data.find("div", {"id": "PlayerCover"})
if ele:
return [
ele.find("img")["src"]
]
return []
def find_ele(self, data, title):
single_infos = data.findAll("div", "single-info")
for single_info in single_infos:
if single_info.find("span", "title").text.strip() == title:
return single_info.find("span", "title").findNext("span")
| <filename>Contents/Code/agents/ave.py
# coding=utf-8
import datetime
import re
from bs4 import BeautifulSoup
import requests
from .base import Base
class AVE(Base):
name = "AVEntertainments"
def get_results(self, media):
rv = []
movie_id = self.get_local_id(media)
if movie_id:
if movie_id.lower().startswith("red-"):
movie_id = movie_id.lower().replace("red-", "red")
rv.extend(self.get_results_by_keyword(movie_id))
else:
vol_ids = self.get_volumn_id(media)
if vol_ids:
for vol_id in vol_ids:
rv.extend(self.get_results_by_keyword(vol_id))
rv.extend(self.get_results_by_keyword(media.name))
return rv
def get_results_by_keyword(self, keyword):
url = "https://www.aventertainments.com/search_Products.aspx"
params = {
"languageId": "2",
"dept_id": "29",
"keyword": keyword,
"searchby": "keyword"
}
resp = requests.get(url, params=params)
resp.raise_for_status()
html = resp.content.decode("utf-8")
soup = BeautifulSoup(html, "html.parser")
wrap = soup.find("div", "shop-product-wrap")
products = wrap.findAll("div", "grid-view-product")
rv = []
for product in products:
title_ele = product.find("p", "product-title").find("a")
url = title_ele["href"]
match = re.search("product_id=(\d+)", url)
rv.append({
"id": self.name + "." + match.group(1),
"name": title_ele.text.strip(),
"lang": self.lang,
"score": 100,
"thumb": product.find("div", "single-slider-product__image").find("img")["src"]
})
return rv
def is_match(self, media):
meta_id = getattr(media, "metadata_id", "")
if meta_id:
return meta_id.startswith(self.name + ".")
else:
return bool(self.get_local_id(media)
or self.get_volumn_id(media))
def get_id(self, media, data=None):
if data:
return self.find_ele(data, "商品番号").text.strip()
return self.get_local_id(media)
def get_local_id(self, media):
pattern = r"(?:^|\s|\[|\(|\.|\\|\/)([a-z\d]+[-][a-z\d]+)(?:$|\s|\]|\)|\.)"
if hasattr(media, "name"):
match = re.search(pattern, media.name, re.I)
if match:
return match.group(1)
filename = media.items[0].parts[0].file.lower()
match = re.search(pattern, filename)
if match:
return match.group(1)
def get_volumn_id(self, media):
filename = media.items[0].parts[0].file.lower()
pattern = r"vol\s*\.?\s*(\d+)"
match = re.search(pattern, filename)
rv = []
if match:
vol = int(match.group(1))
rv.append("Vol." + str(vol))
if vol < 100:
rv.append("Vol.0" + str(vol))
return rv
def get_title_sort(self, media, data):
return self.get_title(media, data)
def get_studio(self, media, data):
return self.find_ele(data, "スタジオ").text.strip()
def crawl(self, media):
url = "https://www.aventertainments.com/product_lists.aspx"
resp = requests.get(url, params={
"product_id": media.metadata_id.split(".")[1],
"languageID": 2,
"dept_id": "29"
})
resp.raise_for_status()
html = resp.content.decode("utf-8")
return BeautifulSoup(html, "html.parser")
def get_original_title(self, media, data):
return "[{0}] {1}".format(
self.get_id(media, data),
data.find("div", "section-title").find("h3").text.strip()
)
def get_originally_available_at(self, media, data):
ele = self.find_ele(data, "発売日")
if ele:
dt_str = ele.text.strip()
match = re.search("\d+/\d+/\d+", dt_str)
try:
if match:
return datetime.datetime.strptime(match.group(0), "%m/%d/%Y")
except ValueError:
pass
def get_roles(self, media, data):
ele = self.find_ele(data, "主演女優")
if ele:
return [
item.text.strip()
for item in ele.findAll("a")
]
return []
def get_duration(self, media, data):
ele = self.find_ele(data, "収録時間")
if ele:
match = re.search("\d+", ele.text)
if match:
return int(match.group(0))*60*1000
def get_collections(self, media, data):
rv = []
studio = self.get_studio(media, data)
if studio:
rv.append(studio)
series = self.find_ele(data, "シリーズ")
if series:
rv.append(series.text.strip())
return rv
def get_genres(self, media, data):
ele = self.find_ele(data, "カテゴリ")
if ele:
return [ele.text.strip() for ele in ele.findAll("a")]
return []
def get_summary(self, media, data):
ele = data.find("div", "product-description")
if ele:
return ele.text.strip()
def get_posters(self, media, data):
thumbs = self.get_thumbs(media, data)
return [
thumb.replace("bigcover", "jacket_images")
for thumb in thumbs
]
def get_thumbs(self, media, data):
ele = data.find("div", {"id": "PlayerCover"})
if ele:
return [
ele.find("img")["src"]
]
return []
def find_ele(self, data, title):
single_infos = data.findAll("div", "single-info")
for single_info in single_infos:
if single_info.find("span", "title").text.strip() == title:
return single_info.find("span", "title").findNext("span")
| en | 0.644078 | # coding=utf-8 | 2.748465 | 3 |
explanation/book_class.py | Daniel1404/Python-multiplication-table-app-with-OPP | 0 | 6614661 | class Book:
def __init__(self, title, color):
self.title = title
self.color = color
# Instance objects of Book class
blue_book = Book("The blue kid", "Blue")
green_book = Book("The frog story", "Green")
# Printing the type of the books
print(type(blue_book))
# <class '__main__.Book'>
print(type(green_book))
# <class '__main__.Book'> | class Book:
def __init__(self, title, color):
self.title = title
self.color = color
# Instance objects of Book class
blue_book = Book("The blue kid", "Blue")
green_book = Book("The frog story", "Green")
# Printing the type of the books
print(type(blue_book))
# <class '__main__.Book'>
print(type(green_book))
# <class '__main__.Book'> | en | 0.617098 | # Instance objects of Book class # Printing the type of the books # <class '__main__.Book'> # <class '__main__.Book'> | 3.997192 | 4 |
pySMARTS/main.py | NREL/pySMARTS | 5 | 6614662 | # -*- coding: utf-8 -*-
"""
The ``smarts`` module contains functions for calling SMARTS: Simple Model of the
Atmoshperic Radiative Transfer of Sunshine, from NREL, developed by
Dr. <NAME>.
SMARTS software can be obtained from:
https://www.nrel.gov/grid/solar-resource/smarts.html
Users will be responsible to obtain a copy of SMARTS from NREL,
honor it’s license, and download the SMART files into their PVLib folder.
This wrapper is shared under a BSD-3-Clause License, and was
originally coded in Matlab by <NAME> (2001), updated and ported to python
by <NAME> (2019-2020). Original Matlab wrapper was made for graduate studies
at the University of Arizona, python porting by NREL.
Please read the license and Readme files for more information, proper use, citing, and copyrights.
"""
def IOUT_to_code(IOUT):
r''' Function to display the options of outputs that SMARTS has.
If run without input (IOUT = None), it prints in a list all possible outputs.
If IOUT is passed to equal one of the outputs (i.e.
(i.e. IOUT = 'Global horizontal irradiance W m-2'), it returns the
code number for that output (returns '4' for this example).
PARAMETERS
-----------
IOUT: String
Can be None or a SMARTS output description
RETURNS
-------
IOUT_Key: String
Key code to SMARTS cards input.
'''
IOUT_map = { 'Extraterrestrial spectrum W m-2': '1',
'Direct normal irradiance W m-2': '2',
'Diffuse horizontal irradiance W m-2': '3',
'Global horizontal irradiance W m-2': '4',
'Direct horizontal irradiance W m-2': '5',
'Direct tilted irradiance W m-2': '6',
'Diffuse tilted irradiance W m-2': '7',
'Global tilted irradiance W m-2': '8',
'Experimental direct normal irradiance (with circumsolar) W m-2': '9',
'Experimental diffuse horizontal irradiance W m-2': '10',
'Circumsolar irradiance within radiometer field of view W m-2': '11',
'Global tilted photon flux per wavelength cm-2 s-1 nm-1': '12*',
'Direct normal photon flux per wavelength cm-2 s-1 nm-1': '13',
'Diffuse horizontal photon flux per wavelength cm-2 s-1 nm-1': '14',
'Rayleigh transmittance': '15',
'Ozone transmittance': '16',
'Transmittance from all trace gases': '17',
'Water vapor transmittance': '18',
'Mixed gas transmittance': '19',
'Aerosol transmittance': '20',
'Beam radiation transmittance': '21',
'Rayleigh optical thickness': '22',
'Ozone optical thickness': '23',
'Optical thickness from all trace gases': '24',
'Water vapor optical thickness': '25',
'Mixed gas optical thickness': '26',
'Aerosol optical thickness': '27',
'Aerosol single scattering albedo': '28',
'Aerosol asymmetry factor': '29',
'Zonal surface reflectance': '30',
'Local ground reflectance': '31',
'Atmospheric reflectance': '32',
'Global foreground reflected irradiance on tilted surface W m-2': '33*',
'Upward hemispheric ground-reflected irradiance W m-2': '34*',
'Global horizontal photosynthetic photon flux ?mol m-2 s-1 nm-1': '35*',
'Direct normal photosynthetic photon flux ?mol m-2 s-1 nm-1': '36*',
'Diffuse horizontal photosynthetic photon flux ?mol m-2 s-1 nm-1': '37*',
'Global tilted photosynthetic photon flux ?mol m-2 s-1 nm-1': '38*',
'Spectral photonic energy eV': '39*',
'Global horizontal photon flux per eV cm-2 s-1 eV-1': '40*',
'Direct normal photon flux per eV cm-2 s-1 eV-1': '41*',
'Diffuse horizontal photon flux per eV cm-2 s-1 eV-1': '42*',
'Global tilted photon flux per eV cm-2 s-1 eV-1': '43*'
}
if not IOUT:
return list(IOUT_map.keys())
if IOUT not in IOUT_map:
print(f"Unknown output specified: '{IOUT}'")
return None
return IOUT_map.get(IOUT)
def _material_to_code(material):
# Comments include Description, File name(.DAT extension), Reflection, Type*, Spectral range(um), Category*
# *KEYS: L Lambertian, NL Non-Lambertian, SP Specular, M Manmade materials, S Soils and rocks, U User defined, V Vegetation, W Water, snow, or ice
material_map = { 'UsrLamb': '0', # User-defined spectral reflectance Albedo L Userdefined
'UsrNLamb': '1', # User-defined spectral reflectance Albedo NL Userdefined
'Water': '2', # Water or calm ocean (calculated) SP 0.28 4.0 W
'Snow': '3', # Fresh dry snow Snow NL 0.3 2.48 W
'Neve': '4', # Snow on a mountain neve Neve NL 0.45 1.65 W
'Basalt': '5', # Basalt rock Basalt NL 0.3 2.48 S
'Dry_sand': '6', # Dry sand Dry_sand NL 0.32 0.99 S
'WiteSand': '7', # Sand from White Sands, NM WiteSand NL 0.5 2.48 S
'Soil': '8', # Bare soil Soil NL 0.28 4.0 S
'Dry_clay': '9', # Dry clay soil Dry_clay NL 0.5 2.48 S
'Wet_clay': '10', # Wet clay soil Wet_clay NL 0.5 2.48 S
'Alfalfa': '11', # Alfalfa Alfalfa NL 0.3 0.8 V
'Grass': '12', # Green grass Grass NL 0.3 1.19 V
'RyeGrass': '13', # Perennial rye grass RyeGrass NL 0.44 2.28 V
'Meadow1': '14', # Alpine meadow Meadow1 NL 0.4 0.85 V
'Meadow2': '15', # Lush meadow Meadow2 NL 0.4 0.9 V
'Wheat': '16', # Wheat crop Wheat NL 0.42 2.26 V
'PineTree': '17', # Ponderosa pine tree PineTree NL 0.34 2.48 V
'Concrete': '18', # Concrete slab Concrete NL 0.3 1.3 M
'BlckLoam': '19', # Black loam BlckLoam NL 0.4 4.0 S
'BrwnLoam': '20', # Brown loam BrwnLoam NL 0.4 4.0 S
'BrwnSand': '21', # Brown sand BrwnSand NL 0.4 4.0 S
'Conifers': '22', # Conifer trees Conifers NL 0.302 4.0 V
'DarkLoam': '23', # Dark loam DarkLoam NL 0.46-4.0 S
'DarkSand': '24', # Dark sand DarkSand NL 0.4 4.0 S
'Decidous': '25', # Decidous trees Decidous NL 0.302 4.0 V
'DryGrass': '26', # Dry grass (sod) DryGrass NL 0.38 4.0 V
'DuneSand': '27', # Dune sand DuneSand NL 0.4 4.0 S
'FineSnow': '28', # Fresh fine snow FineSnow NL 0.3 4.0 W
'GrnGrass': '29', # Green rye grass (sod) GrnGrass NL 0.302 4.0 V
'GrnlSnow': '30', # Granular snow GrnlSnow NL 0.3 4.0 W
'LiteClay': '31', # Light clay LiteClay NL 0.4 4.0 S
'LiteLoam': '32', # Light loam LiteLoam NL 0.431 4.0 S
'LiteSand': '33', # Light sand LiteSand NL 0.4 4.0 S
'PaleLoam': '34', # Pale loam PaleLoam NL 0.4 4.0 S
'Seawater': '35', # Sea water Seawater NL 2.079 4.0 W
'SolidIce': '36', # Solid ice SolidIce NL 0.3 4.0 W
'Dry_Soil': '37', # Dry soil Dry_Soil NL 0.28 4.0 S
'LiteSoil': '38', # Light soil LiteSoil NL 0.28 4.0 S
'RConcrte': '39', # Old runway concrete RConcrte NL 0.3 4.0 M
'RoofTile': '40', # Terracota roofing clay tile RoofTile NL 0.3 4.0 M
'RedBrick': '41', # Red construction brick RedBrick NL 0.3 4.0 M
'Asphalt': '42', # Old runway asphalt Asphalt NL 0.3 4.0 M
'TallCorn': '43', # Tall green corn TallCorn NL 0.36-1.0 V
'SndGravl': '44', # Sand & gravel SndGravl NL 0.45-1.04 S
'Fallow': '45', # Fallow field Fallow NL 0.32-1.19 S
'Birch': '46', # Birch leaves Birch NL 0.36-2.48 V
'WetSoil': '47', # Wet sandy soil WetSSoil NL 0.48-2.48 S
'Gravel': '48', # Gravel Gravel NL 0.32-1.3 S
'WetClay2': '49', # Wet red clay WetClay2 NL 0.52-2.48 S
'WetSilt': '50', # Wet silt WetSilt NL 0.52-2.48 S
'LngGrass': '51', # Dry long grass LngGrass NL 0.277-2.976 V
'LwnGrass': '52', # Lawn grass (generic bluegrass) LwnGrass NL 0.305-2.944 V
'OakTree': '53', # Deciduous oak tree leaves OakTree NL 0.35-2.5 V
'Pinion': '54', # Pinion pinetree needles Pinion NL 0.301-2.592 V
'MeltSnow': '55', # Melting snow (slush) MeltSnow NL 0.35-2.5 W
'Plywood': '56', # Plywood sheet (new, pine, 4-ply) Plywood NL 0.35-2.5 M
'WiteVinl': '57', # White vinyl plastic sheet, 0.15 mm WiteVinl NL 0.35-2.5 M
'FibrGlss': '58', # Clear fiberglass greenhouse roofing FibrGlss NL 0.35-2.5 M
'ShtMetal': '59', # Galvanized corrugated sheet metal, new ShtMetal NL 0.35-2.5 M
'Wetland': '60', # Wetland vegetation canopy, Yellowstone Wetland NL 0.409-2.478 V
'SageBrsh': '61', # Sagebrush canopy, Yellowstone SageBrsh NL 0.409-2.478 V
'FirTrees': '62', # Fir trees, Colorado FirTrees NL 0.353-2.592 V
'CSeaWatr': '63', # Coastal seawater, Pacific CSeaWatr NL 0.277-2.976 W
'OSeaWatr': '64', # Open ocean seawater, Atlantic OSeaWatr NL 0.277-2.976 W
'GrazingField':'65', # Grazing field (unfertilized) GrazingField NL 0.401-2.499 V
'Spruce': '66' # Young Norway spruce tree (needles) Spruce NL 0.39-0.845 V
}
if not material:
return material_map.keys()
if material not in material_map:
print(f"Unknown material specified: '{material}'")
return None
return material_map.get(material)
def SMARTSTimeLocation(IOUT, YEAR, MONTH, DAY, HOUR, LATIT, LONGIT, ALTIT, ZONE,
                       material='LiteSoil', min_wvl='280', max_wvl='4000'):
    r'''
    Run a SMARTS spectral simulation for a given date, time, and location.

    Builds the full set of SMARTS input cards (Cards 1-17) using fixed
    defaults for the atmosphere, aerosols, and turbidity, derives the solar
    position from the supplied date/time and coordinates (IMASS = 3), and
    calls ``_smartsAll`` to execute the model.

    Parameters
    ----------
    IOUT : string
        Space-separated list of SMARTS output variable codes (Card 12c).
    YEAR : string
        Year.
    MONTH : string
        Month.
    DAY : string
        Day.
    HOUR : string
        Hour, in 24 hour format.
    LATIT : string
        Latitude of the location.
    LONGIT : string
        Longitude of the location.
    ALTIT : string
        Elevation of the ground surface above sea level [km].
    ZONE : string
        Timezone.
    material : string, default 'LiteSoil'
        Unique identifier for ground cover (Card 10 albedo). Pass a falsy
        value (e.g. None) to retrieve the list of all valid materials
        instead of running a simulation.
    min_wvl : string, default '280'
        Minimum wavelength of the simulation [nm].
    max_wvl : string, default '4000'
        Maximum wavelength of the simulation [nm].

    Returns
    -------
    data : pandas
        Matrix with first column representing wavelength (in nm) and the
        remaining columns the variables requested via ``IOUT``.
        If ``material`` is falsy, the list of valid material identifiers is
        returned instead; if ``material`` is unknown, None is returned.

    Updates:
    6/20 Creation of second function to use zenith and azimuth M. Monarch
    '''
    # Resolve the ground-cover albedo code up front so that bad input fails
    # fast, before any of the SMARTS cards are assembled.
    IALBDX = _material_to_code(material)
    if not material:
        # Documented contract: with no material given, return the list of
        # all valid material identifiers (what _material_to_code yields).
        return IALBDX
    if IALBDX is None:
        # _material_to_code already printed a warning for the unknown name;
        # do not call _smartsAll with an invalid albedo code.
        return None

    ## Card 1: Comment. 64 characters max. In theory no spaces but yes underscores.
    CMNT = 'ASTMG173-03 (AM1.5 Standard)'

    ## Card 2: ISPR is an option for site's pressure.
    # ISPR = 0 to input SPR on Card 2a
    # ISPR = 1 to input SPR, ALTIT and HEIGHT on Card 2a
    # ISPR = 2 to input LATIT, ALTIT and HEIGHT on Card 2a.
    ISPR = '1'
    # Card 2a (if ISPR = 0): SPR
    SPR = '1013.25'  # mbar
    # Card 2a (if ISPR = 1): SPR, ALTIT, HEIGHT
    # SPR: Surface pressure (mb).
    # ALTIT: Site's altitude, i.e., elevation of the ground surface above sea
    #   level (km); must be <= 100 km. In case of a flying object, ALTIT
    #   refers to the ground surface below it. Here ALTIT is taken directly
    #   from the function argument.
    # HEIGHT: Height of the simulated object above the ground surface
    #   underneath (km); must be <= 100 km (new input).
    # The total ALTIT + HEIGHT is the altitude of the simulated object above
    # sea level and must be <= 100 km.
    HEIGHT = '0'

    ## Card 3: IATMOS is an option to select the proper default atmosphere.
    # Set IATMOS = 0 to define a realistic (i.e., non-reference) atmosphere.
    #   Card 3a will then have to provide TAIR, RH, SEASON, TDAY.
    # Set IATMOS = 1 to select one of 10 default reference atmospheres
    #   (i.e., for ideal conditions); its shortened name goes in ATMOS.
    IATMOS = '1'
    # Card 3a (if IATMOS = 1): ATMOS, one of:
    # USSA (U.S. Standard Atmosphere)  MLS (Mid-Latitude Summer)
    # MLW (Mid-Latitude Winter)        SAS (Sub-Arctic Summer)
    # SAW (Sub-Arctic Winter)          TRL (Tropical)
    # STS (Sub-Tropical Summer)        STW (Sub-Tropical Winter)
    # AS (Arctic Summer)               AW (Arctic Winter)
    ATMOS = 'USSA'
    # Card 3a (if IATMOS = 0): TAIR, RH, SEASON, TDAY — unused here since
    # IATMOS = 1, so left empty.
    # RH: Relative humidity at site level (%).
    # SEASON: `WINTER` or `SUMMER` (select WINTER for Fall, SUMMER for Spring).
    # TAIR: Atmospheric temperature at site level (°C); -120 < TAIR < 50.
    # TDAY: Average daily temperature at site level (°C); -120 < TDAY < 50.
    RH = ''
    TAIR = ''
    SEASON = ''
    TDAY = ''

    ## Card 4: IH2O selects the water vapor data (precipitable water W):
    # 0, to input W on Card 4a
    # 1, W defaulted from the selected reference atmosphere and site altitude
    # 2, W calculated from TAIR and RH (approximate; not recommended).
    # If IATMOS = 0, then IH2O should be 0 or 2; IO3 and IGAS should be 0.
    # If IATMOS = 1, IH2O, IO3 and IGAS may take any value.
    IH2O = '1'
    # Card 4a (if IH2O = 0): W, precipitable water above the site altitude
    # in cm (equivalently g/cm2); must be <= 12.
    W = ''

    ## Card 5: IO3 selects the ozone abundance input.
    # IO3 = 0 to input IALT and AbO3 on Card 5a
    # IO3 = 1 to default AbO3 from the reference atmosphere selected by IATMOS.
    IO3 = '1'
    # Card 5a (if IO3 = 0): IALT, AbO3
    # IALT = 0 bypasses the ozone column altitude correction; IALT = 1 applies
    # a vertical profile correction (elevated site, AbO3 known at sea level).
    IALT = ''
    AbO3 = ''

    ## Card 6: IGAS defines gaseous absorption / atmospheric pollution.
    # IGAS = 0: read ILOAD on Card 6a (extra gaseous absorption calculations).
    # IGAS = 1: default all gas abundances (except CO2, O3 and water vapor).
    IGAS = '0'
    # Card 6a (if IGAS = 0): ILOAD, tropospheric pollution option.
    # ILOAD = 0: read Card 6b with the concentrations of 10 pollutants.
    # ILOAD = 1: default PRISTINE ATMOSPHERIC conditions.
    # ILOAD = 2-4: LIGHT (2), MODERATE (3) or SEVERE (4) POLLUTION.
    ILOAD = '1'
    # Card 6b (if IGAS = 0 and ILOAD = 0): volumetric concentrations (ppmv) of
    # the 10 pollutants in the assumed 1-km deep tropospheric pollution layer:
    # ApCH2O formaldehyde, ApCH4 methane, ApCO carbon monoxide,
    # ApHNO2 nitrous acid, ApHNO3 nitric acid, ApNO nitric oxide,
    # ApNO2 nitrogen dioxide, ApNO3 nitrogen trioxide, ApO3 ozone,
    # ApSO2 sulfur dioxide. Unused here since ILOAD = 1.
    ApCH2O = ''
    ApCH4 = ''
    ApCO = ''
    ApHNO2 = ''
    ApHNO3 = ''
    ApNO = ''
    ApNO2 = ''
    ApNO3 = ''
    ApO3 = ''
    ApSO2 = ''

    ## Card 7: qCO2, carbon dioxide columnar volumetric concentration (ppmv).
    qCO2 = '0.0'
    # Card 7a: ISPCTR selects the extraterrestrial spectrum, one of ten
    # spectral files (``Spctrm_n.dat``, n = 0-8 or U):
    # -1 Spctrm_U.dat  N/A       User                               User
    #  0 Spctrm_0.dat  N/A       Gueymard, 2004 (synthetic)         1366.10
    #  1 Spctrm_1.dat  N/A       Gueymard, unpublished (synthetic)  1367.00
    #  2 Spctrm_2.dat  cebchkur  MODTRAN, Cebula/Chance/Kurucz      1362.12
    #  3 Spctrm_3.dat  chkur     MODTRAN, Chance/Kurucz             1359.75
    #  4 Spctrm_4.dat  newkur    MODTRAN, New Kurucz                1368.00
    #  5 Spctrm_5.dat  oldkur    MODTRAN, Old Kurucz                1373.16
    #  6 Spctrm_6.dat  thkur     MODTRAN, Thuillier/Kurucz          1376.23
    #  7 Spctrm_7.dat  MODTRAN2  Wehrli/WRC/WMO, 1985               1367.00
    #  8 Spctrm_8.dat  N/A       ASTM E490, 2000 (synthetic)        1366.10
    ISPCTR = '0'

    ## Card 8: AEROS selects the aerosol model, one of twelve choices:
    # S&F_RURAL, S&F_URBAN, S&F_MARIT, S&F_TROPO: Rural, Urban, Maritime and
    #   Tropospheric models (Shettle and Fenn, 1979), humidity dependent and
    #   common with MODTRAN.
    # SRA_CONTL, SRA_URBAN, SRA_MARIT: Continental, Urban and Maritime models
    #   of the IAMAP preliminary standard atmosphere (IAMAP, 1986).
    # B&D_C, B&D_C1: Braslau & Dave aerosol types C and C1 (Haze L model).
    # DESERT_MIN, DESERT_MAX: background vs. extremely turbid (sandstorm)
    #   desert conditions.
    # 'USER': Card 8a is then necessary for user-supplied aerosol information.
    AEROS = 'S&F_TROPO'
    # Card 8a (if AEROS = USER): ALPHA1, ALPHA2, OMEGL, GG — broadband average
    # values only. ALPHA1/ALPHA2: Ångström wavelength exponents below/above
    # 500 nm (0.0-2.6); OMEGL: single scattering albedo (0.6-1.0);
    # GG: asymmetry parameter (0.5-0.9). Unused here.
    ALPHA1 = ''
    ALPHA2 = ''
    OMEGL = ''
    GG = ''

    ## Card 9: ITURB selects the turbidity data input:
    # 0 TAU5, 1 BETA, 2 BCHUEP, 3 RANGE, 4 VISI, 5 TAU550 — read on Card 9a.
    ITURB = '0'
    # Card 9a: turbidity value matching ITURB above.
    TAU5 = '0.00'   # if ITURB == 0
    BETA = ''       # if ITURB == 1
    BCHUEP = ''     # if ITURB == 2
    RANGE = ''      # if ITURB == 3
    VISI = ''       # if ITURB == 4
    TAU550 = ''     # if ITURB == 5

    ## Card 10: Far field albedo for backscattering — IALBDX was resolved
    # above from `material`.
    # Card 10a: RHOX, zonal broadband Lambertian ground albedo (for
    # backscattering calculations); must be between 0 and 1. Only needed when
    # IALBDX = -1.
    RHOX = ''
    # Card 10b: ITILT, option for tilted surface calculations.
    # ITILT = 0 for no such calculation; ITILT = 1 to use Card 10c.
    ITILT = '1'
    # Card 10c: IALBDG, TILT, WAZIM.
    # IALBDG: same options as IALBDX (Card 10) but for the foreground local
    #   albedo seen by a tilted surface (1 to 64).
    # TILT: tilt angle of the receiving surface (0-90 deg; -999 for a
    #   sun-tracking surface).
    # WAZIM: surface azimuth (0-360 deg clockwise from North; e.g. 270 for a
    #   surface facing West; -999 for a sun-tracking surface).
    IALBDG = IALBDX
    TILT = '0.0'
    WAZIM = '180.0'
    # Card 10d (if IALBDG = -1): RHOG, local broadband Lambertian foreground
    # albedo for tilted plane calculations; usually between 0.05 and 0.90.
    RHOG = ''

    ## Card 11: Spectral range for all calculations.
    WLMN = min_wvl  # Min wavelength
    WLMX = max_wvl  # Max wavelength
    # SUNCOR: correction factor for irradiance, equal to the inverse squared
    # actual radius vector (true Sun-Earth distance); e.g., SUNCOR = 1.024.
    # It varies naturally between 0.966 and 1.034 (+3.4% in January, -3.4% in
    # July). It is calculated by the program when the solar position comes
    # from date & time (IMASS = 3 on Card 17, as here), thus overwriting this
    # input value. If solar position is directly input instead (IMASS != 3),
    # SUNCOR should be 1.0 to use the average extraterrestrial irradiance
    # (solar constant, see SOLARC), or 0.966-1.034 to correct for distance.
    SUNCOR = '1.0'
    SOLARC = '1367.0'  # Solar constant

    ## Card 12: Output results selection.
    # IPRT selects the results printed on Files 16 and 17:
    # 0 broadband only (File 16); 1 spectral added to File 16 (Card 12a read);
    # 2 spectral to File 17 (spreadsheet-like); 3 spectral to both files.
    # Cards 12b and 12c are read if IPRT = 2 or 3 (see IOTOT and IOUT).
    IPRT = '2'
    # Card 12a: min, max and step wavelength (nm) for output (can differ from
    # the calculation range).
    WPMN = WLMN
    WPMX = WLMX
    INTVL = '.5'
    # Card 12b: total number of output variables (IOTOT) is determined from
    # the IOUT argument by _smartsAll.
    # Card 12c: variables to output — space separated numbers 1-43, taken
    # directly from the IOUT function argument.

    ## Card 13: Circumsolar calculation.
    # ICIRC = 0 bypasses these calculations; ICIRC = 1 simulates a radiometer
    # whose collimator geometry is defined on Card 13a.
    ICIRC = '0'
    # Card 13a (if ICIRC = 1): SLOPE, APERT, LIMIT.
    SLOPE = ''
    APERT = ''
    LIMIT = ''

    ## Card 14: scanning/smoothing virtual filter of the postprocessor.
    # Smoothed results go to File 18 (``smarts295.scn.txt``). Activated if
    # ISCAN = 1; Card 14a is read if ISCAN = 1.
    ISCAN = '0'
    # Card 14a (if ISCAN = 1): IFILT, WV1, WV2, STEP, FWHM.
    IFILT = ''
    WV1 = ''
    WV2 = ''
    STEP = ''
    FWHM = ''

    ## Card 15: ILLUM, option for illuminance, luminous efficacy and
    # photosynthetically active radiation (PAR) calculations; done if
    # ILLUM = -1, 1, -2 or 2, bypassed if ILLUM = 0.
    # ILLUM = -1/1 uses the 1924 CIE photopic curve (``VLambda.dat``);
    # ILLUM = -2/2 uses the revised 1988 curve (``VMLambda.dat``).
    # ILLUM = 1 or -1 overrides WLMN/WLMX so calculations cover at least
    # 360-830 nm; ILLUM = 1 or 2 adds luminous efficacy and replaces
    # WLMN/WLMX by 280 and 4000.
    ILLUM = '0'

    ## Card 16: IUV, option for special broadband UV calculations (UVA, UVB,
    # UV index, action-weighted irradiances). IUV = 1 overrides WLMN/WLMX so
    # calculations and printing cover at least 280-400 nm, irrespective of
    # IPRT, WPMN and WPMX.
    IUV = '0'

    ## Card 17: IMASS, option for solar position and air mass calculations:
    # 0 ZENIT, AZIM; 1 ELEV, AZIM; 2 AMASS; 3 YEAR, MONTH, DAY, HOUR, LATIT,
    # LONGIT, ZONE; 4 MONTH, LATIT, DSTEP (daily calculation) — on Card 17a.
    IMASS = '3'
    # Card 17a alternatives not used here (IMASS = 3: date, time and
    # coordinates come directly from the function arguments).
    ZENITH = ''  # IMASS = 0
    AZIM = ''    # IMASS = 0 or 1
    ELEV = ''    # IMASS = 1
    AMASS = ''   # IMASS = 2
    DSTEP = ''   # IMASS = 4 (month, latitude and day step)

    output = _smartsAll(CMNT, ISPR, SPR, ALTIT, HEIGHT, LATIT, IATMOS, ATMOS,
                        RH, TAIR, SEASON, TDAY, IH2O, W, IO3, IALT, AbO3,
                        IGAS, ILOAD, ApCH2O, ApCH4, ApCO, ApHNO2, ApHNO3,
                        ApNO, ApNO2, ApNO3, ApO3, ApSO2, qCO2, ISPCTR, AEROS,
                        ALPHA1, ALPHA2, OMEGL, GG, ITURB, TAU5, BETA, BCHUEP,
                        RANGE, VISI, TAU550, IALBDX, RHOX, ITILT, IALBDG,
                        TILT, WAZIM, RHOG, WLMN, WLMX, SUNCOR, SOLARC, IPRT,
                        WPMN, WPMX, INTVL, IOUT, ICIRC, SLOPE, APERT, LIMIT,
                        ISCAN, IFILT, WV1, WV2, STEP, FWHM, ILLUM, IUV, IMASS,
                        ZENITH, AZIM, ELEV, AMASS, YEAR, MONTH, DAY, HOUR,
                        LONGIT, ZONE, DSTEP)

    return output
def SMARTSAirMass(IOUT, material='LiteSoil', AMASS = '1.0', min_wvl='280', max_wvl='4000'):
r'''
This function calculates the spectral albedo for a given material. If no
material is provided, the function will return a list of all valid
materials.
Parameters
----------
material : string
Unique identifier for ground cover. Pass None to retreive a list of
all valid materials.
WLMN : string
Minimum wavelength to retreive
WLMX : string
Maximum wavelength to retreive
YEAR : string
Year
MONTH : string
Month
DAY : string
Day
HOUR : string
Hour, in 24 hour format.
LATIT : string
Latitude of the location.
LONGIT : string
Longitude of the location.
ALTIT : string
elevation of the ground surface above sea level [km]
ZONE : string
Timezone
Returns
-------
data : pandas
Matrix with first column representing wavelength (in nm) and second
column representing albedo of specified material at the wavelength
Updates:
6/20 Creation of second function to use zenith and azimuth M. Monarch
'''
## Card 1: Comment. 64 characters max. In theory no spaces but yes underscores.
CMNT = 'ASTMG173-03 (AM1.5 Standard)'
## Card 2: ISPR is an option for site's pressure.
# ISPR = 0 to input SPR on Card 2a
# ISPR = 1 to input SPR, ALTIT and HEIGHT on Card 2a
# ISPR = 2 to input LATIT, ALTIT and HEIGHT on Card 2a.
ISPR = '0'
# Card 2a (if ISPR = 0): SPR
SPR = '1013.25' #mbar
# Card 2a (if ISPR = 1): SPR, ALTIT, HEIGHT
# SPR: Surface pressure (mb).
# ALTIT: Site's altitude, i.e., elevation of the ground surface above sea level (km); must be
# <= 100 km. In case of a flying object, ALTIT refers to the ground surface below it.
# HEIGHT: Height of the simulated object above the ground surface underneath (km); must be
# <= 100 km (new input).
# The total ALTIT + HEIGHT is the altitude of the simulated object above sea level and
# must be <= 100 km.
# Card 2a (if ISPR = 2): LATIT, ALTIT, HEIGHT
# LATIT: Site's latitude (decimal degrees, positive North, negative South); e.g., -17.533 for
# Papeete, Tahiti. If LATIT is unknown, enter 45.0.
# ALTIT: Site's altitude, i.e., elevation of the ground surface above sea level (km); must be
# <= 100 km. In case of a flying object, ALTIT refers to the ground surface below it.
# HEIGHT: Height of the simulated object above the ground surface underneath (km); must be
# <= 100 km (new input).
# The total ALTIT + HEIGHT is the altitude of the simulated object above sea level and
# must be <= 100 km.
ALTIT = ''
HEIGHT = ''
#LATIT = LATIT
## Card 3: IATMOS is an option to select the proper default atmosphere
# Its value can be either 0 or 1.
# Set IATMOS = 0 to define a realistic (i.e., non-reference) atmosphere. Card 3a will then have to
# provide TAIR, RH, SEASON, TDAY.
# Set IATMOS = 1 to select one of 10 default reference atmospheres (i.e., for ideal conditions). The
# shortened name of this atmosphere must be provided by ATMOS on Card 3a.
IATMOS = '1'
# Card 3a (if IATMOS = 1): ATMOS
# ATMOS is the name of the selected reference atmosphere; 4 characters max. This name can
# be one of the following:
# USSA (U.S. Standard Atmosphere) MLS (Mid-Latitude Summer)
# MLW (Mid-Latitude Winter) SAS (Sub-Arctic Summer)
# SAW (Sub-Arctic Winter) TRL (Tropical) STS (Sub-Tropical Summer)
# STW (Sub-Tropical Winter) AS (Arctic Summer) AW (Arctic Winter)
ATMOS = 'USSA'
# Card 3a(if IATMOS = 0): TAIR, RH, SEASON, TDAY.
# RH: Relative humidity at site level (%).
# SEASON: Can be either `WINTER` or `SUMMER`, for calculation of precipitable water and
# stratospheric temperature. If the true season is Fall, select WINTER. Select SUMMER if the
# true season is Spring. SEASON slightly affects the ozone effective temperature and the
# aerosol optical characteristics.
# TAIR: Atmospheric temperature at site level (°C). Acceptable range: -120 < TAIR < 50.
# TDAY: Average daily temperature at site level (°C). For a flying object (HEIGHT > 0), this
# is a reference temperature for various calculations, therefore it is important to provide a
# realistic value in this case in particular. Acceptable range: -120 < TDAY < 50.
RH = ''
TAIR = ''
SEASON = ''
TDAY = ''
## Card 4: IH2O is an option to select the correct water vapor data. All water vapor calculations involve
# precipitable water, W. The following values of IH2O are possible:
# 0, to input W on Card 4a
# 1, if W is to be defaulted to a value prescribed by the selected reference atmosphere and the site
# altitude (thus if IATMOS = 1 on Card 3). If IATMOS != 1, USSA will be defaulted for this step.
# 2, if W is to be calculated by the program from TAIR and RH (thus if IATMOS = 0 on Card 3). This
# calculation is only approximate (particularly if HEIGHT > 0) and therefore this option is not
# recommended.
# If IATMOS = 0 is selected, then IH2O should be 0 or 2; IO3 and IGAS should be 0.
# If IATMOS = 1 is selected, then IH2O, IO3, and IGAS may take any value. All user inputs
# have precedence over the defaults.
IH2O = '1'
# Card 4a: (if IH2O = 0): W is precipitable water above the site altitude
# in units of cm, or equivalently, g/cm2; it must be <= 12.
W = ''
## Card 5: IO3 is an option to select the appropriate ozone abundance input.
# IO3 = 0 to input IALT and AbO3 on Card 5a
# IO3 = 1 to use a default value for AbO3 according to the reference atmosphere selected by
# IATMOS. If IATMOS != 1, USSA will be defaulted for this calculation.
# If IATMOS = 0 is selected, then IH2O should be 0 or 2; IO3 and IGAS should be 0.
# If IATMOS = 1 is selected, then IH2O, IO3, and IGAS may take any value. All user inputs
# have precedence over the defaults.
IO3 = '1'
# Card 5a (if IO3 = 0): IALT, AbO3
# IALT is an option to select the appropriate ozone column altitude correction.
# IALT = 0 bypasses the altitude correction, so that the value of AbO3 on
# Card 5a is used as is. IALT = 1 should be rather used if a vertical
# profile correction needs to be applied (in case of an elevated site when
# the value of AbO3 is known only at sea level).
IALT = ''
AbO3 = ''
## Card 6 IGAS is an option to define the correct conditions for gaseous absorption and atmospheric pollution.
# IGAS = 0 if ILOAD on Card 6a is to be read so that extra gaseous absorption calculations
# (corresponding to the gas load in the lower troposphere due to pollution or absence thereof) can be
# initiated;
# IGAS =1 if all gas abundances (except carbon dioxide, ozone and water vapor see Cards 4a, 5a,
# and 7) are to be defaulted, using average vertical profiles.
# If IATMOS = 0 is selected, then IH2O should be 0 or 2; IO3 and IGAS should be 0.
# If IATMOS = 1 is selected, then IH2O, IO3, and IGAS may take any value. All user inputs
# have precedence over the defaults.
IGAS = '1'
# Card 6a (if IGAS = 0): ILOAD is an option for tropospheric pollution, only used if IGAS = 0.
# For ILOAD = 0, Card 6b will be read with the concentrations of 10 pollutants.
# ILOAD = 1 selects default PRISTINE ATMOSPHERIC conditions, leading to slightly
# reduced abundances of some gases compared to the initial default obtained with the selected
# reference atmosphere.
# Setting ILOAD to 2-4 will increase the concentration of the 10 pollutants to possibly
# represent typical urban conditions: LIGHT POLLUTION (ILOAD = 2), MODERATE
# POLLUTION (ILOAD = 3), and SEVERE POLLUTION (ILOAD = 4).
ILOAD = '1'
# Card 6b (if IGAS = 0 and ILOAD = 0): ApCH2O, ApCH4, ApCO, ApHNO2,
# ApHNO3, ApNO, ApNO2, ApNO3, ApO3, ApSO2
# ApCH2O: Formaldehyde volumetric concentration in the assumed 1-km deep tropospheric
# pollution layer (ppmv).
# ApCH4: Methane volumetric concentration in the assumed 1-km deep tropospheric pollution
# layer (ppmv).
# ApCO: Carbon monoxide volumetric concentration in the assumed 1-km deep tropospheric
# pollution layer (ppmv), Card 6b.
# ApHNO2: Nitrous acid volumetric concentration in the assumed 1-km deep tropospheric
# pollution layer (ppmv).
# ApHNO3: Nitric acid volumetric concentration in the assumed 1-km deep tropospheric
# pollution layer (ppmv).
# ApNO: Nitric oxide volumetric concentration in the assumed 1-km deep tropospheric
# pollution layer (ppmv).
# ApNO2: Nitrogen dioxide volumetric concentration in the assumed 1-km deep tropospheric
# pollution layer (ppmv).
# ApNO3: Nitrogen trioxide volumetric concentration in the assumed 1-km deep tropospheric
# pollution layer (ppmv).
# ApO3: Ozone volumetric concentration in the assumed 1-km deep tropospheric pollution
# layer (ppmv).
# ApSO2: Sulfur dioxide volumetric concentration in the assumed 1-km deep tropospheric
# pollution layer (ppmv).
ApCH2O = ''
ApCH4 = ''
ApCO = ''
ApHNO2 = ''
ApHNO3 = ''
ApNO = ''
ApNO2 = ''
ApNO3 = ''
ApO3 = ''
ApSO2 =''
## Card 7 qCO2 carbon dioxide columnar volumetric concentration (ppmv).
qCO2 = '0.0'
# Card 7a ISPCTR
# is an option to select the proper extraterrestrial
# spectrum. This option allows to choose one out of ten possible spectral
# files (``Spctrm_n.dat``, where n = 0-8 or n = U).
# -1 Spctrm_U.dat N/A User User
# 0 Spctrm_0.dat N/A Gueymard, 2004 (synthetic) 1366.10
# 1 Spctrm_1.dat N/A Gueymard, unpublished (synthetic) 1367.00
# 2 Spctrm_2.dat cebchkur MODTRAN, Cebula/Chance/Kurucz 1362.12
# 3 Spctrm_3.dat chkur MODTRAN, Chance/Kurucz 1359.75
# 4 Spctrm_4.dat newkur MODTRAN, New Kurucz 1368.00
# 5 Spctrm_5.dat oldkur MODTRAN, Old Kurucz 1373.16
# 6 Spctrm_6.dat thkur MODTRAN, Thuillier/Kurucz 1376.23
# 7 Spctrm_7.dat MODTRAN2 Wehrli/WRC/WMO, 1985 1367.00
# 8 Spctrm_8.dat N/A ASTM E490, 2000 (synthetic) 1366.10
ISPCTR ='0'
## Card 8: AEROS selects the aerosol model, with one of the following twelve possible choices:
# S&F_RURAL , S&F_URBAN , S&F_MARIT , S&F_TROPO , These four choices
# refer respectively to the Rural, Urban, Maritime and Tropospheric aerosol
# models (Shettle and Fenn, 1979), which are humidity dependent and common with MODTRAN.
# SRA_CONTL , SRA_URBAN , SRA_MARIT , These three choices refer
# respectively to the Continental, Urban, and Maritime aerosol models of
# the IAMAP preliminary standard atmosphere (IAMAP, 1986).
# B&D_C , B&D_C1 , These two choices refer respectively to the Braslau &
# Dave aerosol type C and C1, themselves based on Deirmendjian's Haze L model.
# DESERT_MIN , DESERT_MAX DESERT_MIN corresponds to background (normal)
# conditions in desert areas, whereas DESERT_MAX corresponds to extremely
# turbid conditions (sandstorms).
# 'USER' Card 8a is then necessary to input user-supplied aerosol information.
AEROS = 'S&F_TROPO'
# Card 8a:
# if AEROS = USER : ALPHA1, ALPHA2, OMEGL, GG These 4 variables must represent broadband average values only!
# ALPHA1: Average value of Ångström's wavelength exponent $\alpha$ for wavelengths < 500 nm
# (generally between 0.0 and 2.6).
# ALPHA2: Average value of Ångström's wavelength exponent $\alpha$ for wavelengths >= 500 nm
# (generally between 0.0 and 2.6).
# OMEGL: Aerosol single scattering albedo (generally between 0.6 and 1.0).
# GG: Aerosol asymmetry parameter (generally between 0.5 and 0.9).
ALPHA1 = ''
ALPHA2 = ''
OMEGL = ''
GG = ''
## Card 9: ITURB is an option to select the correct turbidity data input. The different options are:
# 0, to read TAU5 on Card 9a
# 1, to read BETA on Card 9a
# 2, to read BCHUEP on Card 9a
# 3, to read RANGE on Card 9a
# 4, to read VISI on Card 9a
# 5, to read TAU550 on Card 9a (new option).
ITURB = '0'
#Card 9a Turbidity value
TAU5 = '0.00' #if ITURB == 0
BETA = '' #if ITURB == 1
BCHUEP = '' #if ITURB == 2
RANGE = '' #if ITURB == 3
VISI = '' #if ITURB == 4
TAU550 = '' #if ITURB == 5
## Card 10: Far Field Albedo for backscattering
IALBDX = _material_to_code(material)
# Card 10a:
RHOX = ''
# Zonal broadband Lambertian ground albedo (for backscattering calculations); must
# be between 0 and 1.
# Card 10b: ITILT is an option for tilted surface calculations.
#Select ITILT= 0 for no such calculation,
#ITILT = 1 to initiate these calculations using information on Card 10c.
ITILT = '1'
# Card 10c:
# IALBDG is identical to IALBDX (see Card 10) except that it relates to the foreground local
# albedo seen by a tilted surface. The list of options is identical to that of IALBDG and thus
# extends from 1 to 64 (new).
# TILT: Tilt angle of the receiving surface (0 to 90 decimal deg.); e.g. 90.0 for a vertical
# plane. Use -999 for a sun-tracking surface.
# WAZIM: Surface azimuth (0 to 360 decimal deg.) counted clockwise from North; e.g., 270
# deg. for a surface facing West. Use -999 for a sun-tracking surface.
IALBDG = IALBDX
TILT = '0.0'
WAZIM = '180.0'
# Card 10d:
# RHOG: Local broadband Lambertian foreground albedo (for tilted plane calculations), Card
# 10d (if IALBDG = -1); usually between 0.05 and 0.90.
RHOG = ''
## Card 11: Spectral range for all Calculations
WLMN = min_wvl #Min wavelength
WLMX = max_wvl #Max wavelength
SUNCOR = '1.0'
#Correction factor for irradiance is a correction factor equal to the inverse squared actual radius vector, or true Sun-Earth
# distance; e.g., SUNCOR = 1.024.
# SUNCOR varies naturally between 0.966 and 1.034, adding 3.4% to the irradiance in January
# and reducing it by 3.4% in July. It is calculated by the program if the solar position is calculated
# from date & time, i.e., if IMASS = 3 on Card 17, thus overwriting the input SUNCOR value on
# Card 11. If solar position is directly input instead (IMASS = 3), SUNCOR should be set to 1.0 if
# the average extraterrestrial irradiance (or solar constant, see SOLARC) is to be used, or to any
# other number between 0.966 and 1.034 to correct it for distance if so desired.
SOLARC = '1367.0' #Solar constant
## Card 12: Output results selection:
# IPRT is an option to select the results to be printed on Files 16 and 17. Only broadband results are
# output (to File 16) if IPRT = 0. Spectral results are added to File 16,
# and Card 12a is read, if IPRT = 1. Spectral results are rather printed to
# File 17 (in a spreadsheet-like format) if IPRT = 2. Finally, spectral
# results are printed to both File 16 and 17 if IPRT = 3. Cards
# 12b and 12c are read if IPRT = 2 or 3 (see IOTOT and IOUT).
IPRT = '2'
# Card 12a: Min, Max and Step wavelength (nm) (Output can be different than
# calculation...
WPMN = WLMN
WPMX = WLMX
INTVL = '.5'
# Card 12b: Total number of output variables:
#IOTOT = XXX #This is determined with the input of this function
# Card 12c: Variables to output selection
#(space separated numbers 1-43 according to the table below:
IOUT = IOUT
## Card 13: Circumsolar Calculation
# ICIRC is an option controlling the calculation of circumsolar radiation, which is useful when
# simulating any type of radiometer (spectral or broadband) equipped with a collimator.
# ICIRC = 0 bypasses these calculations.
# ICIRC = 1 indicates that a typical radiometer needs to be simulated. The geometry of its collimator
# must then defined on Card 13a.
ICIRC = '0'
#Card 13a (if ICIRC = 1): SLOPE, APERT, LIMIT
SLOPE = ''
APERT = ''
LIMIT = ''
## Card 14 Option for using the scanning/smoothing virtual filter of the postprocessor.
# The smoothed results are output on a spreadsheet-ready file, File 18 (``smarts295.scn.txt``). This postprocessor is
# activated if ISCAN = 1, not if ISCAN = 0. Card 14a is read if ISCAN = 1.
ISCAN = '0'
# Card 14a (if ISCAN = 1): IFILT, WV1, WV2, STEP, FWHM
IFILT = ''
WV1 = ''
WV2 = ''
STEP = ''
FWHM = ''
## Card 15 ILLUM: Option for illuminance, luminous efficacy and photosynthetically active radiation (PAR)
# calculations. These calculations take place if ILLUM = -1, 1, -2 or 2, and are bypassed if ILLUM = 0.
# With ILLUM = -1 or 1, illuminance calculations are based on the CIE photopic curve (or Vlambda
# curve) of 1924, as supplied in File ``VLambda.dat``. With ILLUM = -2 or 2, the same calculations are
# done but the revised CIE photopic curve of 1988 is rather used (from File ``VMLambda.dat``). Note
# that selecting ILLUM = 1 or -1 will override WLMN and WLMX (see Card 11) so that calculations
# are done between at least 360 and 830 nm.
# Moreover, if ILLUM = 1 or 2, luminous efficacy calculations are added to the illuminance
# calculations. This overrides the values of WLMN and WLMX on Card 11, and replaces them by 280
# and 4000, respectively.
ILLUM = '0'
## Card 16: Option for special broadband UV calculations. Select IUV = 0 for no special UV calculation,
# IUV = 1 to initiate such calculations. These include UVA, UVB, UV index, and
# different action weighted irradiances of interest in photobiology.
# Note that IUV = 1 overrides WLMN and WLMX so that calculations are done between at least 280
# and 400 nm. The spectral results are also printed between at least 280 and 400 nm, irrespective of
# the IPRT, WPMN, and WPMX values.
IUV = '0'
## Card 17:
# Option for solar position and air mass calculations. Set IMASS to:
# 0, if inputs are to be ZENIT, AZIM on Card 17a
# 1, if inputs are to be ELEV, AZIM on Card 17a
# 2, if input is to be AMASS on Card 17a
# 3, if inputs are to be YEAR, MONTH, DAY, HOUR, LATIT, LONGIT, ZONE on Card 17a
# 4, if inputs are to be MONTH, LATIT, DSTEP on Card 17a (for a daily calculation).
IMASS = '2'
# Card 17a: IMASS = 0 Zenith and azimuth
ZENITH = ''
AZIM = ''
# Card 17a: IMASS = 1 Elevation and Azimuth
ELEV = ''
# Card 17a: IMASS = 2 Input air mass directly
AMASS = AMASS
# Card 17a: IMASS = 3 Input date, time and coordinates
YEAR = ''
MONTH = ''
DAY = ''
HOUR = ''
LATIT = ''
LONGIT = ''
ZONE = ''
# Card 17a: IMASS = 4 Input Moth, Latitude and DSTEP
DSTEP = ''
output = _smartsAll(CMNT, ISPR, SPR, ALTIT, HEIGHT, LATIT, IATMOS, ATMOS, RH, TAIR, SEASON, TDAY, IH2O, W, IO3, IALT, AbO3, IGAS, ILOAD, ApCH2O, ApCH4, ApCO, ApHNO2, ApHNO3, ApNO,ApNO2, ApNO3, ApO3, ApSO2, qCO2, ISPCTR, AEROS, ALPHA1, ALPHA2, OMEGL, GG, ITURB, TAU5, BETA, BCHUEP, RANGE, VISI, TAU550, IALBDX, RHOX, ITILT, IALBDG,TILT, WAZIM, RHOG, WLMN, WLMX, SUNCOR, SOLARC, IPRT, WPMN, WPMX, INTVL, IOUT, ICIRC, SLOPE, APERT, LIMIT, ISCAN, IFILT, WV1, WV2, STEP, FWHM, ILLUM,IUV, IMASS, ZENITH, AZIM, ELEV, AMASS, YEAR, MONTH, DAY, HOUR, LONGIT, ZONE, DSTEP)
return output
def SMARTSSpectraZenAzm(IOUT, ZENITH, AZIM, material='LiteSoil', SPR='1013.25', min_wvl='280', max_wvl='4000'):
    r'''
    Run a SMARTS spectral simulation for an explicit sun position
    (zenith / azimuth, Card 17 option IMASS = 0) over the given ground
    material, and return the spectral outputs selected by ``IOUT``.

    All inputs are passed as strings because they are written verbatim
    into the SMARTS input card file by ``_smartsAll``.

    Parameters
    ----------
    IOUT : string
        Space-separated SMARTS output-variable codes (1-43, Card 12c)
        selecting which spectral quantities to compute, e.g. '30 31'.
    ZENITH : string
        Zenith angle of the sun [degrees] (Card 17a).
    AZIM : string
        Azimuth of the sun [degrees], clockwise from North (Card 17a).
    material : string
        Unique identifier for the ground cover, mapped to a SMARTS albedo
        code by ``_material_to_code``. Pass None to retrieve a list of all
        valid materials. Default: 'LiteSoil'.
    SPR : string
        Site pressure [mbar]. Default: '1013.25'.
    min_wvl : string
        Minimum wavelength of the simulation [nm]. Default: '280'.
    max_wvl : string
        Maximum wavelength of the simulation [nm]. Default: '4000'.

    Returns
    -------
    data : pandas
        Matrix whose first column is wavelength (in nm) and whose remaining
        columns are the quantities requested via ``IOUT`` (e.g. the albedo
        of the specified material at each wavelength).

    Updates:
        6/20 Creation of second function to use zenith and azimuth M. Monarch
    '''
    # --- Card 1: comment string (max 64 characters, underscores over spaces).
    CMNT = 'ASTMG173-03 (AM1.5 Standard)'

    # --- Card 2: site-pressure mode. ISPR = 0 -> SPR is given directly on
    # Card 2a; the altitude/height inputs of the other modes stay unused.
    ISPR = '0'
    SPR = SPR        # surface pressure [mbar]
    ALTIT = ''       # site altitude [km]; only read when ISPR = 1 or 2
    HEIGHT = ''      # object height above ground [km]; only read when ISPR = 1 or 2

    # --- Card 3: IATMOS = 1 -> use a named default reference atmosphere.
    IATMOS = '1'
    ATMOS = 'USSA'   # U.S. Standard Atmosphere
    # Card 3a realistic-atmosphere inputs; only read when IATMOS = 0.
    RH = ''
    TAIR = ''
    SEASON = ''
    TDAY = ''

    # --- Card 4: IH2O = 1 -> precipitable water defaulted from the selected
    # reference atmosphere and site altitude.
    IH2O = '1'
    W = ''           # explicit precipitable water [cm]; only read when IH2O = 0

    # --- Card 5: IO3 = 1 -> ozone abundance defaulted from the reference
    # atmosphere.
    IO3 = '1'
    IALT = ''        # ozone altitude-correction flag; only read when IO3 = 0
    AbO3 = ''        # ozone abundance; only read when IO3 = 0

    # --- Cards 6/6a: IGAS = 0 with ILOAD = 1 selects default PRISTINE
    # atmospheric conditions (no explicit pollutant concentrations).
    IGAS = '0'
    ILOAD = '1'
    # Card 6b pollutant concentrations [ppmv]; only read when ILOAD = 0.
    ApCH2O = ''
    ApCH4 = ''
    ApCO = ''
    ApHNO2 = ''
    ApHNO3 = ''
    ApNO = ''
    ApNO2 = ''
    ApNO3 = ''
    ApO3 = ''
    ApSO2 = ''

    # --- Cards 7/7a: columnar CO2 [ppmv] and extraterrestrial spectrum file.
    qCO2 = '0.0'
    ISPCTR = '0'     # Spctrm_0.dat: Gueymard 2004 synthetic, 1366.10 W/m2

    # --- Card 8: aerosol model (Shettle & Fenn tropospheric).
    AEROS = 'S&F_TROPO'
    # Card 8a user-aerosol parameters; only read when AEROS = 'USER'.
    ALPHA1 = ''
    ALPHA2 = ''
    OMEGL = ''
    GG = ''

    # --- Cards 9/9a: ITURB = 0 -> turbidity given as aerosol optical depth
    # at 500 nm (TAU5); the alternative turbidity inputs stay unused.
    ITURB = '0'
    TAU5 = '0.00'
    BETA = ''
    BCHUEP = ''
    RANGE = ''
    VISI = ''
    TAU550 = ''

    # --- Card 10: far-field (zonal) albedo from the material's SMARTS code.
    IALBDX = _material_to_code(material)
    RHOX = ''        # broadband zonal albedo; only read when IALBDX = -1

    # --- Cards 10b/10c: tilted-surface calculation enabled for a horizontal
    # receiver (TILT = 0.0) facing south (WAZIM = 180.0), with the foreground
    # albedo identical to the far-field albedo.
    ITILT = '1'
    IALBDG = IALBDX
    TILT = '0.0'
    WAZIM = '180.0'
    RHOG = ''        # broadband foreground albedo; only read when IALBDG = -1

    # --- Card 11: spectral range, Sun-Earth distance correction and solar
    # constant. SUNCOR = 1.0 means mean Sun-Earth distance.
    WLMN = min_wvl
    WLMX = max_wvl
    SUNCOR = '1.0'
    SOLARC = '1367.0'   # solar constant [W/m2]

    # --- Cards 12/12a: IPRT = 2 -> spectral results to File 17 (spreadsheet
    # format), printed over the full computed range at 0.5 nm steps; IOUT
    # (Card 12c, function argument) selects which variables are output.
    IPRT = '2'
    WPMN = WLMN
    WPMX = WLMX
    INTVL = '.5'

    # --- Card 13: no circumsolar (radiometer collimator) simulation.
    ICIRC = '0'
    SLOPE = ''
    APERT = ''
    LIMIT = ''

    # --- Card 14: no scanning/smoothing postprocessor.
    ISCAN = '0'
    IFILT = ''
    WV1 = ''
    WV2 = ''
    STEP = ''
    FWHM = ''

    # --- Card 15: no illuminance / luminous efficacy / PAR calculations.
    ILLUM = '0'

    # --- Card 16: no special broadband UV calculations.
    IUV = '0'

    # --- Cards 17/17a: IMASS = 0 -> solar geometry given directly by the
    # ZENITH and AZIM arguments; every other geometry input stays unused.
    IMASS = '0'
    ELEV = ''        # IMASS = 1 input
    AMASS = ''       # IMASS = 2 input
    YEAR = ''        # IMASS = 3 inputs (date, time, coordinates)
    MONTH = ''
    DAY = ''
    HOUR = ''
    LATIT = ''
    LONGIT = ''
    ZONE = ''
    DSTEP = ''       # IMASS = 4 input (month/latitude daily calculation)

    # Argument order must match the _smartsAll signature exactly.
    output = _smartsAll(CMNT, ISPR, SPR, ALTIT, HEIGHT, LATIT, IATMOS, ATMOS,
                        RH, TAIR, SEASON, TDAY, IH2O, W, IO3, IALT, AbO3, IGAS,
                        ILOAD, ApCH2O, ApCH4, ApCO, ApHNO2, ApHNO3, ApNO,
                        ApNO2, ApNO3, ApO3, ApSO2, qCO2, ISPCTR, AEROS, ALPHA1,
                        ALPHA2, OMEGL, GG, ITURB, TAU5, BETA, BCHUEP, RANGE,
                        VISI, TAU550, IALBDX, RHOX, ITILT, IALBDG, TILT, WAZIM,
                        RHOG, WLMN, WLMX, SUNCOR, SOLARC, IPRT, WPMN, WPMX,
                        INTVL, IOUT, ICIRC, SLOPE, APERT, LIMIT, ISCAN, IFILT,
                        WV1, WV2, STEP, FWHM, ILLUM, IUV, IMASS, ZENITH, AZIM,
                        ELEV, AMASS, YEAR, MONTH, DAY, HOUR, LONGIT, ZONE,
                        DSTEP)

    return output
def SMARTSTMY3(IOUT,YEAR,MONTH,DAY,HOUR, LATIT, LONGIT, ALTIT, ZONE, RHOG,
W, RH, TAIR, SEASON, TDAY, SPR, HEIGHT='0',
material='DryGrass', min_wvl='280', max_wvl='4000'):
r'''
This function calculates the spectral albedo for a given material. If no
material is provided, the function will return a list of all valid
materials.
Parameters
----------
material : string
Unique identifier for ground cover. Pass None to retreive a list of
all valid materials.
WLMN : string
Minimum wavelength to retreive
WLMX : string
Maximum wavelength to retreive
YEAR : string
Year
MONTH : string
Month
DAY : string
Day
HOUR : string
Hour, in 24 hour format.
LATIT : string
Latitude of the location.
LONGIT : string
Longitude of the location.
ALTIT : string
elevation of the ground surface above sea level [km].
WARNING: Please note that TMY3 data is in meters, convert before using this
function.
ZONE : string
Timezone
RHOG : string
Local broadband Lambertian foreground albedo (for tilted plane calculations)
W : string
Precipitable water above the site altitude, in units of cm or equivalently
g/cm2/
RH : string
Relative Humidity
TAIR : string
Temperature.
SEASON : string
Season, either 'WINTER' or 'SUMMER'. If Spring, use 'SUMMER'. If
Autumn, use 'WINTER'.
TDAY : string
Average of the day's temperature.
HEIGHT : string
Altitude of the simulated object over the surface, in km.
SPR : string
Site pressure, in mbars.
Returns
-------
data : pandas
Matrix with first column representing wavelength (in nm) and second
column representing albedo of specified material at the wavelength
'''
if float(ALTIT) > 800:
print("Altitude should be in km. Are you in Mt. Everest or above or",
"using meters? This might fail but we'll attempt to continue.")
## Card 1: Comment. 64 characters max. In theory no spaces but yes underscores.
CMNT = 'TMY Parameters Spectra'
## Card 2: ISPR is an option for site's pressure.
# ISPR = 0 to input SPR on Card 2a
# ISPR = 1 to input SPR, ALTIT and HEIGHT on Card 2a
# ISPR = 2 to input LATIT, ALTIT and HEIGHT on Card 2a.
ISPR = '1'
# Card 2a (if ISPR = 0): SPR
SPR = SPR #mbar
# Card 2a (if ISPR = 1): SPR, ALTIT, HEIGHT
# SPR: Surface pressure (mb).
# ALTIT: Site's altitude, i.e., elevation of the ground surface above sea level (km); must be
# <= 100 km. In case of a flying object, ALTIT refers to the ground surface below it.
# HEIGHT: Height of the simulated object above the ground surface underneath (km); must be
# <= 100 km (new input).
# The total ALTIT + HEIGHT is the altitude of the simulated object above sea level and
# must be <= 100 km.
# Card 2a (if ISPR = 2): LATIT, ALTIT, HEIGHT
# LATIT: Site's latitude (decimal degrees, positive North, negative South); e.g., -17.533 for
# Papeete, Tahiti. If LATIT is unknown, enter 45.0.
# ALTIT: Site's altitude, i.e., elevation of the ground surface above sea level (km); must be
# <= 100 km. In case of a flying object, ALTIT refers to the ground surface below it.
# HEIGHT: Height of the simulated object above the ground surface underneath (km); must be
# <= 100 km (new input).
# The total ALTIT + HEIGHT is the altitude of the simulated object above sea level and
# must be <= 100 km.
ALTIT = ALTIT
HEIGHT = HEIGHT
#LATIT = LATIT
## Card 3: IATMOS is an option to select the proper default atmosphere
# Its value can be either 0 or 1.
# Set IATMOS = 0 to define a realistic (i.e., non-reference) atmosphere. Card 3a will then have to
# provide TAIR, RH, SEASON, TDAY.
# Set IATMOS = 1 to select one of 10 default reference atmospheres (i.e., for ideal conditions). The
# shortened name of this atmosphere must be provided by ATMOS on Card 3a.
IATMOS = '0'
# Card 3a (if IATMOS = 1): ATMOS
# ATMOS is the name of the selected reference atmosphere; 4 characters max. This name can
# be one of the following:
# USSA (U.S. Standard Atmosphere) MLS (Mid-Latitude Summer)
# MLW (Mid-Latitude Winter) SAS (Sub-Arctic Summer)
# SAW (Sub-Arctic Winter) TRL (Tropical) STS (Sub-Tropical Summer)
# STW (Sub-Tropical Winter) AS (Arctic Summer) AW (Arctic Winter)
ATMOS = 'USSA'
# Card 3a(if IATMOS = 0): TAIR, RH, SEASON, TDAY.
# RH: Relative humidity at site level (%).
# SEASON: Can be either `WINTER` or `SUMMER`, for calculation of precipitable water and
# stratospheric temperature. If the true season is Fall, select WINTER. Select SUMMER if the
# true season is Spring. SEASON slightly affects the ozone effective temperature and the
# aerosol optical characteristics.
# TAIR: Atmospheric temperature at site level (°C). Acceptable range: -120 < TAIR < 50.
# TDAY: Average daily temperature at site level (°C). For a flying object (HEIGHT > 0), this
# is a reference temperature for various calculations, therefore it is important to provide a
# realistic value in this case in particular. Acceptable range: -120 < TDAY < 50.
RH = RH
TAIR = TAIR
SEASON = SEASON
TDAY = TDAY
## Card 4: IH2O is an option to select the correct water vapor data. All water vapor calculations involve
# precipitable water, W. The following values of IH2O are possible:
# 0, to input W on Card 4a
# 1, if W is to be defaulted to a value prescribed by the selected reference atmosphere and the site
# altitude (thus if IATMOS = 1 on Card 3). If IATMOS != 1, USSA will be defaulted for this step.
# 2, if W is to be calculated by the program from TAIR and RH (thus if IATMOS = 0 on Card 3). This
# calculation is only approximate (particularly if HEIGHT > 0) and therefore this option is not
# recommended.
# If IATMOS = 0 is selected, then IH2O should be 0 or 2; IO3 and IGAS should be 0.
# If IATMOS = 1 is selected, then IH2O, IO3, and IGAS may take any value. All user inputs
# have precedence over the defaults.
IH2O = '0'
# Card 4a: (if IH2O = 0): W is precipitable water above the site altitude
# in units of cm, or equivalently, g/cm2; it must be <= 12.
W = W
if float(W) == 0 or float(W) > 12:
print("Switching to calculating W")
IH2O = '2'
## Card 5: IO3 is an option to select the appropriate ozone abundance input.
# IO3 = 0 to input IALT and AbO3 on Card 5a
# IO3 = 1 to use a default value for AbO3 according to the reference atmosphere selected by
# IATMOS. If IATMOS != 1, USSA will be defaulted for this calculation.
# If IATMOS = 0 is selected, then IH2O should be 0 or 2; IO3 and IGAS should be 0.
# If IATMOS = 1 is selected, then IH2O, IO3, and IGAS may take any value. All user inputs
# have precedence over the defaults.
IO3 = '1'
# Card 5a (if IO3 = 0): IALT, AbO3
# IALT is an option to select the appropriate ozone column altitude correction.
# IALT = 0 bypasses the altitude correction, so that the value of AbO3 on
# Card 5a is used as is. IALT = 1 should be rather used if a vertical
# profile correction needs to be applied (in case of an elevated site when
# the value of AbO3 is known only at sea level).
IALT = ''
AbO3 = ''
## Card 6 IGAS is an option to define the correct conditions for gaseous absorption and atmospheric pollution.
# IGAS = 0 if ILOAD on Card 6a is to be read so that extra gaseous absorption calculations
# (corresponding to the gas load in the lower troposphere due to pollution or absence thereof) can be
# initiated;
# IGAS =1 if all gas abundances (except carbon dioxide, ozone and water vapor see Cards 4a, 5a,
# and 7) are to be defaulted, using average vertical profiles.
# If IATMOS = 0 is selected, then IH2O should be 0 or 2; IO3 and IGAS should be 0.
# If IATMOS = 1 is selected, then IH2O, IO3, and IGAS may take any value. All user inputs
# have precedence over the defaults.
IGAS = '0'
# Card 6a (if IGAS = 0): ILOAD is an option for tropospheric pollution, only used if IGAS = 0.
# For ILOAD = 0, Card 6b will be read with the concentrations of 10 pollutants.
# ILOAD = 1 selects default PRISTINE ATMOSPHERIC conditions, leading to slightly
# reduced abundances of some gases compared to the initial default obtained with the selected
# reference atmosphere.
# Setting ILOAD to 2-4 will increase the concentration of the 10 pollutants to possibly
# represent typical urban conditions: LIGHT POLLUTION (ILOAD = 2), MODERATE
# POLLUTION (ILOAD = 3), and SEVERE POLLUTION (ILOAD = 4).
ILOAD = '1'
# Card 6b (if IGAS = 0 and ILOAD = 0): ApCH2O, ApCH4, ApCO, ApHNO2,
# ApHNO3, ApNO, ApNO2, ApNO3, ApO3, ApSO2
# ApCH2O: Formaldehyde volumetric concentration in the assumed 1-km deep tropospheric
# pollution layer (ppmv).
# ApCH4: Methane volumetric concentration in the assumed 1-km deep tropospheric pollution
# layer (ppmv).
# ApCO: Carbon monoxide volumetric concentration in the assumed 1-km deep tropospheric
# pollution layer (ppmv), Card 6b.
# ApHNO2: Nitrous acid volumetric concentration in the assumed 1-km deep tropospheric
# pollution layer (ppmv).
# ApHNO3: Nitric acid volumetric concentration in the assumed 1-km deep tropospheric
# pollution layer (ppmv).
# ApNO: Nitric oxide volumetric concentration in the assumed 1-km deep tropospheric
# pollution layer (ppmv).
# ApNO2: Nitrogen dioxide volumetric concentration in the assumed 1-km deep tropospheric
# pollution layer (ppmv).
# ApNO3: Nitrogen trioxide volumetric concentration in the assumed 1-km deep tropospheric
# pollution layer (ppmv).
# ApO3: Ozone volumetric concentration in the assumed 1-km deep tropospheric pollution
# layer (ppmv).
# ApSO2: Sulfur dioxide volumetric concentration in the assumed 1-km deep tropospheric
# pollution layer (ppmv).
ApCH2O = ''
ApCH4 = ''
ApCO = ''
ApHNO2 = ''
ApHNO3 = ''
ApNO = ''
ApNO2 = ''
ApNO3 = ''
ApO3 = ''
ApSO2 =''
## Card 7 qCO2 carbon dioxide columnar volumetric concentration (ppmv).
qCO2 = '0.0'
# Card 7a ISPCTR
# is an option to select the proper extraterrestrial
# spectrum. This option allows to choose one out of ten possible spectral
# files (``Spctrm_n.dat``, where n = 0-8 or n = U).
# -1 Spctrm_U.dat N/A User User
# 0 Spctrm_0.dat N/A Gueymard, 2004 (synthetic) 1366.10
# 1 Spctrm_1.dat N/A Gueymard, unpublished (synthetic) 1367.00
# 2 Spctrm_2.dat cebchkur MODTRAN, Cebula/Chance/Kurucz 1362.12
# 3 Spctrm_3.dat chkur MODTRAN, Chance/Kurucz 1359.75
# 4 Spctrm_4.dat newkur MODTRAN, New Kurucz 1368.00
# 5 Spctrm_5.dat oldkur MODTRAN, Old Kurucz 1373.16
# 6 Spctrm_6.dat thkur MODTRAN, Thuillier/Kurucz 1376.23
# 7 Spctrm_7.dat MODTRAN2 Wehrli/WRC/WMO, 1985 1367.00
# 8 Spctrm_8.dat N/A ASTM E490, 2000 (synthetic) 1366.10
ISPCTR ='0'
## Card 8: AEROS selects the aerosol model, with one of the following twelve possible choices:
# S&F_RURAL , S&F_URBAN , S&F_MARIT , S&F_TROPO , These four choices
# refer respectively to the Rural, Urban, Maritime and Tropospheric aerosol
# models (Shettle and Fenn, 1979), which are humidity dependent and common with MODTRAN.
# SRA_CONTL , SRA_URBAN , SRA_MARIT , These three choices refer
# respectively to the Continental, Urban, and Maritime aerosol models of
# the IAMAP preliminary standard atmosphere (IAMAP, 1986).
# B&D_C , B&D_C1 , These two choices refer respectively to the Braslau &
# Dave aerosol type C and C1, themselves based on Deirmendjian's Haze L model.
# DESERT_MIN , DESERT_MAX DESERT_MIN corresponds to background (normal)
# conditions in desert areas, whereas DESERT_MAX corresponds to extremely
# turbid conditions (sandstorms).
# 'USER' Card 8a is then necessary to input user-supplied aerosol information.
AEROS = 'S&F_TROPO'
# Card 8a:
# if AEROS = USER : ALPHA1, ALPHA2, OMEGL, GG These 4 variables must represent broadband average values only!
# ALPHA1: Average value of Ångström's wavelength exponent $\alpha$ for wavelengths < 500 nm
# (generally between 0.0 and 2.6).
# ALPHA2: Average value of Ångström's wavelength exponent $\alpha$ for wavelengths >= 500 nm
# (generally between 0.0 and 2.6).
# OMEGL: Aerosol single scattering albedo (generally between 0.6 and 1.0).
# GG: Aerosol asymmetry parameter (generally between 0.5 and 0.9).
ALPHA1 = ''
ALPHA2 = ''
OMEGL = ''
GG = ''
## Card 9: ITURB is an option to select the correct turbidity data input. The different options are:
# 0, to read TAU5 on Card 9a
# 1, to read BETA on Card 9a
# 2, to read BCHUEP on Card 9a
# 3, to read RANGE on Card 9a
# 4, to read VISI on Card 9a
# 5, to read TAU550 on Card 9a (new option).
ITURB = '0'
#Card 9a Turbidity value
TAU5 = '0.00' #if ITURB == 0
BETA = '' #if ITURB == 1
BCHUEP = '' #if ITURB == 2
RANGE = '' #if ITURB == 3
VISI = '' #if ITURB == 4
TAU550 = '' #if ITURB == 5
## Card 10: Far Field Albedo for backscattering
IALBDX = _material_to_code(material)
# Card 10a:
RHOX = ''
# Zonal broadband Lambertian ground albedo (for backscattering calculations); must
# be between 0 and 1.
# Card 10b: ITILT is an option for tilted surface calculations.
#Select ITILT= 0 for no such calculation,
#ITILT = 1 to initiate these calculations using information on Card 10c.
ITILT = '1'
# Card 10c:
# IALBDG is identical to IALBDX (see Card 10) except that it relates to the foreground local
# albedo seen by a tilted surface. The list of options is identical to that of IALBDG and thus
# extends from 1 to 64 (new).
# TILT: Tilt angle of the receiving surface (0 to 90 decimal deg.); e.g. 90.0 for a vertical
# plane. Use -999 for a sun-tracking surface.
# WAZIM: Surface azimuth (0 to 360 decimal deg.) counted clockwise from North; e.g., 270
# deg. for a surface facing West. Use -999 for a sun-tracking surface.
IALBDG = '-1' #Sil check if this should be -1 or 1.
TILT = '0.0'
WAZIM = '180.0'
# Card 10d:
# RHOG: Local broadband Lambertian foreground albedo (for tilted plane calculations), Card
# 10d (if IALBDG = -1); usually between 0.05 and 0.90.
RHOG = RHOG
## Card 11: Spectral range for all Calculations
WLMN = min_wvl #Min wavelength
WLMX = max_wvl #Max wavelength
SUNCOR = '1.0'
#Correction factor for irradiance is a correction factor equal to the inverse squared actual radius vector, or true Sun-Earth
# distance; e.g., SUNCOR = 1.024.
# SUNCOR varies naturally between 0.966 and 1.034, adding 3.4% to the irradiance in January
# and reducing it by 3.4% in July. It is calculated by the program if the solar position is calculated
# from date & time, i.e., if IMASS = 3 on Card 17, thus overwriting the input SUNCOR value on
# Card 11. If solar position is directly input instead (IMASS = 3), SUNCOR should be set to 1.0 if
# the average extraterrestrial irradiance (or solar constant, see SOLARC) is to be used, or to any
# other number between 0.966 and 1.034 to correct it for distance if so desired.
SOLARC = '1367.0' #Solar constant
## Card 12: Output results selection:
# IPRT is an option to select the results to be printed on Files 16 and 17. Only broadband results are
# output (to File 16) if IPRT = 0. Spectral results are added to File 16,
# and Card 12a is read, if IPRT = 1. Spectral results are rather printed to
# File 17 (in a spreadsheet-like format) if IPRT = 2. Finally, spectral
# results are printed to both File 16 and 17 if IPRT = 3. Cards
# 12b and 12c are read if IPRT = 2 or 3 (see IOTOT and IOUT).
IPRT = '2'
# Card 12a: Min, Max and Step wavelength (nm) (Output can be different than
# calculation...
WPMN = WLMN
WPMX = WLMX
INTVL = '.5'
# Card 12b: Total number of output variables:
#IOTOT = XXX #This is determined with the input of this function
# Card 12c: Variables to output selection
#(space separated numbers 1-43 according to the table below:
#IOUT = '30 31'
## Card 13: Circumsolar Calculation
# ICIRC is an option controlling the calculation of circumsolar radiation, which is useful when
# simulating any type of radiometer (spectral or broadband) equipped with a collimator.
# ICIRC = 0 bypasses these calculations.
# ICIRC = 1 indicates that a typical radiometer needs to be simulated. The geometry of its collimator
# must then defined on Card 13a.
ICIRC = '0'
#Card 13a (if ICIRC = 1): SLOPE, APERT, LIMIT
SLOPE = ''
APERT = ''
LIMIT = ''
## Card 14 Option for using the scanning/smoothing virtual filter of the postprocessor.
# The smoothed results are output on a spreadsheet-ready file, File 18 (``smarts295.scn.txt``). This postprocessor is
# activated if ISCAN = 1, not if ISCAN = 0. Card 14a is read if ISCAN = 1.
ISCAN = '0'
# Card 14a (if ISCAN = 1): IFILT, WV1, WV2, STEP, FWHM
IFILT = ''
WV1 = ''
WV2 = ''
STEP = ''
FWHM = ''
## Card 15 ILLUM: Option for illuminance, luminous efficacy and photosynthetically active radiation (PAR)
# calculations. These calculations take place if ILLUM = -1, 1, -2 or 2, and are bypassed if ILLUM = 0.
# With ILLUM = -1 or 1, illuminance calculations are based on the CIE photopic curve (or Vlambda
# curve) of 1924, as supplied in File ``VLambda.dat``. With ILLUM = -2 or 2, the same calculations are
# done but the revised CIE photopic curve of 1988 is rather used (from File ``VMLambda.dat``). Note
# that selecting ILLUM = 1 or -1 will override WLMN and WLMX (see Card 11) so that calculations
# are done between at least 360 and 830 nm.
# Moreover, if ILLUM = 1 or 2, luminous efficacy calculations are added to the illuminance
# calculations. This overrides the values of WLMN and WLMX on Card 11, and replaces them by 280
# and 4000, respectively.
ILLUM = '0'
## Card 16: Option for special broadband UV calculations. Select IUV = 0 for no special UV calculation,
# IUV = 1 to initiate such calculations. These include UVA, UVB, UV index, and
# different action weighted irradiances of interest in photobiology.
# Note that IUV = 1 overrides WLMN and WLMX so that calculations are done between at least 280
# and 400 nm. The spectral results are also printed between at least 280 and 400 nm, irrespective of
# the IPRT, WPMN, and WPMX values.
IUV = '0'
## Card 17:
# Option for solar position and air mass calculations. Set IMASS to:
# 0, if inputs are to be ZENIT, AZIM on Card 17a
# 1, if inputs are to be ELEV, AZIM on Card 17a
# 2, if input is to be AMASS on Card 17a
# 3, if inputs are to be YEAR, MONTH, DAY, HOUR, LATIT, LONGIT, ZONE on Card 17a
# 4, if inputs are to be MONTH, LATIT, DSTEP on Card 17a (for a daily calculation).
IMASS = '3'
# Card 17a: IMASS = 0 Zenith and azimuth
ZENITH = ''
AZIM = ''
# Card 17a: IMASS = 1 Elevation and Azimuth
ELEV = ''
# Card 17a: IMASS = 2 Input air mass directly
AMASS = ''
# Card 17a: IMASS = 3 Input date, time and coordinates
YEAR = YEAR
MONTH = MONTH
DAY = DAY
HOUR = HOUR
LATIT = LATIT
LONGIT = LONGIT
ZONE = ZONE
# Card 17a: IMASS = 4 Input Moth, Latitude and DSTEP
DSTEP = ''
output = _smartsAll(CMNT, ISPR, SPR, ALTIT, HEIGHT, LATIT, IATMOS, ATMOS, RH, TAIR, SEASON, TDAY, IH2O, W, IO3, IALT, AbO3, IGAS, ILOAD, ApCH2O, ApCH4, ApCO, ApHNO2, ApHNO3, ApNO,ApNO2, ApNO3, ApO3, ApSO2, qCO2, ISPCTR, AEROS, ALPHA1, ALPHA2, OMEGL, GG, ITURB, TAU5, BETA, BCHUEP, RANGE, VISI, TAU550, IALBDX, RHOX, ITILT, IALBDG,TILT, WAZIM, RHOG, WLMN, WLMX, SUNCOR, SOLARC, IPRT, WPMN, WPMX, INTVL, IOUT, ICIRC, SLOPE, APERT, LIMIT, ISCAN, IFILT, WV1, WV2, STEP, FWHM, ILLUM,IUV, IMASS, ZENITH, AZIM, ELEV, AMASS, YEAR, MONTH, DAY, HOUR, LONGIT, ZONE, DSTEP)
return output
def SMARTSSRRL(IOUT,YEAR,MONTH,DAY,HOUR, LATIT, LONGIT, ALTIT, ZONE,
               W, RH, TAIR, SEASON, TDAY, SPR, TILT, WAZIM,
               RHOG, ALPHA1, ALPHA2, OMEGL, GG, BETA, TAU5, HEIGHT='0',
               material='DryGrass', min_wvl='280', max_wvl='4000', POA='TRUE'):
    r'''
    Calculate a SMARTS spectrum from inputs available at the Solar
    Radiation Research Laboratory (SRRL).

    Data accessible by API or website at https://midcdmz.nrel.gov/

    Main Datasets:
    SRRL Baseline Measurement System
    https://midcdmz.nrel.gov/apps/sitehome.pl?site=BMS
    SRRL AOD SkyNet Level 1.1
    http://midc.nrel.gov/apps/sitehome.pl?site=AODSRRL
    SRRL GPS-based PWV
    http://midc.nrel.gov/apps/sitehome.pl?site=PWVSRRL

    Parameters
    ----------
    IOUT : string
        Space-separated list of SMARTS output variable codes (Card 12c).
    YEAR : string
        Year
    MONTH : string
        Month
    DAY : string
        Day
    HOUR : string
        Hour, in 24 hour format.
    LATIT : string
        Latitude of the location.
    LONGIT : string
        Longitude of the location.
    ALTIT : string
        Elevation of the ground surface above sea level [km].
        WARNING: Please note that TMY3 data is in meters, convert before
        using this function.
    ZONE : string
        Timezone
    W : string
        Precipitable water above the site altitude, in units of cm or
        equivalently g/cm2.
        This is, for example, SRRL_PWD['Precipitable Water [mm]']/10
        Remember to input the correct units -- SRRL database is [mm] and this
        function expects [cm].
    RH : string
        Relative Humidity.
        This is, for example, SRRL_BMS['Tower RH [%]']
    TAIR : string
        Temperature.
        This is, for example, SRRL_BMS['Tower Dry Bulb Temp [deg C]']
    SEASON : string
        Season, either 'WINTER' or 'SUMMER'. If Spring, use 'SUMMER'. If
        Autumn, use 'WINTER'.
    TDAY : string
        Average of the day's temperature.
    HEIGHT : string
        Altitude of the simulated object over the surface, in km. Usually 0.
    SPR : string
        Site pressure, in mbars.
        This is, for example, SRRL_BMS['Station Pressure [mBar]']
    BETA : string
        Ångström's turbidity coefficient, ß (i.e., aerosol optical depth at
        1000 nm). If BETA and TAU5 are both given, BETA takes priority since
        TAU5 would only be used to calculate an internal SMARTS BETA value.
        This is, for example, SRRL_AOD_SkyNet1['Beta']
    TAU5 : string
        Aerosol optical depth at 500 nm, τ5. Used only when BETA is None or
        empty (see BETA).
        This is, for example, SRRL_AOD_SkyNet1['AOD [500nm]']
    TILT : string
        Tilt angle of the receiving surface (0 to 90 decimal deg.), e.g.
        '90.0' for a vertical plane. Use '-999' for a sun-tracking surface.
    WAZIM : string
        Surface azimuth (0 to 360 decimal deg.) counted clockwise from North;
        e.g., 270 deg. for a surface facing West. Use -999 for a sun-tracking
        surface.
    RHOG : string
        Local broadband Lambertian foreground albedo (for tilted plane
        calculations), usually between 0.05 and 0.90.
        This is, for example, SRRL_BMS['Albedo (CMP11)']
    ALPHA1 : string
        Average Ångström wavelength exponent for wavelengths < 500 nm
        (generally between 0.0 and 2.6). Card 8a.
    ALPHA2 : string
        Average Ångström wavelength exponent for wavelengths >= 500 nm
        (generally between 0.0 and 2.6). Card 8a.
    OMEGL : string
        Aerosol single scattering albedo (generally 0.6 to 1.0). Card 8a.
    GG : string
        Aerosol asymmetry parameter (generally 0.5 to 0.9). Card 8a.
    material : string
        Unique identifier for ground cover. Pass None to retrieve a list of
        all valid materials.
    min_wvl : string
        Minimum wavelength to retrieve, e.g. '280.0'
    max_wvl : string
        Maximum wavelength to retrieve, e.g. '4000'
    POA : string or bool
        Whether to perform tilted-surface (plane-of-array) calculations.
        'TRUE'/True enables them (ITILT = 1); 'FALSE'/False disables them.

    Returns
    -------
    data : pandas
        Matrix with first column representing wavelength (in nm) and second
        column representing albedo of specified material at the wavelength
    '''
    # Sanity check: SMARTS requires ALTIT in km (<= 100 km); a huge value
    # usually means the caller passed meters.
    if float(ALTIT) > 800:
        print("Altitude should be in km. Are you in Mt. Everest or above or",
              "using meters? This might fail but we'll attempt to continue.")

    ## Card 1: Comment. 64 characters max; spaces are replaced downstream.
    CMNT = 'SRRL Spectra'

    ## Card 2: ISPR = 1 -> input SPR, ALTIT and HEIGHT on Card 2a.
    ISPR = '1'
    # Card 2a: SPR surface pressure (mb); ALTIT site's elevation above sea
    # level (km, <= 100); HEIGHT simulated object's height above ground (km).
    # ALTIT + HEIGHT must be <= 100 km.

    ## Card 3: IATMOS = 0 -> realistic (non-reference) atmosphere; Card 3a
    # then provides TAIR, RH, SEASON, TDAY.
    IATMOS = '0'
    # ATMOS only used if IATMOS = 1 (reference atmosphere shortname).
    ATMOS = 'USSA'
    # Card 3a (IATMOS = 0): TAIR/TDAY in deg C (-120..50), RH in %, SEASON
    # 'WINTER' or 'SUMMER' -- these arrive as function arguments.

    ## Card 4: IH2O = 0 -> input precipitable water W on Card 4a.
    IH2O = '0'
    # Card 4a: W in cm (g/cm2); must be <= 12.

    ## Card 5: IO3 = 1 -> default ozone abundance from the reference
    # atmosphere (USSA is defaulted since IATMOS = 0 here).
    IO3 = '1'
    # Card 5a inputs unused when IO3 = 1.
    IALT = ''
    AbO3 = ''

    ## Card 6: IGAS = 0 -> read ILOAD on Card 6a for tropospheric pollution.
    IGAS = '0'
    # Card 6a: ILOAD = 1 -> default PRISTINE ATMOSPHERIC conditions.
    ILOAD = '1'
    # Card 6b pollutant concentrations (ppmv) unused when ILOAD != 0.
    ApCH2O = ''
    ApCH4 = ''
    ApCO = ''
    ApHNO2 = ''
    ApHNO3 = ''
    ApNO = ''
    ApNO2 = ''
    ApNO3 = ''
    ApO3 = ''
    ApSO2 = ''

    ## Card 7: qCO2 carbon dioxide columnar volumetric concentration (ppmv).
    qCO2 = '0.0'
    # Card 7a: ISPCTR = 0 -> Gueymard, 2004 synthetic extraterrestrial
    # spectrum (Spctrm_0.dat, solar constant 1366.10).
    ISPCTR = '0'

    ## Card 8: AEROS = 'USER' -> user-supplied aerosol information on
    # Card 8a (ALPHA1, ALPHA2, OMEGL, GG -- broadband average values,
    # taken from the function arguments).
    AEROS = 'USER'

    ## Card 9: ITURB selects the turbidity input: 0 -> TAU5, 1 -> BETA.
    # BETA takes priority; fall back to TAU5 (with the matching ITURB)
    # when BETA is missing.  NOTE: the str() guard also treats an empty
    # string as "missing", so an empty Card 9a value is never written.
    if BETA is not None and str(BETA) != '':
        ITURB = '1'  # read BETA on Card 9a
        TAU5 = ''
    else:
        ITURB = '0'  # read TAU5 on Card 9a
        BETA = ''
    BCHUEP = ''   # if ITURB == 2
    RANGE = ''    # if ITURB == 3
    VISI = ''     # if ITURB == 4
    TAU550 = ''   # if ITURB == 5

    ## Card 10: Far field albedo for backscattering, from the material name.
    IALBDX = _material_to_code(material)
    # Card 10a: fixed broadband albedo RHOX, only read if IALBDX = -1.
    RHOX = ''
    # Card 10b: ITILT = 1 initiates tilted surface calculations (Card 10c).
    # Accept 'TRUE'/'FALSE' strings (any case) as well as Python booleans;
    # a plain truthiness test would wrongly treat the string 'FALSE' as
    # enabled.
    if str(POA).upper() == 'TRUE':
        ITILT = '1'
    else:
        ITILT = '0'
    # Card 10c: IALBDG = -1 -> read fixed foreground albedo RHOG on Card
    # 10d; TILT/WAZIM come from the function arguments.
    IALBDG = '-1'
    # Card 10d: RHOG local broadband Lambertian foreground albedo (argument).

    ## Card 11: Spectral range for all calculations.
    WLMN = min_wvl  # Min wavelength
    WLMX = max_wvl  # Max wavelength
    # SUNCOR is overwritten by the program when IMASS = 3 (solar position
    # from date & time), so 1.0 is a placeholder here.
    SUNCOR = '1.0'
    SOLARC = '1367.0'  # Solar constant

    ## Card 12: IPRT = 2 -> spectral results to File 17 (spreadsheet-like);
    # Cards 12a-12c are read.
    IPRT = '2'
    # Card 12a: output wavelength range and step (nm).
    WPMN = WLMN
    WPMX = WLMX
    INTVL = '.5'
    # Card 12b (IOTOT) is derived from IOUT inside _smartsAll.
    # Card 12c: IOUT comes from the function arguments.

    ## Card 13: ICIRC = 0 bypasses circumsolar calculations.
    ICIRC = '0'
    # Card 13a collimator geometry unused when ICIRC = 0.
    SLOPE = ''
    APERT = ''
    LIMIT = ''

    ## Card 14: ISCAN = 0 -> no scanning/smoothing postprocessor.
    ISCAN = '0'
    # Card 14a filter parameters unused when ISCAN = 0.
    IFILT = ''
    WV1 = ''
    WV2 = ''
    STEP = ''
    FWHM = ''

    ## Card 15: ILLUM = 0 bypasses illuminance/luminous efficacy/PAR
    # calculations.
    ILLUM = '0'

    ## Card 16: IUV = 0 -> no special broadband UV calculations.
    IUV = '0'

    ## Card 17: IMASS = 3 -> solar position from YEAR, MONTH, DAY, HOUR,
    # LATIT, LONGIT, ZONE on Card 17a (all function arguments).
    IMASS = '3'
    # Alternatives for other IMASS values, unused here.
    ZENITH = ''  # IMASS = 0
    AZIM = ''    # IMASS = 0 or 1
    ELEV = ''    # IMASS = 1
    AMASS = ''   # IMASS = 2
    DSTEP = ''   # IMASS = 4

    output = _smartsAll(CMNT, ISPR, SPR, ALTIT, HEIGHT, LATIT, IATMOS, ATMOS, RH, TAIR, SEASON, TDAY, IH2O, W, IO3, IALT, AbO3, IGAS, ILOAD, ApCH2O, ApCH4, ApCO, ApHNO2, ApHNO3, ApNO,ApNO2, ApNO3, ApO3, ApSO2, qCO2, ISPCTR, AEROS, ALPHA1, ALPHA2, OMEGL, GG, ITURB, TAU5, BETA, BCHUEP, RANGE, VISI, TAU550, IALBDX, RHOX, ITILT, IALBDG,TILT, WAZIM, RHOG, WLMN, WLMX, SUNCOR, SOLARC, IPRT, WPMN, WPMX, INTVL, IOUT, ICIRC, SLOPE, APERT, LIMIT, ISCAN, IFILT, WV1, WV2, STEP, FWHM, ILLUM,IUV, IMASS, ZENITH, AZIM, ELEV, AMASS, YEAR, MONTH, DAY, HOUR, LONGIT, ZONE, DSTEP)

    return output
def _smartsAll(CMNT, ISPR, SPR, ALTIT, HEIGHT, LATIT, IATMOS, ATMOS, RH, TAIR, SEASON, TDAY, IH2O, W, IO3, IALT, AbO3, IGAS, ILOAD, ApCH2O, ApCH4, ApCO, ApHNO2, ApHNO3, ApNO,ApNO2, ApNO3, ApO3, ApSO2, qCO2, ISPCTR, AEROS, ALPHA1, ALPHA2, OMEGL, GG, ITURB, TAU5, BETA, BCHUEP, RANGE, VISI, TAU550, IALBDX, RHOX, ITILT, IALBDG,TILT, WAZIM, RHOG, WLMN, WLMX, SUNCOR, SOLARC, IPRT, WPMN, WPMX, INTVL, IOUT, ICIRC, SLOPE, APERT, LIMIT, ISCAN, IFILT, WV1, WV2, STEP, FWHM, ILLUM,IUV, IMASS, ZENITH, AZIM, ELEV, AMASS, YEAR, MONTH, DAY, HOUR, LONGIT, ZONE, DSTEP):
r'''
#data = smartsAll(CMNT, ISPR, SPR, ALTIT, HEIGHT, LATIT, IATMOS, ATMOS, RH, TAIR, SEASON, TDAY, IH2O, W, IO3, IALT, AbO3, IGAS, ILOAD, ApCH2O, ApCH4, ApCO, ApHNO2, ApHNO3, ApNO,ApNO2, ApNO3, ApO3, ApSO2, qCO2, ISPCTR, AEROS, ALPHA1, ALPHA2, OMEGL, GG, ITURB, TAU5, BETA, BCHUEP, RANGE, VISI, TAU550, IALBDX, RHOX, ITILT, IALBDG,TILT, WAZIM, RHOG, WLMN, WLMX, SUNCOR, SOLARC, IPRT, WPMN, WPMX, INTVL, IOUT, ICIRC, SLOPE, APERT, LIMIT, ISCAN, IFILT, WV1, WV2, STEP, FWHM, ILLUM,IUV, IMASS, ZENITH, ELEV, AMASS, YEAR, MONTH, DAY, HOUR, LONGIT, ZONE, DSTEP)
# SMARTS Control Function
#
# Inputs:
# All variables are labeled according to the SMARTS 2.9.5 documentation.
# NOTICE THAT "IOTOT" is not an input variable of the function since is determined in the function
# by sizing the IOUT variable.
# Outputs:
# data, is a matrix containing the outputs with as many rows as
# wavelengths+1 (includes header) and as many columns as IOTOT+1 (column 1 is wavelengths)
#
'''
## Init
import os
import pandas as pd
import subprocess
# Check if SMARTSPATH environment variable exists and change working
# directory if it does.
original_wd = None
if 'SMARTSPATH' in os.environ:
original_wd = os.getcwd()
os.chdir(os.environ['SMARTSPATH'])
try:
os.remove('smarts295.inp.txt')
except:
pass
try:
os.remove('smarts295.out.txt')
except:
pass
try:
os.remove('smarts295.ext.txt')
except:
pass
try:
os.remove('smarts295.scn.txt')
except:
pass
f = open('smarts295.inp.txt', 'w')
IOTOT = len(IOUT.split())
## Card 1: Comment.
if len(CMNT)>62:
CMNT = CMNT[0:61]
CMNT = CMNT.replace(" ", "_")
CMNT = "'"+CMNT+"'"
print('{}' . format(CMNT), file=f)
## Card 2: Site Pressure
print('{}'.format(ISPR), file=f)
##Card 2a:
if ISPR=='0':
# case '0' #Just input pressure.
print('{}'.format(SPR), file=f)
elif ISPR=='1':
# case '1' #Input pressure, altitude and height.
print('{} {} {}'.format(SPR, ALTIT, HEIGHT), file=f)
elif ISPR=='2':
#case '2' #Input lat, alt and height
print('{} {} {}'.format(LATIT, ALTIT, HEIGHT), file=f)
else:
print("ISPR Error. ISPR should be 0, 1 or 2. Currently ISPR = ", ISPR)
## Card 3: Atmosphere model
print('{}'.format(IATMOS), file=f)
## Card 3a:
if IATMOS=='0':
#case '0' #Input TAIR, RH, SEASON, TDAY
print('{} {} {} {}'.format(TAIR, RH, SEASON, TDAY), file=f)
elif IATMOS=='1':
#case '1' #Input reference atmosphere
ATMOS = "'"+ATMOS+"'"
print('{}'.format(ATMOS), file=f)
## Card 4: Water vapor data
print('{}'.format(IH2O), file=f)
## Card 4a
if IH2O=='0':
#case '0'
print('{}'.format(W), file=f)
elif IH2O=='1':
#case '1'
#The subcard 4a is skipped
pass # print("")
## Card 5: Ozone abundance
print('{}'.format(IO3), file=f)
## Card 5a
if IO3=='0':
#case '0'
print('{} {}'.format(IALT, AbO3), file=f)
elif IO3=='1':
#case '1'
#The subcard 5a is skipped and default values are used from selected
#reference atmosphere in Card 3.
pass # print("")
## Card 6: Gaseous absorption and atmospheric pollution
print('{}'.format(IGAS), file=f)
## Card 6a: Option for tropospheric pollution
if IGAS=='0':
# case '0'
print('{}'.format(ILOAD), file=f)
## Card 6b: Concentration of Pollutants
if ILOAD=='0':
#case '0'
print('{} {} {} {} {} {} {} {} {} {} '.format(ApCH2O, ApCH4, ApCO, ApHNO2, ApHNO3, ApNO, ApNO2, ApNO3, ApO3, ApSO2), file=f)
elif ILOAD=='1':
#case '1'
#The subcard 6b is skipped and values of PRISTINE
#ATMOSPHERIC conditions are assumed
pass # print("")
elif ILOAD=='2' or ILOAD =='3' or ILOAD == '4':
#case {'2', '3', '4'}
#The subcard 6b is skipped and value of ILOAD will be used
#as LIGHT POLLUTION (ILOAD = 2), MODERATE POLLUTION (ILOAD = 3),
#and SEVERE POLLUTION (ILOAD = 4).
pass # print("")
elif IGAS=='1':
#case '1'
#The subcard 6a is skipped, and values are for default average
#profiles.
print("")
## Card 7: CO2 columnar volumetric concentration (ppmv)
print('{}'.format(qCO2), file=f)
## Card 7a: Option of proper extraterrestrial spectrum
print('{}'.format(ISPCTR), file=f)
## Card 8: Aerosol model selection out of twelve
AEROS = "'"+AEROS+"'"
print('{}'.format(AEROS), file=f)
## Card 8a: If the aerosol model is 'USER' for user supplied information
if AEROS=="'USER'":
print('{} {} {} {}'.format(ALPHA1, ALPHA2, OMEGL, GG), file=f)
else:
#The subcard 8a is skipped
pass # print("")
## Card 9: Option to select turbidity model
print('{}'.format(ITURB), file=f)
## Card 9a
if ITURB=='0':
#case '0'
print('{}'.format(TAU5), file=f)
elif ITURB=='1':
#case '1'
print('{}'.format(BETA), file=f)
elif ITURB=='2':
#case '2'
print('{}'.format(BCHUEP), file=f)
elif ITURB=='3':
#case '3'
print('{}'.format(RANGE), file=f)
elif ITURB=='4':
#case '4'
print('{}'.format(VISI), file=f)
elif ITURB=='5':
#case '5'
print('{}'.format(TAU550), file=f)
else:
print("Error: Card 9 needs to be input. Assign a valid value to ITURB = ", ITURB)
## Card 10: Select zonal albedo
print('{}'.format(IALBDX), file=f)
## Card 10a: Input fix broadband lambertial albedo RHOX
if IALBDX == '-1':
print('{}'.format(RHOX), file=f)
else:
pass # print("")
#The subcard 10a is skipped.
## Card 10b: Tilted surface calculation flag
print('{}'.format(ITILT), file=f)
## Card 10c: Tilt surface calculation parameters
if ITILT == '1':
print('{} {} {}'.format(IALBDG, TILT, WAZIM), file=f)
##Card 10d: If tilt calculations are performed and zonal albedo of
##foreground.
if IALBDG == '-1':
print('{}'.format(RHOG), file=f)
else:
pass # print("")
#The subcard is skipped
## Card 11: Spectral ranges for calculations
print('{} {} {} {}'.format(WLMN, WLMX, SUNCOR, SOLARC), file=f)
## Card 12: Output selection.
print('{}'.format(IPRT), file=f)
## Card 12a: For spectral results (IPRT >= 1)
if float(IPRT) >= 1:
print('{} {} {}'.format(WPMN, WPMX, INTVL), file=f)
## Card 12b & Card 12c:
if float(IPRT) == 2 or float(IPRT) == 3:
print('{}'.format(IOTOT), file=f)
print('{}'.format(IOUT), file=f)
else:
pass # print("")
#The subcards 12b and 12c are skipped.
else:
pass # print("")
#The subcard 12a is skipped
## Card 13: Circumsolar calculations
print('{}'.format(ICIRC), file=f)
## Card 13a: Simulated radiometer parameters
if ICIRC == '1':
print('{} {} {}'.format(SLOPE, APERT, LIMIT), file=f)
else:
pass # print("")
#The subcard 13a is skipped since no circumsolar calculations or
#simulated radiometers have been requested.
## Card 14: Scanning/Smoothing virtual filter postprocessor
print('{}'.format(ISCAN), file=f)
## Card 14a: Simulated radiometer parameters
if ISCAN == '1':
print('{} {} {} {} {}'.format(IFILT, WV1, WV2, STEP, FWHM), file=f)
else:
pass # print("")
#The subcard 14a is skipped since no postprocessing is simulated.
## Card 15: Illuminace, luminous efficacy and photosythetically active radiarion calculations
print('{}'.format(ILLUM), file=f)
## Card 16: Special broadband UV calculations
print('{}'.format(IUV), file=f)
## Card 17: Option for solar position and air mass calculations
print('{}'.format(IMASS), file=f)
## Card 17a: Solar position parameters:
if IMASS=='0':
#case '0' #Enter Zenith and Azimuth of the sun
print('{} {}'.format(ZENITH, AZIM), file=f)
elif IMASS=='1':
#case '1' #Enter Elevation and Azimuth of the sun
print('{} {}'.format(ELEV, AZIM), file=f)
elif IMASS=='2':
#case '2' #Enter air mass directly
print('{}'.format(AMASS), file=f)
elif IMASS=='3':
#case '3' #Enter date, time and latitude
print('{} {} {} {} {} {} {}'.format(YEAR, MONTH, DAY, HOUR, LATIT, LONGIT, ZONE), file=f)
elif IMASS=='4':
#case '4' #Enter date and time and step in min for a daily calculation.
print('{}, {}, {}'.format(MONTH, LATIT, DSTEP), file=f)
## Input Finalization
print('', file=f)
f.close()
## Run SMARTS 2.9.5
#dump = os.system('smarts295bat.exe')
commands = ['smarts295bat', 'smarts295bat.exe']
command = None
for cmd in commands:
if os.path.exists(cmd):
command = cmd
break
if not command:
print('Could not find SMARTS2 executable.')
data = None
else:
p = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=open("output.txt", "w"), shell=True)
p.wait()
## Read SMARTS 2.9.5 Output File
data = pd.read_csv('smarts295.ext.txt', delim_whitespace=True)
try:
os.remove('smarts295.inp.txt')
except:
pass # print("")
try:
os.remove('smarts295.out.txt')
except:
pass # print("")
try:
os.remove('smarts295.ext.txt')
except:
pass # print("")
try:
os.remove('smarts295.scn.txt')
except:
pass # print("")
# Return to original working directory.
if original_wd:
os.chdir(original_wd)
return data | # -*- coding: utf-8 -*-
"""
The ``smarts`` module contains functions for calling SMARTS: Simple Model of the
Atmoshperic Radiative Transfer of Sunshine, from NREL, developed by
Dr. <NAME>.
SMARTS software can be obtained from:
https://www.nrel.gov/grid/solar-resource/smarts.html
Users will be responsible to obtain a copy of SMARTS from NREL,
honor it’s license, and download the SMART files into their PVLib folder.
This wrapper is shared under a BSD-3-Clause License, and was
originally coded in Matlab by <NAME> (2001), updated and ported to python
by <NAME> (2019-2020). Original Matlab wrapper was made for graduate studies
at the University of Arizona, python porting by NREL.
Please read the license and Readme files for more information, proper use, citing, and copyrights.
"""
def IOUT_to_code(IOUT):
    r'''Translate a SMARTS output description into its Card 12c code number.

    Called with ``IOUT = None``, the function returns the full list of
    available output descriptions instead.  Called with one of those
    descriptions (e.g. ``IOUT = 'Global horizontal irradiance W m-2'``),
    it returns the corresponding SMARTS code as a string ('4' for this
    example).  An unrecognized description prints a notice and yields None.

    PARAMETERS
    -----------
    IOUT: String
        Can be None or a SMARTS output description

    RETURNS
    -------
    IOUT_Key: String
        Key code to SMARTS cards input.
    '''
    # Mapping of every SMARTS spectral output description to its card code.
    codes = {
        'Extraterrestrial spectrum W m-2': '1',
        'Direct normal irradiance W m-2': '2',
        'Diffuse horizontal irradiance W m-2': '3',
        'Global horizontal irradiance W m-2': '4',
        'Direct horizontal irradiance W m-2': '5',
        'Direct tilted irradiance W m-2': '6',
        'Diffuse tilted irradiance W m-2': '7',
        'Global tilted irradiance W m-2': '8',
        'Experimental direct normal irradiance (with circumsolar) W m-2': '9',
        'Experimental diffuse horizontal irradiance W m-2': '10',
        'Circumsolar irradiance within radiometer field of view W m-2': '11',
        'Global tilted photon flux per wavelength cm-2 s-1 nm-1': '12*',
        'Direct normal photon flux per wavelength cm-2 s-1 nm-1': '13',
        'Diffuse horizontal photon flux per wavelength cm-2 s-1 nm-1': '14',
        'Rayleigh transmittance': '15',
        'Ozone transmittance': '16',
        'Transmittance from all trace gases': '17',
        'Water vapor transmittance': '18',
        'Mixed gas transmittance': '19',
        'Aerosol transmittance': '20',
        'Beam radiation transmittance': '21',
        'Rayleigh optical thickness': '22',
        'Ozone optical thickness': '23',
        'Optical thickness from all trace gases': '24',
        'Water vapor optical thickness': '25',
        'Mixed gas optical thickness': '26',
        'Aerosol optical thickness': '27',
        'Aerosol single scattering albedo': '28',
        'Aerosol asymmetry factor': '29',
        'Zonal surface reflectance': '30',
        'Local ground reflectance': '31',
        'Atmospheric reflectance': '32',
        'Global foreground reflected irradiance on tilted surface W m-2': '33*',
        'Upward hemispheric ground-reflected irradiance W m-2': '34*',
        'Global horizontal photosynthetic photon flux ?mol m-2 s-1 nm-1': '35*',
        'Direct normal photosynthetic photon flux ?mol m-2 s-1 nm-1': '36*',
        'Diffuse horizontal photosynthetic photon flux ?mol m-2 s-1 nm-1': '37*',
        'Global tilted photosynthetic photon flux ?mol m-2 s-1 nm-1': '38*',
        'Spectral photonic energy eV': '39*',
        'Global horizontal photon flux per eV cm-2 s-1 eV-1': '40*',
        'Direct normal photon flux per eV cm-2 s-1 eV-1': '41*',
        'Diffuse horizontal photon flux per eV cm-2 s-1 eV-1': '42*',
        'Global tilted photon flux per eV cm-2 s-1 eV-1': '43*',
    }
    if not IOUT:
        # No description requested: report every available option.
        return list(codes)
    try:
        return codes[IOUT]
    except KeyError:
        print(f"Unknown output specified: '{IOUT}'")
        return None
def _material_to_code(material):
# Comments include Description, File name(.DAT extension), Reflection, Type*, Spectral range(um), Category*
# *KEYS: L Lambertian, NL Non-Lambertian, SP Specular, M Manmade materials, S Soils and rocks, U User defined, V Vegetation, W Water, snow, or ice
material_map = { 'UsrLamb': '0', # User-defined spectral reflectance Albedo L Userdefined
'UsrNLamb': '1', # User-defined spectral reflectance Albedo NL Userdefined
'Water': '2', # Water or calm ocean (calculated) SP 0.28 4.0 W
'Snow': '3', # Fresh dry snow Snow NL 0.3 2.48 W
'Neve': '4', # Snow on a mountain neve Neve NL 0.45 1.65 W
'Basalt': '5', # Basalt rock Basalt NL 0.3 2.48 S
'Dry_sand': '6', # Dry sand Dry_sand NL 0.32 0.99 S
'WiteSand': '7', # Sand from White Sands, NM WiteSand NL 0.5 2.48 S
'Soil': '8', # Bare soil Soil NL 0.28 4.0 S
'Dry_clay': '9', # Dry clay soil Dry_clay NL 0.5 2.48 S
'Wet_clay': '10', # Wet clay soil Wet_clay NL 0.5 2.48 S
'Alfalfa': '11', # Alfalfa Alfalfa NL 0.3 0.8 V
'Grass': '12', # Green grass Grass NL 0.3 1.19 V
'RyeGrass': '13', # Perennial rye grass RyeGrass NL 0.44 2.28 V
'Meadow1': '14', # Alpine meadow Meadow1 NL 0.4 0.85 V
'Meadow2': '15', # Lush meadow Meadow2 NL 0.4 0.9 V
'Wheat': '16', # Wheat crop Wheat NL 0.42 2.26 V
'PineTree': '17', # Ponderosa pine tree PineTree NL 0.34 2.48 V
'Concrete': '18', # Concrete slab Concrete NL 0.3 1.3 M
'BlckLoam': '19', # Black loam BlckLoam NL 0.4 4.0 S
'BrwnLoam': '20', # Brown loam BrwnLoam NL 0.4 4.0 S
'BrwnSand': '21', # Brown sand BrwnSand NL 0.4 4.0 S
'Conifers': '22', # Conifer trees Conifers NL 0.302 4.0 V
'DarkLoam': '23', # Dark loam DarkLoam NL 0.46-4.0 S
'DarkSand': '24', # Dark sand DarkSand NL 0.4 4.0 S
'Decidous': '25', # Decidous trees Decidous NL 0.302 4.0 V
'DryGrass': '26', # Dry grass (sod) DryGrass NL 0.38 4.0 V
'DuneSand': '27', # Dune sand DuneSand NL 0.4 4.0 S
'FineSnow': '28', # Fresh fine snow FineSnow NL 0.3 4.0 W
'GrnGrass': '29', # Green rye grass (sod) GrnGrass NL 0.302 4.0 V
'GrnlSnow': '30', # Granular snow GrnlSnow NL 0.3 4.0 W
'LiteClay': '31', # Light clay LiteClay NL 0.4 4.0 S
'LiteLoam': '32', # Light loam LiteLoam NL 0.431 4.0 S
'LiteSand': '33', # Light sand LiteSand NL 0.4 4.0 S
'PaleLoam': '34', # Pale loam PaleLoam NL 0.4 4.0 S
'Seawater': '35', # Sea water Seawater NL 2.079 4.0 W
'SolidIce': '36', # Solid ice SolidIce NL 0.3 4.0 W
'Dry_Soil': '37', # Dry soil Dry_Soil NL 0.28 4.0 S
'LiteSoil': '38', # Light soil LiteSoil NL 0.28 4.0 S
'RConcrte': '39', # Old runway concrete RConcrte NL 0.3 4.0 M
'RoofTile': '40', # Terracota roofing clay tile RoofTile NL 0.3 4.0 M
'RedBrick': '41', # Red construction brick RedBrick NL 0.3 4.0 M
'Asphalt': '42', # Old runway asphalt Asphalt NL 0.3 4.0 M
'TallCorn': '43', # Tall green corn TallCorn NL 0.36-1.0 V
'SndGravl': '44', # Sand & gravel SndGravl NL 0.45-1.04 S
'Fallow': '45', # Fallow field Fallow NL 0.32-1.19 S
'Birch': '46', # Birch leaves Birch NL 0.36-2.48 V
'WetSoil': '47', # Wet sandy soil WetSSoil NL 0.48-2.48 S
'Gravel': '48', # Gravel Gravel NL 0.32-1.3 S
'WetClay2': '49', # Wet red clay WetClay2 NL 0.52-2.48 S
'WetSilt': '50', # Wet silt WetSilt NL 0.52-2.48 S
'LngGrass': '51', # Dry long grass LngGrass NL 0.277-2.976 V
'LwnGrass': '52', # Lawn grass (generic bluegrass) LwnGrass NL 0.305-2.944 V
'OakTree': '53', # Deciduous oak tree leaves OakTree NL 0.35-2.5 V
'Pinion': '54', # Pinion pinetree needles Pinion NL 0.301-2.592 V
'MeltSnow': '55', # Melting snow (slush) MeltSnow NL 0.35-2.5 W
'Plywood': '56', # Plywood sheet (new, pine, 4-ply) Plywood NL 0.35-2.5 M
'WiteVinl': '57', # White vinyl plastic sheet, 0.15 mm WiteVinl NL 0.35-2.5 M
'FibrGlss': '58', # Clear fiberglass greenhouse roofing FibrGlss NL 0.35-2.5 M
'ShtMetal': '59', # Galvanized corrugated sheet metal, new ShtMetal NL 0.35-2.5 M
'Wetland': '60', # Wetland vegetation canopy, Yellowstone Wetland NL 0.409-2.478 V
'SageBrsh': '61', # Sagebrush canopy, Yellowstone SageBrsh NL 0.409-2.478 V
'FirTrees': '62', # Fir trees, Colorado FirTrees NL 0.353-2.592 V
'CSeaWatr': '63', # Coastal seawater, Pacific CSeaWatr NL 0.277-2.976 W
'OSeaWatr': '64', # Open ocean seawater, Atlantic OSeaWatr NL 0.277-2.976 W
'GrazingField':'65', # Grazing field (unfertilized) GrazingField NL 0.401-2.499 V
'Spruce': '66' # Young Norway spruce tree (needles) Spruce NL 0.39-0.845 V
}
if not material:
return material_map.keys()
if material not in material_map:
print(f"Unknown material specified: '{material}'")
return None
return material_map.get(material)
def SMARTSTimeLocation(IOUT,YEAR,MONTH,DAY,HOUR, LATIT, LONGIT, ALTIT, ZONE, material='LiteSoil', min_wvl='280', max_wvl='4000'):
    r'''
    Run a SMARTS 2.9.5 spectral simulation for a given date, time and location.

    Solar position is computed by SMARTS itself from the date, time and
    coordinates (Card 17, IMASS = 3).  The atmosphere is the USSA reference
    atmosphere with the S&F_TROPO aerosol model and zero turbidity; the
    receiving surface is horizontal (TILT = 0) with zonal and foreground
    albedo both taken from *material*.

    Parameters
    ----------
    IOUT : string
        Space-separated SMARTS output codes (Card 12c); see
        ``IOUT_to_code`` for the available outputs.
    YEAR : string
        Year of the simulation.
    MONTH : string
        Month of the simulation.
    DAY : string
        Day of the simulation.
    HOUR : string
        Hour, in 24 hour format.
    LATIT : string
        Latitude of the location (decimal degrees, positive North).
    LONGIT : string
        Longitude of the location (decimal degrees, positive East).
    ALTIT : string
        Elevation of the ground surface above sea level [km].
    ZONE : string
        Timezone of the location (hours from UTC).
    material : string, default 'LiteSoil'
        Unique identifier for ground cover; pass None to
        ``_material_to_code`` to retrieve a list of all valid materials.
    min_wvl : string, default '280'
        Minimum wavelength of the simulation [nm].
    max_wvl : string, default '4000'
        Maximum wavelength of the simulation [nm].

    Returns
    -------
    data : pandas.DataFrame
        Spectral results: first column is wavelength (in nm), the remaining
        columns are the outputs requested through *IOUT*.

    Updates:
        6/20 Creation of second function to use zenith and azimuth M. Monarch
    '''
    ## Card 1: comment string; 64 characters max, underscores instead of spaces.
    CMNT = 'ASTMG173-03 (AM1.5 Standard)'
    ## Card 2: ISPR = 1 -> supply SPR, ALTIT and HEIGHT on Card 2a.
    ISPR = '1'
    SPR = '1013.25'   # surface pressure (mbar)
    HEIGHT = '0'      # height of the simulated object above the ground (km)
    ## Card 3: IATMOS = 1 -> use the default reference atmosphere named by ATMOS.
    IATMOS = '1'
    ATMOS = 'USSA'    # U.S. Standard Atmosphere
    # Card 3a inputs for a realistic atmosphere (IATMOS = 0); unused here.
    RH = ''
    TAIR = ''
    SEASON = ''
    TDAY = ''
    ## Card 4: IH2O = 1 -> precipitable water defaulted from the reference
    ## atmosphere and site altitude.
    IH2O = '1'
    W = ''            # precipitable water (cm), only read when IH2O = 0
    ## Card 5: IO3 = 1 -> ozone abundance defaulted from the reference atmosphere.
    IO3 = '1'
    IALT = ''         # ozone altitude-correction flag, only read when IO3 = 0
    AbO3 = ''         # ozone abundance, only read when IO3 = 0
    ## Card 6: IGAS = 0 -> read ILOAD; ILOAD = 1 selects default PRISTINE
    ## ATMOSPHERIC conditions, so the ten Card 6b pollutant
    ## concentrations below stay unused.
    IGAS = '0'
    ILOAD = '1'
    ApCH2O = ''
    ApCH4 = ''
    ApCO = ''
    ApHNO2 = ''
    ApHNO3 = ''
    ApNO = ''
    ApNO2 = ''
    ApNO3 = ''
    ApO3 = ''
    ApSO2 = ''
    ## Card 7: CO2 columnar volumetric concentration (ppmv).
    qCO2 = '0.0'
    # Card 7a: ISPCTR = 0 -> Gueymard 2004 synthetic extraterrestrial spectrum.
    ISPCTR = '0'
    ## Card 8: aerosol model (Shettle & Fenn tropospheric).
    AEROS = 'S&F_TROPO'
    # Card 8a user-supplied aerosol properties (AEROS = 'USER'); unused here.
    ALPHA1 = ''
    ALPHA2 = ''
    OMEGL = ''
    GG = ''
    ## Card 9: ITURB = 0 -> turbidity given as TAU5 (aerosol optical depth
    ## at 500 nm); all other turbidity inputs unused.
    ITURB = '0'
    TAU5 = '0.00'
    BETA = ''
    BCHUEP = ''
    RANGE = ''
    VISI = ''
    TAU550 = ''
    ## Card 10: zonal (far-field) albedo selected from the material library.
    IALBDX = _material_to_code(material)
    RHOX = ''         # fixed broadband zonal albedo, only read when IALBDX = -1
    # Cards 10b/10c: tilted-surface calculation enabled on a horizontal,
    # south-facing plane whose foreground albedo matches the zonal albedo.
    ITILT = '1'
    IALBDG = IALBDX
    TILT = '0.0'
    WAZIM = '180.0'
    RHOG = ''         # fixed foreground albedo, only read when IALBDG = -1
    ## Card 11: spectral range, Sun-Earth distance correction, solar constant.
    WLMN = min_wvl
    WLMX = max_wvl
    SUNCOR = '1.0'    # overwritten by SMARTS since solar position comes from date/time (IMASS = 3)
    SOLARC = '1367.0'
    ## Card 12: IPRT = 2 -> spectral results in spreadsheet format (the wrapper
    ## reads them back from smarts295.ext.txt).
    IPRT = '2'
    # Card 12a: output wavelength window and step (nm).
    WPMN = WLMN
    WPMX = WLMX
    INTVL = '.5'
    # Cards 12b/12c: requested output variables (IOTOT is derived downstream).
    IOUT = IOUT
    ## Card 13: no circumsolar-radiometer simulation; Card 13a inputs unused.
    ICIRC = '0'
    SLOPE = ''
    APERT = ''
    LIMIT = ''
    ## Card 14: no scanning/smoothing postprocessor; Card 14a inputs unused.
    ISCAN = '0'
    IFILT = ''
    WV1 = ''
    WV2 = ''
    STEP = ''
    FWHM = ''
    ## Card 15: no illuminance / luminous efficacy / PAR calculations.
    ILLUM = '0'
    ## Card 16: no special broadband UV calculations.
    IUV = '0'
    ## Card 17: IMASS = 3 -> solar position computed from YEAR, MONTH, DAY,
    ## HOUR, LATIT, LONGIT and ZONE; the direct-position inputs stay unused.
    IMASS = '3'
    ZENITH = ''
    AZIM = ''
    ELEV = ''
    AMASS = ''
    DSTEP = ''        # daily-calculation step, only read when IMASS = 4
    output = _smartsAll(CMNT, ISPR, SPR, ALTIT, HEIGHT, LATIT, IATMOS, ATMOS, RH, TAIR, SEASON, TDAY, IH2O, W, IO3, IALT, AbO3, IGAS, ILOAD, ApCH2O, ApCH4, ApCO, ApHNO2, ApHNO3, ApNO,ApNO2, ApNO3, ApO3, ApSO2, qCO2, ISPCTR, AEROS, ALPHA1, ALPHA2, OMEGL, GG, ITURB, TAU5, BETA, BCHUEP, RANGE, VISI, TAU550, IALBDX, RHOX, ITILT, IALBDG,TILT, WAZIM, RHOG, WLMN, WLMX, SUNCOR, SOLARC, IPRT, WPMN, WPMX, INTVL, IOUT, ICIRC, SLOPE, APERT, LIMIT, ISCAN, IFILT, WV1, WV2, STEP, FWHM, ILLUM,IUV, IMASS, ZENITH, AZIM, ELEV, AMASS, YEAR, MONTH, DAY, HOUR, LONGIT, ZONE, DSTEP)
    return output
def SMARTSAirMass(IOUT, material='LiteSoil', AMASS = '1.0', min_wvl='280', max_wvl='4000'):
r'''
This function calculates the spectral albedo for a given material. If no
material is provided, the function will return a list of all valid
materials.
Parameters
----------
material : string
Unique identifier for ground cover. Pass None to retreive a list of
all valid materials.
WLMN : string
Minimum wavelength to retreive
WLMX : string
Maximum wavelength to retreive
YEAR : string
Year
MONTH : string
Month
DAY : string
Day
HOUR : string
Hour, in 24 hour format.
LATIT : string
Latitude of the location.
LONGIT : string
Longitude of the location.
ALTIT : string
elevation of the ground surface above sea level [km]
ZONE : string
Timezone
Returns
-------
data : pandas
Matrix with first column representing wavelength (in nm) and second
column representing albedo of specified material at the wavelength
Updates:
6/20 Creation of second function to use zenith and azimuth M. Monarch
'''
## Card 1: Comment. 64 characters max. In theory no spaces but yes underscores.
CMNT = 'ASTMG173-03 (AM1.5 Standard)'
## Card 2: ISPR is an option for site's pressure.
# ISPR = 0 to input SPR on Card 2a
# ISPR = 1 to input SPR, ALTIT and HEIGHT on Card 2a
# ISPR = 2 to input LATIT, ALTIT and HEIGHT on Card 2a.
ISPR = '0'
# Card 2a (if ISPR = 0): SPR
SPR = '1013.25' #mbar
# Card 2a (if ISPR = 1): SPR, ALTIT, HEIGHT
# SPR: Surface pressure (mb).
# ALTIT: Site's altitude, i.e., elevation of the ground surface above sea level (km); must be
# <= 100 km. In case of a flying object, ALTIT refers to the ground surface below it.
# HEIGHT: Height of the simulated object above the ground surface underneath (km); must be
# <= 100 km (new input).
# The total ALTIT + HEIGHT is the altitude of the simulated object above sea level and
# must be <= 100 km.
# Card 2a (if ISPR = 2): LATIT, ALTIT, HEIGHT
# LATIT: Site's latitude (decimal degrees, positive North, negative South); e.g., -17.533 for
# Papeete, Tahiti. If LATIT is unknown, enter 45.0.
# ALTIT: Site's altitude, i.e., elevation of the ground surface above sea level (km); must be
# <= 100 km. In case of a flying object, ALTIT refers to the ground surface below it.
# HEIGHT: Height of the simulated object above the ground surface underneath (km); must be
# <= 100 km (new input).
# The total ALTIT + HEIGHT is the altitude of the simulated object above sea level and
# must be <= 100 km.
ALTIT = ''
HEIGHT = ''
#LATIT = LATIT
## Card 3: IATMOS is an option to select the proper default atmosphere
# Its value can be either 0 or 1.
# Set IATMOS = 0 to define a realistic (i.e., non-reference) atmosphere. Card 3a will then have to
# provide TAIR, RH, SEASON, TDAY.
# Set IATMOS = 1 to select one of 10 default reference atmospheres (i.e., for ideal conditions). The
# shortened name of this atmosphere must be provided by ATMOS on Card 3a.
IATMOS = '1'
# Card 3a (if IATMOS = 1): ATMOS
# ATMOS is the name of the selected reference atmosphere; 4 characters max. This name can
# be one of the following:
# USSA (U.S. Standard Atmosphere) MLS (Mid-Latitude Summer)
# MLW (Mid-Latitude Winter) SAS (Sub-Arctic Summer)
# SAW (Sub-Arctic Winter) TRL (Tropical) STS (Sub-Tropical Summer)
# STW (Sub-Tropical Winter) AS (Arctic Summer) AW (Arctic Winter)
ATMOS = 'USSA'
# Card 3a(if IATMOS = 0): TAIR, RH, SEASON, TDAY.
# RH: Relative humidity at site level (%).
# SEASON: Can be either `WINTER` or `SUMMER`, for calculation of precipitable water and
# stratospheric temperature. If the true season is Fall, select WINTER. Select SUMMER if the
# true season is Spring. SEASON slightly affects the ozone effective temperature and the
# aerosol optical characteristics.
# TAIR: Atmospheric temperature at site level (°C). Acceptable range: -120 < TAIR < 50.
# TDAY: Average daily temperature at site level (°C). For a flying object (HEIGHT > 0), this
# is a reference temperature for various calculations, therefore it is important to provide a
# realistic value in this case in particular. Acceptable range: -120 < TDAY < 50.
RH = ''
TAIR = ''
SEASON = ''
TDAY = ''
## Card 4: IH2O is an option to select the correct water vapor data. All water vapor calculations involve
# precipitable water, W. The following values of IH2O are possible:
# 0, to input W on Card 4a
# 1, if W is to be defaulted to a value prescribed by the selected reference atmosphere and the site
# altitude (thus if IATMOS = 1 on Card 3). If IATMOS != 1, USSA will be defaulted for this step.
# 2, if W is to be calculated by the program from TAIR and RH (thus if IATMOS = 0 on Card 3). This
# calculation is only approximate (particularly if HEIGHT > 0) and therefore this option is not
# recommended.
# If IATMOS = 0 is selected, then IH2O should be 0 or 2; IO3 and IGAS should be 0.
# If IATMOS = 1 is selected, then IH2O, IO3, and IGAS may take any value. All user inputs
# have precedence over the defaults.
IH2O = '1'
# Card 4a: (if IH2O = 0): W is precipitable water above the site altitude
# in units of cm, or equivalently, g/cm2; it must be <= 12.
W = ''
## Card 5: IO3 is an option to select the appropriate ozone abundance input.
# IO3 = 0 to input IALT and AbO3 on Card 5a
# IO3 = 1 to use a default value for AbO3 according to the reference atmosphere selected by
# IATMOS. If IATMOS != 1, USSA will be defaulted for this calculation.
# If IATMOS = 0 is selected, then IH2O should be 0 or 2; IO3 and IGAS should be 0.
# If IATMOS = 1 is selected, then IH2O, IO3, and IGAS may take any value. All user inputs
# have precedence over the defaults.
IO3 = '1'
# Card 5a (if IO3 = 0): IALT, AbO3
# IALT is an option to select the appropriate ozone column altitude correction.
# IALT = 0 bypasses the altitude correction, so that the value of AbO3 on
# Card 5a is used as is. IALT = 1 should be rather used if a vertical
# profile correction needs to be applied (in case of an elevated site when
# the value of AbO3 is known only at sea level).
IALT = ''
AbO3 = ''
## Card 6 IGAS is an option to define the correct conditions for gaseous absorption and atmospheric pollution.
# IGAS = 0 if ILOAD on Card 6a is to be read so that extra gaseous absorption calculations
# (corresponding to the gas load in the lower troposphere due to pollution or absence thereof) can be
# initiated;
# IGAS =1 if all gas abundances (except carbon dioxide, ozone and water vapor see Cards 4a, 5a,
# and 7) are to be defaulted, using average vertical profiles.
# If IATMOS = 0 is selected, then IH2O should be 0 or 2; IO3 and IGAS should be 0.
# If IATMOS = 1 is selected, then IH2O, IO3, and IGAS may take any value. All user inputs
# have precedence over the defaults.
IGAS = '1'
# Card 6a (if IGAS = 0): ILOAD is an option for tropospheric pollution, only used if IGAS = 0.
# For ILOAD = 0, Card 6b will be read with the concentrations of 10 pollutants.
# ILOAD = 1 selects default PRISTINE ATMOSPHERIC conditions, leading to slightly
# reduced abundances of some gases compared to the initial default obtained with the selected
# reference atmosphere.
# Setting ILOAD to 2-4 will increase the concentration of the 10 pollutants to possibly
# represent typical urban conditions: LIGHT POLLUTION (ILOAD = 2), MODERATE
# POLLUTION (ILOAD = 3), and SEVERE POLLUTION (ILOAD = 4).
ILOAD = '1'
# Card 6b (if IGAS = 0 and ILOAD = 0): ApCH2O, ApCH4, ApCO, ApHNO2,
# ApHNO3, ApNO, ApNO2, ApNO3, ApO3, ApSO2
# ApCH2O: Formaldehyde volumetric concentration in the assumed 1-km deep tropospheric
# pollution layer (ppmv).
# ApCH4: Methane volumetric concentration in the assumed 1-km deep tropospheric pollution
# layer (ppmv).
# ApCO: Carbon monoxide volumetric concentration in the assumed 1-km deep tropospheric
# pollution layer (ppmv), Card 6b.
# ApHNO2: Nitrous acid volumetric concentration in the assumed 1-km deep tropospheric
# pollution layer (ppmv).
# ApHNO3: Nitric acid volumetric concentration in the assumed 1-km deep tropospheric
# pollution layer (ppmv).
# ApNO: Nitric oxide volumetric concentration in the assumed 1-km deep tropospheric
# pollution layer (ppmv).
# ApNO2: Nitrogen dioxide volumetric concentration in the assumed 1-km deep tropospheric
# pollution layer (ppmv).
# ApNO3: Nitrogen trioxide volumetric concentration in the assumed 1-km deep tropospheric
# pollution layer (ppmv).
# ApO3: Ozone volumetric concentration in the assumed 1-km deep tropospheric pollution
# layer (ppmv).
# ApSO2: Sulfur dioxide volumetric concentration in the assumed 1-km deep tropospheric
# pollution layer (ppmv).
ApCH2O = ''
ApCH4 = ''
ApCO = ''
ApHNO2 = ''
ApHNO3 = ''
ApNO = ''
ApNO2 = ''
ApNO3 = ''
ApO3 = ''
ApSO2 =''
## Card 7 qCO2 carbon dioxide columnar volumetric concentration (ppmv).
qCO2 = '0.0'
# Card 7a ISPCTR
# is an option to select the proper extraterrestrial
# spectrum. This option allows to choose one out of ten possible spectral
# files (``Spctrm_n.dat``, where n = 0-8 or n = U).
# -1 Spctrm_U.dat N/A User User
# 0 Spctrm_0.dat N/A Gueymard, 2004 (synthetic) 1366.10
# 1 Spctrm_1.dat N/A Gueymard, unpublished (synthetic) 1367.00
# 2 Spctrm_2.dat cebchkur MODTRAN, Cebula/Chance/Kurucz 1362.12
# 3 Spctrm_3.dat chkur MODTRAN, Chance/Kurucz 1359.75
# 4 Spctrm_4.dat newkur MODTRAN, New Kurucz 1368.00
# 5 Spctrm_5.dat oldkur MODTRAN, Old Kurucz 1373.16
# 6 Spctrm_6.dat thkur MODTRAN, Thuillier/Kurucz 1376.23
# 7 Spctrm_7.dat MODTRAN2 Wehrli/WRC/WMO, 1985 1367.00
# 8 Spctrm_8.dat N/A ASTM E490, 2000 (synthetic) 1366.10
ISPCTR ='0'
## Card 8: AEROS selects the aerosol model, with one of the following twelve possible choices:
# S&F_RURAL , S&F_URBAN , S&F_MARIT , S&F_TROPO , These four choices
# refer respectively to the Rural, Urban, Maritime and Tropospheric aerosol
# models (Shettle and Fenn, 1979), which are humidity dependent and common with MODTRAN.
# SRA_CONTL , SRA_URBAN , SRA_MARIT , These three choices refer
# respectively to the Continental, Urban, and Maritime aerosol models of
# the IAMAP preliminary standard atmosphere (IAMAP, 1986).
# B&D_C , B&D_C1 , These two choices refer respectively to the Braslau &
# Dave aerosol type C and C1, themselves based on Deirmendjian's Haze L model.
# DESERT_MIN , DESERT_MAX DESERT_MIN corresponds to background (normal)
# conditions in desert areas, whereas DESERT_MAX corresponds to extremely
# turbid conditions (sandstorms).
# 'USER' Card 8a is then necessary to input user-supplied aerosol information.
AEROS = 'S&F_TROPO'
# Card 8a:
# if AEROS = USER : ALPHA1, ALPHA2, OMEGL, GG These 4 variables must represent broadband average values only!
# ALPHA1: Average value of Ångström's wavelength exponent $\alpha$ for wavelengths < 500 nm
# (generally between 0.0 and 2.6).
# ALPHA2: Average value of Ångström's wavelength exponent $\alpha$ for wavelengths >= 500 nm
# (generally between 0.0 and 2.6).
# OMEGL: Aerosol single scattering albedo (generally between 0.6 and 1.0).
# GG: Aerosol asymmetry parameter (generally between 0.5 and 0.9).
ALPHA1 = ''
ALPHA2 = ''
OMEGL = ''
GG = ''
## Card 9: ITURB is an option to select the correct turbidity data input. The different options are:
# 0, to read TAU5 on Card 9a
# 1, to read BETA on Card 9a
# 2, to read BCHUEP on Card 9a
# 3, to read RANGE on Card 9a
# 4, to read VISI on Card 9a
# 5, to read TAU550 on Card 9a (new option).
ITURB = '0'
#Card 9a Turbidity value
TAU5 = '0.00' #if ITURB == 0
BETA = '' #if ITURB == 1
BCHUEP = '' #if ITURB == 2
RANGE = '' #if ITURB == 3
VISI = '' #if ITURB == 4
TAU550 = '' #if ITURB == 5
## Card 10: Far Field Albedo for backscattering
IALBDX = _material_to_code(material)
# Card 10a:
RHOX = ''
# Zonal broadband Lambertian ground albedo (for backscattering calculations); must
# be between 0 and 1.
# Card 10b: ITILT is an option for tilted surface calculations.
#Select ITILT= 0 for no such calculation,
#ITILT = 1 to initiate these calculations using information on Card 10c.
ITILT = '1'
# Card 10c:
# IALBDG is identical to IALBDX (see Card 10) except that it relates to the foreground local
# albedo seen by a tilted surface. The list of options is identical to that of IALBDG and thus
# extends from 1 to 64 (new).
# TILT: Tilt angle of the receiving surface (0 to 90 decimal deg.); e.g. 90.0 for a vertical
# plane. Use -999 for a sun-tracking surface.
# WAZIM: Surface azimuth (0 to 360 decimal deg.) counted clockwise from North; e.g., 270
# deg. for a surface facing West. Use -999 for a sun-tracking surface.
IALBDG = IALBDX
TILT = '0.0'
WAZIM = '180.0'
# Card 10d:
# RHOG: Local broadband Lambertian foreground albedo (for tilted plane calculations), Card
# 10d (if IALBDG = -1); usually between 0.05 and 0.90.
RHOG = ''
## Card 11: Spectral range for all Calculations
WLMN = min_wvl #Min wavelength
WLMX = max_wvl #Max wavelength
SUNCOR = '1.0'
#Correction factor for irradiance is a correction factor equal to the inverse squared actual radius vector, or true Sun-Earth
# distance; e.g., SUNCOR = 1.024.
# SUNCOR varies naturally between 0.966 and 1.034, adding 3.4% to the irradiance in January
# and reducing it by 3.4% in July. It is calculated by the program if the solar position is calculated
# from date & time, i.e., if IMASS = 3 on Card 17, thus overwriting the input SUNCOR value on
# Card 11. If solar position is directly input instead (IMASS = 3), SUNCOR should be set to 1.0 if
# the average extraterrestrial irradiance (or solar constant, see SOLARC) is to be used, or to any
# other number between 0.966 and 1.034 to correct it for distance if so desired.
SOLARC = '1367.0' #Solar constant
## Card 12: Output results selection:
# IPRT is an option to select the results to be printed on Files 16 and 17. Only broadband results are
# output (to File 16) if IPRT = 0. Spectral results are added to File 16,
# and Card 12a is read, if IPRT = 1. Spectral results are rather printed to
# File 17 (in a spreadsheet-like format) if IPRT = 2. Finally, spectral
# results are printed to both File 16 and 17 if IPRT = 3. Cards
# 12b and 12c are read if IPRT = 2 or 3 (see IOTOT and IOUT).
IPRT = '2'
# Card 12a: Min, Max and Step wavelength (nm) (Output can be different than
# calculation...
WPMN = WLMN
WPMX = WLMX
INTVL = '.5'
# Card 12b: Total number of output variables:
#IOTOT = XXX #This is determined with the input of this function
# Card 12c: Variables to output selection
#(space separated numbers 1-43 according to the table below:
IOUT = IOUT
## Card 13: Circumsolar Calculation
# ICIRC is an option controlling the calculation of circumsolar radiation, which is useful when
# simulating any type of radiometer (spectral or broadband) equipped with a collimator.
# ICIRC = 0 bypasses these calculations.
# ICIRC = 1 indicates that a typical radiometer needs to be simulated. The geometry of its collimator
# must then defined on Card 13a.
ICIRC = '0'
#Card 13a (if ICIRC = 1): SLOPE, APERT, LIMIT
SLOPE = ''
APERT = ''
LIMIT = ''
## Card 14 Option for using the scanning/smoothing virtual filter of the postprocessor.
# The smoothed results are output on a spreadsheet-ready file, File 18 (``smarts295.scn.txt``). This postprocessor is
# activated if ISCAN = 1, not if ISCAN = 0. Card 14a is read if ISCAN = 1.
ISCAN = '0'
# Card 14a (if ISCAN = 1): IFILT, WV1, WV2, STEP, FWHM
IFILT = ''
WV1 = ''
WV2 = ''
STEP = ''
FWHM = ''
## Card 15 ILLUM: Option for illuminance, luminous efficacy and photosynthetically active radiation (PAR)
# calculations. These calculations take place if ILLUM = -1, 1, -2 or 2, and are bypassed if ILLUM = 0.
# With ILLUM = -1 or 1, illuminance calculations are based on the CIE photopic curve (or Vlambda
# curve) of 1924, as supplied in File ``VLambda.dat``. With ILLUM = -2 or 2, the same calculations are
# done but the revised CIE photopic curve of 1988 is rather used (from File ``VMLambda.dat``). Note
# that selecting ILLUM = 1 or -1 will override WLMN and WLMX (see Card 11) so that calculations
# are done between at least 360 and 830 nm.
# Moreover, if ILLUM = 1 or 2, luminous efficacy calculations are added to the illuminance
# calculations. This overrides the values of WLMN and WLMX on Card 11, and replaces them by 280
# and 4000, respectively.
ILLUM = '0'
## Card 16: Option for special broadband UV calculations. Select IUV = 0 for no special UV calculation,
# IUV = 1 to initiate such calculations. These include UVA, UVB, UV index, and
# different action weighted irradiances of interest in photobiology.
# Note that IUV = 1 overrides WLMN and WLMX so that calculations are done between at least 280
# and 400 nm. The spectral results are also printed between at least 280 and 400 nm, irrespective of
# the IPRT, WPMN, and WPMX values.
IUV = '0'
## Card 17:
# Option for solar position and air mass calculations. Set IMASS to:
# 0, if inputs are to be ZENIT, AZIM on Card 17a
# 1, if inputs are to be ELEV, AZIM on Card 17a
# 2, if input is to be AMASS on Card 17a
# 3, if inputs are to be YEAR, MONTH, DAY, HOUR, LATIT, LONGIT, ZONE on Card 17a
# 4, if inputs are to be MONTH, LATIT, DSTEP on Card 17a (for a daily calculation).
IMASS = '2'
# Card 17a: IMASS = 0 Zenith and azimuth
ZENITH = ''
AZIM = ''
# Card 17a: IMASS = 1 Elevation and Azimuth
ELEV = ''
# Card 17a: IMASS = 2 Input air mass directly
AMASS = AMASS
# Card 17a: IMASS = 3 Input date, time and coordinates
YEAR = ''
MONTH = ''
DAY = ''
HOUR = ''
LATIT = ''
LONGIT = ''
ZONE = ''
# Card 17a: IMASS = 4 Input Moth, Latitude and DSTEP
DSTEP = ''
output = _smartsAll(CMNT, ISPR, SPR, ALTIT, HEIGHT, LATIT, IATMOS, ATMOS, RH, TAIR, SEASON, TDAY, IH2O, W, IO3, IALT, AbO3, IGAS, ILOAD, ApCH2O, ApCH4, ApCO, ApHNO2, ApHNO3, ApNO,ApNO2, ApNO3, ApO3, ApSO2, qCO2, ISPCTR, AEROS, ALPHA1, ALPHA2, OMEGL, GG, ITURB, TAU5, BETA, BCHUEP, RANGE, VISI, TAU550, IALBDX, RHOX, ITILT, IALBDG,TILT, WAZIM, RHOG, WLMN, WLMX, SUNCOR, SOLARC, IPRT, WPMN, WPMX, INTVL, IOUT, ICIRC, SLOPE, APERT, LIMIT, ISCAN, IFILT, WV1, WV2, STEP, FWHM, ILLUM,IUV, IMASS, ZENITH, AZIM, ELEV, AMASS, YEAR, MONTH, DAY, HOUR, LONGIT, ZONE, DSTEP)
return output
def SMARTSSpectraZenAzm(IOUT, ZENITH, AZIM, material='LiteSoil', SPR='1013.25', min_wvl='280', max_wvl='4000'):
    r'''
    Run a SMARTS simulation for a given sun position (zenith / azimuth) and
    ground material, returning the spectral outputs selected by ``IOUT``.

    The function fills in the full SMARTS input-card set (Cards 1-17) with
    fixed defaults, uses IMASS = 0 so the solar position is taken directly
    from ``ZENITH`` and ``AZIM``, and delegates execution to ``_smartsAll``.

    Parameters
    ----------
    IOUT : string
        Space-separated SMARTS output-variable codes for Card 12c
        (numbers 1-43), e.g. ``'30 31'``.
    ZENITH : string
        Zenith angle of the sun [decimal degrees] (Card 17a, IMASS = 0).
    AZIM : string
        Azimuth of the sun [decimal degrees] (Card 17a, IMASS = 0).
    material : string
        Unique identifier for ground cover (Card 10); mapped to a SMARTS
        albedo code via ``_material_to_code``. Pass None to retrieve a list
        of all valid materials. Default: 'LiteSoil'.
    SPR : string
        Site pressure [mbar] (Card 2a). Default: SPR = '1013.25'.
    min_wvl : string
        Minimum wavelength for the calculation [nm]. Default: '280'.
    max_wvl : string
        Maximum wavelength for the calculation [nm]. Default: '4000'.

    Returns
    -------
    data : pandas
        Output of ``_smartsAll``; the first column holds wavelength (in nm)
        and the remaining columns hold the spectral quantities requested
        through ``IOUT``.

    Updates:
        6/20 Creation of second function to use zenith and azimuth M. Monarch
    '''
    ## Card 1: Comment. 64 characters max. In theory no spaces but yes underscores.
    CMNT = 'ASTMG173-03 (AM1.5 Standard)'
    ## Card 2: ISPR is an option for site's pressure.
    # ISPR = 0 to input SPR on Card 2a
    # ISPR = 1 to input SPR, ALTIT and HEIGHT on Card 2a
    # ISPR = 2 to input LATIT, ALTIT and HEIGHT on Card 2a.
    ISPR = '0'
    # Card 2a (if ISPR = 0): SPR
    SPR = SPR #mbar (taken from the function argument)
    # Card 2a (if ISPR = 1): SPR, ALTIT, HEIGHT
    # SPR: Surface pressure (mb).
    # ALTIT: Site's altitude, i.e., elevation of the ground surface above sea level (km); must be
    # <= 100 km. In case of a flying object, ALTIT refers to the ground surface below it.
    # HEIGHT: Height of the simulated object above the ground surface underneath (km); must be
    # <= 100 km (new input).
    # The total ALTIT + HEIGHT is the altitude of the simulated object above sea level and
    # must be <= 100 km.
    # Card 2a (if ISPR = 2): LATIT, ALTIT, HEIGHT
    # LATIT: Site's latitude (decimal degrees, positive North, negative South); e.g., -17.533 for
    # Papeete, Tahiti. If LATIT is unknown, enter 45.0.
    # ALTIT: Site's altitude, i.e., elevation of the ground surface above sea level (km); must be
    # <= 100 km. In case of a flying object, ALTIT refers to the ground surface below it.
    # HEIGHT: Height of the simulated object above the ground surface underneath (km); must be
    # <= 100 km (new input).
    # The total ALTIT + HEIGHT is the altitude of the simulated object above sea level and
    # must be <= 100 km.
    ALTIT = ''
    HEIGHT = ''
    #LATIT = LATIT
    ## Card 3: IATMOS is an option to select the proper default atmosphere
    # Its value can be either 0 or 1.
    # Set IATMOS = 0 to define a realistic (i.e., non-reference) atmosphere. Card 3a will then have to
    # provide TAIR, RH, SEASON, TDAY.
    # Set IATMOS = 1 to select one of 10 default reference atmospheres (i.e., for ideal conditions). The
    # shortened name of this atmosphere must be provided by ATMOS on Card 3a.
    IATMOS = '1'
    # Card 3a (if IATMOS = 1): ATMOS
    # ATMOS is the name of the selected reference atmosphere; 4 characters max. This name can
    # be one of the following:
    # USSA (U.S. Standard Atmosphere) MLS (Mid-Latitude Summer)
    # MLW (Mid-Latitude Winter) SAS (Sub-Arctic Summer)
    # SAW (Sub-Arctic Winter) TRL (Tropical) STS (Sub-Tropical Summer)
    # STW (Sub-Tropical Winter) AS (Arctic Summer) AW (Arctic Winter)
    ATMOS = 'USSA'
    # Card 3a(if IATMOS = 0): TAIR, RH, SEASON, TDAY.
    # RH: Relative humidity at site level (%).
    # SEASON: Can be either `WINTER` or `SUMMER`, for calculation of precipitable water and
    # stratospheric temperature. If the true season is Fall, select WINTER. Select SUMMER if the
    # true season is Spring. SEASON slightly affects the ozone effective temperature and the
    # aerosol optical characteristics.
    # TAIR: Atmospheric temperature at site level (°C). Acceptable range: -120 < TAIR < 50.
    # TDAY: Average daily temperature at site level (°C). For a flying object (HEIGHT > 0), this
    # is a reference temperature for various calculations, therefore it is important to provide a
    # realistic value in this case in particular. Acceptable range: -120 < TDAY < 50.
    RH = ''
    TAIR = ''
    SEASON = ''
    TDAY = ''
    ## Card 4: IH2O is an option to select the correct water vapor data. All water vapor calculations involve
    # precipitable water, W. The following values of IH2O are possible:
    # 0, to input W on Card 4a
    # 1, if W is to be defaulted to a value prescribed by the selected reference atmosphere and the site
    # altitude (thus if IATMOS = 1 on Card 3). If IATMOS != 1, USSA will be defaulted for this step.
    # 2, if W is to be calculated by the program from TAIR and RH (thus if IATMOS = 0 on Card 3). This
    # calculation is only approximate (particularly if HEIGHT > 0) and therefore this option is not
    # recommended.
    # If IATMOS = 0 is selected, then IH2O should be 0 or 2; IO3 and IGAS should be 0.
    # If IATMOS = 1 is selected, then IH2O, IO3, and IGAS may take any value. All user inputs
    # have precedence over the defaults.
    IH2O = '1'
    # Card 4a: (if IH2O = 0): W is precipitable water above the site altitude
    # in units of cm, or equivalently, g/cm2; it must be <= 12.
    W = ''
    ## Card 5: IO3 is an option to select the appropriate ozone abundance input.
    # IO3 = 0 to input IALT and AbO3 on Card 5a
    # IO3 = 1 to use a default value for AbO3 according to the reference atmosphere selected by
    # IATMOS. If IATMOS != 1, USSA will be defaulted for this calculation.
    # If IATMOS = 0 is selected, then IH2O should be 0 or 2; IO3 and IGAS should be 0.
    # If IATMOS = 1 is selected, then IH2O, IO3, and IGAS may take any value. All user inputs
    # have precedence over the defaults.
    IO3 = '1'
    # Card 5a (if IO3 = 0): IALT, AbO3
    # IALT is an option to select the appropriate ozone column altitude correction.
    # IALT = 0 bypasses the altitude correction, so that the value of AbO3 on
    # Card 5a is used as is. IALT = 1 should be rather used if a vertical
    # profile correction needs to be applied (in case of an elevated site when
    # the value of AbO3 is known only at sea level).
    IALT = ''
    AbO3 = ''
    ## Card 6 IGAS is an option to define the correct conditions for gaseous absorption and atmospheric pollution.
    # IGAS = 0 if ILOAD on Card 6a is to be read so that extra gaseous absorption calculations
    # (corresponding to the gas load in the lower troposphere due to pollution or absence thereof) can be
    # initiated;
    # IGAS =1 if all gas abundances (except carbon dioxide, ozone and water vapor see Cards 4a, 5a,
    # and 7) are to be defaulted, using average vertical profiles.
    # If IATMOS = 0 is selected, then IH2O should be 0 or 2; IO3 and IGAS should be 0.
    # If IATMOS = 1 is selected, then IH2O, IO3, and IGAS may take any value. All user inputs
    # have precedence over the defaults.
    # NOTE(review): this function uses IGAS = '0' (so Card 6a / ILOAD is read)
    # whereas the sibling SMARTSSpectra-style function uses IGAS = '1' —
    # confirm this difference is intentional.
    IGAS = '0'
    # Card 6a (if IGAS = 0): ILOAD is an option for tropospheric pollution, only used if IGAS = 0.
    # For ILOAD = 0, Card 6b will be read with the concentrations of 10 pollutants.
    # ILOAD = 1 selects default PRISTINE ATMOSPHERIC conditions, leading to slightly
    # reduced abundances of some gases compared to the initial default obtained with the selected
    # reference atmosphere.
    # Setting ILOAD to 2-4 will increase the concentration of the 10 pollutants to possibly
    # represent typical urban conditions: LIGHT POLLUTION (ILOAD = 2), MODERATE
    # POLLUTION (ILOAD = 3), and SEVERE POLLUTION (ILOAD = 4).
    ILOAD = '1'
    # Card 6b (if IGAS = 0 and ILOAD = 0): ApCH2O, ApCH4, ApCO, ApHNO2,
    # ApHNO3, ApNO, ApNO2, ApNO3, ApO3, ApSO2
    # ApCH2O: Formaldehyde volumetric concentration in the assumed 1-km deep tropospheric
    # pollution layer (ppmv).
    # ApCH4: Methane volumetric concentration in the assumed 1-km deep tropospheric pollution
    # layer (ppmv).
    # ApCO: Carbon monoxide volumetric concentration in the assumed 1-km deep tropospheric
    # pollution layer (ppmv), Card 6b.
    # ApHNO2: Nitrous acid volumetric concentration in the assumed 1-km deep tropospheric
    # pollution layer (ppmv).
    # ApHNO3: Nitric acid volumetric concentration in the assumed 1-km deep tropospheric
    # pollution layer (ppmv).
    # ApNO: Nitric oxide volumetric concentration in the assumed 1-km deep tropospheric
    # pollution layer (ppmv).
    # ApNO2: Nitrogen dioxide volumetric concentration in the assumed 1-km deep tropospheric
    # pollution layer (ppmv).
    # ApNO3: Nitrogen trioxide volumetric concentration in the assumed 1-km deep tropospheric
    # pollution layer (ppmv).
    # ApO3: Ozone volumetric concentration in the assumed 1-km deep tropospheric pollution
    # layer (ppmv).
    # ApSO2: Sulfur dioxide volumetric concentration in the assumed 1-km deep tropospheric
    # pollution layer (ppmv).
    ApCH2O = ''
    ApCH4 = ''
    ApCO = ''
    ApHNO2 = ''
    ApHNO3 = ''
    ApNO = ''
    ApNO2 = ''
    ApNO3 = ''
    ApO3 = ''
    ApSO2 =''
    ## Card 7 qCO2 carbon dioxide columnar volumetric concentration (ppmv).
    qCO2 = '0.0'
    # Card 7a ISPCTR
    # is an option to select the proper extraterrestrial
    # spectrum. This option allows to choose one out of ten possible spectral
    # files (``Spctrm_n.dat``, where n = 0-8 or n = U).
    # -1 Spctrm_U.dat N/A User User
    # 0 Spctrm_0.dat N/A Gueymard, 2004 (synthetic) 1366.10
    # 1 Spctrm_1.dat N/A Gueymard, unpublished (synthetic) 1367.00
    # 2 Spctrm_2.dat cebchkur MODTRAN, Cebula/Chance/Kurucz 1362.12
    # 3 Spctrm_3.dat chkur MODTRAN, Chance/Kurucz 1359.75
    # 4 Spctrm_4.dat newkur MODTRAN, New Kurucz 1368.00
    # 5 Spctrm_5.dat oldkur MODTRAN, Old Kurucz 1373.16
    # 6 Spctrm_6.dat thkur MODTRAN, Thuillier/Kurucz 1376.23
    # 7 Spctrm_7.dat MODTRAN2 Wehrli/WRC/WMO, 1985 1367.00
    # 8 Spctrm_8.dat N/A ASTM E490, 2000 (synthetic) 1366.10
    ISPCTR ='0'
    ## Card 8: AEROS selects the aerosol model, with one of the following twelve possible choices:
    # S&F_RURAL , S&F_URBAN , S&F_MARIT , S&F_TROPO , These four choices
    # refer respectively to the Rural, Urban, Maritime and Tropospheric aerosol
    # models (Shettle and Fenn, 1979), which are humidity dependent and common with MODTRAN.
    # SRA_CONTL , SRA_URBAN , SRA_MARIT , These three choices refer
    # respectively to the Continental, Urban, and Maritime aerosol models of
    # the IAMAP preliminary standard atmosphere (IAMAP, 1986).
    # B&D_C , B&D_C1 , These two choices refer respectively to the Braslau &
    # Dave aerosol type C and C1, themselves based on Deirmendjian's Haze L model.
    # DESERT_MIN , DESERT_MAX DESERT_MIN corresponds to background (normal)
    # conditions in desert areas, whereas DESERT_MAX corresponds to extremely
    # turbid conditions (sandstorms).
    # 'USER' Card 8a is then necessary to input user-supplied aerosol information.
    AEROS = 'S&F_TROPO'
    # Card 8a:
    # if AEROS = USER : ALPHA1, ALPHA2, OMEGL, GG These 4 variables must represent broadband average values only!
    # ALPHA1: Average value of Ångström's wavelength exponent $\alpha$ for wavelengths < 500 nm
    # (generally between 0.0 and 2.6).
    # ALPHA2: Average value of Ångström's wavelength exponent $\alpha$ for wavelengths >= 500 nm
    # (generally between 0.0 and 2.6).
    # OMEGL: Aerosol single scattering albedo (generally between 0.6 and 1.0).
    # GG: Aerosol asymmetry parameter (generally between 0.5 and 0.9).
    ALPHA1 = ''
    ALPHA2 = ''
    OMEGL = ''
    GG = ''
    ## Card 9: ITURB is an option to select the correct turbidity data input. The different options are:
    # 0, to read TAU5 on Card 9a
    # 1, to read BETA on Card 9a
    # 2, to read BCHUEP on Card 9a
    # 3, to read RANGE on Card 9a
    # 4, to read VISI on Card 9a
    # 5, to read TAU550 on Card 9a (new option).
    ITURB = '0'
    #Card 9a Turbidity value
    TAU5 = '0.00' #if ITURB == 0
    BETA = '' #if ITURB == 1
    BCHUEP = '' #if ITURB == 2
    RANGE = '' #if ITURB == 3
    VISI = '' #if ITURB == 4
    TAU550 = '' #if ITURB == 5
    ## Card 10: Far Field Albedo for backscattering
    IALBDX = _material_to_code(material)
    # Card 10a:
    RHOX = ''
    # Zonal broadband Lambertian ground albedo (for backscattering calculations); must
    # be between 0 and 1.
    # Card 10b: ITILT is an option for tilted surface calculations.
    #Select ITILT= 0 for no such calculation,
    #ITILT = 1 to initiate these calculations using information on Card 10c.
    ITILT = '1'
    # Card 10c:
    # IALBDG is identical to IALBDX (see Card 10) except that it relates to the foreground local
    # albedo seen by a tilted surface. The list of options is identical to that of IALBDG and thus
    # extends from 1 to 64 (new).
    # TILT: Tilt angle of the receiving surface (0 to 90 decimal deg.); e.g. 90.0 for a vertical
    # plane. Use -999 for a sun-tracking surface.
    # WAZIM: Surface azimuth (0 to 360 decimal deg.) counted clockwise from North; e.g., 270
    # deg. for a surface facing West. Use -999 for a sun-tracking surface.
    IALBDG = IALBDX
    TILT = '0.0'
    WAZIM = '180.0'
    # Card 10d:
    # RHOG: Local broadband Lambertian foreground albedo (for tilted plane calculations), Card
    # 10d (if IALBDG = -1); usually between 0.05 and 0.90.
    RHOG = ''
    ## Card 11: Spectral range for all Calculations
    WLMN = min_wvl #Min wavelength
    WLMX = max_wvl #Max wavelength
    SUNCOR = '1.0'
    #Correction factor for irradiance is a correction factor equal to the inverse squared actual radius vector, or true Sun-Earth
    # distance; e.g., SUNCOR = 1.024.
    # SUNCOR varies naturally between 0.966 and 1.034, adding 3.4% to the irradiance in January
    # and reducing it by 3.4% in July. It is calculated by the program if the solar position is calculated
    # from date & time, i.e., if IMASS = 3 on Card 17, thus overwriting the input SUNCOR value on
    # Card 11. If solar position is directly input instead (IMASS = 3), SUNCOR should be set to 1.0 if
    # the average extraterrestrial irradiance (or solar constant, see SOLARC) is to be used, or to any
    # other number between 0.966 and 1.034 to correct it for distance if so desired.
    SOLARC = '1367.0' #Solar constant
    ## Card 12: Output results selection:
    # IPRT is an option to select the results to be printed on Files 16 and 17. Only broadband results are
    # output (to File 16) if IPRT = 0. Spectral results are added to File 16,
    # and Card 12a is read, if IPRT = 1. Spectral results are rather printed to
    # File 17 (in a spreadsheet-like format) if IPRT = 2. Finally, spectral
    # results are printed to both File 16 and 17 if IPRT = 3. Cards
    # 12b and 12c are read if IPRT = 2 or 3 (see IOTOT and IOUT).
    IPRT = '2'
    # Card 12a: Min, Max and Step wavelength (nm) (Output can be different than
    # calculation...
    WPMN = WLMN
    WPMX = WLMX
    INTVL = '.5'
    # Card 12b: Total number of output variables:
    #IOTOT = XXX #This is determined with the input of this function
    # Card 12c: Variables to output selection
    #(space separated numbers 1-43 according to the table below:
    #IOUT = '30 31'
    # (IOUT is taken directly from the function argument)
    ## Card 13: Circumsolar Calculation
    # ICIRC is an option controlling the calculation of circumsolar radiation, which is useful when
    # simulating any type of radiometer (spectral or broadband) equipped with a collimator.
    # ICIRC = 0 bypasses these calculations.
    # ICIRC = 1 indicates that a typical radiometer needs to be simulated. The geometry of its collimator
    # must then be defined on Card 13a.
    ICIRC = '0'
    #Card 13a (if ICIRC = 1): SLOPE, APERT, LIMIT
    SLOPE = ''
    APERT = ''
    LIMIT = ''
    ## Card 14 Option for using the scanning/smoothing virtual filter of the postprocessor.
    # The smoothed results are output on a spreadsheet-ready file, File 18 (``smarts295.scn.txt``). This postprocessor is
    # activated if ISCAN = 1, not if ISCAN = 0. Card 14a is read if ISCAN = 1.
    ISCAN = '0'
    # Card 14a (if ISCAN = 1): IFILT, WV1, WV2, STEP, FWHM
    IFILT = ''
    WV1 = ''
    WV2 = ''
    STEP = ''
    FWHM = ''
    ## Card 15 ILLUM: Option for illuminance, luminous efficacy and photosynthetically active radiation (PAR)
    # calculations. These calculations take place if ILLUM = -1, 1, -2 or 2, and are bypassed if ILLUM = 0.
    # With ILLUM = -1 or 1, illuminance calculations are based on the CIE photopic curve (or Vlambda
    # curve) of 1924, as supplied in File ``VLambda.dat``. With ILLUM = -2 or 2, the same calculations are
    # done but the revised CIE photopic curve of 1988 is rather used (from File ``VMLambda.dat``). Note
    # that selecting ILLUM = 1 or -1 will override WLMN and WLMX (see Card 11) so that calculations
    # are done between at least 360 and 830 nm.
    # Moreover, if ILLUM = 1 or 2, luminous efficacy calculations are added to the illuminance
    # calculations. This overrides the values of WLMN and WLMX on Card 11, and replaces them by 280
    # and 4000, respectively.
    ILLUM = '0'
    ## Card 16: Option for special broadband UV calculations. Select IUV = 0 for no special UV calculation,
    # IUV = 1 to initiate such calculations. These include UVA, UVB, UV index, and
    # different action weighted irradiances of interest in photobiology.
    # Note that IUV = 1 overrides WLMN and WLMX so that calculations are done between at least 280
    # and 400 nm. The spectral results are also printed between at least 280 and 400 nm, irrespective of
    # the IPRT, WPMN, and WPMX values.
    IUV = '0'
    ## Card 17:
    # Option for solar position and air mass calculations. Set IMASS to:
    # 0, if inputs are to be ZENIT, AZIM on Card 17a
    # 1, if inputs are to be ELEV, AZIM on Card 17a
    # 2, if input is to be AMASS on Card 17a
    # 3, if inputs are to be YEAR, MONTH, DAY, HOUR, LATIT, LONGIT, ZONE on Card 17a
    # 4, if inputs are to be MONTH, LATIT, DSTEP on Card 17a (for a daily calculation).
    IMASS = '0'
    # Card 17a: IMASS = 0 Zenith and azimuth
    # (ZENITH and AZIM are taken directly from the function arguments)
    #ZENITH = ''
    #AZIM = ''
    # Card 17a: IMASS = 1 Elevation and Azimuth
    ELEV = ''
    # Card 17a: IMASS = 2 Input air mass directly
    AMASS = ''
    # Card 17a: IMASS = 3 Input date, time and coordinates
    YEAR = ''
    MONTH = ''
    DAY = ''
    HOUR = ''
    LATIT = ''
    LONGIT = ''
    ZONE = ''
    # Card 17a: IMASS = 4 Input Month, Latitude and DSTEP
    DSTEP = ''
    # Hand every card value to the SMARTS driver; argument order must match
    # _smartsAll's positional signature exactly.
    output = _smartsAll(CMNT, ISPR, SPR, ALTIT, HEIGHT, LATIT, IATMOS, ATMOS, RH, TAIR, SEASON, TDAY, IH2O, W, IO3, IALT, AbO3, IGAS, ILOAD, ApCH2O, ApCH4, ApCO, ApHNO2, ApHNO3, ApNO,ApNO2, ApNO3, ApO3, ApSO2, qCO2, ISPCTR, AEROS, ALPHA1, ALPHA2, OMEGL, GG, ITURB, TAU5, BETA, BCHUEP, RANGE, VISI, TAU550, IALBDX, RHOX, ITILT, IALBDG,TILT, WAZIM, RHOG, WLMN, WLMX, SUNCOR, SOLARC, IPRT, WPMN, WPMX, INTVL, IOUT, ICIRC, SLOPE, APERT, LIMIT, ISCAN, IFILT, WV1, WV2, STEP, FWHM, ILLUM,IUV, IMASS, ZENITH, AZIM, ELEV, AMASS, YEAR, MONTH, DAY, HOUR, LONGIT, ZONE, DSTEP)
    return output
def SMARTSTMY3(IOUT,YEAR,MONTH,DAY,HOUR, LATIT, LONGIT, ALTIT, ZONE, RHOG,
               W, RH, TAIR, SEASON, TDAY, SPR, HEIGHT='0',
               material='DryGrass', min_wvl='280', max_wvl='4000'):
    r'''
    Calculate a SMARTS spectrum from TMY3-style meteorological inputs.

    Builds the SMARTS input cards for a site and time described by typical
    TMY3 weather fields (date, location, pressure, temperature, humidity,
    precipitable water) and runs the model through ``_smartsAll``. Solar
    position is computed by SMARTS from the date/time/coordinates
    (IMASS = 3).

    Parameters
    ----------
    IOUT : string
        Space-separated SMARTS output-variable codes (Card 12c).
    YEAR : string
        Year
    MONTH : string
        Month
    DAY : string
        Day
    HOUR : string
        Hour, in 24 hour format.
    LATIT : string
        Latitude of the location (decimal degrees, positive North).
    LONGIT : string
        Longitude of the location (decimal degrees).
    ALTIT : string
        Elevation of the ground surface above sea level [km].
        WARNING: Please note that TMY3 data is in meters, convert before
        using this function.
    ZONE : string
        Timezone
    RHOG : string
        Local broadband Lambertian foreground albedo (for tilted plane
        calculations); usually between 0.05 and 0.90.
    W : string
        Precipitable water above the site altitude, in units of cm or
        equivalently g/cm2. If 0 or greater than 12, SMARTS is instead
        told to estimate W from TAIR and RH (IH2O = 2).
    RH : string
        Relative Humidity at site level (%).
    TAIR : string
        Atmospheric temperature at site level (deg C).
    SEASON : string
        Season, either 'WINTER' or 'SUMMER'. If Spring, use 'SUMMER'. If
        Autumn, use 'WINTER'.
    TDAY : string
        Average of the day's temperature (deg C).
    SPR : string
        Site pressure, in mbars.
    HEIGHT : string
        Altitude of the simulated object over the surface, in km.
    material : string
        Unique identifier for ground cover (selects the Card 10 spectral
        albedo file). Pass None to retrieve a list of all valid materials.
    min_wvl : string
        Minimum wavelength to compute/report, in nm.
    max_wvl : string
        Maximum wavelength to compute/report, in nm.

    Returns
    -------
    data : pandas
        Matrix with first column representing wavelength (in nm) and the
        remaining columns the spectral outputs requested through IOUT.
    '''
    # ALTIT must be in km; a value this large suggests meters were passed.
    if float(ALTIT) > 800:
        print("Altitude should be in km. Are you in Mt. Everest or above or",
              "using meters? This might fail but we'll attempt to continue.")

    ## Card 1: Comment. 64 characters max, underscores instead of spaces.
    CMNT = 'TMY Parameters Spectra'

    ## Card 2: ISPR = 1 -> site pressure from SPR, ALTIT and HEIGHT
    # (Card 2a). ALTIT + HEIGHT must be <= 100 km.
    ISPR = '1'

    ## Card 3: IATMOS = 0 -> realistic (non-reference) atmosphere defined
    # by TAIR, RH, SEASON, TDAY on Card 3a. ATMOS is only read when
    # IATMOS = 1 (reference atmosphere name, e.g. 'USSA').
    IATMOS = '0'
    ATMOS = 'USSA'

    ## Card 4: IH2O = 0 -> use the supplied precipitable water W
    # (Card 4a; must be <= 12 cm). If W is missing or out of range, fall
    # back to IH2O = 2 so SMARTS estimates W from TAIR and RH.
    IH2O = '0'
    if float(W) == 0 or float(W) > 12:
        print("Switching to calculating W")
        IH2O = '2'

    ## Card 5: IO3 = 1 -> default ozone abundance for the selected
    # atmosphere; IALT and AbO3 (Card 5a) are then unused.
    IO3 = '1'
    IALT = ''
    AbO3 = ''

    ## Card 6: IGAS = 0 with ILOAD = 1 -> default PRISTINE atmospheric
    # conditions; the ten pollutant abundances (Card 6b) are unused.
    IGAS = '0'
    ILOAD = '1'
    ApCH2O = ''
    ApCH4 = ''
    ApCO = ''
    ApHNO2 = ''
    ApHNO3 = ''
    ApNO = ''
    ApNO2 = ''
    ApNO3 = ''
    ApO3 = ''
    ApSO2 = ''

    ## Card 7: CO2 columnar volumetric concentration (ppmv).
    qCO2 = '0.0'
    # Card 7a: ISPCTR = 0 -> Gueymard 2004 synthetic extraterrestrial
    # spectrum (``Spctrm_0.dat``, solar constant 1366.10 W/m2).
    ISPCTR = '0'

    ## Card 8: aerosol model -- Shettle & Fenn Tropospheric.
    AEROS = 'S&F_TROPO'
    # Card 8a (ALPHA1, ALPHA2, OMEGL, GG) only applies when AEROS = 'USER'.
    ALPHA1 = ''
    ALPHA2 = ''
    OMEGL = ''
    GG = ''

    ## Card 9: ITURB = 0 -> turbidity given as aerosol optical depth at
    # 500 nm (TAU5) on Card 9a; the other turbidity inputs are unused.
    ITURB = '0'
    TAU5 = '0.00'   # if ITURB == 0
    BETA = ''       # if ITURB == 1
    BCHUEP = ''     # if ITURB == 2
    RANGE = ''      # if ITURB == 3
    VISI = ''       # if ITURB == 4
    TAU550 = ''     # if ITURB == 5

    ## Card 10: far-field albedo for backscattering, selected by material.
    IALBDX = _material_to_code(material)
    # Card 10a: zonal broadband Lambertian ground albedo (0-1); unused.
    RHOX = ''
    # Card 10b: ITILT = 1 -> perform tilted-surface calculations (10c/10d).
    ITILT = '1'
    # Card 10c: IALBDG = -1 -> foreground albedo taken as the broadband
    # value RHOG on Card 10d. TILT/WAZIM define the receiving surface.
    IALBDG = '-1'   # Sil check if this should be -1 or 1.
    TILT = '0.0'
    WAZIM = '180.0'

    ## Card 11: spectral range and extraterrestrial-irradiance settings.
    WLMN = min_wvl   # min wavelength (nm)
    WLMX = max_wvl   # max wavelength (nm)
    # SUNCOR (inverse squared Sun-Earth radius vector) is recalculated
    # internally when IMASS = 3, so this value is only a placeholder.
    SUNCOR = '1.0'
    SOLARC = '1367.0'   # solar constant (W/m2)

    ## Card 12: IPRT = 2 -> spectral results printed to File 17 in a
    # spreadsheet-like format.
    IPRT = '2'
    # Card 12a: printed wavelength range and step; mirror the calculation.
    WPMN = WLMN
    WPMX = WLMX
    INTVL = '.5'
    # Cards 12b/12c (IOTOT and the variable list) come from the IOUT
    # argument and are assembled downstream.

    ## Card 13: ICIRC = 0 -> no circumsolar/collimated-radiometer
    # simulation; SLOPE/APERT/LIMIT (Card 13a) unused.
    ICIRC = '0'
    SLOPE = ''
    APERT = ''
    LIMIT = ''

    ## Card 14: ISCAN = 0 -> no scanning/smoothing postprocessor;
    # Card 14a inputs unused.
    ISCAN = '0'
    IFILT = ''
    WV1 = ''
    WV2 = ''
    STEP = ''
    FWHM = ''

    ## Card 15: ILLUM = 0 -> no illuminance / luminous efficacy / PAR
    # calculations.
    ILLUM = '0'

    ## Card 16: IUV = 0 -> no special broadband UV calculations.
    IUV = '0'

    ## Card 17: IMASS = 3 -> solar position and air mass computed from
    # YEAR, MONTH, DAY, HOUR, LATIT, LONGIT, ZONE (Card 17a).
    IMASS = '3'
    ZENITH = ''   # Card 17a when IMASS = 0
    AZIM = ''     # Card 17a when IMASS = 0
    ELEV = ''     # Card 17a when IMASS = 1
    AMASS = ''    # Card 17a when IMASS = 2
    DSTEP = ''    # Card 17a when IMASS = 4

    output = _smartsAll(CMNT, ISPR, SPR, ALTIT, HEIGHT, LATIT, IATMOS, ATMOS, RH, TAIR, SEASON, TDAY, IH2O, W, IO3, IALT, AbO3, IGAS, ILOAD, ApCH2O, ApCH4, ApCO, ApHNO2, ApHNO3, ApNO,ApNO2, ApNO3, ApO3, ApSO2, qCO2, ISPCTR, AEROS, ALPHA1, ALPHA2, OMEGL, GG, ITURB, TAU5, BETA, BCHUEP, RANGE, VISI, TAU550, IALBDX, RHOX, ITILT, IALBDG,TILT, WAZIM, RHOG, WLMN, WLMX, SUNCOR, SOLARC, IPRT, WPMN, WPMX, INTVL, IOUT, ICIRC, SLOPE, APERT, LIMIT, ISCAN, IFILT, WV1, WV2, STEP, FWHM, ILLUM,IUV, IMASS, ZENITH, AZIM, ELEV, AMASS, YEAR, MONTH, DAY, HOUR, LONGIT, ZONE, DSTEP)

    return output
def SMARTSSRRL(IOUT,YEAR,MONTH,DAY,HOUR, LATIT, LONGIT, ALTIT, ZONE,
W, RH, TAIR, SEASON, TDAY, SPR, TILT, WAZIM,
RHOG, ALPHA1, ALPHA2, OMEGL, GG, BETA, TAU5, HEIGHT='0',
material='DryGrass', min_wvl='280', max_wvl='4000', POA='TRUE'):
r'''
This function calculates the spectra with inputs available on the Solar
Radiation Research Laboratory (SRRL).
Data accessible by API or website on:
https://midcdmz.nrel.gov/
Main Datasets:
SRRL Baseline Measuremnet System
https://midcdmz.nrel.gov/apps/sitehome.pl?site=BMS
SRRL AOD SkyNet Level 1.1
http://midc.nrel.gov/apps/sitehome.pl?site=AODSRRL
SRRL GPS-based PWV
http://midc.nrel.gov/apps/sitehome.pl?site=PWVSRRL
Parameters
----------
YEAR : string
Year
MONTH : string
Month
DAY : string
Day
HOUR : string
Hour, in 24 hour format.
LATIT : string
Latitude of the location.
LONGIT : string
Longitude of the location.
ALTIT : string
elevation of the ground surface above sea level [km].
WARNING: Please note that TMY3 data is in meters, convert before using this
function.
ZONE : string
Timezone
W : string
Precipitable water above the site altitude, in units of cm or equivalently
g/cm2/
This is, for example, SRRL_PWD['Precipitable Water [mm]']/10
Remember to input the correct units -- SRRL database is [mm] and this
function expects [cm].
RH : string
Relative Humidity.
This is, for example, SRRL_BMS['Tower RH [%]']
TAIR : string
Temperature.
This is, for example, SRRL_BMS['Tower Dry Bulb Temp [deg C]']
SEASON : string
Season, either 'WINTER' or 'SUMMER'. If Spring, use 'SUMMER'. If
Autumn, use 'WINTER'.
TDAY : string
Average of the day's temperature.
HEIGHT : string
Altitude of the simulated object over the surface, in km. Usually 0.
SPR : string
Site pressure, in mbars.
This is, for example, SRRL_BMS['Station Pressure [mBar]']
BETA : string
Ångström’s turbidity coefficient, ß (i.e., aerosol optical depth at 1000 nm)
If BETA and TAU5 are used as inputs, BETA is selected as priority since
TAU5 would be used to calcualte an internal SMARTS BETA value.
This is, for example, SRRL_AOD_SkyNet1['Beta']
TAU5 : string
Aerosol optical depth at 500 nm, τ5.
If BETA and TAU5 are used as inputs, BETA is selected as priority since
TAU5 would be used to calcualte an internal SMARTS BETA value.
This is, for example, SRRL_AOD_SkyNet1['AOD [500nm]']
TILT : string
Tilt angel of the receiving surface (0 to 90 decimal deg.), e.g. '90.0'
for a vertical plane. Use '-999' for a sun-tracking surface.
WAZIM : string
Surface azimuth (0 to 360 decimal deg.) counted clockwise from North;
e.g., 270 deg. for a surface facing West. Use -999 for a sun-tracking
surface.
RHOG : string
Local broadband Lambertian foreground albedo (for tilted plane calculations),
usually between 0.05 and 0.90.
This is, for example, SRRL_BMS['Albedo (CMP11)']
material : string
Unique identifier for ground cover. Pass None to retrieve a list of
all valid materials.
WLMN : string
Minimum wavelength to retreive, e.g. '280.0'
WLMX : string
Maximum wavelength to retreive, e.g. '4000'
Returns
-------
data : pandas
Matrix with first column representing wavelength (in nm) and second
column representing albedo of specified material at the wavelength
'''
if float(ALTIT) > 800:
print("Altitude should be in km. Are you in Mt. Everest or above or",
"using meters? This might fail but we'll attempt to continue.")
## Card 1: Comment. 64 characters max. In theory no spaces but yes underscores.
CMNT = 'SRRL Spectra'
## Card 2: ISPR is an option for site's pressure.
# ISPR = 0 to input SPR on Card 2a
# ISPR = 1 to input SPR, ALTIT and HEIGHT on Card 2a
# ISPR = 2 to input LATIT, ALTIT and HEIGHT on Card 2a.
ISPR = '1'
# Card 2a (if ISPR = 0): SPR
SPR = SPR #mbar
# Card 2a (if ISPR = 1): SPR, ALTIT, HEIGHT
# SPR: Surface pressure (mb).
# ALTIT: Site's altitude, i.e., elevation of the ground surface above sea level (km); must be
# <= 100 km. In case of a flying object, ALTIT refers to the ground surface below it.
# HEIGHT: Height of the simulated object above the ground surface underneath (km); must be
# <= 100 km (new input).
# The total ALTIT + HEIGHT is the altitude of the simulated object above sea level and
# must be <= 100 km.
# Card 2a (if ISPR = 2): LATIT, ALTIT, HEIGHT
# LATIT: Site's latitude (decimal degrees, positive North, negative South); e.g., -17.533 for
# Papeete, Tahiti. If LATIT is unknown, enter 45.0.
# ALTIT: Site's altitude, i.e., elevation of the ground surface above sea level (km); must be
# <= 100 km. In case of a flying object, ALTIT refers to the ground surface below it.
# HEIGHT: Height of the simulated object above the ground surface underneath (km); must be
# <= 100 km (new input).
# The total ALTIT + HEIGHT is the altitude of the simulated object above sea level and
# must be <= 100 km.
ALTIT = ALTIT
HEIGHT = HEIGHT
#LATIT = LATIT
## Card 3: IATMOS is an option to select the proper default atmosphere
# Its value can be either 0 or 1.
# Set IATMOS = 0 to define a realistic (i.e., non-reference) atmosphere. Card 3a will then have to
# provide TAIR, RH, SEASON, TDAY.
# Set IATMOS = 1 to select one of 10 default reference atmospheres (i.e., for ideal conditions). The
# shortened name of this atmosphere must be provided by ATMOS on Card 3a.
IATMOS = '0'
# Card 3a (if IATMOS = 1): ATMOS
# ATMOS is the name of the selected reference atmosphere; 4 characters max. This name can
# be one of the following:
# USSA (U.S. Standard Atmosphere) MLS (Mid-Latitude Summer)
# MLW (Mid-Latitude Winter) SAS (Sub-Arctic Summer)
# SAW (Sub-Arctic Winter) TRL (Tropical) STS (Sub-Tropical Summer)
# STW (Sub-Tropical Winter) AS (Arctic Summer) AW (Arctic Winter)
ATMOS = 'USSA'
# Card 3a(if IATMOS = 0): TAIR, RH, SEASON, TDAY.
# RH: Relative humidity at site level (%).
# SEASON: Can be either `WINTER` or `SUMMER`, for calculation of precipitable water and
# stratospheric temperature. If the true season is Fall, select WINTER. Select SUMMER if the
# true season is Spring. SEASON slightly affects the ozone effective temperature and the
# aerosol optical characteristics.
# TAIR: Atmospheric temperature at site level (°C). Acceptable range: -120 < TAIR < 50.
# TDAY: Average daily temperature at site level (°C). For a flying object (HEIGHT > 0), this
# is a reference temperature for various calculations, therefore it is important to provide a
# realistic value in this case in particular. Acceptable range: -120 < TDAY < 50.
RH = RH
TAIR = TAIR
SEASON = SEASON
TDAY = TDAY
## Card 4: IH2O is an option to select the correct water vapor data. All water vapor calculations involve
# precipitable water, W. The following values of IH2O are possible:
# 0, to input W on Card 4a
# 1, if W is to be defaulted to a value prescribed by the selected reference atmosphere and the site
# altitude (thus if IATMOS = 1 on Card 3). If IATMOS != 1, USSA will be defaulted for this step.
# 2, if W is to be calculated by the program from TAIR and RH (thus if IATMOS = 0 on Card 3). This
# calculation is only approximate (particularly if HEIGHT > 0) and therefore this option is not
# recommended.
# If IATMOS = 0 is selected, then IH2O should be 0 or 2; IO3 and IGAS should be 0.
# If IATMOS = 1 is selected, then IH2O, IO3, and IGAS may take any value. All user inputs
# have precedence over the defaults.
IH2O = '0'
# Card 4a: (if IH2O = 0): W is precipitable water above the site altitude
# in units of cm, or equivalently, g/cm2; it must be <= 12.
W = W
## Card 5: IO3 is an option to select the appropriate ozone abundance input.
# IO3 = 0 to input IALT and AbO3 on Card 5a
# IO3 = 1 to use a default value for AbO3 according to the reference atmosphere selected by
# IATMOS. If IATMOS != 1, USSA will be defaulted for this calculation.
# If IATMOS = 0 is selected, then IH2O should be 0 or 2; IO3 and IGAS should be 0.
# If IATMOS = 1 is selected, then IH2O, IO3, and IGAS may take any value. All user inputs
# have precedence over the defaults.
IO3 = '1'
# Card 5a (if IO3 = 0): IALT, AbO3
# IALT is an option to select the appropriate ozone column altitude correction.
# IALT = 0 bypasses the altitude correction, so that the value of AbO3 on
# Card 5a is used as is. IALT = 1 should be rather used if a vertical
# profile correction needs to be applied (in case of an elevated site when
# the value of AbO3 is known only at sea level).
IALT = ''
AbO3 = ''
## Card 6 IGAS is an option to define the correct conditions for gaseous absorption and atmospheric pollution.
# IGAS = 0 if ILOAD on Card 6a is to be read so that extra gaseous absorption calculations
# (corresponding to the gas load in the lower troposphere due to pollution or absence thereof) can be
# initiated;
# IGAS =1 if all gas abundances (except carbon dioxide, ozone and water vapor see Cards 4a, 5a,
# and 7) are to be defaulted, using average vertical profiles.
# If IATMOS = 0 is selected, then IH2O should be 0 or 2; IO3 and IGAS should be 0.
# If IATMOS = 1 is selected, then IH2O, IO3, and IGAS may take any value. All user inputs
# have precedence over the defaults.
IGAS = '0'
# Card 6a (if IGAS = 0): ILOAD is an option for tropospheric pollution, only used if IGAS = 0.
# For ILOAD = 0, Card 6b will be read with the concentrations of 10 pollutants.
# ILOAD = 1 selects default PRISTINE ATMOSPHERIC conditions, leading to slightly
# reduced abundances of some gases compared to the initial default obtained with the selected
# reference atmosphere.
# Setting ILOAD to 2-4 will increase the concentration of the 10 pollutants to possibly
# represent typical urban conditions: LIGHT POLLUTION (ILOAD = 2), MODERATE
# POLLUTION (ILOAD = 3), and SEVERE POLLUTION (ILOAD = 4).
ILOAD = '1'
# Card 6b (if IGAS = 0 and ILOAD = 0): ApCH2O, ApCH4, ApCO, ApHNO2,
# ApHNO3, ApNO, ApNO2, ApNO3, ApO3, ApSO2
# ApCH2O: Formaldehyde volumetric concentration in the assumed 1-km deep tropospheric
# pollution layer (ppmv).
# ApCH4: Methane volumetric concentration in the assumed 1-km deep tropospheric pollution
# layer (ppmv).
# ApCO: Carbon monoxide volumetric concentration in the assumed 1-km deep tropospheric
# pollution layer (ppmv), Card 6b.
# ApHNO2: Nitrous acid volumetric concentration in the assumed 1-km deep tropospheric
# pollution layer (ppmv).
# ApHNO3: Nitric acid volumetric concentration in the assumed 1-km deep tropospheric
# pollution layer (ppmv).
# ApNO: Nitric oxide volumetric concentration in the assumed 1-km deep tropospheric
# pollution layer (ppmv).
# ApNO2: Nitrogen dioxide volumetric concentration in the assumed 1-km deep tropospheric
# pollution layer (ppmv).
# ApNO3: Nitrogen trioxide volumetric concentration in the assumed 1-km deep tropospheric
# pollution layer (ppmv).
# ApO3: Ozone volumetric concentration in the assumed 1-km deep tropospheric pollution
# layer (ppmv).
# ApSO2: Sulfur dioxide volumetric concentration in the assumed 1-km deep tropospheric
# pollution layer (ppmv).
ApCH2O = ''
ApCH4 = ''
ApCO = ''
ApHNO2 = ''
ApHNO3 = ''
ApNO = ''
ApNO2 = ''
ApNO3 = ''
ApO3 = ''
ApSO2 =''
## Card 7 qCO2 carbon dioxide columnar volumetric concentration (ppmv).
qCO2 = '0.0'
# Card 7a ISPCTR
# is an option to select the proper extraterrestrial
# spectrum. This option allows to choose one out of ten possible spectral
# files (``Spctrm_n.dat``, where n = 0-8 or n = U).
# -1 Spctrm_U.dat N/A User User
# 0 Spctrm_0.dat N/A Gueymard, 2004 (synthetic) 1366.10
# 1 Spctrm_1.dat N/A Gueymard, unpublished (synthetic) 1367.00
# 2 Spctrm_2.dat cebchkur MODTRAN, Cebula/Chance/Kurucz 1362.12
# 3 Spctrm_3.dat chkur MODTRAN, Chance/Kurucz 1359.75
# 4 Spctrm_4.dat newkur MODTRAN, New Kurucz 1368.00
# 5 Spctrm_5.dat oldkur MODTRAN, Old Kurucz 1373.16
# 6 Spctrm_6.dat thkur MODTRAN, Thuillier/Kurucz 1376.23
# 7 Spctrm_7.dat MODTRAN2 Wehrli/WRC/WMO, 1985 1367.00
# 8 Spctrm_8.dat N/A ASTM E490, 2000 (synthetic) 1366.10
ISPCTR ='0'
## Card 8: AEROS selects the aerosol model, with one of the following twelve possible choices:
# S&F_RURAL , S&F_URBAN , S&F_MARIT , S&F_TROPO , These four choices
# refer respectively to the Rural, Urban, Maritime and Tropospheric aerosol
# models (Shettle and Fenn, 1979), which are humidity dependent and common with MODTRAN.
# SRA_CONTL , SRA_URBAN , SRA_MARIT , These three choices refer
# respectively to the Continental, Urban, and Maritime aerosol models of
# the IAMAP preliminary standard atmosphere (IAMAP, 1986).
# B&D_C , B&D_C1 , These two choices refer respectively to the Braslau &
# Dave aerosol type C and C1, themselves based on Deirmendjian's Haze L model.
# DESERT_MIN , DESERT_MAX DESERT_MIN corresponds to background (normal)
# conditions in desert areas, whereas DESERT_MAX corresponds to extremely
# turbid conditions (sandstorms).
# 'USER' Card 8a is then necessary to input user-supplied aerosol information.
AEROS = 'USER' #'S&F_TROPO'
# Card 8a:
# if AEROS = USER : ALPHA1, ALPHA2, OMEGL, GG These 4 variables must represent broadband average values only!
# ALPHA1: Average value of Ångström's wavelength exponent $\alpha$ for wavelengths < 500 nm
# (generally between 0.0 and 2.6).
# ALPHA2: Average value of Ångström's wavelength exponent $\alpha$ for wavelengths >= 500 nm
# (generally between 0.0 and 2.6).
# OMEGL: Aerosol single scattering albedo (generally between 0.6 and 1.0).
# GG: Aerosol asymmetry parameter (generally between 0.5 and 0.9).
ALPHA1 = ALPHA1
ALPHA2 = ALPHA2
OMEGL = OMEGL
GG = GG
## Card 9: ITURB is an option to select the correct turbidity data input. The different options are:
# 0, to read TAU5 on Card 9a
# 1, to read BETA on Card 9a
# 2, to read BCHUEP on Card 9a
# 3, to read RANGE on Card 9a
# 4, to read VISI on Card 9a
# 5, to read TAU550 on Card 9a (new option).
ITURB = '1'
#Card 9a Turbidity value
if BETA is not None:
BETA = BETA
TAU5 = ''
else:
TAU5 = TAU5
BETA = ''
BCHUEP = '' #if ITURB == 2
RANGE = '' #if ITURB == 3
VISI = '' #if ITURB == 4
TAU550 = '' #if ITURB == 5
## Card 10: Far Field Albedo for backscattering
IALBDX = _material_to_code(material)
# Card 10a:
RHOX = ''
# Zonal broadband Lambertian ground albedo (for backscattering calculations); must
# be between 0 and 1.
# Card 10b: ITILT is an option for tilted surface calculations.
#Select ITILT= 0 for no such calculation,
#ITILT = 1 to initiate these calculations using information on Card 10c.
if POA:
ITILT = '1'
else:
ITILT = '0'
# Card 10c:
# IALBDG is identical to IALBDX (see Card 10) except that it relates to the foreground local
# albedo seen by a tilted surface. The list of options is identical to that of IALBDG and thus
# extends from 1 to 64 (new).
# TILT: Tilt angle of the receiving surface (0 to 90 decimal deg.); e.g. 90.0 for a vertical
# plane. Use -999 for a sun-tracking surface.
# WAZIM: Surface azimuth (0 to 360 decimal deg.) counted clockwise from North; e.g., 270
# deg. for a surface facing West. Use -999 for a sun-tracking surface.
IALBDG = '-1'
TILT = TILT
WAZIM = WAZIM
# Card 10d:
# RHOG: Local broadband Lambertian foreground albedo (for tilted plane calculations), Card
# 10d (if IALBDG = -1); usually between 0.05 and 0.90.
RHOG = RHOG
## Card 11: Spectral range for all Calculations
WLMN = min_wvl #Min wavelength
WLMX = max_wvl #Max wavelength
SUNCOR = '1.0'
#Correction factor for irradiance is a correction factor equal to the inverse squared actual radius vector, or true Sun-Earth
# distance; e.g., SUNCOR = 1.024.
# SUNCOR varies naturally between 0.966 and 1.034, adding 3.4% to the irradiance in January
# and reducing it by 3.4% in July. It is calculated by the program if the solar position is calculated
# from date & time, i.e., if IMASS = 3 on Card 17, thus overwriting the input SUNCOR value on
# Card 11. If solar position is directly input instead (IMASS = 3), SUNCOR should be set to 1.0 if
# the average extraterrestrial irradiance (or solar constant, see SOLARC) is to be used, or to any
# other number between 0.966 and 1.034 to correct it for distance if so desired.
SOLARC = '1367.0' #Solar constant
## Card 12: Output results selection:
# IPRT is an option to select the results to be printed on Files 16 and 17. Only broadband results are
# output (to File 16) if IPRT = 0. Spectral results are added to File 16,
# and Card 12a is read, if IPRT = 1. Spectral results are rather printed to
# File 17 (in a spreadsheet-like format) if IPRT = 2. Finally, spectral
# results are printed to both File 16 and 17 if IPRT = 3. Cards
# 12b and 12c are read if IPRT = 2 or 3 (see IOTOT and IOUT).
IPRT = '2'
# Card 12a: Min, Max and Step wavelength (nm) (Output can be different than
# calculation...
WPMN = WLMN
WPMX = WLMX
INTVL = '.5'
# Card 12b: Total number of output variables:s
#IOTOT = XXX #This is determined with the input of this function
# Card 12c: Variables to output selection
#(space separated numbers 1-43 according to the table below:
IOUT = IOUT
## Card 13: Circumsolar Calculation
# ICIRC is an option controlling the calculation of circumsolar radiation, which is useful when
# simulating any type of radiometer (spectral or broadband) equipped with a collimator.
# ICIRC = 0 bypasses these calculations.
# ICIRC = 1 indicates that a typical radiometer needs to be simulated. The geometry of its collimator
# must then defined on Card 13a.
ICIRC = '0'
#Card 13a (if ICIRC = 1): SLOPE, APERT, LIMIT
SLOPE = ''
APERT = ''
LIMIT = ''
## Card 14 Option for using the scanning/smoothing virtual filter of the postprocessor.
# The smoothed results are output on a spreadsheet-ready file, File 18 (smarts295.scn.txt). This postprocessor is
# activated if ISCAN = 1, not if ISCAN = 0. Card 14a is read if ISCAN = 1.
ISCAN = '0'
# Card 14a (if ISCAN = 1): IFILT, WV1, WV2, STEP, FWHM
IFILT = ''
WV1 = ''
WV2 = ''
STEP = ''
FWHM = ''
## Card 15 ILLUM: Option for illuminance, luminous efficacy and photosynthetically active radiation (PAR)
# calculations. These calculations take place if ILLUM = -1, 1, -2 or 2, and are bypassed if ILLUM = 0.
# With ILLUM = -1 or 1, illuminance calculations are based on the CIE photopic curve (or Vlambda
# curve) of 1924, as supplied in File ``VLambda.dat``. With ILLUM = -2 or 2, the same calculations are
# done but the revised CIE photopic curve of 1988 is rather used (from File ``VMLambda.dat``). Note
# that selecting ILLUM = 1 or -1 will override WLMN and WLMX (see Card 11) so that calculations
# are done between at least 360 and 830 nm.
# Moreover, if ILLUM = 1 or 2, luminous efficacy calculations are added to the illuminance
# calculations. This overrides the values of WLMN and WLMX on Card 11, and replaces them by 280
# and 4000, respectively.
ILLUM = '0'
## Card 16: Option for special broadband UV calculations. Select IUV = 0 for no special UV calculation,
# IUV = 1 to initiate such calculations. These include UVA, UVB, UV index, and
# different action weighted irradiances of interest in photobiology.
# Note that IUV = 1 overrides WLMN and WLMX so that calculations are done between at least 280
# and 400 nm. The spectral results are also printed between at least 280 and 400 nm, irrespective of
# the IPRT, WPMN, and WPMX values.
IUV = '0'
## Card 17:
# Option for solar position and air mass calculations. Set IMASS to:
# 0, if inputs are to be ZENIT, AZIM on Card 17a
# 1, if inputs are to be ELEV, AZIM on Card 17a
# 2, if input is to be AMASS on Card 17a
# 3, if inputs are to be YEAR, MONTH, DAY, HOUR, LATIT, LONGIT, ZONE on Card 17a
# 4, if inputs are to be MONTH, LATIT, DSTEP on Card 17a (for a daily calculation).
IMASS = '3'
# Card 17a: IMASS = 0 Zenith and azimuth
ZENITH = ''
AZIM = ''
# Card 17a: IMASS = 1 Elevation and Azimuth
ELEV = ''
# Card 17a: IMASS = 2 Input air mass directly
AMASS = ''
# Card 17a: IMASS = 3 Input date, time and coordinates
YEAR = YEAR
MONTH = MONTH
DAY = DAY
HOUR = HOUR
LATIT = LATIT
LONGIT = LONGIT
ZONE = ZONE
# Card 17a: IMASS = 4 Input Moth, Latitude and DSTEP
DSTEP = ''
output = _smartsAll(CMNT, ISPR, SPR, ALTIT, HEIGHT, LATIT, IATMOS, ATMOS, RH, TAIR, SEASON, TDAY, IH2O, W, IO3, IALT, AbO3, IGAS, ILOAD, ApCH2O, ApCH4, ApCO, ApHNO2, ApHNO3, ApNO,ApNO2, ApNO3, ApO3, ApSO2, qCO2, ISPCTR, AEROS, ALPHA1, ALPHA2, OMEGL, GG, ITURB, TAU5, BETA, BCHUEP, RANGE, VISI, TAU550, IALBDX, RHOX, ITILT, IALBDG,TILT, WAZIM, RHOG, WLMN, WLMX, SUNCOR, SOLARC, IPRT, WPMN, WPMX, INTVL, IOUT, ICIRC, SLOPE, APERT, LIMIT, ISCAN, IFILT, WV1, WV2, STEP, FWHM, ILLUM,IUV, IMASS, ZENITH, AZIM, ELEV, AMASS, YEAR, MONTH, DAY, HOUR, LONGIT, ZONE, DSTEP)
return output
def _smartsAll(CMNT, ISPR, SPR, ALTIT, HEIGHT, LATIT, IATMOS, ATMOS, RH, TAIR, SEASON, TDAY, IH2O, W, IO3, IALT, AbO3, IGAS, ILOAD, ApCH2O, ApCH4, ApCO, ApHNO2, ApHNO3, ApNO,ApNO2, ApNO3, ApO3, ApSO2, qCO2, ISPCTR, AEROS, ALPHA1, ALPHA2, OMEGL, GG, ITURB, TAU5, BETA, BCHUEP, RANGE, VISI, TAU550, IALBDX, RHOX, ITILT, IALBDG,TILT, WAZIM, RHOG, WLMN, WLMX, SUNCOR, SOLARC, IPRT, WPMN, WPMX, INTVL, IOUT, ICIRC, SLOPE, APERT, LIMIT, ISCAN, IFILT, WV1, WV2, STEP, FWHM, ILLUM,IUV, IMASS, ZENITH, AZIM, ELEV, AMASS, YEAR, MONTH, DAY, HOUR, LONGIT, ZONE, DSTEP):
r'''
#data = smartsAll(CMNT, ISPR, SPR, ALTIT, HEIGHT, LATIT, IATMOS, ATMOS, RH, TAIR, SEASON, TDAY, IH2O, W, IO3, IALT, AbO3, IGAS, ILOAD, ApCH2O, ApCH4, ApCO, ApHNO2, ApHNO3, ApNO,ApNO2, ApNO3, ApO3, ApSO2, qCO2, ISPCTR, AEROS, ALPHA1, ALPHA2, OMEGL, GG, ITURB, TAU5, BETA, BCHUEP, RANGE, VISI, TAU550, IALBDX, RHOX, ITILT, IALBDG,TILT, WAZIM, RHOG, WLMN, WLMX, SUNCOR, SOLARC, IPRT, WPMN, WPMX, INTVL, IOUT, ICIRC, SLOPE, APERT, LIMIT, ISCAN, IFILT, WV1, WV2, STEP, FWHM, ILLUM,IUV, IMASS, ZENITH, ELEV, AMASS, YEAR, MONTH, DAY, HOUR, LONGIT, ZONE, DSTEP)
# SMARTS Control Function
#
# Inputs:
# All variables are labeled according to the SMARTS 2.9.5 documentation.
# NOTICE THAT "IOTOT" is not an input variable of the function since is determined in the function
# by sizing the IOUT variable.
# Outputs:
# data, is a matrix containing the outputs with as many rows as
# wavelengths+1 (includes header) and as many columns as IOTOT+1 (column 1 is wavelengths)
#
'''
## Init
import os
import pandas as pd
import subprocess
# Check if SMARTSPATH environment variable exists and change working
# directory if it does.
original_wd = None
if 'SMARTSPATH' in os.environ:
original_wd = os.getcwd()
os.chdir(os.environ['SMARTSPATH'])
try:
os.remove('smarts295.inp.txt')
except:
pass
try:
os.remove('smarts295.out.txt')
except:
pass
try:
os.remove('smarts295.ext.txt')
except:
pass
try:
os.remove('smarts295.scn.txt')
except:
pass
f = open('smarts295.inp.txt', 'w')
IOTOT = len(IOUT.split())
## Card 1: Comment.
if len(CMNT)>62:
CMNT = CMNT[0:61]
CMNT = CMNT.replace(" ", "_")
CMNT = "'"+CMNT+"'"
print('{}' . format(CMNT), file=f)
## Card 2: Site Pressure
print('{}'.format(ISPR), file=f)
##Card 2a:
if ISPR=='0':
# case '0' #Just input pressure.
print('{}'.format(SPR), file=f)
elif ISPR=='1':
# case '1' #Input pressure, altitude and height.
print('{} {} {}'.format(SPR, ALTIT, HEIGHT), file=f)
elif ISPR=='2':
#case '2' #Input lat, alt and height
print('{} {} {}'.format(LATIT, ALTIT, HEIGHT), file=f)
else:
print("ISPR Error. ISPR should be 0, 1 or 2. Currently ISPR = ", ISPR)
## Card 3: Atmosphere model
print('{}'.format(IATMOS), file=f)
## Card 3a:
if IATMOS=='0':
#case '0' #Input TAIR, RH, SEASON, TDAY
print('{} {} {} {}'.format(TAIR, RH, SEASON, TDAY), file=f)
elif IATMOS=='1':
#case '1' #Input reference atmosphere
ATMOS = "'"+ATMOS+"'"
print('{}'.format(ATMOS), file=f)
## Card 4: Water vapor data
print('{}'.format(IH2O), file=f)
## Card 4a
if IH2O=='0':
#case '0'
print('{}'.format(W), file=f)
elif IH2O=='1':
#case '1'
#The subcard 4a is skipped
pass # print("")
## Card 5: Ozone abundance
print('{}'.format(IO3), file=f)
## Card 5a
if IO3=='0':
#case '0'
print('{} {}'.format(IALT, AbO3), file=f)
elif IO3=='1':
#case '1'
#The subcard 5a is skipped and default values are used from selected
#reference atmosphere in Card 3.
pass # print("")
## Card 6: Gaseous absorption and atmospheric pollution
print('{}'.format(IGAS), file=f)
## Card 6a: Option for tropospheric pollution
if IGAS=='0':
# case '0'
print('{}'.format(ILOAD), file=f)
## Card 6b: Concentration of Pollutants
if ILOAD=='0':
#case '0'
print('{} {} {} {} {} {} {} {} {} {} '.format(ApCH2O, ApCH4, ApCO, ApHNO2, ApHNO3, ApNO, ApNO2, ApNO3, ApO3, ApSO2), file=f)
elif ILOAD=='1':
#case '1'
#The subcard 6b is skipped and values of PRISTINE
#ATMOSPHERIC conditions are assumed
pass # print("")
elif ILOAD=='2' or ILOAD =='3' or ILOAD == '4':
#case {'2', '3', '4'}
#The subcard 6b is skipped and value of ILOAD will be used
#as LIGHT POLLUTION (ILOAD = 2), MODERATE POLLUTION (ILOAD = 3),
#and SEVERE POLLUTION (ILOAD = 4).
pass # print("")
elif IGAS=='1':
#case '1'
#The subcard 6a is skipped, and values are for default average
#profiles.
print("")
## Card 7: CO2 columnar volumetric concentration (ppmv)
print('{}'.format(qCO2), file=f)
## Card 7a: Option of proper extraterrestrial spectrum
print('{}'.format(ISPCTR), file=f)
## Card 8: Aerosol model selection out of twelve
AEROS = "'"+AEROS+"'"
print('{}'.format(AEROS), file=f)
## Card 8a: If the aerosol model is 'USER' for user supplied information
if AEROS=="'USER'":
print('{} {} {} {}'.format(ALPHA1, ALPHA2, OMEGL, GG), file=f)
else:
#The subcard 8a is skipped
pass # print("")
## Card 9: Option to select turbidity model
print('{}'.format(ITURB), file=f)
## Card 9a
if ITURB=='0':
#case '0'
print('{}'.format(TAU5), file=f)
elif ITURB=='1':
#case '1'
print('{}'.format(BETA), file=f)
elif ITURB=='2':
#case '2'
print('{}'.format(BCHUEP), file=f)
elif ITURB=='3':
#case '3'
print('{}'.format(RANGE), file=f)
elif ITURB=='4':
#case '4'
print('{}'.format(VISI), file=f)
elif ITURB=='5':
#case '5'
print('{}'.format(TAU550), file=f)
else:
print("Error: Card 9 needs to be input. Assign a valid value to ITURB = ", ITURB)
## Card 10: Select zonal albedo
print('{}'.format(IALBDX), file=f)
## Card 10a: Input fix broadband lambertial albedo RHOX
if IALBDX == '-1':
print('{}'.format(RHOX), file=f)
else:
pass # print("")
#The subcard 10a is skipped.
## Card 10b: Tilted surface calculation flag
print('{}'.format(ITILT), file=f)
## Card 10c: Tilt surface calculation parameters
if ITILT == '1':
print('{} {} {}'.format(IALBDG, TILT, WAZIM), file=f)
##Card 10d: If tilt calculations are performed and zonal albedo of
##foreground.
if IALBDG == '-1':
print('{}'.format(RHOG), file=f)
else:
pass # print("")
#The subcard is skipped
## Card 11: Spectral ranges for calculations
print('{} {} {} {}'.format(WLMN, WLMX, SUNCOR, SOLARC), file=f)
## Card 12: Output selection.
print('{}'.format(IPRT), file=f)
    ## Card 12a: output wavelength range for spectral results (only read by
    ## SMARTS when IPRT >= 1).  WPMN/WPMX/INTVL are min/max wavelength and step.
    if float(IPRT) >= 1:
        print('{} {} {}'.format(WPMN, WPMX, INTVL), file=f)
        ## Card 12b & Card 12c: number of output variables (IOTOT) and their
        ## codes (IOUT) — only read for spreadsheet output (IPRT = 2 or 3).
        # NOTE(review): IOUT is printed as a single value; presumably it is a
        # pre-joined space-separated string of variable codes — TODO confirm
        # against the caller that builds it.
        if float(IPRT) == 2 or float(IPRT) == 3:
            print('{}'.format(IOTOT), file=f)
            print('{}'.format(IOUT), file=f)
        else:
            pass # print("")
            # The subcards 12b and 12c are skipped.
    else:
        pass # print("")
        # The subcard 12a is skipped.
    ## Card 13: circumsolar-radiation calculation flag (0 = off, 1 = on).
    print('{}'.format(ICIRC), file=f)
    ## Card 13a: simulated-radiometer geometry (slope, aperture, limit),
    ## only written when circumsolar calculations were requested.
    # NOTE(review): ICIRC is compared as the string '1' — the card variables
    # appear to be kept as strings throughout; confirm callers never pass ints.
    if ICIRC == '1':
        print('{} {} {}'.format(SLOPE, APERT, LIMIT), file=f)
    else:
        pass # print("")
        # The subcard 13a is skipped since no circumsolar calculations or
        # simulated radiometers have been requested.
    ## Card 14: scanning/smoothing virtual-filter postprocessor flag.
    print('{}'.format(ISCAN), file=f)
    ## Card 14a: filter shape and scan range, only written when ISCAN == '1'.
    if ISCAN == '1':
        print('{} {} {} {} {}'.format(IFILT, WV1, WV2, STEP, FWHM), file=f)
    else:
        pass # print("")
        # The subcard 14a is skipped since no postprocessing is simulated.
    ## Card 15: illuminance, luminous efficacy and photosynthetically active
    ## radiation (PAR) calculations flag.
    print('{}'.format(ILLUM), file=f)
    ## Card 16: special broadband UV calculations flag.
    print('{}'.format(IUV), file=f)
    ## Card 17: option selecting how solar position / air mass is specified.
    print('{}'.format(IMASS), file=f)
    ## Card 17a: solar position parameters; the expected fields depend on IMASS.
    if IMASS=='0':
        # case '0': zenith and azimuth of the sun are given directly.
        print('{} {}'.format(ZENITH, AZIM), file=f)
    elif IMASS=='1':
        # case '1': elevation and azimuth of the sun.
        print('{} {}'.format(ELEV, AZIM), file=f)
    elif IMASS=='2':
        # case '2': air mass entered directly.
        print('{}'.format(AMASS), file=f)
    elif IMASS=='3':
        # case '3': date, time and coordinates; SMARTS computes the position.
        print('{} {} {} {} {} {} {}'.format(YEAR, MONTH, DAY, HOUR, LATIT, LONGIT, ZONE), file=f)
    elif IMASS=='4':
        # case '4': month, latitude and step (min) for a daily calculation.
        # NOTE(review): this card uses comma separators while every other card
        # is space-separated — confirm SMARTS accepts commas here.
        print('{}, {}, {}'.format(MONTH, LATIT, DSTEP), file=f)
    ## Input finalization: trailing blank line, then close the .inp file so
    ## the SMARTS executable sees a complete input deck.
    print('', file=f)
    f.close()
    ## Run SMARTS 2.9.5: look for the batch executable in the current working
    ## directory (SMARTS requires being run from its own folder).
    #dump = os.system('smarts295bat.exe')
    commands = ['smarts295bat', 'smarts295bat.exe']
    command = None
    for cmd in commands:
        if os.path.exists(cmd):
            command = cmd
            break
    if not command:
        # No executable found: warn and fall through with no data.
        print('Could not find SMARTS2 executable.')
        data = None
    else:
        # NOTE(review): the stdout file handle opened inline here is never
        # closed, and shell=True is unnecessary for a bare executable name —
        # consider subprocess.run(command, ...) with a context-managed file.
        p = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=open("output.txt", "w"), shell=True)
        p.wait()
        ## Read the SMARTS 2.9.5 spectral output file (whitespace-delimited).
        # NOTE(review): delim_whitespace is deprecated in recent pandas;
        # the equivalent is sep=r'\s+'.
        data = pd.read_csv('smarts295.ext.txt', delim_whitespace=True)
    # Best-effort cleanup of SMARTS scratch files; missing files are ignored.
    # NOTE(review): the bare excepts swallow *all* exceptions — narrowing to
    # `except OSError:` would preserve the best-effort intent without hiding
    # unrelated bugs.
    try:
        os.remove('smarts295.inp.txt')
    except:
        pass # print("")
    try:
        os.remove('smarts295.out.txt')
    except:
        pass # print("")
    try:
        os.remove('smarts295.ext.txt')
    except:
        pass # print("")
    try:
        os.remove('smarts295.scn.txt')
    except:
        pass # print("")
    # Return to the original working directory (the function chdir'd into the
    # SMARTS folder earlier, above this chunk).
    if original_wd:
        os.chdir(original_wd)
    return data
# Card 12a: Min, Max and Step wavelength (nm) (Output can be different than # calculation... # Card 12b: Total number of output variables: #IOTOT = XXX #This is determined with the input of this function # Card 12c: Variables to output selection #(space separated numbers 1-43 according to the table below: ## Card 13: Circumsolar Calculation # ICIRC is an option controlling the calculation of circumsolar radiation, which is useful when # simulating any type of radiometer (spectral or broadband) equipped with a collimator. # ICIRC = 0 bypasses these calculations. # ICIRC = 1 indicates that a typical radiometer needs to be simulated. The geometry of its collimator # must then defined on Card 13a. #Card 13a (if ICIRC = 1): SLOPE, APERT, LIMIT ## Card 14 Option for using the scanning/smoothing virtual filter of the postprocessor. # The smoothed results are output on a spreadsheet-ready file, File 18 (``smarts295.scn.txt``). This postprocessor is # activated if ISCAN = 1, not if ISCAN = 0. Card 14a is read if ISCAN = 1. # Card 14a (if ISCAN = 1): IFILT, WV1, WV2, STEP, FWHM ## Card 15 ILLUM: Option for illuminance, luminous efficacy and photosynthetically active radiation (PAR) # calculations. These calculations take place if ILLUM = -1, 1, -2 or 2, and are bypassed if ILLUM = 0. # With ILLUM = -1 or 1, illuminance calculations are based on the CIE photopic curve (or Vlambda # curve) of 1924, as supplied in File ``VLambda.dat``. With ILLUM = -2 or 2, the same calculations are # done but the revised CIE photopic curve of 1988 is rather used (from File ``VMLambda.dat``). Note # that selecting ILLUM = 1 or -1 will override WLMN and WLMX (see Card 11) so that calculations # are done between at least 360 and 830 nm. # Moreover, if ILLUM = 1 or 2, luminous efficacy calculations are added to the illuminance # calculations. This overrides the values of WLMN and WLMX on Card 11, and replaces them by 280 # and 4000, respectively. 
## Card 16: Option for special broadband UV calculations. Select IUV = 0 for no special UV calculation, # IUV = 1 to initiate such calculations. These include UVA, UVB, UV index, and # different action weighted irradiances of interest in photobiology. # Note that IUV = 1 overrides WLMN and WLMX so that calculations are done between at least 280 # and 400 nm. The spectral results are also printed between at least 280 and 400 nm, irrespective of # the IPRT, WPMN, and WPMX values. ## Card 17: # Option for solar position and air mass calculations. Set IMASS to: # 0, if inputs are to be ZENIT, AZIM on Card 17a # 1, if inputs are to be ELEV, AZIM on Card 17a # 2, if input is to be AMASS on Card 17a # 3, if inputs are to be YEAR, MONTH, DAY, HOUR, LATIT, LONGIT, ZONE on Card 17a # 4, if inputs are to be MONTH, LATIT, DSTEP on Card 17a (for a daily calculation). # Card 17a: IMASS = 0 Zenith and azimuth # Card 17a: IMASS = 1 Elevation and Azimuth # Card 17a: IMASS = 2 Input air mass directly # Card 17a: IMASS = 3 Input date, time and coordinates # Card 17a: IMASS = 4 Input Month, Latitude and DSTEP This function calculates the spectral albedo for a given material. If no material is provided, the function will return a list of all valid materials. Parameters ---------- material : string Unique identifier for ground cover. Pass None to retrieve a list of all valid materials. WLMN : string Minimum wavelength to retrieve WLMX : string Maximum wavelength to retrieve ZENITH : string Zenith angle of sun AZIM : string Azimuth of sun SPR : string Site Pressure [mbars]. Default: SPR = '1013.25' Returns ------- data : pandas Matrix with first column representing wavelength (in nm) and second column representing albedo of specified material at the wavelength Updates: 6/20 Creation of second function to use zenith and azimuth M. Monarch ## Card 1: Comment. 64 characters max. In theory no spaces but yes underscores. ## Card 2: ISPR is an option for site's pressure. 
# ISPR = 0 to input SPR on Card 2a # ISPR = 1 to input SPR, ALTIT and HEIGHT on Card 2a # ISPR = 2 to input LATIT, ALTIT and HEIGHT on Card 2a. # Card 2a (if ISPR = 0): SPR #mbar # Card 2a (if ISPR = 1): SPR, ALTIT, HEIGHT # SPR: Surface pressure (mb). # ALTIT: Site's altitude, i.e., elevation of the ground surface above sea level (km); must be # <= 100 km. In case of a flying object, ALTIT refers to the ground surface below it. # HEIGHT: Height of the simulated object above the ground surface underneath (km); must be # <= 100 km (new input). # The total ALTIT + HEIGHT is the altitude of the simulated object above sea level and # must be <= 100 km. # Card 2a (if ISPR = 2): LATIT, ALTIT, HEIGHT # LATIT: Site's latitude (decimal degrees, positive North, negative South); e.g., -17.533 for # Papeete, Tahiti. If LATIT is unknown, enter 45.0. # ALTIT: Site's altitude, i.e., elevation of the ground surface above sea level (km); must be # <= 100 km. In case of a flying object, ALTIT refers to the ground surface below it. # HEIGHT: Height of the simulated object above the ground surface underneath (km); must be # <= 100 km (new input). # The total ALTIT + HEIGHT is the altitude of the simulated object above sea level and # must be <= 100 km. #LATIT = LATIT ## Card 3: IATMOS is an option to select the proper default atmosphere # Its value can be either 0 or 1. # Set IATMOS = 0 to define a realistic (i.e., non-reference) atmosphere. Card 3a will then have to # provide TAIR, RH, SEASON, TDAY. # Set IATMOS = 1 to select one of 10 default reference atmospheres (i.e., for ideal conditions). The # shortened name of this atmosphere must be provided by ATMOS on Card 3a. # Card 3a (if IATMOS = 1): ATMOS # ATMOS is the name of the selected reference atmosphere; 4 characters max. This name can # be one of the following: # USSA (U.S. 
Standard Atmosphere) MLS (Mid-Latitude Summer) # MLW (Mid-Latitude Winter) SAS (Sub-Arctic Summer) # SAW (Sub-Arctic Winter) TRL (Tropical) STS (Sub-Tropical Summer) # STW (Sub-Tropical Winter) AS (Arctic Summer) AW (Arctic Winter) # Card 3a(if IATMOS = 0): TAIR, RH, SEASON, TDAY. # RH: Relative humidity at site level (%). # SEASON: Can be either `WINTER` or `SUMMER`, for calculation of precipitable water and # stratospheric temperature. If the true season is Fall, select WINTER. Select SUMMER if the # true season is Spring. SEASON slightly affects the ozone effective temperature and the # aerosol optical characteristics. # TAIR: Atmospheric temperature at site level (°C). Acceptable range: -120 < TAIR < 50. # TDAY: Average daily temperature at site level (°C). For a flying object (HEIGHT > 0), this # is a reference temperature for various calculations, therefore it is important to provide a # realistic value in this case in particular. Acceptable range: -120 < TDAY < 50. ## Card 4: IH2O is an option to select the correct water vapor data. All water vapor calculations involve # precipitable water, W. The following values of IH2O are possible: # 0, to input W on Card 4a # 1, if W is to be defaulted to a value prescribed by the selected reference atmosphere and the site # altitude (thus if IATMOS = 1 on Card 3). If IATMOS != 1, USSA will be defaulted for this step. # 2, if W is to be calculated by the program from TAIR and RH (thus if IATMOS = 0 on Card 3). This # calculation is only approximate (particularly if HEIGHT > 0) and therefore this option is not # recommended. # If IATMOS = 0 is selected, then IH2O should be 0 or 2; IO3 and IGAS should be 0. # If IATMOS = 1 is selected, then IH2O, IO3, and IGAS may take any value. All user inputs # have precedence over the defaults. # Card 4a: (if IH2O = 0): W is precipitable water above the site altitude # in units of cm, or equivalently, g/cm2; it must be <= 12. 
## Card 5: IO3 is an option to select the appropriate ozone abundance input. # IO3 = 0 to input IALT and AbO3 on Card 5a # IO3 = 1 to use a default value for AbO3 according to the reference atmosphere selected by # IATMOS. If IATMOS != 1, USSA will be defaulted for this calculation. # If IATMOS = 0 is selected, then IH2O should be 0 or 2; IO3 and IGAS should be 0. # If IATMOS = 1 is selected, then IH2O, IO3, and IGAS may take any value. All user inputs # have precedence over the defaults. # Card 5a (if IO3 = 0): IALT, AbO3 # IALT is an option to select the appropriate ozone column altitude correction. # IALT = 0 bypasses the altitude correction, so that the value of AbO3 on # Card 5a is used as is. IALT = 1 should be rather used if a vertical # profile correction needs to be applied (in case of an elevated site when # the value of AbO3 is known only at sea level). ## Card 6 IGAS is an option to define the correct conditions for gaseous absorption and atmospheric pollution. # IGAS = 0 if ILOAD on Card 6a is to be read so that extra gaseous absorption calculations # (corresponding to the gas load in the lower troposphere due to pollution or absence thereof) can be # initiated; # IGAS =1 if all gas abundances (except carbon dioxide, ozone and water vapor see Cards 4a, 5a, # and 7) are to be defaulted, using average vertical profiles. # If IATMOS = 0 is selected, then IH2O should be 0 or 2; IO3 and IGAS should be 0. # If IATMOS = 1 is selected, then IH2O, IO3, and IGAS may take any value. All user inputs # have precedence over the defaults. # Card 6a (if IGAS = 0): ILOAD is an option for tropospheric pollution, only used if IGAS = 0. # For ILOAD = 0, Card 6b will be read with the concentrations of 10 pollutants. # ILOAD = 1 selects default PRISTINE ATMOSPHERIC conditions, leading to slightly # reduced abundances of some gases compared to the initial default obtained with the selected # reference atmosphere. 
# Setting ILOAD to 2-4 will increase the concentration of the 10 pollutants to possibly # represent typical urban conditions: LIGHT POLLUTION (ILOAD = 2), MODERATE # POLLUTION (ILOAD = 3), and SEVERE POLLUTION (ILOAD = 4). # Card 6b (if IGAS = 0 and ILOAD = 0): ApCH2O, ApCH4, ApCO, ApHNO2, # ApHNO3, ApNO, ApNO2, ApNO3, ApO3, ApSO2 # ApCH2O: Formaldehyde volumetric concentration in the assumed 1-km deep tropospheric # pollution layer (ppmv). # ApCH4: Methane volumetric concentration in the assumed 1-km deep tropospheric pollution # layer (ppmv). # ApCO: Carbon monoxide volumetric concentration in the assumed 1-km deep tropospheric # pollution layer (ppmv), Card 6b. # ApHNO2: Nitrous acid volumetric concentration in the assumed 1-km deep tropospheric # pollution layer (ppmv). # ApHNO3: Nitric acid volumetric concentration in the assumed 1-km deep tropospheric # pollution layer (ppmv). # ApNO: Nitric oxide volumetric concentration in the assumed 1-km deep tropospheric # pollution layer (ppmv). # ApNO2: Nitrogen dioxide volumetric concentration in the assumed 1-km deep tropospheric # pollution layer (ppmv). # ApNO3: Nitrogen trioxide volumetric concentration in the assumed 1-km deep tropospheric # pollution layer (ppmv). # ApO3: Ozone volumetric concentration in the assumed 1-km deep tropospheric pollution # layer (ppmv). # ApSO2: Sulfur dioxide volumetric concentration in the assumed 1-km deep tropospheric # pollution layer (ppmv). ## Card 7 qCO2 carbon dioxide columnar volumetric concentration (ppmv). # Card 7a ISPCTR # is an option to select the proper extraterrestrial # spectrum. This option allows to choose one out of ten possible spectral # files (``Spctrm_n.dat``, where n = 0-8 or n = U). 
# -1 Spctrm_U.dat N/A User User # 0 Spctrm_0.dat N/A Gueymard, 2004 (synthetic) 1366.10 # 1 Spctrm_1.dat N/A Gueymard, unpublished (synthetic) 1367.00 # 2 Spctrm_2.dat cebchkur MODTRAN, Cebula/Chance/Kurucz 1362.12 # 3 Spctrm_3.dat chkur MODTRAN, Chance/Kurucz 1359.75 # 4 Spctrm_4.dat newkur MODTRAN, New Kurucz 1368.00 # 5 Spctrm_5.dat oldkur MODTRAN, Old Kurucz 1373.16 # 6 Spctrm_6.dat thkur MODTRAN, Thuillier/Kurucz 1376.23 # 7 Spctrm_7.dat MODTRAN2 Wehrli/WRC/WMO, 1985 1367.00 # 8 Spctrm_8.dat N/A ASTM E490, 2000 (synthetic) 1366.10 ## Card 8: AEROS selects the aerosol model, with one of the following twelve possible choices: # S&F_RURAL , S&F_URBAN , S&F_MARIT , S&F_TROPO , These four choices # refer respectively to the Rural, Urban, Maritime and Tropospheric aerosol # models (Shettle and Fenn, 1979), which are humidity dependent and common with MODTRAN. # SRA_CONTL , SRA_URBAN , SRA_MARIT , These three choices refer # respectively to the Continental, Urban, and Maritime aerosol models of # the IAMAP preliminary standard atmosphere (IAMAP, 1986). # B&D_C , B&D_C1 , These two choices refer respectively to the Braslau & # Dave aerosol type C and C1, themselves based on Deirmendjian's Haze L model. # DESERT_MIN , DESERT_MAX DESERT_MIN corresponds to background (normal) # conditions in desert areas, whereas DESERT_MAX corresponds to extremely # turbid conditions (sandstorms). # 'USER' Card 8a is then necessary to input user-supplied aerosol information. # Card 8a: # if AEROS = USER : ALPHA1, ALPHA2, OMEGL, GG These 4 variables must represent broadband average values only! # ALPHA1: Average value of Ångström's wavelength exponent $\alpha$ for wavelengths < 500 nm # (generally between 0.0 and 2.6). # ALPHA2: Average value of Ångström's wavelength exponent $\alpha$ for wavelengths >= 500 nm # (generally between 0.0 and 2.6). # OMEGL: Aerosol single scattering albedo (generally between 0.6 and 1.0). # GG: Aerosol asymmetry parameter (generally between 0.5 and 0.9). 
## Card 9: ITURB is an option to select the correct turbidity data input. The different options are: # 0, to read TAU5 on Card 9a # 1, to read BETA on Card 9a # 2, to read BCHUEP on Card 9a # 3, to read RANGE on Card 9a # 4, to read VISI on Card 9a # 5, to read TAU550 on Card 9a (new option). #Card 9a Turbidity value #if ITURB == 0 #if ITURB == 1 #if ITURB == 2 #if ITURB == 3 #if ITURB == 4 #if ITURB == 5 ## Card 10: Far Field Albedo for backscattering # Card 10a: # Zonal broadband Lambertian ground albedo (for backscattering calculations); must # be between 0 and 1. # Card 10b: ITILT is an option for tilted surface calculations. #Select ITILT= 0 for no such calculation, #ITILT = 1 to initiate these calculations using information on Card 10c. # Card 10c: # IALBDG is identical to IALBDX (see Card 10) except that it relates to the foreground local # albedo seen by a tilted surface. The list of options is identical to that of IALBDG and thus # extends from 1 to 64 (new). # TILT: Tilt angle of the receiving surface (0 to 90 decimal deg.); e.g. 90.0 for a vertical # plane. Use -999 for a sun-tracking surface. # WAZIM: Surface azimuth (0 to 360 decimal deg.) counted clockwise from North; e.g., 270 # deg. for a surface facing West. Use -999 for a sun-tracking surface. # Card 10d: # RHOG: Local broadband Lambertian foreground albedo (for tilted plane calculations), Card # 10d (if IALBDG = -1); usually between 0.05 and 0.90. ## Card 11: Spectral range for all Calculations #Min wavelength #Max wavelength #Correction factor for irradiance is a correction factor equal to the inverse squared actual radius vector, or true Sun-Earth # distance; e.g., SUNCOR = 1.024. # SUNCOR varies naturally between 0.966 and 1.034, adding 3.4% to the irradiance in January # and reducing it by 3.4% in July. It is calculated by the program if the solar position is calculated # from date & time, i.e., if IMASS = 3 on Card 17, thus overwriting the input SUNCOR value on # Card 11. 
If solar position is directly input instead (IMASS ≠ 3), SUNCOR should be set to 1.0 if # the average extraterrestrial irradiance (or solar constant, see SOLARC) is to be used, or to any # other number between 0.966 and 1.034 to correct it for distance if so desired. #Solar constant ## Card 12: Output results selection: # IPRT is an option to select the results to be printed on Files 16 and 17. Only broadband results are # output (to File 16) if IPRT = 0. Spectral results are added to File 16, # and Card 12a is read, if IPRT = 1. Spectral results are rather printed to # File 17 (in a spreadsheet-like format) if IPRT = 2. Finally, spectral # results are printed to both File 16 and 17 if IPRT = 3. Cards # 12b and 12c are read if IPRT = 2 or 3 (see IOTOT and IOUT). # Card 12a: Min, Max and Step wavelength (nm) (Output can be different than # calculation... # Card 12b: Total number of output variables: #IOTOT = XXX #This is determined with the input of this function # Card 12c: Variables to output selection #(space separated numbers 1-43 according to the table below: #IOUT = '30 31' ## Card 13: Circumsolar Calculation # ICIRC is an option controlling the calculation of circumsolar radiation, which is useful when # simulating any type of radiometer (spectral or broadband) equipped with a collimator. # ICIRC = 0 bypasses these calculations. # ICIRC = 1 indicates that a typical radiometer needs to be simulated. The geometry of its collimator # must then be defined on Card 13a. #Card 13a (if ICIRC = 1): SLOPE, APERT, LIMIT ## Card 14 Option for using the scanning/smoothing virtual filter of the postprocessor. # The smoothed results are output on a spreadsheet-ready file, File 18 (``smarts295.scn.txt``). This postprocessor is # activated if ISCAN = 1, not if ISCAN = 0. Card 14a is read if ISCAN = 1. # Card 14a (if ISCAN = 1): IFILT, WV1, WV2, STEP, FWHM ## Card 15 ILLUM: Option for illuminance, luminous efficacy and photosynthetically active radiation (PAR) # calculations. 
These calculations take place if ILLUM = -1, 1, -2 or 2, and are bypassed if ILLUM = 0. # With ILLUM = -1 or 1, illuminance calculations are based on the CIE photopic curve (or Vlambda # curve) of 1924, as supplied in File ``VLambda.dat``. With ILLUM = -2 or 2, the same calculations are # done but the revised CIE photopic curve of 1988 is rather used (from File ``VMLambda.dat``). Note # that selecting ILLUM = 1 or -1 will override WLMN and WLMX (see Card 11) so that calculations # are done between at least 360 and 830 nm. # Moreover, if ILLUM = 1 or 2, luminous efficacy calculations are added to the illuminance # calculations. This overrides the values of WLMN and WLMX on Card 11, and replaces them by 280 # and 4000, respectively. ## Card 16: Option for special broadband UV calculations. Select IUV = 0 for no special UV calculation, # IUV = 1 to initiate such calculations. These include UVA, UVB, UV index, and # different action weighted irradiances of interest in photobiology. # Note that IUV = 1 overrides WLMN and WLMX so that calculations are done between at least 280 # and 400 nm. The spectral results are also printed between at least 280 and 400 nm, irrespective of # the IPRT, WPMN, and WPMX values. ## Card 17: # Option for solar position and air mass calculations. Set IMASS to: # 0, if inputs are to be ZENIT, AZIM on Card 17a # 1, if inputs are to be ELEV, AZIM on Card 17a # 2, if input is to be AMASS on Card 17a # 3, if inputs are to be YEAR, MONTH, DAY, HOUR, LATIT, LONGIT, ZONE on Card 17a # 4, if inputs are to be MONTH, LATIT, DSTEP on Card 17a (for a daily calculation). # Card 17a: IMASS = 0 Zenith and azimuth #ZENITH = '' #AZIM = '' # Card 17a: IMASS = 1 Elevation and Azimuth # Card 17a: IMASS = 2 Input air mass directly # Card 17a: IMASS = 3 Input date, time and coordinates # Card 17a: IMASS = 4 Input Month, Latitude and DSTEP This function calculates the spectral albedo for a given material. 
If no material is provided, the function will return a list of all valid materials. Parameters ---------- material : string Unique identifier for ground cover. Pass None to retrieve a list of all valid materials. WLMN : string Minimum wavelength to retrieve WLMX : string Maximum wavelength to retrieve YEAR : string Year MONTH : string Month DAY : string Day HOUR : string Hour, in 24 hour format. LATIT : string Latitude of the location. LONGIT : string Longitude of the location. ALTIT : string elevation of the ground surface above sea level [km]. WARNING: Please note that TMY3 data is in meters, convert before using this function. ZONE : string Timezone RHOG : string Local broadband Lambertian foreground albedo (for tilted plane calculations) W : string Precipitable water above the site altitude, in units of cm or equivalently g/cm2. RH : string Relative Humidity TAIR : string Temperature. SEASON : string Season, either 'WINTER' or 'SUMMER'. If Spring, use 'SUMMER'. If Autumn, use 'WINTER'. TDAY : string Average of the day's temperature. HEIGHT : string Altitude of the simulated object over the surface, in km. SPR : string Site pressure, in mbars. Returns ------- data : pandas Matrix with first column representing wavelength (in nm) and second column representing albedo of specified material at the wavelength ## Card 1: Comment. 64 characters max. In theory no spaces but yes underscores. ## Card 2: ISPR is an option for site's pressure. # ISPR = 0 to input SPR on Card 2a # ISPR = 1 to input SPR, ALTIT and HEIGHT on Card 2a # ISPR = 2 to input LATIT, ALTIT and HEIGHT on Card 2a. # Card 2a (if ISPR = 0): SPR #mbar # Card 2a (if ISPR = 1): SPR, ALTIT, HEIGHT # SPR: Surface pressure (mb). # ALTIT: Site's altitude, i.e., elevation of the ground surface above sea level (km); must be # <= 100 km. In case of a flying object, ALTIT refers to the ground surface below it. 
# HEIGHT: Height of the simulated object above the ground surface underneath (km); must be # <= 100 km (new input). # The total ALTIT + HEIGHT is the altitude of the simulated object above sea level and # must be <= 100 km. # Card 2a (if ISPR = 2): LATIT, ALTIT, HEIGHT # LATIT: Site's latitude (decimal degrees, positive North, negative South); e.g., -17.533 for # Papeete, Tahiti. If LATIT is unknown, enter 45.0. # ALTIT: Site's altitude, i.e., elevation of the ground surface above sea level (km); must be # <= 100 km. In case of a flying object, ALTIT refers to the ground surface below it. # HEIGHT: Height of the simulated object above the ground surface underneath (km); must be # <= 100 km (new input). # The total ALTIT + HEIGHT is the altitude of the simulated object above sea level and # must be <= 100 km. #LATIT = LATIT ## Card 3: IATMOS is an option to select the proper default atmosphere # Its value can be either 0 or 1. # Set IATMOS = 0 to define a realistic (i.e., non-reference) atmosphere. Card 3a will then have to # provide TAIR, RH, SEASON, TDAY. # Set IATMOS = 1 to select one of 10 default reference atmospheres (i.e., for ideal conditions). The # shortened name of this atmosphere must be provided by ATMOS on Card 3a. # Card 3a (if IATMOS = 1): ATMOS # ATMOS is the name of the selected reference atmosphere; 4 characters max. This name can # be one of the following: # USSA (U.S. Standard Atmosphere) MLS (Mid-Latitude Summer) # MLW (Mid-Latitude Winter) SAS (Sub-Arctic Summer) # SAW (Sub-Arctic Winter) TRL (Tropical) STS (Sub-Tropical Summer) # STW (Sub-Tropical Winter) AS (Arctic Summer) AW (Arctic Winter) # Card 3a(if IATMOS = 0): TAIR, RH, SEASON, TDAY. # RH: Relative humidity at site level (%). # SEASON: Can be either `WINTER` or `SUMMER`, for calculation of precipitable water and # stratospheric temperature. If the true season is Fall, select WINTER. Select SUMMER if the # true season is Spring. 
SEASON slightly affects the ozone effective temperature and the # aerosol optical characteristics. # TAIR: Atmospheric temperature at site level (°C). Acceptable range: -120 < TAIR < 50. # TDAY: Average daily temperature at site level (°C). For a flying object (HEIGHT > 0), this # is a reference temperature for various calculations, therefore it is important to provide a # realistic value in this case in particular. Acceptable range: -120 < TDAY < 50. ## Card 4: IH2O is an option to select the correct water vapor data. All water vapor calculations involve # precipitable water, W. The following values of IH2O are possible: # 0, to input W on Card 4a # 1, if W is to be defaulted to a value prescribed by the selected reference atmosphere and the site # altitude (thus if IATMOS = 1 on Card 3). If IATMOS != 1, USSA will be defaulted for this step. # 2, if W is to be calculated by the program from TAIR and RH (thus if IATMOS = 0 on Card 3). This # calculation is only approximate (particularly if HEIGHT > 0) and therefore this option is not # recommended. # If IATMOS = 0 is selected, then IH2O should be 0 or 2; IO3 and IGAS should be 0. # If IATMOS = 1 is selected, then IH2O, IO3, and IGAS may take any value. All user inputs # have precedence over the defaults. # Card 4a: (if IH2O = 0): W is precipitable water above the site altitude # in units of cm, or equivalently, g/cm2; it must be <= 12. ## Card 5: IO3 is an option to select the appropriate ozone abundance input. # IO3 = 0 to input IALT and AbO3 on Card 5a # IO3 = 1 to use a default value for AbO3 according to the reference atmosphere selected by # IATMOS. If IATMOS != 1, USSA will be defaulted for this calculation. # If IATMOS = 0 is selected, then IH2O should be 0 or 2; IO3 and IGAS should be 0. # If IATMOS = 1 is selected, then IH2O, IO3, and IGAS may take any value. All user inputs # have precedence over the defaults. 
# Card 5a (if IO3 = 0): IALT, AbO3 # IALT is an option to select the appropriate ozone column altitude correction. # IALT = 0 bypasses the altitude correction, so that the value of AbO3 on # Card 5a is used as is. IALT = 1 should be rather used if a vertical # profile correction needs to be applied (in case of an elevated site when # the value of AbO3 is known only at sea level). ## Card 6 IGAS is an option to define the correct conditions for gaseous absorption and atmospheric pollution. # IGAS = 0 if ILOAD on Card 6a is to be read so that extra gaseous absorption calculations # (corresponding to the gas load in the lower troposphere due to pollution or absence thereof) can be # initiated; # IGAS =1 if all gas abundances (except carbon dioxide, ozone and water vapor see Cards 4a, 5a, # and 7) are to be defaulted, using average vertical profiles. # If IATMOS = 0 is selected, then IH2O should be 0 or 2; IO3 and IGAS should be 0. # If IATMOS = 1 is selected, then IH2O, IO3, and IGAS may take any value. All user inputs # have precedence over the defaults. # Card 6a (if IGAS = 0): ILOAD is an option for tropospheric pollution, only used if IGAS = 0. # For ILOAD = 0, Card 6b will be read with the concentrations of 10 pollutants. # ILOAD = 1 selects default PRISTINE ATMOSPHERIC conditions, leading to slightly # reduced abundances of some gases compared to the initial default obtained with the selected # reference atmosphere. # Setting ILOAD to 2-4 will increase the concentration of the 10 pollutants to possibly # represent typical urban conditions: LIGHT POLLUTION (ILOAD = 2), MODERATE # POLLUTION (ILOAD = 3), and SEVERE POLLUTION (ILOAD = 4). # Card 6b (if IGAS = 0 and ILOAD = 0): ApCH2O, ApCH4, ApCO, ApHNO2, # ApHNO3, ApNO, ApNO2, ApNO3, ApO3, ApSO2 # ApCH2O: Formaldehyde volumetric concentration in the assumed 1-km deep tropospheric # pollution layer (ppmv). # ApCH4: Methane volumetric concentration in the assumed 1-km deep tropospheric pollution # layer (ppmv). 
# ApCO: Carbon monoxide volumetric concentration in the assumed 1-km deep tropospheric # pollution layer (ppmv), Card 6b. # ApHNO2: Nitrous acid volumetric concentration in the assumed 1-km deep tropospheric # pollution layer (ppmv). # ApHNO3: Nitric acid volumetric concentration in the assumed 1-km deep tropospheric # pollution layer (ppmv). # ApNO: Nitric oxide volumetric concentration in the assumed 1-km deep tropospheric # pollution layer (ppmv). # ApNO2: Nitrogen dioxide volumetric concentration in the assumed 1-km deep tropospheric # pollution layer (ppmv). # ApNO3: Nitrogen trioxide volumetric concentration in the assumed 1-km deep tropospheric # pollution layer (ppmv). # ApO3: Ozone volumetric concentration in the assumed 1-km deep tropospheric pollution # layer (ppmv). # ApSO2: Sulfur dioxide volumetric concentration in the assumed 1-km deep tropospheric # pollution layer (ppmv). ## Card 7 qCO2 carbon dioxide columnar volumetric concentration (ppmv). # Card 7a ISPCTR # is an option to select the proper extraterrestrial # spectrum. This option allows to choose one out of ten possible spectral # files (``Spctrm_n.dat``, where n = 0-8 or n = U). 
# -1 Spctrm_U.dat N/A User User # 0 Spctrm_0.dat N/A Gueymard, 2004 (synthetic) 1366.10 # 1 Spctrm_1.dat N/A Gueymard, unpublished (synthetic) 1367.00 # 2 Spctrm_2.dat cebchkur MODTRAN, Cebula/Chance/Kurucz 1362.12 # 3 Spctrm_3.dat chkur MODTRAN, Chance/Kurucz 1359.75 # 4 Spctrm_4.dat newkur MODTRAN, New Kurucz 1368.00 # 5 Spctrm_5.dat oldkur MODTRAN, Old Kurucz 1373.16 # 6 Spctrm_6.dat thkur MODTRAN, Thuillier/Kurucz 1376.23 # 7 Spctrm_7.dat MODTRAN2 Wehrli/WRC/WMO, 1985 1367.00 # 8 Spctrm_8.dat N/A ASTM E490, 2000 (synthetic) 1366.10 ## Card 8: AEROS selects the aerosol model, with one of the following twelve possible choices: # S&F_RURAL , S&F_URBAN , S&F_MARIT , S&F_TROPO , These four choices # refer respectively to the Rural, Urban, Maritime and Tropospheric aerosol # models (Shettle and Fenn, 1979), which are humidity dependent and common with MODTRAN. # SRA_CONTL , SRA_URBAN , SRA_MARIT , These three choices refer # respectively to the Continental, Urban, and Maritime aerosol models of # the IAMAP preliminary standard atmosphere (IAMAP, 1986). # B&D_C , B&D_C1 , These two choices refer respectively to the Braslau & # Dave aerosol type C and C1, themselves based on Deirmendjian's Haze L model. # DESERT_MIN , DESERT_MAX DESERT_MIN corresponds to background (normal) # conditions in desert areas, whereas DESERT_MAX corresponds to extremely # turbid conditions (sandstorms). # 'USER' Card 8a is then necessary to input user-supplied aerosol information. # Card 8a: # if AEROS = USER : ALPHA1, ALPHA2, OMEGL, GG These 4 variables must represent broadband average values only! # ALPHA1: Average value of Ångström's wavelength exponent $\alpha$ for wavelengths < 500 nm # (generally between 0.0 and 2.6). # ALPHA2: Average value of Ångström's wavelength exponent $\alpha$ for wavelengths >= 500 nm # (generally between 0.0 and 2.6). # OMEGL: Aerosol single scattering albedo (generally between 0.6 and 1.0). # GG: Aerosol asymmetry parameter (generally between 0.5 and 0.9). 
## Card 9: ITURB is an option to select the correct turbidity data input. The different options are: # 0, to read TAU5 on Card 9a # 1, to read BETA on Card 9a # 2, to read BCHUEP on Card 9a # 3, to read RANGE on Card 9a # 4, to read VISI on Card 9a # 5, to read TAU550 on Card 9a (new option). #Card 9a Turbidity value #if ITURB == 0 #if ITURB == 1 #if ITURB == 2 #if ITURB == 3 #if ITURB == 4 #if ITURB == 5 ## Card 10: Far Field Albedo for backscattering # Card 10a: # Zonal broadband Lambertian ground albedo (for backscattering calculations); must # be between 0 and 1. # Card 10b: ITILT is an option for tilted surface calculations. #Select ITILT= 0 for no such calculation, #ITILT = 1 to initiate these calculations using information on Card 10c. # Card 10c: # IALBDG is identical to IALBDX (see Card 10) except that it relates to the foreground local # albedo seen by a tilted surface. The list of options is identical to that of IALBDG and thus # extends from 1 to 64 (new). # TILT: Tilt angle of the receiving surface (0 to 90 decimal deg.); e.g. 90.0 for a vertical # plane. Use -999 for a sun-tracking surface. # WAZIM: Surface azimuth (0 to 360 decimal deg.) counted clockwise from North; e.g., 270 # deg. for a surface facing West. Use -999 for a sun-tracking surface. #Sil check if this should be -1 or 1. # Card 10d: # RHOG: Local broadband Lambertian foreground albedo (for tilted plane calculations), Card # 10d (if IALBDG = -1); usually between 0.05 and 0.90. ## Card 11: Spectral range for all Calculations #Min wavelength #Max wavelength #Correction factor for irradiance is a correction factor equal to the inverse squared actual radius vector, or true Sun-Earth # distance; e.g., SUNCOR = 1.024. # SUNCOR varies naturally between 0.966 and 1.034, adding 3.4% to the irradiance in January # and reducing it by 3.4% in July. 
It is calculated by the program if the solar position is calculated # from date & time, i.e., if IMASS = 3 on Card 17, thus overwriting the input SUNCOR value on # Card 11. If solar position is directly input instead (IMASS = 3), SUNCOR should be set to 1.0 if # the average extraterrestrial irradiance (or solar constant, see SOLARC) is to be used, or to any # other number between 0.966 and 1.034 to correct it for distance if so desired. #Solar constant ## Card 12: Output results selection: # IPRT is an option to select the results to be printed on Files 16 and 17. Only broadband results are # output (to File 16) if IPRT = 0. Spectral results are added to File 16, # and Card 12a is read, if IPRT = 1. Spectral results are rather printed to # File 17 (in a spreadsheet-like format) if IPRT = 2. Finally, spectral # results are printed to both File 16 and 17 if IPRT = 3. Cards # 12b and 12c are read if IPRT = 2 or 3 (see IOTOT and IOUT). # Card 12a: Min, Max and Step wavelength (nm) (Output can be different than # calculation... # Card 12b: Total number of output variables: #IOTOT = XXX #This is determined with the input of this function # Card 12c: Variables to output selection #(space separated numbers 1-43 according to the table below: #IOUT = '30 31' ## Card 13: Circumsolar Calculation # ICIRC is an option controlling the calculation of circumsolar radiation, which is useful when # simulating any type of radiometer (spectral or broadband) equipped with a collimator. # ICIRC = 0 bypasses these calculations. # ICIRC = 1 indicates that a typical radiometer needs to be simulated. The geometry of its collimator # must then defined on Card 13a. #Card 13a (if ICIRC = 1): SLOPE, APERT, LIMIT ## Card 14 Option for using the scanning/smoothing virtual filter of the postprocessor. # The smoothed results are output on a spreadsheet-ready file, File 18 (``smarts295.scn.txt``). This postprocessor is # activated if ISCAN = 1, not if ISCAN = 0. Card 14a is read if ISCAN = 1. 
# Card 14a (if ISCAN = 1): IFILT, WV1, WV2, STEP, FWHM ## Card 15 ILLUM: Option for illuminance, luminous efficacy and photosynthetically active radiation (PAR) # calculations. These calculations take place if ILLUM = -1, 1, -2 or 2, and are bypassed if ILLUM = 0. # With ILLUM = -1 or 1, illuminance calculations are based on the CIE photopic curve (or Vlambda # curve) of 1924, as supplied in File ``VLambda.dat``. With ILLUM = -2 or 2, the same calculations are # done but the revised CIE photopic curve of 1988 is rather used (from File ``VMLambda.dat``). Note # that selecting ILLUM = 1 or -1 will override WLMN and WLMX (see Card 11) so that calculations # are done between at least 360 and 830 nm. # Moreover, if ILLUM = 1 or 2, luminous efficacy calculations are added to the illuminance # calculations. This overrides the values of WLMN and WLMX on Card 11, and replaces them by 280 # and 4000, respectively. ## Card 16: Option for special broadband UV calculations. Select IUV = 0 for no special UV calculation, # IUV = 1 to initiate such calculations. These include UVA, UVB, UV index, and # different action weighted irradiances of interest in photobiology. # Note that IUV = 1 overrides WLMN and WLMX so that calculations are done between at least 280 # and 400 nm. The spectral results are also printed between at least 280 and 400 nm, irrespective of # the IPRT, WPMN, and WPMX values. ## Card 17: # Option for solar position and air mass calculations. Set IMASS to: # 0, if inputs are to be ZENIT, AZIM on Card 17a # 1, if inputs are to be ELEV, AZIM on Card 17a # 2, if input is to be AMASS on Card 17a # 3, if inputs are to be YEAR, MONTH, DAY, HOUR, LATIT, LONGIT, ZONE on Card 17a # 4, if inputs are to be MONTH, LATIT, DSTEP on Card 17a (for a daily calculation). 
# Card 17a: IMASS = 0 Zenith and azimuth # Card 17a: IMASS = 1 Elevation and Azimuth # Card 17a: IMASS = 2 Input air mass directly # Card 17a: IMASS = 3 Input date, time and coordinates # Card 17a: IMASS = 4 Input Moth, Latitude and DSTEP This function calculates the spectra with inputs available on the Solar Radiation Research Laboratory (SRRL). Data accessible by API or website on: https://midcdmz.nrel.gov/ Main Datasets: SRRL Baseline Measuremnet System https://midcdmz.nrel.gov/apps/sitehome.pl?site=BMS SRRL AOD SkyNet Level 1.1 http://midc.nrel.gov/apps/sitehome.pl?site=AODSRRL SRRL GPS-based PWV http://midc.nrel.gov/apps/sitehome.pl?site=PWVSRRL Parameters ---------- YEAR : string Year MONTH : string Month DAY : string Day HOUR : string Hour, in 24 hour format. LATIT : string Latitude of the location. LONGIT : string Longitude of the location. ALTIT : string elevation of the ground surface above sea level [km]. WARNING: Please note that TMY3 data is in meters, convert before using this function. ZONE : string Timezone W : string Precipitable water above the site altitude, in units of cm or equivalently g/cm2/ This is, for example, SRRL_PWD['Precipitable Water [mm]']/10 Remember to input the correct units -- SRRL database is [mm] and this function expects [cm]. RH : string Relative Humidity. This is, for example, SRRL_BMS['Tower RH [%]'] TAIR : string Temperature. This is, for example, SRRL_BMS['Tower Dry Bulb Temp [deg C]'] SEASON : string Season, either 'WINTER' or 'SUMMER'. If Spring, use 'SUMMER'. If Autumn, use 'WINTER'. TDAY : string Average of the day's temperature. HEIGHT : string Altitude of the simulated object over the surface, in km. Usually 0. SPR : string Site pressure, in mbars. 
This is, for example, SRRL_BMS['Station Pressure [mBar]'] BETA : string Ångström’s turbidity coefficient, ß (i.e., aerosol optical depth at 1000 nm) If BETA and TAU5 are used as inputs, BETA is selected as priority since TAU5 would be used to calcualte an internal SMARTS BETA value. This is, for example, SRRL_AOD_SkyNet1['Beta'] TAU5 : string Aerosol optical depth at 500 nm, τ5. If BETA and TAU5 are used as inputs, BETA is selected as priority since TAU5 would be used to calcualte an internal SMARTS BETA value. This is, for example, SRRL_AOD_SkyNet1['AOD [500nm]'] TILT : string Tilt angel of the receiving surface (0 to 90 decimal deg.), e.g. '90.0' for a vertical plane. Use '-999' for a sun-tracking surface. WAZIM : string Surface azimuth (0 to 360 decimal deg.) counted clockwise from North; e.g., 270 deg. for a surface facing West. Use -999 for a sun-tracking surface. RHOG : string Local broadband Lambertian foreground albedo (for tilted plane calculations), usually between 0.05 and 0.90. This is, for example, SRRL_BMS['Albedo (CMP11)'] material : string Unique identifier for ground cover. Pass None to retrieve a list of all valid materials. WLMN : string Minimum wavelength to retreive, e.g. '280.0' WLMX : string Maximum wavelength to retreive, e.g. '4000' Returns ------- data : pandas Matrix with first column representing wavelength (in nm) and second column representing albedo of specified material at the wavelength ## Card 1: Comment. 64 characters max. In theory no spaces but yes underscores. ## Card 2: ISPR is an option for site's pressure. # ISPR = 0 to input SPR on Card 2a # ISPR = 1 to input SPR, ALTIT and HEIGHT on Card 2a # ISPR = 2 to input LATIT, ALTIT and HEIGHT on Card 2a. # Card 2a (if ISPR = 0): SPR #mbar # Card 2a (if ISPR = 1): SPR, ALTIT, HEIGHT # SPR: Surface pressure (mb). # ALTIT: Site's altitude, i.e., elevation of the ground surface above sea level (km); must be # <= 100 km. 
In case of a flying object, ALTIT refers to the ground surface below it. # HEIGHT: Height of the simulated object above the ground surface underneath (km); must be # <= 100 km (new input). # The total ALTIT + HEIGHT is the altitude of the simulated object above sea level and # must be <= 100 km. # Card 2a (if ISPR = 2): LATIT, ALTIT, HEIGHT # LATIT: Site's latitude (decimal degrees, positive North, negative South); e.g., -17.533 for # Papeete, Tahiti. If LATIT is unknown, enter 45.0. # ALTIT: Site's altitude, i.e., elevation of the ground surface above sea level (km); must be # <= 100 km. In case of a flying object, ALTIT refers to the ground surface below it. # HEIGHT: Height of the simulated object above the ground surface underneath (km); must be # <= 100 km (new input). # The total ALTIT + HEIGHT is the altitude of the simulated object above sea level and # must be <= 100 km. #LATIT = LATIT ## Card 3: IATMOS is an option to select the proper default atmosphere # Its value can be either 0 or 1. # Set IATMOS = 0 to define a realistic (i.e., non-reference) atmosphere. Card 3a will then have to # provide TAIR, RH, SEASON, TDAY. # Set IATMOS = 1 to select one of 10 default reference atmospheres (i.e., for ideal conditions). The # shortened name of this atmosphere must be provided by ATMOS on Card 3a. # Card 3a (if IATMOS = 1): ATMOS # ATMOS is the name of the selected reference atmosphere; 4 characters max. This name can # be one of the following: # USSA (U.S. Standard Atmosphere) MLS (Mid-Latitude Summer) # MLW (Mid-Latitude Winter) SAS (Sub-Arctic Summer) # SAW (Sub-Arctic Winter) TRL (Tropical) STS (Sub-Tropical Summer) # STW (Sub-Tropical Winter) AS (Arctic Summer) AW (Arctic Winter) # Card 3a(if IATMOS = 0): TAIR, RH, SEASON, TDAY. # RH: Relative humidity at site level (%). # SEASON: Can be either `WINTER` or `SUMMER`, for calculation of precipitable water and # stratospheric temperature. If the true season is Fall, select WINTER. 
Select SUMMER if the # true season is Spring. SEASON slightly affects the ozone effective temperature and the # aerosol optical characteristics. # TAIR: Atmospheric temperature at site level (°C). Acceptable range: -120 < TAIR < 50. # TDAY: Average daily temperature at site level (°C). For a flying object (HEIGHT > 0), this # is a reference temperature for various calculations, therefore it is important to provide a # realistic value in this case in particular. Acceptable range: -120 < TDAY < 50. ## Card 4: IH2O is an option to select the correct water vapor data. All water vapor calculations involve # precipitable water, W. The following values of IH2O are possible: # 0, to input W on Card 4a # 1, if W is to be defaulted to a value prescribed by the selected reference atmosphere and the site # altitude (thus if IATMOS = 1 on Card 3). If IATMOS != 1, USSA will be defaulted for this step. # 2, if W is to be calculated by the program from TAIR and RH (thus if IATMOS = 0 on Card 3). This # calculation is only approximate (particularly if HEIGHT > 0) and therefore this option is not # recommended. # If IATMOS = 0 is selected, then IH2O should be 0 or 2; IO3 and IGAS should be 0. # If IATMOS = 1 is selected, then IH2O, IO3, and IGAS may take any value. All user inputs # have precedence over the defaults. # Card 4a: (if IH2O = 0): W is precipitable water above the site altitude # in units of cm, or equivalently, g/cm2; it must be <= 12. ## Card 5: IO3 is an option to select the appropriate ozone abundance input. # IO3 = 0 to input IALT and AbO3 on Card 5a # IO3 = 1 to use a default value for AbO3 according to the reference atmosphere selected by # IATMOS. If IATMOS != 1, USSA will be defaulted for this calculation. # If IATMOS = 0 is selected, then IH2O should be 0 or 2; IO3 and IGAS should be 0. # If IATMOS = 1 is selected, then IH2O, IO3, and IGAS may take any value. All user inputs # have precedence over the defaults. 
# Card 5a (if IO3 = 0): IALT, AbO3 # IALT is an option to select the appropriate ozone column altitude correction. # IALT = 0 bypasses the altitude correction, so that the value of AbO3 on # Card 5a is used as is. IALT = 1 should be rather used if a vertical # profile correction needs to be applied (in case of an elevated site when # the value of AbO3 is known only at sea level). ## Card 6 IGAS is an option to define the correct conditions for gaseous absorption and atmospheric pollution. # IGAS = 0 if ILOAD on Card 6a is to be read so that extra gaseous absorption calculations # (corresponding to the gas load in the lower troposphere due to pollution or absence thereof) can be # initiated; # IGAS =1 if all gas abundances (except carbon dioxide, ozone and water vapor see Cards 4a, 5a, # and 7) are to be defaulted, using average vertical profiles. # If IATMOS = 0 is selected, then IH2O should be 0 or 2; IO3 and IGAS should be 0. # If IATMOS = 1 is selected, then IH2O, IO3, and IGAS may take any value. All user inputs # have precedence over the defaults. # Card 6a (if IGAS = 0): ILOAD is an option for tropospheric pollution, only used if IGAS = 0. # For ILOAD = 0, Card 6b will be read with the concentrations of 10 pollutants. # ILOAD = 1 selects default PRISTINE ATMOSPHERIC conditions, leading to slightly # reduced abundances of some gases compared to the initial default obtained with the selected # reference atmosphere. # Setting ILOAD to 2-4 will increase the concentration of the 10 pollutants to possibly # represent typical urban conditions: LIGHT POLLUTION (ILOAD = 2), MODERATE # POLLUTION (ILOAD = 3), and SEVERE POLLUTION (ILOAD = 4). # Card 6b (if IGAS = 0 and ILOAD = 0): ApCH2O, ApCH4, ApCO, ApHNO2, # ApHNO3, ApNO, ApNO2, ApNO3, ApO3, ApSO2 # ApCH2O: Formaldehyde volumetric concentration in the assumed 1-km deep tropospheric # pollution layer (ppmv). # ApCH4: Methane volumetric concentration in the assumed 1-km deep tropospheric pollution # layer (ppmv). 
# ApCO: Carbon monoxide volumetric concentration in the assumed 1-km deep tropospheric # pollution layer (ppmv), Card 6b. # ApHNO2: Nitrous acid volumetric concentration in the assumed 1-km deep tropospheric # pollution layer (ppmv). # ApHNO3: Nitric acid volumetric concentration in the assumed 1-km deep tropospheric # pollution layer (ppmv). # ApNO: Nitric oxide volumetric concentration in the assumed 1-km deep tropospheric # pollution layer (ppmv). # ApNO2: Nitrogen dioxide volumetric concentration in the assumed 1-km deep tropospheric # pollution layer (ppmv). # ApNO3: Nitrogen trioxide volumetric concentration in the assumed 1-km deep tropospheric # pollution layer (ppmv). # ApO3: Ozone volumetric concentration in the assumed 1-km deep tropospheric pollution # layer (ppmv). # ApSO2: Sulfur dioxide volumetric concentration in the assumed 1-km deep tropospheric # pollution layer (ppmv). ## Card 7 qCO2 carbon dioxide columnar volumetric concentration (ppmv). # Card 7a ISPCTR # is an option to select the proper extraterrestrial # spectrum. This option allows to choose one out of ten possible spectral # files (``Spctrm_n.dat``, where n = 0-8 or n = U). 
# -1 Spctrm_U.dat N/A User User # 0 Spctrm_0.dat N/A Gueymard, 2004 (synthetic) 1366.10 # 1 Spctrm_1.dat N/A Gueymard, unpublished (synthetic) 1367.00 # 2 Spctrm_2.dat cebchkur MODTRAN, Cebula/Chance/Kurucz 1362.12 # 3 Spctrm_3.dat chkur MODTRAN, Chance/Kurucz 1359.75 # 4 Spctrm_4.dat newkur MODTRAN, New Kurucz 1368.00 # 5 Spctrm_5.dat oldkur MODTRAN, Old Kurucz 1373.16 # 6 Spctrm_6.dat thkur MODTRAN, Thuillier/Kurucz 1376.23 # 7 Spctrm_7.dat MODTRAN2 Wehrli/WRC/WMO, 1985 1367.00 # 8 Spctrm_8.dat N/A ASTM E490, 2000 (synthetic) 1366.10 ## Card 8: AEROS selects the aerosol model, with one of the following twelve possible choices: # S&F_RURAL , S&F_URBAN , S&F_MARIT , S&F_TROPO , These four choices # refer respectively to the Rural, Urban, Maritime and Tropospheric aerosol # models (Shettle and Fenn, 1979), which are humidity dependent and common with MODTRAN. # SRA_CONTL , SRA_URBAN , SRA_MARIT , These three choices refer # respectively to the Continental, Urban, and Maritime aerosol models of # the IAMAP preliminary standard atmosphere (IAMAP, 1986). # B&D_C , B&D_C1 , These two choices refer respectively to the Braslau & # Dave aerosol type C and C1, themselves based on Deirmendjian's Haze L model. # DESERT_MIN , DESERT_MAX DESERT_MIN corresponds to background (normal) # conditions in desert areas, whereas DESERT_MAX corresponds to extremely # turbid conditions (sandstorms). # 'USER' Card 8a is then necessary to input user-supplied aerosol information. #'S&F_TROPO' # Card 8a: # if AEROS = USER : ALPHA1, ALPHA2, OMEGL, GG These 4 variables must represent broadband average values only! # ALPHA1: Average value of Ångström's wavelength exponent $\alpha$ for wavelengths < 500 nm # (generally between 0.0 and 2.6). # ALPHA2: Average value of Ångström's wavelength exponent $\alpha$ for wavelengths >= 500 nm # (generally between 0.0 and 2.6). # OMEGL: Aerosol single scattering albedo (generally between 0.6 and 1.0). 
# GG: Aerosol asymmetry parameter (generally between 0.5 and 0.9). ## Card 9: ITURB is an option to select the correct turbidity data input. The different options are: # 0, to read TAU5 on Card 9a # 1, to read BETA on Card 9a # 2, to read BCHUEP on Card 9a # 3, to read RANGE on Card 9a # 4, to read VISI on Card 9a # 5, to read TAU550 on Card 9a (new option). #Card 9a Turbidity value #if ITURB == 2 #if ITURB == 3 #if ITURB == 4 #if ITURB == 5 ## Card 10: Far Field Albedo for backscattering # Card 10a: # Zonal broadband Lambertian ground albedo (for backscattering calculations); must # be between 0 and 1. # Card 10b: ITILT is an option for tilted surface calculations. #Select ITILT= 0 for no such calculation, #ITILT = 1 to initiate these calculations using information on Card 10c. # Card 10c: # IALBDG is identical to IALBDX (see Card 10) except that it relates to the foreground local # albedo seen by a tilted surface. The list of options is identical to that of IALBDG and thus # extends from 1 to 64 (new). # TILT: Tilt angle of the receiving surface (0 to 90 decimal deg.); e.g. 90.0 for a vertical # plane. Use -999 for a sun-tracking surface. # WAZIM: Surface azimuth (0 to 360 decimal deg.) counted clockwise from North; e.g., 270 # deg. for a surface facing West. Use -999 for a sun-tracking surface. # Card 10d: # RHOG: Local broadband Lambertian foreground albedo (for tilted plane calculations), Card # 10d (if IALBDG = -1); usually between 0.05 and 0.90. ## Card 11: Spectral range for all Calculations #Min wavelength #Max wavelength #Correction factor for irradiance is a correction factor equal to the inverse squared actual radius vector, or true Sun-Earth # distance; e.g., SUNCOR = 1.024. # SUNCOR varies naturally between 0.966 and 1.034, adding 3.4% to the irradiance in January # and reducing it by 3.4% in July. 
It is calculated by the program if the solar position is calculated # from date & time, i.e., if IMASS = 3 on Card 17, thus overwriting the input SUNCOR value on # Card 11. If solar position is directly input instead (IMASS = 3), SUNCOR should be set to 1.0 if # the average extraterrestrial irradiance (or solar constant, see SOLARC) is to be used, or to any # other number between 0.966 and 1.034 to correct it for distance if so desired. #Solar constant ## Card 12: Output results selection: # IPRT is an option to select the results to be printed on Files 16 and 17. Only broadband results are # output (to File 16) if IPRT = 0. Spectral results are added to File 16, # and Card 12a is read, if IPRT = 1. Spectral results are rather printed to # File 17 (in a spreadsheet-like format) if IPRT = 2. Finally, spectral # results are printed to both File 16 and 17 if IPRT = 3. Cards # 12b and 12c are read if IPRT = 2 or 3 (see IOTOT and IOUT). # Card 12a: Min, Max and Step wavelength (nm) (Output can be different than # calculation... # Card 12b: Total number of output variables:s #IOTOT = XXX #This is determined with the input of this function # Card 12c: Variables to output selection #(space separated numbers 1-43 according to the table below: ## Card 13: Circumsolar Calculation # ICIRC is an option controlling the calculation of circumsolar radiation, which is useful when # simulating any type of radiometer (spectral or broadband) equipped with a collimator. # ICIRC = 0 bypasses these calculations. # ICIRC = 1 indicates that a typical radiometer needs to be simulated. The geometry of its collimator # must then defined on Card 13a. #Card 13a (if ICIRC = 1): SLOPE, APERT, LIMIT ## Card 14 Option for using the scanning/smoothing virtual filter of the postprocessor. # The smoothed results are output on a spreadsheet-ready file, File 18 (smarts295.scn.txt). This postprocessor is # activated if ISCAN = 1, not if ISCAN = 0. Card 14a is read if ISCAN = 1. 
# Card 14a (if ISCAN = 1): IFILT, WV1, WV2, STEP, FWHM ## Card 15 ILLUM: Option for illuminance, luminous efficacy and photosynthetically active radiation (PAR) # calculations. These calculations take place if ILLUM = -1, 1, -2 or 2, and are bypassed if ILLUM = 0. # With ILLUM = -1 or 1, illuminance calculations are based on the CIE photopic curve (or Vlambda # curve) of 1924, as supplied in File ``VLambda.dat``. With ILLUM = -2 or 2, the same calculations are # done but the revised CIE photopic curve of 1988 is rather used (from File ``VMLambda.dat``). Note # that selecting ILLUM = 1 or -1 will override WLMN and WLMX (see Card 11) so that calculations # are done between at least 360 and 830 nm. # Moreover, if ILLUM = 1 or 2, luminous efficacy calculations are added to the illuminance # calculations. This overrides the values of WLMN and WLMX on Card 11, and replaces them by 280 # and 4000, respectively. ## Card 16: Option for special broadband UV calculations. Select IUV = 0 for no special UV calculation, # IUV = 1 to initiate such calculations. These include UVA, UVB, UV index, and # different action weighted irradiances of interest in photobiology. # Note that IUV = 1 overrides WLMN and WLMX so that calculations are done between at least 280 # and 400 nm. The spectral results are also printed between at least 280 and 400 nm, irrespective of # the IPRT, WPMN, and WPMX values. ## Card 17: # Option for solar position and air mass calculations. Set IMASS to: # 0, if inputs are to be ZENIT, AZIM on Card 17a # 1, if inputs are to be ELEV, AZIM on Card 17a # 2, if input is to be AMASS on Card 17a # 3, if inputs are to be YEAR, MONTH, DAY, HOUR, LATIT, LONGIT, ZONE on Card 17a # 4, if inputs are to be MONTH, LATIT, DSTEP on Card 17a (for a daily calculation). 
# Card 17a: IMASS = 0 Zenith and azimuth # Card 17a: IMASS = 1 Elevation and Azimuth # Card 17a: IMASS = 2 Input air mass directly # Card 17a: IMASS = 3 Input date, time and coordinates # Card 17a: IMASS = 4 Input Moth, Latitude and DSTEP #data = smartsAll(CMNT, ISPR, SPR, ALTIT, HEIGHT, LATIT, IATMOS, ATMOS, RH, TAIR, SEASON, TDAY, IH2O, W, IO3, IALT, AbO3, IGAS, ILOAD, ApCH2O, ApCH4, ApCO, ApHNO2, ApHNO3, ApNO,ApNO2, ApNO3, ApO3, ApSO2, qCO2, ISPCTR, AEROS, ALPHA1, ALPHA2, OMEGL, GG, ITURB, TAU5, BETA, BCHUEP, RANGE, VISI, TAU550, IALBDX, RHOX, ITILT, IALBDG,TILT, WAZIM, RHOG, WLMN, WLMX, SUNCOR, SOLARC, IPRT, WPMN, WPMX, INTVL, IOUT, ICIRC, SLOPE, APERT, LIMIT, ISCAN, IFILT, WV1, WV2, STEP, FWHM, ILLUM,IUV, IMASS, ZENITH, ELEV, AMASS, YEAR, MONTH, DAY, HOUR, LONGIT, ZONE, DSTEP) # SMARTS Control Function # # Inputs: # All variables are labeled according to the SMARTS 2.9.5 documentation. # NOTICE THAT "IOTOT" is not an input variable of the function since is determined in the function # by sizing the IOUT variable. # Outputs: # data, is a matrix containing the outputs with as many rows as # wavelengths+1 (includes header) and as many columns as IOTOT+1 (column 1 is wavelengths) # ## Init # Check if SMARTSPATH environment variable exists and change working # directory if it does. ## Card 1: Comment. ## Card 2: Site Pressure ##Card 2a: # case '0' #Just input pressure. # case '1' #Input pressure, altitude and height. #case '2' #Input lat, alt and height ## Card 3: Atmosphere model ## Card 3a: #case '0' #Input TAIR, RH, SEASON, TDAY #case '1' #Input reference atmosphere ## Card 4: Water vapor data ## Card 4a #case '0' #case '1' #The subcard 4a is skipped # print("") ## Card 5: Ozone abundance ## Card 5a #case '0' #case '1' #The subcard 5a is skipped and default values are used from selected #reference atmosphere in Card 3. 
# print("") ## Card 6: Gaseous absorption and atmospheric pollution ## Card 6a: Option for tropospheric pollution # case '0' ## Card 6b: Concentration of Pollutants #case '0' #case '1' #The subcard 6b is skipped and values of PRISTINE #ATMOSPHERIC conditions are assumed # print("") #case {'2', '3', '4'} #The subcard 6b is skipped and value of ILOAD will be used #as LIGHT POLLUTION (ILOAD = 2), MODERATE POLLUTION (ILOAD = 3), #and SEVERE POLLUTION (ILOAD = 4). # print("") #case '1' #The subcard 6a is skipped, and values are for default average #profiles. ## Card 7: CO2 columnar volumetric concentration (ppmv) ## Card 7a: Option of proper extraterrestrial spectrum ## Card 8: Aerosol model selection out of twelve ## Card 8a: If the aerosol model is 'USER' for user supplied information #The subcard 8a is skipped # print("") ## Card 9: Option to select turbidity model ## Card 9a #case '0' #case '1' #case '2' #case '3' #case '4' #case '5' ## Card 10: Select zonal albedo ## Card 10a: Input fix broadband lambertial albedo RHOX # print("") #The subcard 10a is skipped. ## Card 10b: Tilted surface calculation flag ## Card 10c: Tilt surface calculation parameters ##Card 10d: If tilt calculations are performed and zonal albedo of ##foreground. # print("") #The subcard is skipped ## Card 11: Spectral ranges for calculations ## Card 12: Output selection. ## Card 12a: For spectral results (IPRT >= 1) ## Card 12b & Card 12c: # print("") #The subcards 12b and 12c are skipped. # print("") #The subcard 12a is skipped ## Card 13: Circumsolar calculations ## Card 13a: Simulated radiometer parameters # print("") #The subcard 13a is skipped since no circumsolar calculations or #simulated radiometers have been requested. ## Card 14: Scanning/Smoothing virtual filter postprocessor ## Card 14a: Simulated radiometer parameters # print("") #The subcard 14a is skipped since no postprocessing is simulated. 
## Card 15: Illuminace, luminous efficacy and photosythetically active radiarion calculations ## Card 16: Special broadband UV calculations ## Card 17: Option for solar position and air mass calculations ## Card 17a: Solar position parameters: #case '0' #Enter Zenith and Azimuth of the sun #case '1' #Enter Elevation and Azimuth of the sun #case '2' #Enter air mass directly #case '3' #Enter date, time and latitude #case '4' #Enter date and time and step in min for a daily calculation. ## Input Finalization ## Run SMARTS 2.9.5 #dump = os.system('smarts295bat.exe') ## Read SMARTS 2.9.5 Output File # print("") # print("") # print("") # print("") # Return to original working directory. | 2.912663 | 3 |
dispytorch/mapreduce/node.py | LIBBLE/LIBBLE-DisPyTorch | 16 | 6614663 | '''
* Copyright (c) 2017 LIBBLE team supervised by Dr. <NAME> at Nanjing University.
* All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. '''
import torch.distributed as dist
class node:
    """Shared per-process state for one participant (server or worker) in
    the distributed training setup.

    Parameters
    ----------
    rank : int
        Rank of this process in the torch.distributed process group.
    num_workers : int
        Number of worker processes; must equal ``world_size - 1`` (one
        process in the group acts as the parameter server).
    model : torch.nn.Module
        The model being trained.
    data_loader : iterable
        Yields mini-batches; ``len()`` gives the number of batches per epoch.
    num_epochs : int
        Total number of training epochs.
    criterion : callable
        Loss-class constructor (e.g. ``torch.nn.CrossEntropyLoss``); it is
        instantiated here with ``size_average=True`` (mean reduction, in the
        legacy PyTorch argument style this codebase targets).
    cuda : bool
        Whether computation runs on GPU.
    bucket_comm : bool
        Whether gradients are communicated in flattened buckets.
    start_epoch : int, optional
        Epoch to resume training from (default 0).
    """

    def __init__(self, rank, num_workers, model,
                 data_loader, num_epochs, criterion,
                 cuda, bucket_comm, start_epoch=0):
        # Validate with an explicit raise rather than assert, so the check
        # survives running Python with -O.
        world_workers = dist.get_world_size() - 1
        if world_workers != num_workers:
            raise ValueError(
                'num_workers (%d) must equal world_size - 1 (%d)'
                % (num_workers, world_workers))
        self.rank = rank
        self.num_workers = num_workers
        self.model = model
        # Count all parameter tensors, trainable or not.
        self.num_params = sum(1 for _ in self.model.parameters())
        self.data_loader = data_loader
        self.num_batches = len(self.data_loader)
        self.num_epochs = num_epochs
        # Instantiate the loss with mean reduction over the batch.
        self.criterion = criterion(size_average=True)
        # Cap on message size for bucketed communication: 10 MB.
        MB = 1024 * 1024
        self.mpi_size = 10 * MB
        self.cuda = cuda
        self.bucket_comm = bucket_comm
        # Count only the parameter tensors that will produce gradients.
        self.num_grads = sum(1 for p in self.model.parameters() if p.requires_grad)
        self.start_epoch = start_epoch
| '''
* Copyright (c) 2017 LIBBLE team supervised by Dr. <NAME> at Nanjing University.
* All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. '''
import torch.distributed as dist
class node:
    """Shared per-process state for one participant (server or worker) in
    the distributed training setup.

    Parameters
    ----------
    rank : int
        Rank of this process in the torch.distributed process group.
    num_workers : int
        Number of worker processes; must equal ``world_size - 1`` (one
        process in the group acts as the parameter server).
    model : torch.nn.Module
        The model being trained.
    data_loader : iterable
        Yields mini-batches; ``len()`` gives the number of batches per epoch.
    num_epochs : int
        Total number of training epochs.
    criterion : callable
        Loss-class constructor (e.g. ``torch.nn.CrossEntropyLoss``); it is
        instantiated here with ``size_average=True`` (mean reduction, in the
        legacy PyTorch argument style this codebase targets).
    cuda : bool
        Whether computation runs on GPU.
    bucket_comm : bool
        Whether gradients are communicated in flattened buckets.
    start_epoch : int, optional
        Epoch to resume training from (default 0).
    """

    def __init__(self, rank, num_workers, model,
                 data_loader, num_epochs, criterion,
                 cuda, bucket_comm, start_epoch=0):
        # Validate with an explicit raise rather than assert, so the check
        # survives running Python with -O.
        world_workers = dist.get_world_size() - 1
        if world_workers != num_workers:
            raise ValueError(
                'num_workers (%d) must equal world_size - 1 (%d)'
                % (num_workers, world_workers))
        self.rank = rank
        self.num_workers = num_workers
        self.model = model
        # Count all parameter tensors, trainable or not.
        self.num_params = sum(1 for _ in self.model.parameters())
        self.data_loader = data_loader
        self.num_batches = len(self.data_loader)
        self.num_epochs = num_epochs
        # Instantiate the loss with mean reduction over the batch.
        self.criterion = criterion(size_average=True)
        # Cap on message size for bucketed communication: 10 MB.
        MB = 1024 * 1024
        self.mpi_size = 10 * MB
        self.cuda = cuda
        self.bucket_comm = bucket_comm
        # Count only the parameter tensors that will produce gradients.
        self.num_grads = sum(1 for p in self.model.parameters() if p.requires_grad)
        self.start_epoch = start_epoch
| en | 0.867431 | * Copyright (c) 2017 LIBBLE team supervised by Dr. <NAME> at Nanjing University. * All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. | 2.084811 | 2 |
elegant/gui/pose_annotation.py | drew-sinha/elegant | 0 | 6614664 | # This code is licensed under the MIT License (see LICENSE file for details)
import collections
from PyQt5 import Qt
from ris_widget.qwidgets import annotator
from .spline_overlay import spline_outline
from .. import edge_detection
from .. import worm_widths
class PoseAnnotation(annotator.AnnotationField):
    """Annotation field that records a worm's pose as a pair of splines
    stored as a (center_tck, width_tck) tuple, edited interactively via
    a SplineOutline overlay on a RisWidget viewer."""
    ENABLABLE = True
    @classmethod
    def from_experiment_metadata(cls, metadata, ris_widget, name='pose', age_factor=1):
        """Construct a PoseAnnotation using optics and width-calibration
        information stored in the experiment metadata dict."""
        width_estimator = worm_widths.WidthEstimator.from_experiment_metadata(metadata, age_factor)
        return cls(ris_widget, name, width_estimator, metadata['objective'], metadata['optocoupler'])
    def __init__(self, ris_widget, name='pose', width_estimator=None, objective=5, optocoupler=1):
        """Annotation field to record worm positions.
        Shortcuts:
        Note: these shortcuts apply to the centerline or width spline based
        on which sub-window was last clicked in.
        f / shift-f: increase/decrease overall smoothing factor of the
        centerline or width spline
        s: perform a smoothing operation on the centerline or width spline
        r: reverse the spline direction
        escape: start drawing centerline or width spline if none is extant
        delete: delete selected centerline or width spline
        shift while dragging: "fine control" mode to warp smaller areas.
        double-click: append a new endpoint to the centerline
        control-z / shift-control-z (command-z / shift-command-z on mac):
        undo / redo spline edits.
        Parameters:
        ris_widget: RisWidget instance
        name: name that the annotations will be stored in.
        width_estimator: worm_widths.WidthEstimator instance, or None.
        objective: magnification (as a float) of the objective used
        optocoupler: magnification (as a float) of the optocoupler used
        """
        self.ris_widget = ris_widget
        self.outline = spline_outline.SplineOutline(ris_widget, Qt.QColor(0, 255, 0, 128))
        # overlay calls us back whenever the user edits a spline
        self.outline.geometry_change_callbacks.append(self.on_geometry_change)
        # bounded histories of (center_tck, width_tck) tuples
        self.undo_stack = collections.deque(maxlen=100)
        self.redo_stack = collections.deque(maxlen=100)
        self.width_estimator = width_estimator
        self.objective = objective
        self.optocoupler = optocoupler
        super().__init__(name, default=(None, None))
    def init_widget(self):
        """Build the Qt control panel (called by the annotator framework)."""
        self.widget = Qt.QGroupBox(self.name)
        layout = Qt.QVBoxLayout()
        self._hbox_spacing = self.widget.style().layoutSpacing(Qt.QSizePolicy.PushButton, Qt.QSizePolicy.PushButton, Qt.Qt.Horizontal)
        layout.setSpacing(0)
        self.widget.setLayout(layout)
        # visibility toggles
        self.show_centerline = Qt.QCheckBox('Center')
        self.show_centerline.setChecked(True)
        self.show_centerline.toggled.connect(self.show_or_hide_centerline)
        self.show_outline = Qt.QCheckBox('Outline')
        self.show_outline.setChecked(True)
        self.show_outline.toggled.connect(self.show_or_hide_outline)
        self._add_row(layout, Qt.QLabel('Show:'), self.show_centerline, self.show_outline)
        # undo/redo buttons plus application-wide keyboard shortcuts
        self.undo_button = Qt.QPushButton('Undo')
        self.undo_button.clicked.connect(self.undo)
        Qt.QShortcut(Qt.QKeySequence.Undo, self.widget, self.undo, context=Qt.Qt.ApplicationShortcut)
        self.redo_button = Qt.QPushButton('Redo')
        self.redo_button.clicked.connect(self.redo)
        Qt.QShortcut(Qt.QKeySequence.Redo, self.widget, self.redo, context=Qt.Qt.ApplicationShortcut)
        self._add_row(layout, self.undo_button, self.redo_button)
        # checkable "draw" buttons: checked while a draw operation is live
        self.draw_center_button = Qt.QPushButton('Center')
        self.draw_center_button.setCheckable(True)
        self.draw_center_button.clicked.connect(self.draw_centerline)
        self.draw_width_button = Qt.QPushButton('Widths')
        self.draw_width_button.setCheckable(True)
        self.draw_width_button.clicked.connect(self.draw_widths)
        self._add_row(layout, Qt.QLabel('Draw:'), self.draw_center_button, self.draw_width_button)
        self.smooth_center_button = Qt.QPushButton('Center')
        self.smooth_center_button.clicked.connect(self.outline.center_spline.smooth)
        self.smooth_width_button = Qt.QPushButton('Widths')
        self.smooth_width_button.clicked.connect(self.outline.width_spline.smooth)
        self._add_row(layout, Qt.QLabel('Smooth:'), self.smooth_center_button, self.smooth_width_button)
        # width-specific helpers (need a width_estimator to be enabled)
        self.default_button = Qt.QPushButton('Default')
        self.default_button.clicked.connect(self.set_widths_to_default)
        self.pca_button = Qt.QPushButton('PCA')
        self.pca_button.clicked.connect(self.pca_smooth_widths)
        self._add_row(layout, Qt.QLabel('Widths:'), self.default_button, self.pca_button)
        # automatic edge-detection fitting
        self.auto_center_button = Qt.QPushButton('All')
        self.auto_center_button.clicked.connect(self.auto_center)
        self.auto_widths_button = Qt.QPushButton('Widths')
        self.auto_widths_button.clicked.connect(self.auto_widths)
        self._add_row(layout, Qt.QLabel('Auto:'), self.auto_center_button, self.auto_widths_button)
        self.reverse_button = Qt.QPushButton('Reverse')
        self.reverse_button.clicked.connect(self.outline.reverse_spline)
        Qt.QShortcut(Qt.Qt.Key_R, self.widget, self.outline.reverse_spline, context=Qt.Qt.ApplicationShortcut)
        self.fine_mode = Qt.QCheckBox('Fine')
        self.fine_mode.setChecked(False)
        self.fine_mode.toggled.connect(self.outline.set_fine_warp)
        lock_warp = Qt.QCheckBox('Lock')
        lock_warp.setChecked(False)
        lock_warp.toggled.connect(self.set_locked)
        self._add_row(layout, lock_warp, self.fine_mode, self.reverse_button)
    def _add_row(self, layout, *widgets):
        # Pack the given widgets into one horizontal row, all stretched
        # equally (Ignored horizontal policy lets columns share width).
        hbox = Qt.QHBoxLayout()
        hbox.setSpacing(self._hbox_spacing)
        layout.addLayout(hbox)
        for widget in widgets:
            sp = Qt.QSizePolicy(Qt.QSizePolicy.Ignored, Qt.QSizePolicy.Preferred)
            widget.setSizePolicy(sp)
            hbox.addWidget(widget, stretch=1)
    def on_geometry_change(self, tcks):
        """Callback from the outline overlay when spline geometry changes.

        tcks: (center_tck, width_tck) tuple of the new geometry.
        While a drag-warp is in progress the undo stack is NOT updated
        (only the final state after the warp ends is recorded)."""
        center_tck, width_tck = tcks
        self.show_or_hide_centerline(self.show_centerline.isChecked())
        if not (self.outline.center_spline.warping or self.outline.width_spline.warping):
            self.undo_stack.append(self.get_annotation()) # put current value on the undo stack
            self.redo_stack.clear()
            self._enable_buttons()
        self.update_annotation((center_tck, width_tck))
    def update_widget(self, tcks):
        # called when switching pages
        if tcks is None:
            tcks = None, None
        # undo/redo histories are per-page, so clear them on page change
        self.undo_stack.clear()
        self.redo_stack.clear()
        self.outline.geometry = tcks
        self._enable_buttons()
    def undo(self):
        self._undo_redo(self.undo_stack, self.redo_stack)
    def redo(self):
        self._undo_redo(self.redo_stack, self.undo_stack)
    def _undo_redo(self, from_stack, to_stack):
        # Pop a saved geometry from one stack, push the current geometry
        # onto the other, and apply the popped state.
        if len(from_stack) > 0:
            to_stack.append(self.get_annotation())
            new_state = from_stack.pop()
            self.outline.geometry = new_state
            self._enable_buttons()
            self.update_annotation(new_state)
    def _enable_buttons(self):
        # Enable/disable every control based on what geometry exists and
        # whether the overlay is locked against editing.
        center_tck = self.outline.center_spline.geometry
        width_tck = self.outline.width_spline.geometry
        has_center = center_tck is not None
        has_center_and_widths = has_center and width_tck is not None
        unlocked = not self.outline.center_spline.locked
        self.undo_button.setEnabled(len(self.undo_stack) > 0 and unlocked)
        self.redo_button.setEnabled(len(self.redo_stack) > 0 and unlocked)
        self.smooth_center_button.setEnabled(has_center and unlocked)
        self.smooth_width_button.setEnabled(has_center_and_widths and unlocked)
        self.draw_center_button.setEnabled(unlocked)
        self.draw_center_button.setChecked(self.outline.center_spline.drawing)
        self.draw_width_button.setEnabled(has_center and unlocked)
        self.draw_width_button.setChecked(self.outline.width_spline.drawing)
        self.default_button.setEnabled(self.width_estimator is not None and has_center and unlocked)
        self.pca_button.setEnabled(self.width_estimator is not None and has_center_and_widths and unlocked)
        self.reverse_button.setEnabled(has_center and unlocked)
        self.auto_center_button.setEnabled(has_center and unlocked)
        self.auto_widths_button.setEnabled(has_center and unlocked)
        self.fine_mode.setEnabled(unlocked)
    def set_locked(self, locked):
        # Lock/unlock the overlay against edits and refresh button states.
        self.outline.set_locked(locked)
        self._enable_buttons()
    def _change_geometry(self, center_tck=None, width_tck=None):
        """Cause a geometry change programmatically. This function takes care
        of updating the GUI and the annotation, and adding the new geometry to
        the undo stack. Passing None for either tck keeps the current value."""
        if center_tck is None:
            center_tck = self.outline.center_spline.geometry
        if width_tck is None:
            width_tck = self.outline.width_spline.geometry
        self.outline.geometry = center_tck, width_tck
        # now tell the outline to let all listeners (including us) know that
        # the geometry has changed. This will lead to the annotation and undo
        # stack getting properly updated via our on_geometry_change()
        self.outline._geometry_changed()
    def get_default_widths(self):
        # Width profile predicted for the current page's 'age' annotation,
        # or None when no estimator is configured.
        if self.width_estimator is None:
            return None
        else:
            return self.width_estimator.width_tck_for_age(self.page.annotations.get('age'))
    def set_widths_to_default(self):
        self._change_geometry(width_tck=self.get_default_widths())
    def _pca_smooth_widths(self, width_tck):
        # Smooth a width profile via the estimator's PCA model; returns
        # None when no estimator is configured.
        if self.width_estimator is None:
            return None
        mean_widths = self.width_estimator.width_profile_for_age(self.page.annotations.get('age'))
        return self.width_estimator.pca_smooth_widths(width_tck, mean_widths)
    def pca_smooth_widths(self):
        width_tck = self._pca_smooth_widths(self.outline.width_spline.geometry)
        if width_tck is not None:
            self._change_geometry(width_tck=width_tck)
    def _fit_to_image(self):
        """Run edge detection on the current image to refine the pose;
        returns a (center_tck, width_tck) candidate without applying it."""
        width_tck = self.outline.width_spline.geometry
        if width_tck is None:
            width_tck = self.get_default_widths()
        center_tck, width_tck = edge_detection.detect_edges(
            image=self.ris_widget.image.data, center_tck=self.outline.center_spline.geometry,
            width_tck=width_tck, objective=self.objective, optocoupler=self.optocoupler)
        # post-smooth the detected widths when an estimator is available
        smooth_width_tck = self._pca_smooth_widths(width_tck)
        if smooth_width_tck is not None:
            width_tck = smooth_width_tck
        return center_tck, width_tck
    def auto_center(self):
        # Apply edge-detection result to both centerline and widths.
        center_tck, width_tck = self._fit_to_image()
        self._change_geometry(center_tck, width_tck)
    def auto_widths(self):
        # Apply edge-detection result to the widths only.
        center_tck, width_tck = self._fit_to_image()
        self._change_geometry(width_tck=width_tck)
    def draw_centerline(self, draw):
        """Start (draw=True) or cancel (draw=False) interactive centerline
        drawing; the previous geometry is restored on cancel."""
        center_tck, width_tck = self.get_annotation()
        if draw:
            if width_tck is None:
                width_tck = self.get_default_widths()
            self.outline.geometry = None, width_tck
            self.outline.center_spline.start_drawing()
        else: # draw operation canceled by clicking button again
            self.outline.geometry = center_tck, width_tck
        self._enable_buttons()
    def draw_widths(self, draw):
        """Start (draw=True) or cancel (draw=False) interactive width-spline
        drawing; the previous geometry is restored on cancel."""
        center_tck, width_tck = self.get_annotation()
        if draw:
            self.outline.geometry = center_tck, None
            self.outline.width_spline.start_drawing()
        else: # draw operation canceled by clicking button again
            self.outline.geometry = center_tck, width_tck
        self._enable_buttons()
    def show_or_hide_centerline(self, show):
        # 1: For the lab frame of reference:
        # if show, then show the centerline.
        # if not, then only show if there is *no* centerline set: this way,
        # the line will be shown during manual drawing but hid once that line
        # is converted to a spline tck.
        if show or self.outline.center_spline.geometry is None:
            self.outline.center_spline.setPen(self.outline.center_spline.display_pen)
        else:
            # "hide" by setting transparent pen. This still allows for dragging
            # the hidden centerline -- which using its setVisible method prevents.
            self.outline.center_spline.setPen(Qt.QPen(Qt.Qt.transparent))
        # 2: hide or show midline in worm frame of reference
        self.outline.width_spline.midline.setVisible(show and self.outline.center_spline.geometry is not None)
    def show_or_hide_outline(self, show):
        self.outline.setVisible(show) # in lab frame of reference
        self.outline.width_spline.setVisible(show) # in worm frame
import collections
from PyQt5 import Qt
from ris_widget.qwidgets import annotator
from .spline_overlay import spline_outline
from .. import edge_detection
from .. import worm_widths
class PoseAnnotation(annotator.AnnotationField):
ENABLABLE = True
@classmethod
def from_experiment_metadata(cls, metadata, ris_widget, name='pose', age_factor=1):
width_estimator = worm_widths.WidthEstimator.from_experiment_metadata(metadata, age_factor)
return cls(ris_widget, name, width_estimator, metadata['objective'], metadata['optocoupler'])
def __init__(self, ris_widget, name='pose', width_estimator=None, objective=5, optocoupler=1):
"""Annotation field to record worm positions.
Shortcuts:
Note: these shortcuts apply to the centerline or width spline based
on which sub-window was last clicked in.
f / shift-f: increase/decrease overall smoothing factor of the
centerline or width spline
s: perform a smoothing operation on the centerline or width spline
r: reverse the spline direction
escape: start drawing centerline or width spline if none is extant
delete: delete selected centerline or width spline
shift while dragging: "fine control" mode to warp smaller areas.
double-click: append a new endpoint to the centerline
control-z / shift-control-z (command-z / shift-command-z on mac):
undo / redo spline edits.
Parameters:
ris_widget: RisWidget instance
name: name that the annotations will be stored in.
width_estimator: worm_widths.WidthEstimator instance, or None.
objective: magnification (as a float) of the objective used
optocoupler: magnification (as a float) of the optocoupler used
"""
self.ris_widget = ris_widget
self.outline = spline_outline.SplineOutline(ris_widget, Qt.QColor(0, 255, 0, 128))
self.outline.geometry_change_callbacks.append(self.on_geometry_change)
self.undo_stack = collections.deque(maxlen=100)
self.redo_stack = collections.deque(maxlen=100)
self.width_estimator = width_estimator
self.objective = objective
self.optocoupler = optocoupler
super().__init__(name, default=(None, None))
def init_widget(self):
self.widget = Qt.QGroupBox(self.name)
layout = Qt.QVBoxLayout()
self._hbox_spacing = self.widget.style().layoutSpacing(Qt.QSizePolicy.PushButton, Qt.QSizePolicy.PushButton, Qt.Qt.Horizontal)
layout.setSpacing(0)
self.widget.setLayout(layout)
self.show_centerline = Qt.QCheckBox('Center')
self.show_centerline.setChecked(True)
self.show_centerline.toggled.connect(self.show_or_hide_centerline)
self.show_outline = Qt.QCheckBox('Outline')
self.show_outline.setChecked(True)
self.show_outline.toggled.connect(self.show_or_hide_outline)
self._add_row(layout, Qt.QLabel('Show:'), self.show_centerline, self.show_outline)
self.undo_button = Qt.QPushButton('Undo')
self.undo_button.clicked.connect(self.undo)
Qt.QShortcut(Qt.QKeySequence.Undo, self.widget, self.undo, context=Qt.Qt.ApplicationShortcut)
self.redo_button = Qt.QPushButton('Redo')
self.redo_button.clicked.connect(self.redo)
Qt.QShortcut(Qt.QKeySequence.Redo, self.widget, self.redo, context=Qt.Qt.ApplicationShortcut)
self._add_row(layout, self.undo_button, self.redo_button)
self.draw_center_button = Qt.QPushButton('Center')
self.draw_center_button.setCheckable(True)
self.draw_center_button.clicked.connect(self.draw_centerline)
self.draw_width_button = Qt.QPushButton('Widths')
self.draw_width_button.setCheckable(True)
self.draw_width_button.clicked.connect(self.draw_widths)
self._add_row(layout, Qt.QLabel('Draw:'), self.draw_center_button, self.draw_width_button)
self.smooth_center_button = Qt.QPushButton('Center')
self.smooth_center_button.clicked.connect(self.outline.center_spline.smooth)
self.smooth_width_button = Qt.QPushButton('Widths')
self.smooth_width_button.clicked.connect(self.outline.width_spline.smooth)
self._add_row(layout, Qt.QLabel('Smooth:'), self.smooth_center_button, self.smooth_width_button)
self.default_button = Qt.QPushButton('Default')
self.default_button.clicked.connect(self.set_widths_to_default)
self.pca_button = Qt.QPushButton('PCA')
self.pca_button.clicked.connect(self.pca_smooth_widths)
self._add_row(layout, Qt.QLabel('Widths:'), self.default_button, self.pca_button)
self.auto_center_button = Qt.QPushButton('All')
self.auto_center_button.clicked.connect(self.auto_center)
self.auto_widths_button = Qt.QPushButton('Widths')
self.auto_widths_button.clicked.connect(self.auto_widths)
self._add_row(layout, Qt.QLabel('Auto:'), self.auto_center_button, self.auto_widths_button)
self.reverse_button = Qt.QPushButton('Reverse')
self.reverse_button.clicked.connect(self.outline.reverse_spline)
Qt.QShortcut(Qt.Qt.Key_R, self.widget, self.outline.reverse_spline, context=Qt.Qt.ApplicationShortcut)
self.fine_mode = Qt.QCheckBox('Fine')
self.fine_mode.setChecked(False)
self.fine_mode.toggled.connect(self.outline.set_fine_warp)
lock_warp = Qt.QCheckBox('Lock')
lock_warp.setChecked(False)
lock_warp.toggled.connect(self.set_locked)
self._add_row(layout, lock_warp, self.fine_mode, self.reverse_button)
def _add_row(self, layout, *widgets):
hbox = Qt.QHBoxLayout()
hbox.setSpacing(self._hbox_spacing)
layout.addLayout(hbox)
for widget in widgets:
sp = Qt.QSizePolicy(Qt.QSizePolicy.Ignored, Qt.QSizePolicy.Preferred)
widget.setSizePolicy(sp)
hbox.addWidget(widget, stretch=1)
def on_geometry_change(self, tcks):
center_tck, width_tck = tcks
self.show_or_hide_centerline(self.show_centerline.isChecked())
if not (self.outline.center_spline.warping or self.outline.width_spline.warping):
self.undo_stack.append(self.get_annotation()) # put current value on the undo stack
self.redo_stack.clear()
self._enable_buttons()
self.update_annotation((center_tck, width_tck))
def update_widget(self, tcks):
# called when switching pages
if tcks is None:
tcks = None, None
self.undo_stack.clear()
self.redo_stack.clear()
self.outline.geometry = tcks
self._enable_buttons()
def undo(self):
self._undo_redo(self.undo_stack, self.redo_stack)
def redo(self):
self._undo_redo(self.redo_stack, self.undo_stack)
def _undo_redo(self, from_stack, to_stack):
if len(from_stack) > 0:
to_stack.append(self.get_annotation())
new_state = from_stack.pop()
self.outline.geometry = new_state
self._enable_buttons()
self.update_annotation(new_state)
def _enable_buttons(self):
center_tck = self.outline.center_spline.geometry
width_tck = self.outline.width_spline.geometry
has_center = center_tck is not None
has_center_and_widths = has_center and width_tck is not None
unlocked = not self.outline.center_spline.locked
self.undo_button.setEnabled(len(self.undo_stack) > 0 and unlocked)
self.redo_button.setEnabled(len(self.redo_stack) > 0 and unlocked)
self.smooth_center_button.setEnabled(has_center and unlocked)
self.smooth_width_button.setEnabled(has_center_and_widths and unlocked)
self.draw_center_button.setEnabled(unlocked)
self.draw_center_button.setChecked(self.outline.center_spline.drawing)
self.draw_width_button.setEnabled(has_center and unlocked)
self.draw_width_button.setChecked(self.outline.width_spline.drawing)
self.default_button.setEnabled(self.width_estimator is not None and has_center and unlocked)
self.pca_button.setEnabled(self.width_estimator is not None and has_center_and_widths and unlocked)
self.reverse_button.setEnabled(has_center and unlocked)
self.auto_center_button.setEnabled(has_center and unlocked)
self.auto_widths_button.setEnabled(has_center and unlocked)
self.fine_mode.setEnabled(unlocked)
def set_locked(self, locked):
self.outline.set_locked(locked)
self._enable_buttons()
def _change_geometry(self, center_tck=None, width_tck=None):
"""Cause a geometry change programmatically. This function takes care
of updating the GUI and the annotation, and adding the new geometry to
the undo stack."""
if center_tck is None:
center_tck = self.outline.center_spline.geometry
if width_tck is None:
width_tck = self.outline.width_spline.geometry
self.outline.geometry = center_tck, width_tck
# now tell the outline to let all listeners (including us) know that
# the geometry has changed. This will lead to the annotation and undo
# stack getting properly updated via our on_geometry_change()
self.outline._geometry_changed()
def get_default_widths(self):
if self.width_estimator is None:
return None
else:
return self.width_estimator.width_tck_for_age(self.page.annotations.get('age'))
def set_widths_to_default(self):
self._change_geometry(width_tck=self.get_default_widths())
def _pca_smooth_widths(self, width_tck):
if self.width_estimator is None:
return None
mean_widths = self.width_estimator.width_profile_for_age(self.page.annotations.get('age'))
return self.width_estimator.pca_smooth_widths(width_tck, mean_widths)
def pca_smooth_widths(self):
width_tck = self._pca_smooth_widths(self.outline.width_spline.geometry)
if width_tck is not None:
self._change_geometry(width_tck=width_tck)
def _fit_to_image(self):
width_tck = self.outline.width_spline.geometry
if width_tck is None:
width_tck = self.get_default_widths()
center_tck, width_tck = edge_detection.detect_edges(
image=self.ris_widget.image.data, center_tck=self.outline.center_spline.geometry,
width_tck=width_tck, objective=self.objective, optocoupler=self.optocoupler)
smooth_width_tck = self._pca_smooth_widths(width_tck)
if smooth_width_tck is not None:
width_tck = smooth_width_tck
return center_tck, width_tck
def auto_center(self):
center_tck, width_tck = self._fit_to_image()
self._change_geometry(center_tck, width_tck)
def auto_widths(self):
center_tck, width_tck = self._fit_to_image()
self._change_geometry(width_tck=width_tck)
def draw_centerline(self, draw):
center_tck, width_tck = self.get_annotation()
if draw:
if width_tck is None:
width_tck = self.get_default_widths()
self.outline.geometry = None, width_tck
self.outline.center_spline.start_drawing()
else: # draw operation canceled by clicking button again
self.outline.geometry = center_tck, width_tck
self._enable_buttons()
def draw_widths(self, draw):
center_tck, width_tck = self.get_annotation()
if draw:
self.outline.geometry = center_tck, None
self.outline.width_spline.start_drawing()
else: # draw operation canceled by clicking button again
self.outline.geometry = center_tck, width_tck
self._enable_buttons()
def show_or_hide_centerline(self, show):
# 1: For the lab frame of reference:
# if show, then show the centerline.
# if not, then only show if there is *no* centerline set: this way,
# the line will be shown during manual drawing but hid once that line
# is converted to a spline tck.
if show or self.outline.center_spline.geometry is None:
self.outline.center_spline.setPen(self.outline.center_spline.display_pen)
else:
# "hide" by setting transparent pen. This still allows for dragging
# the hidden centerline -- which using its setVisible method prevents.
self.outline.center_spline.setPen(Qt.QPen(Qt.Qt.transparent))
# 2: hide or show midline in worm frame of reference
self.outline.width_spline.midline.setVisible(show and self.outline.center_spline.geometry is not None)
def show_or_hide_outline(self, show):
self.outline.setVisible(show) # in lab frame of reference
self.outline.width_spline.setVisible(show) # in worm frame | en | 0.825096 | # This code is licensed under the MIT License (see LICENSE file for details) Annotation field to record worm positions. Shortcuts: Note: these shortcuts apply to the centerline or width spline based on which sub-window was last clicked in. f / shift-f: increase/decrease overall smoothing factor of the centerline or width spline s: perform a smoothing operation on the centerline or width spline r: reverse the spline direction escape: start drawing centerline or width spline if none is extant delete: delete selected centerline or width spline shift while dragging: "fine control" mode to warp smaller areas. double-click: append a new endpoint to the centerline control-z / shift-control-z (command-z / shift-command-z on mac): undo / redo spline edits. Parameters: ris_widget: RisWidget instance name: name that the annotations will be stored in. width_estimator: worm_widths.WidthEstimator instance, or None. objective: magnification (as a float) of the objective used optocoupler: magnification (as a float) of the optocoupler used # put current value on the undo stack # called when switching pages Cause a geometry change programmatically. This function takes care of updating the GUI and the annotation, and adding the new geometry to the undo stack. # now tell the outline to let all listeners (including us) know that # the geometry has changed. This will lead to the annotation and undo # stack getting properly updated via our on_geometry_change() # draw operation canceled by clicking button again # draw operation canceled by clicking button again # 1: For the lab frame of reference: # if show, then show the centerline. # if not, then only show if there is *no* centerline set: this way, # the line will be shown during manual drawing but hid once that line # is converted to a spline tck. # "hide" by setting transparent pen. 
This still allows for dragging # the hidden centerline -- which using its setVisible method prevents. # 2: hide or show midline in worm frame of reference # in lab frame of reference # in worm frame | 2.282204 | 2 |
hupwatch/args_parser.py | swistakm/hupwatch | 8 | 6614665 | # -*- coding: utf-8 -*-
import argparse
import sys
import logging
logger = logging.getLogger(__name__)
class CustomFormatter(argparse.HelpFormatter):
    """Help formatter that advertises the trailing ``-- command [arguments]``
    in the usage line.

    Everything after ``--`` is forwarded verbatim to the supervised command
    (see parse_args), so argparse never sees it; this formatter injects a
    fake positional action purely so the usage text mentions it.
    """
    def __init__(self, prog):
        # default max_help_position increased for readability
        super(CustomFormatter, self).__init__(prog, max_help_position=50)
    def add_usage(self, usage, actions, groups, prefix=None):
        """ Hack add_usage to add fake "-- command [arguments]" to the usage

        Works on a *copy* of the actions sequence: the previous
        implementation appended to the list argparse passed in (which is
        the parser's own ``_actions`` for format_usage/format_help), so
        every render accumulated one more fake entry.
        """
        fake_action = argparse._StoreAction(
            option_strings=[],
            dest="-- command [arguments]"
        )
        return super(CustomFormatter, self).add_usage(
            usage, list(actions) + [fake_action], groups, prefix
        )
def get_parser():
    """ Create hupwatch argument parser with a set of reasonable defaults
    :return: argument parser
    """
    parser = argparse.ArgumentParser(
        "hupwatch",
        description="Graceful reloader for services",
        formatter_class=CustomFormatter,
    )
    verbose_help = "enable logging to stdout (use multiple times to increase verbosity)"  # noqa
    parser.add_argument("-v", "--verbose", action="count", help=verbose_help)
    # note: there is small amount of warmup time by default because
    # it is necessary in order to find if process actually started
    # in case of obvious issues like syntax errors so hupwatch
    # can abort the reload
    warmup_help = "Time for warmup of new service before attempting to shutdown the old one"  # noqa
    parser.add_argument(
        "-w", "--warmup-time",
        metavar="SEC",
        type=float,
        default=1,
        help=warmup_help,
    )
    parser.add_argument(
        "-k", "--kill-at-exit",
        action="store_true",
        help="Kill the child process when HUP watch exits",
    )
    return parser
def parse_args():
    """ Parse program arguments.
    This function ensures that argv arguments after '--' won't be parsed by
    `argparse` and will be returned as a separate list.
    :return: (args, command) two-tuple
    """
    parser = get_parser()
    try:
        split_point = sys.argv.index('--')
    except ValueError:
        # No '--' separator: either the user only wants help, or the
        # command to supervise is missing.
        if "--help" in sys.argv or "-h" in sys.argv or len(sys.argv) == 1:
            parser.print_help()
            sys.exit(0)
        else:
            # Follow argparse's own error convention: usage + message on
            # stderr, non-zero exit. (The original printed to stdout and,
            # via print()'s separator, emitted "hupwatch : error: ..."
            # with a stray space before the colon.)
            parser.print_usage(sys.stderr)
            print("%s: error: command missing" % parser.prog, file=sys.stderr)
            sys.exit(1)
    else:
        argv = sys.argv[1:split_point]
        # everything after '--' is the command to run, untouched
        invocation = sys.argv[split_point + 1:]
        args = parser.parse_args(argv)
        return args, invocation
| # -*- coding: utf-8 -*-
import argparse
import sys
import logging
logger = logging.getLogger(__name__)
class CustomFormatter(argparse.HelpFormatter):
def __init__(self, prog):
# default max_help_position increased for readability
super(CustomFormatter, self).__init__(prog, max_help_position=50)
def add_usage(self, usage, actions, groups, prefix=None):
""" Hack add_usage to add fake "-- command [arguments]" to the usage
"""
actions.append(argparse._StoreAction(
option_strings=[],
dest="-- command [arguments]"
))
return super(CustomFormatter, self).add_usage(
usage, actions, groups, prefix
)
def get_parser():
""" Create hupwatch argument parser with a set of reasonable defaults
:return: argument parser
"""
parser = argparse.ArgumentParser(
"hupwatch",
description="Graceful reloader for services",
formatter_class=CustomFormatter,
)
parser.add_argument(
"-v", "--verbose",
action="count",
help="enable logging to stdout (use multiple times to increase verbosity)", # noqa
)
parser.add_argument(
'-w', '--warmup-time',
metavar='SEC',
type=float,
# note: there is small amount of warmup time by default because
# it is necessary in order to find if process actually started
# in case of obvious issues like syntax errors so hupwatch
# can abort the reload
default=1,
help="Time for warmup of new service before attempting to shutdown the old one", # noqa
)
parser.add_argument(
'-k', '--kill-at-exit',
action="store_true",
help="Kill the child process when HUP watch exits"
)
return parser
def parse_args():
""" Parse program arguments.
This function ensures that argv arguments after '--' won't be parsed by
`argparse` and will be returned as a separate list.
:return: (args, command) two-tuple
"""
parser = get_parser()
try:
split_point = sys.argv.index('--')
except ValueError:
if "--help" in sys.argv or "-h" in sys.argv or len(sys.argv) == 1:
parser.print_help()
exit(0)
else:
parser.print_usage()
print(parser.prog, ": error: command missing")
exit(1)
else:
argv = sys.argv[1:split_point]
invocation = sys.argv[split_point + 1:]
args = parser.parse_args(argv)
return args, invocation
| en | 0.776624 | # -*- coding: utf-8 -*- # default max_help_position increased for readability Hack add_usage to add fake "-- command [arguments]" to the usage Create hupwatch argument parser with a set of reasonable defaults :return: argument parser # noqa # note: there is small amount of warmup time by default because # it is necessary in order to find if process actually started # in case of obvious issues like syntax errors so hupwatch # can abort the reload # noqa Parse program arguments. This function ensures that argv arguments after '--' won't be parsed by `argparse` and will be returned as a separate list. :return: (args, command) two-tuple | 2.452481 | 2 |
src/generative_playground/models/decoder/resnet_rnn.py | ZmeiGorynych/generative_playground | 9 | 6614666 | <gh_stars>1-10
import torch
from torch import nn as nn
from torch.autograd import Variable
from torch.nn import LayerNorm, functional as F
from generative_playground.data_utils.to_one_hot import to_one_hot
from generative_playground.utils.gpu_utils import to_gpu
class NormGRUStepLayer(nn.Module):
def __init__(self,
hidden_n = 200,
drop_rate = 0.1):
super().__init__()
self.hidden_n = hidden_n
self.gru = nn.GRU(input_size=hidden_n,
hidden_size=hidden_n,
batch_first=True,
num_layers=1)
self.output_shape = [None, 1, hidden_n]
self.layer_norm = LayerNorm(self.output_shape[1:])
self.dropout = nn.Dropout(drop_rate)
self.hidden = None
def forward(self, x, remember_step=True):
out_1, new_hidden = self.gru(x, self.hidden)
if remember_step:
self.hidden = new_hidden
out_2 = self.dropout(out_1)
out_3 = self.layer_norm(out_2 + x)
return out_3
def reset_state(self, batch_size):
self.hidden = self.init_hidden(batch_size)
def init_hidden(self, batch_size):
# NOTE: assume only 1 layer no bi-direction
h1 = Variable(to_gpu(torch.zeros(1, batch_size, self.hidden_n)), requires_grad=False)
return h1
class ResNetRNNDecoder(nn.Module):
    # implementation matches model_eq.py _buildDecoder, at least in intent
    """Stateful step-wise decoder: a stack of residual GRU layers driven by
    an encoder latent, emitting one step of logits per forward() call,
    optionally conditioned on the previous action (one-hot)."""
    def __init__(self,
                 z_size=200,
                 hidden_n=200,
                 feature_len=12,
                 max_seq_length=15, # total max sequence length
                 steps=1, # how many steps to do at each call
                 drop_rate=0.0,
                 num_layers=3,
                 use_last_action=False):
        """z_size: encoder latent dimension; hidden_n: GRU layer width;
        feature_len: per-step logit/action-space size; num_layers: number
        of NormGRUStepLayer blocks; use_last_action: if True, a one-hot of
        the previous action is concatenated onto the latent each step."""
        super().__init__()
        self.max_seq_length = max_seq_length
        self.steps = steps
        if use_last_action:
            # effective input size grows by the one-hot action width
            eff_z_size = z_size + feature_len
        else:
            eff_z_size = z_size
        self.z_size = z_size
        self.hidden_n = hidden_n
        self.num_layers = num_layers
        self.output_feature_size = feature_len
        self.use_last_action = use_last_action
        # TODO: is the batchNorm applied on the correct dimension?
        #self.batch_norm = nn.BatchNorm1d(eff_z_size)
        self.fc_input = nn.Linear(eff_z_size, hidden_n)
        self.dropout_1 = nn.Dropout(drop_rate)
        self.layer_stack = nn.ModuleList([NormGRUStepLayer(hidden_n, drop_rate) for _ in range(num_layers)])
        self.fc_out = nn.Linear(hidden_n, feature_len)
        self.output_shape = [None, 1, hidden_n] #[None, 1, feature_len]
    def encode(self, enc_output, last_action):
        """Optionally concatenate a one-hot of last_action onto the latent.
        Returns enc_output unchanged when use_last_action is False."""
        if not self.use_last_action:
            return enc_output
        else:
            if last_action is not None and last_action[0] is not None:
                # if the above is false, it uses the original value of self.one_hot_action, which is zeros
                self.one_hot_action = to_one_hot(last_action,
                                                 n_dims=self.output_feature_size,
                                                 out=self.one_hot_action)
            encoded = torch.cat([enc_output, self.one_hot_action], 1)
            return encoded
    def forward(self, last_action=None, last_action_pos=None, remember_step=True):
        '''
        One step of the RNN model
        :param enc_output: batch x z_size, so don't support sequences
        :param last_action: batch of ints, all equaling None for first step
        :param last_action_pos: ignored, used by the attention decoder, here just to get the signature right
        :return: batch x steps x feature_len
        '''
        # check we don't exceed max sequence length
        if self.n == self.max_seq_length:
            raise StopIteration()
        if remember_step:
            self.n += self.steps
        if self.one_hot_action is None: # first step after reset
            # need to do it here as batch size might be different for each sequence
            self.one_hot_action = to_gpu(torch.zeros(self.batch_size, self.output_feature_size))
        encoded = self.encode(self.enc_output, last_action)
        # copy the latent state to length of sequence, instead of sampling inputs
        embedded = F.relu(self.fc_input(
            #self.batch_norm(encoded)
            encoded
        )) \
            .view(self.batch_size, 1, self.hidden_n)# \
            #.repeat(1, self.steps, 1)
        out = self.dropout_1(embedded)
        # run the GRUs on it
        for dec_layer in self.layer_stack:
            out = dec_layer(out,remember_step)
        # tmp has dim (batch_size*seq_len)xhidden_n, so we can apply the linear transform to it
        #tmp = self.dropout_2(out.contiguous().view(-1, self.hidden_n))
        tmp = out.contiguous().view(-1, self.hidden_n)
        out = self.fc_out(tmp).view(self.batch_size,
                                    1,
                                    self.output_feature_size)
        return out
    def init_encoder_output(self, z):
        '''
        Must be called at the start of each new sequence
        :param z:
        :return:
        '''
        # reset per-sequence state: latent, step counter, one-hot buffer,
        # and each GRU layer's recurrent hidden state
        self.one_hot_action = None
        self.enc_output = z
        self.batch_size = z.size()[0]
        for dec_layer in self.layer_stack:
            dec_layer.reset_state(self.batch_size)
        self.z_size = z.size()[-1]
        self.n = 0
from torch import nn as nn
from torch.autograd import Variable
from torch.nn import LayerNorm, functional as F
from generative_playground.data_utils.to_one_hot import to_one_hot
from generative_playground.utils.gpu_utils import to_gpu
class NormGRUStepLayer(nn.Module):
    """A single GRU step wrapped with dropout, a residual connection and LayerNorm.

    The layer keeps its own recurrent hidden state between calls so it can be
    driven one token at a time; call ``reset_state`` before each new sequence.
    """

    def __init__(self, hidden_n=200, drop_rate=0.1):
        super().__init__()
        self.hidden_n = hidden_n
        # Single uni-directional GRU layer operating on batch-first tensors.
        self.gru = nn.GRU(input_size=hidden_n,
                          hidden_size=hidden_n,
                          batch_first=True,
                          num_layers=1)
        self.output_shape = [None, 1, hidden_n]
        self.layer_norm = LayerNorm(self.output_shape[1:])
        self.dropout = nn.Dropout(drop_rate)
        self.hidden = None

    def forward(self, x, remember_step=True):
        """Run one GRU step over ``x`` (batch x 1 x hidden_n) and return
        LayerNorm(dropout(gru(x)) + x); optionally commit the new hidden state."""
        gru_out, next_hidden = self.gru(x, self.hidden)
        if remember_step:
            self.hidden = next_hidden
        # Residual connection around the GRU output, then normalize.
        return self.layer_norm(self.dropout(gru_out) + x)

    def reset_state(self, batch_size):
        """Start a fresh sequence by re-initializing the recurrent state."""
        self.hidden = self.init_hidden(batch_size)

    def init_hidden(self, batch_size):
        # NOTE: assumes a single uni-directional layer, hence leading dim of 1.
        return Variable(to_gpu(torch.zeros(1, batch_size, self.hidden_n)),
                        requires_grad=False)
class ResNetRNNDecoder(nn.Module):
# implementation matches model_eq.py _buildDecoder, at least in intent
def __init__(self,
             z_size=200,
             hidden_n=200,
             feature_len=12,
             max_seq_length=15,  # total max sequence length
             steps=1,            # how many steps to do at each call
             drop_rate=0.0,
             num_layers=3,
             use_last_action=False):
    """Stack of residual GRU step layers that decodes a latent vector one token at a time."""
    super().__init__()
    self.max_seq_length = max_seq_length
    self.steps = steps
    # When the previous action is fed back in, the input grows by one one-hot block.
    eff_z_size = z_size + feature_len if use_last_action else z_size
    self.z_size = z_size
    self.hidden_n = hidden_n
    self.num_layers = num_layers
    self.output_feature_size = feature_len
    self.use_last_action = use_last_action
    # TODO: is the batchNorm applied on the correct dimension?
    # self.batch_norm = nn.BatchNorm1d(eff_z_size)
    self.fc_input = nn.Linear(eff_z_size, hidden_n)
    self.dropout_1 = nn.Dropout(drop_rate)
    self.layer_stack = nn.ModuleList(
        [NormGRUStepLayer(hidden_n, drop_rate) for _ in range(num_layers)])
    self.fc_out = nn.Linear(hidden_n, feature_len)
    self.output_shape = [None, 1, hidden_n]  # [None, 1, feature_len]
def encode(self, enc_output, last_action):
if not self.use_last_action:
return enc_output
else:
if last_action is not None and last_action[0] is not None:
# if the above is false, it uses the original value of self.one_hot_action, which is zeros
self.one_hot_action = to_one_hot(last_action,
n_dims=self.output_feature_size,
out=self.one_hot_action)
encoded = torch.cat([enc_output, self.one_hot_action], 1)
return encoded
def forward(self, last_action=None, last_action_pos=None, remember_step=True):
    '''
    One step of the RNN model; init_encoder_output() must have been called at
    the start of the sequence (it stores the latent code self.enc_output of
    shape batch x z_size, so sequences of latents are not supported).
    :param last_action: batch of ints, all equaling None for first step
    :param last_action_pos: ignored, used by the attention decoder, here just to get the signature right
    :param remember_step: if False, run the step without committing the step
        counter or the GRU hidden states, so the step can be re-evaluated
    :return: batch x steps x feature_len
    :raises StopIteration: once max_seq_length steps have been consumed
    '''
    # check we don't exceed max sequence length
    if self.n == self.max_seq_length:
        raise StopIteration()
    if remember_step:
        self.n += self.steps
    if self.one_hot_action is None:  # first step after reset
        # need to do it here as batch size might be different for each sequence
        self.one_hot_action = to_gpu(torch.zeros(self.batch_size, self.output_feature_size))
    # encode() optionally concatenates the one-hot of the previous action
    # onto the stored latent code.
    encoded = self.encode(self.enc_output, last_action)
    # copy the latent state to length of sequence, instead of sampling inputs
    embedded = F.relu(self.fc_input(
        #self.batch_norm(encoded)
        encoded
    )) \
        .view(self.batch_size, 1, self.hidden_n)# \
        #.repeat(1, self.steps, 1)
    out = self.dropout_1(embedded)
    # run the GRUs on it; each layer carries its own recurrent state
    for dec_layer in self.layer_stack:
        out = dec_layer(out,remember_step)
    # tmp has dim (batch_size*seq_len)xhidden_n, so we can apply the linear transform to it
    #tmp = self.dropout_2(out.contiguous().view(-1, self.hidden_n))
    tmp = out.contiguous().view(-1, self.hidden_n)
    out = self.fc_out(tmp).view(self.batch_size,
                                1,
                                self.output_feature_size)
    return out
def init_encoder_output(self, z):
    '''
    Must be called at the start of each new sequence
    :param z: latent code for the sequence, shape batch x z_size
    :return:
    '''
    self.one_hot_action = None  # lazily re-created as zeros on the first forward()
    self.enc_output = z
    self.batch_size = z.size()[0]
    # Each GRU step layer keeps its own hidden state; clear them for the new sequence.
    for dec_layer in self.layer_stack:
        dec_layer.reset_state(self.batch_size)
    self.z_size = z.size()[-1]
self.n = 0 | en | 0.802899 | # NOTE: assume only 1 layer no bi-direction # implementation matches model_eq.py _buildDecoder, at least in intent # total max sequence length # how many steps to do at each call # TODO: is the batchNorm applied on the correct dimension? #self.batch_norm = nn.BatchNorm1d(eff_z_size) #[None, 1, feature_len] # if the above is false, it uses the original value of self.one_hot_action, which is zeros One step of the RNN model :param enc_output: batch x z_size, so don't support sequences :param last_action: batch of ints, all equaling None for first step :param last_action_pos: ignored, used by the attention decoder, here just to get the signature right :return: batch x steps x feature_len # check we don't exceed max sequence length # first step after reset # need to do it here as batch size might be different for each sequence # copy the latent state to length of sequence, instead of sampling inputs #self.batch_norm(encoded) # \ #.repeat(1, self.steps, 1) # run the GRUs on it # tmp has dim (batch_size*seq_len)xhidden_n, so we can apply the linear transform to it #tmp = self.dropout_2(out.contiguous().view(-1, self.hidden_n)) Must be called at the start of each new sequence :param z: :return: | 2.612597 | 3 |
19_wod/using_pandas.py | frank-gear/tiny_python_projects | 0 | 6614667 | <gh_stars>0
#!/usr/bin/env python3
# Load the exercises table from CSV and print it as a pandas DataFrame.
import pandas as pd
df = pd.read_csv('inputs/exercises.csv')
print(df)
| #!/usr/bin/env python3
import pandas as pd
df = pd.read_csv('inputs/exercises.csv')
print(df) | fr | 0.221828 | #!/usr/bin/env python3 | 2.578818 | 3 |
Registration/optical_flow_tvl1.py | Joevaen/Scikit-image_On_CT | 0 | 6614668 | <reponame>Joevaen/Scikit-image_On_CT
# Coarse optical-flow estimator.
#
# The TV-L1 solver is applied at every level of an image pyramid. TV-L1 is a
# popular optical-flow estimation algorithm introduced by Zach et al. [1],
# improved in [2] and described in detail in [3].
import numpy as np
from matplotlib import pyplot as plt
from skimage.color import rgb2gray
from skimage.data import stereo_motorcycle, vortex
from skimage.transform import warp
from skimage.registration import optical_flow_tvl1, optical_flow_ilk
# --- Load the sequence
image0, image1, disp = stereo_motorcycle()
# --- Convert the images to gray level: color is not supported.
image0 = rgb2gray(image0)
image1 = rgb2gray(image1)
# --- Compute the optical flow
v, u = optical_flow_tvl1(image0, image1)
# --- Use the estimated optical flow for registration:
# warp frame 1 back onto frame 0 along the estimated (v, u) displacement field.
nr, nc = image0.shape
row_coords, col_coords = np.meshgrid(np.arange(nr), np.arange(nc),
                                     indexing='ij')
image1_warp = warp(image1, np.array([row_coords + v, col_coords + u]),
                   mode='nearest')
# build an RGB image with the unregistered sequence
seq_im = np.zeros((nr, nc, 3))
seq_im[..., 0] = image1
seq_im[..., 1] = image0
seq_im[..., 2] = image0
# build an RGB image with the registered sequence
reg_im = np.zeros((nr, nc, 3))
reg_im[..., 0] = image1_warp
reg_im[..., 1] = image0
reg_im[..., 2] = image0
# build an RGB image with the target (reference) frame only
target_im = np.zeros((nr, nc, 3))
target_im[..., 0] = image0
target_im[..., 1] = image0
target_im[..., 2] = image0
# --- Show the result
fig, (ax0, ax1, ax2) = plt.subplots(3, 1, figsize=(5, 10))
ax0.imshow(seq_im)
ax0.set_title("Unregistered sequence")
ax0.set_axis_off()
ax1.imshow(reg_im)
ax1.set_title("Registered sequence")
ax1.set_axis_off()
ax2.imshow(target_im)
ax2.set_title("Target")
ax2.set_axis_off()
fig.tight_layout()
# --- Second example: iterative Lucas-Kanade (iLK) flow on the vortex sequence
image0, image1 = vortex()
# --- Compute the optical flow
v, u = optical_flow_ilk(image0, image1, radius=15)
# --- Compute flow magnitude
norm = np.sqrt(u ** 2 + v ** 2)
# --- Display
fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(8, 4))
# --- Sequence image sample
ax0.imshow(image0, cmap='gray')
ax0.set_title("Sequence image sample")
ax0.set_axis_off()
# --- Quiver plot arguments
nvec = 20  # Number of vectors to be displayed along each image dimension
nl, nc = image0.shape
step = max(nl//nvec, nc//nvec)
y, x = np.mgrid[:nl:step, :nc:step]
u_ = u[::step, ::step]
v_ = v[::step, ::step]
ax1.imshow(norm)
ax1.quiver(x, y, u_, v_, color='r', units='dots',
           angles='xy', scale_units='xy', lw=3)
ax1.set_title("Optical flow magnitude and vector field")
ax1.set_axis_off()
fig.tight_layout()
plt.show()
#
# TV-L1求解器应用于图像金字塔的每个级别。 TV-L1是Zack等人介绍的一种流行的光流估计算法。 [1],在[2]中进行了改进,并在[3]中进行了详细说明。
import numpy as np
from matplotlib import pyplot as plt
from skimage.color import rgb2gray
from skimage.data import stereo_motorcycle, vortex
from skimage.transform import warp
from skimage.registration import optical_flow_tvl1, optical_flow_ilk
# --- Load the sequence
image0, image1, disp = stereo_motorcycle()
# --- Convert the images to gray level: color is not supported.
image0 = rgb2gray(image0)
image1 = rgb2gray(image1)
# --- Compute the optical flow
v, u = optical_flow_tvl1(image0, image1)
# --- Use the estimated optical flow for registration
nr, nc = image0.shape
row_coords, col_coords = np.meshgrid(np.arange(nr), np.arange(nc),
indexing='ij')
image1_warp = warp(image1, np.array([row_coords + v, col_coords + u]),
mode='nearest')
# build an RGB image with the unregistered sequence
seq_im = np.zeros((nr, nc, 3))
seq_im[..., 0] = image1
seq_im[..., 1] = image0
seq_im[..., 2] = image0
# build an RGB image with the registered sequence
reg_im = np.zeros((nr, nc, 3))
reg_im[..., 0] = image1_warp
reg_im[..., 1] = image0
reg_im[..., 2] = image0
# build an RGB image with the registered sequence
target_im = np.zeros((nr, nc, 3))
target_im[..., 0] = image0
target_im[..., 1] = image0
target_im[..., 2] = image0
# --- Show the result
fig, (ax0, ax1, ax2) = plt.subplots(3, 1, figsize=(5, 10))
ax0.imshow(seq_im)
ax0.set_title("Unregistered sequence")
ax0.set_axis_off()
ax1.imshow(reg_im)
ax1.set_title("Registered sequence")
ax1.set_axis_off()
ax2.imshow(target_im)
ax2.set_title("Target")
ax2.set_axis_off()
fig.tight_layout()
image0, image1 = vortex()
# --- Compute the optical flow
v, u = optical_flow_ilk(image0, image1, radius=15)
# --- Compute flow magnitude
norm = np.sqrt(u ** 2 + v ** 2)
# --- Display
fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(8, 4))
# --- Sequence image sample
ax0.imshow(image0, cmap='gray')
ax0.set_title("Sequence image sample")
ax0.set_axis_off()
# --- Quiver plot arguments
nvec = 20 # Number of vectors to be displayed along each image dimension
nl, nc = image0.shape
step = max(nl//nvec, nc//nvec)
y, x = np.mgrid[:nl:step, :nc:step]
u_ = u[::step, ::step]
v_ = v[::step, ::step]
ax1.imshow(norm)
ax1.quiver(x, y, u_, v_, color='r', units='dots',
angles='xy', scale_units='xy', lw=3)
ax1.set_title("Optical flow magnitude and vector field")
ax1.set_axis_off()
fig.tight_layout()
plt.show() | en | 0.602946 | # 粗略的光流量估算器。 # # TV-L1求解器应用于图像金字塔的每个级别。 TV-L1是Zack等人介绍的一种流行的光流估计算法。 [1],在[2]中进行了改进,并在[3]中进行了详细说明。 # --- Load the sequence # --- Convert the images to gray level: color is not supported. # --- Compute the optical flow # --- Use the estimated optical flow for registration # build an RGB image with the unregistered sequence # build an RGB image with the registered sequence # build an RGB image with the registered sequence # --- Show the result # --- Compute the optical flow # --- Compute flow magnitude # --- Display # --- Sequence image sample # --- Quiver plot arguments # Number of vectors to be displayed along each image dimension | 2.623273 | 3 |
mro/__init__.py | Dark-Bob/mro | 1 | 6614669 | <reponame>Dark-Bob/mro<filename>mro/__init__.py
import mro.connection
import mro.data_types
import mro.table
import mro.sqlite
import mro.custom_types
import mro.routine
def disconnect():
    """Close the active database connection managed by mro.connection."""
    mro.connection.disconnect()
def load_database(connection_function, hooks=None):
    """Connect to the database and generate ORM classes from its schema.

    :param connection_function: zero-argument callable returning a DB-API connection;
        registered with mro.connection so it can also be used on reconnect
    :param hooks: optional iterable of zero-argument callables run after the
        schema has been loaded (also registered for reconnects)
    """
    print("***********INITIALISING DATABASE************")
    mro.connection.set_connection_function(connection_function)
    # init_db will be re-run automatically whenever the connection is re-established.
    mro.connection.set_on_reconnect(init_db)
    mro.connection.set_hooks(hooks)
    connection = mro.connection.connection
    init_db(connection)
    if hooks is not None:
        for hook in hooks:
            hook()
def init_db(connection):
    """(Re)build the ORM from the connected database's schema.

    Dispatches to the sqlite loader for sqlite3 connections, otherwise uses the
    standard (PostgreSQL information_schema) loader, then generates the dynamic
    table classes and routine wrappers.

    :param connection: an open DB-API connection
    """
    if connection.__class__.__module__ == 'sqlite3':
        # BUG FIX: the bare name `sqlite` is never bound in this module (only
        # `import mro.sqlite` at the top), so the sqlite3 branch previously
        # raised NameError; qualify through the package instead.
        tables = mro.sqlite._load_sqllite_db(connection)
    else:
        tables = _load_standard_db(connection)
    _create_classes(tables)
    mro.routine._create_routines(connection)
def execute_sql(sql, values=None):
    """Execute raw SQL through the table base class and return its result.

    :param sql: SQL statement, optionally with placeholders
    :param values: optional parameter values for the placeholders
    """
    return mro.table.table._execute_sql(sql, values)
def _load_standard_db(connection):
    """Introspect a standard (PostgreSQL) database's public schema.

    Returns a dict mapping table name -> {'columns': [...], 'foreign_key_targets': [...]}
    where each column entry records type, default, nullability, primary-key and
    foreign-key information that _create_classes() turns into ORM classes.
    """
    print('Loading standard db')
    cursor = connection.cursor()
    tables = {}
    # Create any custom types
    print('Creating custom types')
    mro.custom_types.create_custom_types(connection)
    # Get tables
    print('Getting tables')
    cursor.execute("select * from information_schema.tables where table_schema='public';")
    connection.commit()
    for table in cursor:
        table_name = table[2]
        print(f'Getting info about table [{table_name}]')
        cursor2 = connection.cursor()
        # Get foreign keys (part 1): columns of this table referencing other tables
        # https://dba.stackexchange.com/a/218969
        # NOTE(review): table_name is interpolated straight into the SQL; it comes
        # from information_schema so it is trusted, but bind parameters would be safer.
        cursor2.execute(f"""
            select
                col.attname as fk_column_name
                ,ftbl.relname as referenced_table_name
                ,fcol.attname as referenced_column_name
            from pg_catalog.pg_constraint con
            join lateral unnest(con.conkey) with ordinality as u(attnum, attposition) on true
            join pg_class tbl on tbl.oid = con.conrelid
            join pg_attribute col on (col.attrelid = tbl.oid and col.attnum = u.attnum)
            join lateral unnest(con.confkey) with ordinality as fu(attnum, attposition) on true
            join pg_class ftbl on ftbl.oid = con.confrelid
            join pg_attribute fcol on (fcol.attrelid = ftbl.oid and fcol.attnum = fu.attnum)
            where
                con.conrelid = '{table_name}'::regclass
                and con.contype = 'f';
            """)
        connection.commit()
        foreign_keys = {}
        for foreign_key in cursor2:
            # column name -> (referenced table, referenced column)
            foreign_keys[foreign_key[0]] = (foreign_key[1], foreign_key[2])
        # Get foreign keys (part 2): other tables' columns referencing this table
        # https://dba.stackexchange.com/a/218969
        cursor2.execute(f"""
            select
                tbl.relname
                ,col.attname
                ,fcol.attname
            from pg_catalog.pg_constraint con
            join lateral unnest(con.conkey) with ordinality as u(attnum, attposition) on true
            join pg_class tbl on tbl.oid = con.conrelid
            join pg_attribute col on (col.attrelid = tbl.oid and col.attnum = u.attnum)
            join lateral unnest(con.confkey) with ordinality as fu(attnum, attposition) on true
            join pg_class ftbl on ftbl.oid = con.confrelid
            join pg_attribute fcol on (fcol.attrelid = ftbl.oid and fcol.attnum = fu.attnum)
            where
                con.confrelid = '{table_name}'::regclass
                and con.contype = 'f';
            """)
        connection.commit()
        foreign_key_targets = []
        for foreign_key in cursor2:
            # (referencing table, referencing column, referenced column of this table)
            foreign_key_targets.append((foreign_key[0], foreign_key[1], foreign_key[2]))
        # Get primary keys
        # https://wiki.postgresql.org/wiki/Retrieve_primary_key_columns
        cursor2.execute(f"""
            select
                a.attname
            from pg_index i
            join pg_attribute a on a.attrelid = i.indrelid and a.attnum = any(i.indkey)
            where
                i.indrelid = '{table_name}'::regclass
                and i.indisprimary;
            """)
        connection.commit()
        primary_key_columns = [row[0] for row in cursor2]
        # Get columns
        cursor2.execute(f"""
            select
                column_name
                ,data_type
                ,udt_name
                ,ordinal_position
                ,column_default
                ,is_nullable
                ,is_updatable
                ,character_maximum_length
            from information_schema.columns
            where
                table_name='{table_name}';
            """)
        connection.commit()
        columns = []
        for column in cursor2:
            col_data = {}
            column_name = column[0]
            postgres_type = column[1]
            if postgres_type == 'USER-DEFINED':
                # For user-defined types the concrete type name lives in udt_name.
                postgres_type = column[2]
                data_type = mro.data_types.type_map[postgres_type]
                # NOTE(review): eval on a DB-supplied type name;
                # getattr(mro.custom_types, postgres_type) would avoid eval —
                # confirm equivalence and switch when convenient.
                col_data['custom_type'] = eval(f'mro.custom_types.{postgres_type}')
            else:
                data_type = mro.data_types.type_map[postgres_type]
            column_index = column[3]-1  # ordinal_position is 1-based
            column_default = column[4]
            is_nullable = column[5] == 'YES'    # information_schema yes/no strings
            is_updateable = column[6] == 'YES'
            get_value_on_insert = False
            is_primary_key = column_name in primary_key_columns
            if column_default:
                # The type's default-parser decides whether the value must be
                # read back from the DB after insert (e.g. serial sequences).
                column_default, get_value_on_insert = data_type[2](column_default, postgres_type)
            col_data['data_type'] = data_type[0]
            col_data['column_name'] = column_name
            col_data['column_index'] = column_index
            col_data['column_default'] = column_default
            col_data['not_null'] = not is_nullable
            col_data['is_updateable'] = is_updateable
            col_data['get_value_on_insert'] = get_value_on_insert
            col_data['is_primary_key'] = is_primary_key
            col_data['length'] = column[7]
            if column_name in foreign_keys:
                foreign_key = foreign_keys[column_name]
                col_data['foreign_key'] = foreign_key
            columns.append(col_data)
        tables[table_name] = {}
        tables[table_name]['columns'] = columns
        tables[table_name]['foreign_key_targets'] = foreign_key_targets
    return tables
def _create_classes(tables):
    """Generate one dynamic ORM class per table and attach it to the mro package.

    :param tables: schema dict produced by _load_standard_db / the sqlite loader
    """
    for table_name, table_data in tables.items():
        table_columns = table_data['columns']
        foreign_key_targets = table_data['foreign_key_targets']
        def create_table_class(name, columns):
            # Build the class body: __init__ applies defaults/custom types and
            # inserts a row; update writes changed columns back by primary key.
            def init_function(self, **kwargs):
                for column in columns:
                    self.__dict__[column['column_name']] = column['column_default']
                    custom_type = column.get('custom_type')
                    kwarg_for_column = kwargs.get(column['column_name'])
                    if kwarg_for_column is not None:
                        # Coerce plain dicts into the column's custom type.
                        if custom_type is not None and type(kwarg_for_column) is not custom_type:
                            kwargs[column['column_name']] = custom_type(**kwarg_for_column)
                for k, v in kwargs.items():
                    if not hasattr(self, k):
                        raise ValueError(f"{self.__class__.__name__} does not have an attribute {k}")
                    self.__dict__[k] = v
                if not super(self.__class__, self)._insert.disabled:
                    obj = super(self.__class__, self).insert(**kwargs)
                    # Pull back DB-generated values (serials, defaults) onto this instance.
                    for c in self.__class__._get_value_on_insert_columns:
                        self.__dict__[c] = obj.__dict__[c]
            def update_function(self, **kwargs):
                primary_key_columns = self.__class__._primary_key_columns
                primary_key_column_values = [self.__dict__[c] for c in primary_key_columns]
                super(self.__class__, self).update(primary_key_columns, primary_key_column_values, **kwargs)
                # Mirror the DB update onto the instance without triggering an insert.
                with mro.table.disable_insert():
                    for k, v in kwargs.items():
                        self.__dict__[k] = v
                return self
            attrib_dict = {'__init__': init_function,
                           'update': update_function}
            table_class = type(name, (mro.table.table,), attrib_dict)
            return table_class
        dynamic_table_class = create_table_class(table_name, table_columns)
        for column in table_columns:
            kwargs = {"name": column['column_name'],
                      "column_index": column['column_index'],
                      "not_null": column['not_null'],
                      "is_updateable": column['is_updateable'],
                      "get_value_on_insert": column['get_value_on_insert'],
                      "is_primary_key": column['is_primary_key']}
            if column['data_type'] == 'varchar':
                kwargs['length'] = column['length']
            if column.get('custom_type') is not None:
                kwargs['python_type'] = column['custom_type']
            col_value = mro.data_types.__dict__[column['data_type']](**kwargs)
            # Add attributes to class
            setattr(dynamic_table_class, column['column_name'], col_value)
            # Add foreign key attributes to the class
            if column.get('foreign_key') is not None:
                setattr(dynamic_table_class,
                        column['column_name'],
                        mro.foreign_keys.foreign_key_data_type(column['column_name'],
                                                               col_value,
                                                               f'mro.{column["foreign_key"][0]}',
                                                               column["foreign_key"][1]))
        for foreign_key_target in foreign_key_targets:
            foreign_key_name = f"{foreign_key_target[0]}s"
            # if they happen to have a column the same name as the reference list don't add it
            if foreign_key_name not in [column['column_name'] for column in table_columns]:
                setattr(dynamic_table_class,
                        foreign_key_name,
                        mro.foreign_keys.foreign_key_reference(foreign_key_target[2],
                                                               f"mro.{foreign_key_target[0]}",
                                                               foreign_key_target[1]))
        # Expose the generated class as mro.<table_name> and register it.
        setattr(mro, dynamic_table_class.__name__, dynamic_table_class)
        dynamic_table_class._register()
| import mro.connection
import mro.data_types
import mro.table
import mro.sqlite
import mro.custom_types
import mro.routine
def disconnect():
mro.connection.disconnect()
def load_database(connection_function, hooks=None):
print("***********INITIALISING DATABASE************")
mro.connection.set_connection_function(connection_function)
mro.connection.set_on_reconnect(init_db)
mro.connection.set_hooks(hooks)
connection = mro.connection.connection
init_db(connection)
if hooks is not None:
for hook in hooks:
hook()
def init_db(connection):
if connection.__class__.__module__ == 'sqlite3':
tables = sqlite._load_sqllite_db(connection)
else:
tables = _load_standard_db(connection)
_create_classes(tables)
mro.routine._create_routines(connection)
def execute_sql(sql, values=None):
return mro.table.table._execute_sql(sql, values)
def _load_standard_db(connection):
print('Loading standard db')
cursor = connection.cursor()
tables = {}
# Create any custom types
print('Creating custom types')
mro.custom_types.create_custom_types(connection)
# Get tables
print('Getting tables')
cursor.execute("select * from information_schema.tables where table_schema='public';")
connection.commit()
for table in cursor:
table_name = table[2]
print(f'Getting info about table [{table_name}]')
cursor2 = connection.cursor()
# Get foreign keys (part 1)
# https://dba.stackexchange.com/a/218969
cursor2.execute(f"""
select
col.attname as fk_column_name
,ftbl.relname as referenced_table_name
,fcol.attname as referenced_column_name
from pg_catalog.pg_constraint con
join lateral unnest(con.conkey) with ordinality as u(attnum, attposition) on true
join pg_class tbl on tbl.oid = con.conrelid
join pg_attribute col on (col.attrelid = tbl.oid and col.attnum = u.attnum)
join lateral unnest(con.confkey) with ordinality as fu(attnum, attposition) on true
join pg_class ftbl on ftbl.oid = con.confrelid
join pg_attribute fcol on (fcol.attrelid = ftbl.oid and fcol.attnum = fu.attnum)
where
con.conrelid = '{table_name}'::regclass
and con.contype = 'f';
""")
connection.commit()
foreign_keys = {}
for foreign_key in cursor2:
foreign_keys[foreign_key[0]] = (foreign_key[1], foreign_key[2])
# Get foreign keys (part 2)
# https://dba.stackexchange.com/a/218969
cursor2.execute(f"""
select
tbl.relname
,col.attname
,fcol.attname
from pg_catalog.pg_constraint con
join lateral unnest(con.conkey) with ordinality as u(attnum, attposition) on true
join pg_class tbl on tbl.oid = con.conrelid
join pg_attribute col on (col.attrelid = tbl.oid and col.attnum = u.attnum)
join lateral unnest(con.confkey) with ordinality as fu(attnum, attposition) on true
join pg_class ftbl on ftbl.oid = con.confrelid
join pg_attribute fcol on (fcol.attrelid = ftbl.oid and fcol.attnum = fu.attnum)
where
con.confrelid = '{table_name}'::regclass
and con.contype = 'f';
""")
connection.commit()
foreign_key_targets = []
for foreign_key in cursor2:
foreign_key_targets.append((foreign_key[0], foreign_key[1], foreign_key[2]))
# Get primary keys
# https://wiki.postgresql.org/wiki/Retrieve_primary_key_columns
cursor2.execute(f"""
select
a.attname
from pg_index i
join pg_attribute a on a.attrelid = i.indrelid and a.attnum = any(i.indkey)
where
i.indrelid = '{table_name}'::regclass
and i.indisprimary;
""")
connection.commit()
primary_key_columns = [row[0] for row in cursor2]
# Get columns
cursor2.execute(f"""
select
column_name
,data_type
,udt_name
,ordinal_position
,column_default
,is_nullable
,is_updatable
,character_maximum_length
from information_schema.columns
where
table_name='{table_name}';
""")
connection.commit()
columns = []
for column in cursor2:
col_data = {}
column_name = column[0]
postgres_type = column[1]
if postgres_type == 'USER-DEFINED':
postgres_type = column[2]
data_type = mro.data_types.type_map[postgres_type]
col_data['custom_type'] = eval(f'mro.custom_types.{postgres_type}')
else:
data_type = mro.data_types.type_map[postgres_type]
column_index = column[3]-1
column_default = column[4]
is_nullable = column[5] == 'YES'
is_updateable = column[6] == 'YES'
get_value_on_insert = False
is_primary_key = column_name in primary_key_columns
if column_default:
column_default, get_value_on_insert = data_type[2](column_default, postgres_type)
col_data['data_type'] = data_type[0]
col_data['column_name'] = column_name
col_data['column_index'] = column_index
col_data['column_default'] = column_default
col_data['not_null'] = not is_nullable
col_data['is_updateable'] = is_updateable
col_data['get_value_on_insert'] = get_value_on_insert
col_data['is_primary_key'] = is_primary_key
col_data['length'] = column[7]
if column_name in foreign_keys:
foreign_key = foreign_keys[column_name]
col_data['foreign_key'] = foreign_key
columns.append(col_data)
tables[table_name] = {}
tables[table_name]['columns'] = columns
tables[table_name]['foreign_key_targets'] = foreign_key_targets
return tables
def _create_classes(tables):
for table_name, table_data in tables.items():
table_columns = table_data['columns']
foreign_key_targets = table_data['foreign_key_targets']
def create_table_class(name, columns):
def init_function(self, **kwargs):
for column in columns:
self.__dict__[column['column_name']] = column['column_default']
custom_type = column.get('custom_type')
kwarg_for_column = kwargs.get(column['column_name'])
if kwarg_for_column is not None:
if custom_type is not None and type(kwarg_for_column) is not custom_type:
kwargs[column['column_name']] = custom_type(**kwarg_for_column)
for k, v in kwargs.items():
if not hasattr(self, k):
raise ValueError(f"{self.__class__.__name__} does not have an attribute {k}")
self.__dict__[k] = v
if not super(self.__class__, self)._insert.disabled:
obj = super(self.__class__, self).insert(**kwargs)
for c in self.__class__._get_value_on_insert_columns:
self.__dict__[c] = obj.__dict__[c]
def update_function(self, **kwargs):
primary_key_columns = self.__class__._primary_key_columns
primary_key_column_values = [self.__dict__[c] for c in primary_key_columns]
super(self.__class__, self).update(primary_key_columns, primary_key_column_values, **kwargs)
with mro.table.disable_insert():
for k, v in kwargs.items():
self.__dict__[k] = v
return self
attrib_dict = {'__init__': init_function,
'update': update_function}
table_class = type(name, (mro.table.table,), attrib_dict)
return table_class
dynamic_table_class = create_table_class(table_name, table_columns)
for column in table_columns:
kwargs = {"name": column['column_name'],
"column_index": column['column_index'],
"not_null": column['not_null'],
"is_updateable": column['is_updateable'],
"get_value_on_insert": column['get_value_on_insert'],
"is_primary_key": column['is_primary_key']}
if column['data_type'] == 'varchar':
kwargs['length'] = column['length']
if column.get('custom_type') is not None:
kwargs['python_type'] = column['custom_type']
col_value = mro.data_types.__dict__[column['data_type']](**kwargs)
# Add attributes to class
setattr(dynamic_table_class, column['column_name'], col_value)
# Add foreign key attributes to the class
if column.get('foreign_key') is not None:
setattr(dynamic_table_class,
column['column_name'],
mro.foreign_keys.foreign_key_data_type(column['column_name'],
col_value,
f'mro.{column["foreign_key"][0]}',
column["foreign_key"][1]))
for foreign_key_target in foreign_key_targets:
foreign_key_name = f"{foreign_key_target[0]}s"
# if they happen to have a column the same name as the reference list don't add it
if foreign_key_name not in [column['column_name'] for column in table_columns]:
setattr(dynamic_table_class,
foreign_key_name,
mro.foreign_keys.foreign_key_reference(foreign_key_target[2],
f"mro.{foreign_key_target[0]}",
foreign_key_target[1]))
setattr(mro, dynamic_table_class.__name__, dynamic_table_class)
dynamic_table_class._register() | en | 0.56055 | # Create any custom types # Get tables # Get foreign keys (part 1) # https://dba.stackexchange.com/a/218969 select col.attname as fk_column_name ,ftbl.relname as referenced_table_name ,fcol.attname as referenced_column_name from pg_catalog.pg_constraint con join lateral unnest(con.conkey) with ordinality as u(attnum, attposition) on true join pg_class tbl on tbl.oid = con.conrelid join pg_attribute col on (col.attrelid = tbl.oid and col.attnum = u.attnum) join lateral unnest(con.confkey) with ordinality as fu(attnum, attposition) on true join pg_class ftbl on ftbl.oid = con.confrelid join pg_attribute fcol on (fcol.attrelid = ftbl.oid and fcol.attnum = fu.attnum) where con.conrelid = '{table_name}'::regclass and con.contype = 'f'; # Get foreign keys (part 2) # https://dba.stackexchange.com/a/218969 select tbl.relname ,col.attname ,fcol.attname from pg_catalog.pg_constraint con join lateral unnest(con.conkey) with ordinality as u(attnum, attposition) on true join pg_class tbl on tbl.oid = con.conrelid join pg_attribute col on (col.attrelid = tbl.oid and col.attnum = u.attnum) join lateral unnest(con.confkey) with ordinality as fu(attnum, attposition) on true join pg_class ftbl on ftbl.oid = con.confrelid join pg_attribute fcol on (fcol.attrelid = ftbl.oid and fcol.attnum = fu.attnum) where con.confrelid = '{table_name}'::regclass and con.contype = 'f'; # Get primary keys # https://wiki.postgresql.org/wiki/Retrieve_primary_key_columns select a.attname from pg_index i join pg_attribute a on a.attrelid = i.indrelid and a.attnum = any(i.indkey) where i.indrelid = '{table_name}'::regclass and i.indisprimary; # Get columns select column_name ,data_type ,udt_name ,ordinal_position ,column_default ,is_nullable ,is_updatable ,character_maximum_length from information_schema.columns where table_name='{table_name}'; # Add attributes to class # Add foreign key attributes to the class # if they happen to have a column the same 
name as the reference list don't add it | 2.662413 | 3 |
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/common/lib/capa/capa/tests/test_capa_problem.py | osoco/better-ways-of-thinking-about-software | 3 | 6614670 | """
Test capa problem.
"""
import textwrap
import unittest
import pytest
import ddt
import six
from lxml import etree
from markupsafe import Markup
from mock import patch
from capa.responsetypes import LoncapaProblemError
from capa.tests.helpers import new_loncapa_problem
from openedx.core.djangolib.markup import HTML
@ddt.ddt
class CAPAProblemTest(unittest.TestCase):
""" CAPA problem related tests"""
@ddt.unpack
@ddt.data(
    {'question': 'Select the correct synonym of paranoid?'},
    {'question': 'Select the correct <em>synonym</em> of <strong>paranoid</strong>?'},
)
def test_label_and_description_inside_responsetype(self, question):
    """
    Verify that
    * label is extracted
    * <label> tag is removed to avoid duplication
    This is the case when we have a problem with single question or
    problem with multiple-questions separated as per the new format.
    """
    xml = """
    <problem>
        <choiceresponse>
            <label>{question}</label>
            <description>Only the paranoid survive.</description>
            <checkboxgroup>
                <choice correct="true">over-suspicious</choice>
                <choice correct="false">funny</choice>
            </checkboxgroup>
        </choiceresponse>
    </problem>
    """.format(question=question)
    problem = new_loncapa_problem(xml)
    # problem_data is keyed by answer id; description ids encode their position.
    assert problem.problem_data ==\
        {'1_2_1': {'label': question, 'descriptions': {'description_1_1_1': 'Only the paranoid survive.'}}}
    # The <label> element must have been stripped from the problem tree.
    assert len(problem.tree.xpath('//label')) == 0
@ddt.unpack
@ddt.data(
{
'question': 'Once we become predictable, we become ______?',
'label_attr': 'Once we become predictable, we become ______?'
},
{
'question': 'Once we become predictable, we become ______?<img src="img/src"/>',
'label_attr': 'Once we become predictable, we become ______?'
},
)
def test_legacy_problem(self, question, label_attr):
"""
Verify that legacy problem is handled correctly.
"""
xml = """
<problem>
<p>Be sure to check your spelling.</p>
<p>{}</p>
<stringresponse answer="vulnerable" type="ci">
<textline label="{}" size="40"/>
</stringresponse>
</problem>
""".format(question, label_attr)
problem = new_loncapa_problem(xml)
assert problem.problem_data == {'1_2_1': {'label': question, 'descriptions': {}}}
assert len(problem.tree.xpath("//*[normalize-space(text())='{}']".format(question))) == 0
@ddt.unpack
@ddt.data(
{
'question1': 'People who say they have nothing to ____ almost always do?',
'question2': 'Select the correct synonym of paranoid?'
},
{
'question1': '<b>People</b> who say they have <mark>nothing</mark> to ____ almost always do?',
'question2': 'Select the <sup>correct</sup> synonym of <mark>paranoid</mark>?'
},
)
def test_neither_label_tag_nor_attribute(self, question1, question2):
"""
Verify that label is extracted correctly.
This is the case when we have a markdown problem with multiple-questions.
In this case when markdown is converted to xml, there will be no label
tag and label attribute inside responsetype. But we have a label tag
before the responsetype.
"""
xml = """
<problem>
<p>Be sure to check your spelling.</p>
<label>{}</label>
<stringresponse answer="hide" type="ci">
<textline size="40"/>
</stringresponse>
<choiceresponse>
<label>{}</label>
<checkboxgroup>
<choice correct="true">over-suspicious</choice>
<choice correct="false">funny</choice>
</checkboxgroup>
</choiceresponse>
</problem>
""".format(question1, question2)
problem = new_loncapa_problem(xml)
assert problem.problem_data ==\
{'1_2_1': {'label': question1, 'descriptions': {}}, '1_3_1': {'label': question2, 'descriptions': {}}}
for question in (question1, question2):
assert len(problem.tree.xpath('//label[text()="{}"]'.format(question))) == 0
def test_multiple_descriptions(self):
"""
Verify that multiple descriptions are handled correctly.
"""
desc1 = "The problem with trying to be the <em>bad guy</em>, there's always someone <strong>worse</strong>."
desc2 = "Anyone who looks the world as if it was a game of chess deserves to lose."
xml = """
<problem>
<p>Be sure to check your spelling.</p>
<stringresponse answer="War" type="ci">
<label>___ requires sacrifices.</label>
<description>{}</description>
<description>{}</description>
<textline size="40"/>
</stringresponse>
</problem>
""".format(desc1, desc2)
problem = new_loncapa_problem(xml)
assert problem.problem_data ==\
{'1_2_1': {'label': '___ requires sacrifices.',
'descriptions': {'description_1_1_1': desc1, 'description_1_1_2': desc2}}}
def test_additional_answer_is_skipped_from_resulting_html(self):
"""Tests that additional_answer element is not present in transformed HTML"""
xml = """
<problem>
<p>Be sure to check your spelling.</p>
<stringresponse answer="War" type="ci">
<label>___ requires sacrifices.</label>
<description>Anyone who looks the world as if it was a game of chess deserves to lose.</description>
<additional_answer answer="optional acceptable variant of the correct answer"/>
<textline size="40"/>
</stringresponse>
</problem>
"""
problem = new_loncapa_problem(xml)
assert len(problem.extracted_tree.xpath('//additional_answer')) == 0
assert 'additional_answer' not in problem.get_html()
def test_non_accessible_inputtype(self):
"""
Verify that tag with question text is not removed when inputtype is not fully accessible.
"""
question = "Click the country which is home to the Pyramids."
# lint-amnesty, pylint: disable=duplicate-string-formatting-argument
xml = """
<problem>
<p>{}</p>
<imageresponse>
<imageinput label="{}"
src="/static/Africa.png" width="600" height="638" rectangle="(338,98)-(412,168)"/>
</imageresponse>
</problem>
""".format(question, question)
problem = new_loncapa_problem(xml)
assert problem.problem_data == {'1_2_1': {'label': question, 'descriptions': {}}}
# <p> tag with question text should not be deleted
assert problem.tree.xpath("string(p[text()='{}'])".format(question)) == question
def test_label_is_empty_if_no_label_attribute(self):
"""
Verify that label in response_data is empty string when label
attribute is missing and responsetype is not fully accessible.
"""
question = "Click the country which is home to the Pyramids."
xml = """
<problem>
<p>{}</p>
<imageresponse>
<imageinput
src="/static/Africa.png" width="600" height="638" rectangle="(338,98)-(412,168)"/>
</imageresponse>
</problem>
""".format(question)
problem = new_loncapa_problem(xml)
assert problem.problem_data == {'1_2_1': {'label': '', 'descriptions': {}}}
def test_multiple_questions_problem(self):
"""
For a problem with multiple questions verify that for each question
* label is extracted
* descriptions info is constructed
* <label> tag is removed to avoid duplication
"""
xml = """
<problem>
<choiceresponse>
<label>Select the correct synonym of paranoid?</label>
<description>Only the paranoid survive.</description>
<checkboxgroup>
<choice correct="true">over-suspicious</choice>
<choice correct="false">funny</choice>
</checkboxgroup>
</choiceresponse>
<multiplechoiceresponse>
<p>one more question</p>
<label>What Apple device competed with the portable CD player?</label>
<description>Device looks like an egg plant.</description>
<choicegroup type="MultipleChoice">
<choice correct="false">The iPad</choice>
<choice correct="false">Napster</choice>
<choice correct="true">The iPod</choice>
<choice correct="false">The vegetable peeler</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
"""
problem = new_loncapa_problem(xml)
assert problem.problem_data ==\
{'1_2_1': {'label': 'Select the correct synonym of paranoid?',
'descriptions': {'description_1_1_1': 'Only the paranoid survive.'}},
'1_3_1': {'label': 'What Apple device competed with the portable CD player?',
'descriptions': {'description_1_2_1': 'Device looks like an egg plant.'}}}
assert len(problem.tree.xpath('//label')) == 0
def test_question_title_not_removed_got_children(self):
"""
Verify that <p> question text before responsetype not deleted when
it contains other children and label is picked from label attribute of inputtype
This is the case when author updated the <p> immediately before
responsetype to contain other elements. We do not want to delete information in that case.
"""
question = 'Is egg plant a fruit?'
xml = """
<problem>
<p>Choose wisely.</p>
<p>Select the correct synonym of paranoid?</p>
<p><img src="" /></p>
<choiceresponse>
<checkboxgroup label="{}">
<choice correct="true">over-suspicious</choice>
<choice correct="false">funny</choice>
</checkboxgroup>
</choiceresponse>
</problem>
""".format(question)
problem = new_loncapa_problem(xml)
assert problem.problem_data == {'1_2_1': {'label': '', 'descriptions': {}}}
assert len(problem.tree.xpath('//p/img')) == 1
@ddt.unpack
@ddt.data(
{'group_label': 'Choose the correct color'},
{'group_label': 'Choose the <b>correct</b> <mark>color</mark>'},
)
def test_multiple_inputtypes(self, group_label):
"""
Verify that group label and labels for individual inputtypes are extracted correctly.
"""
input1_label = 'What color is the sky?'
input2_label = 'What color are pine needles?'
xml = """
<problem>
<optionresponse>
<label>{}</label>
<optioninput options="('yellow','blue','green')" correct="blue" label="{}"/>
<optioninput options="('yellow','blue','green')" correct="green" label="{}"/>
</optionresponse>
</problem>
""".format(group_label, input1_label, input2_label)
problem = new_loncapa_problem(xml)
assert problem.problem_data ==\
{'1_2_1': {'group_label': group_label, 'label': input1_label, 'descriptions': {}},
'1_2_2': {'group_label': group_label, 'label': input2_label, 'descriptions': {}}}
def test_single_inputtypes(self):
"""
Verify that HTML is correctly rendered when there is single inputtype.
"""
question = 'Enter sum of 1+2'
xml = textwrap.dedent("""
<problem>
<customresponse cfn="test_sum" expect="3">
<script type="loncapa/python">
def test_sum(expect, ans):
return int(expect) == int(ans)
</script>
<label>{}</label>
<textline size="20" correct_answer="3" />
</customresponse>
</problem>
""".format(question))
problem = new_loncapa_problem(xml, use_capa_render_template=True)
problem_html = etree.XML(problem.get_html())
# verify that only no multi input group div is present
multi_inputs_group = problem_html.xpath('//div[@class="multi-inputs-group"]')
assert len(multi_inputs_group) == 0
# verify that question is rendered only once
question = problem_html.xpath("//*[normalize-space(text())='{}']".format(question))
assert len(question) == 1
def assert_question_tag(self, question1, question2, tag, label_attr=False):
"""
Verify question tag correctness.
"""
question1_tag = '<{tag}>{}</{tag}>'.format(question1, tag=tag) if question1 else ''
question2_tag = '<{tag}>{}</{tag}>'.format(question2, tag=tag) if question2 else ''
question1_label_attr = 'label="{}"'.format(question1) if label_attr else ''
question2_label_attr = 'label="{}"'.format(question2) if label_attr else ''
xml = """
<problem>
{question1_tag}
<choiceresponse>
<checkboxgroup {question1_label_attr}>
<choice correct="true">choice1</choice>
<choice correct="false">choice2</choice>
</checkboxgroup>
</choiceresponse>
{question2_tag}
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" {question2_label_attr}>
<choice correct="false">choice1</choice>
<choice correct="true">choice2</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""".format(
question1_tag=question1_tag,
question2_tag=question2_tag,
question1_label_attr=question1_label_attr,
question2_label_attr=question2_label_attr,
)
problem = new_loncapa_problem(xml)
assert problem.problem_data ==\
{'1_2_1': {'label': question1, 'descriptions': {}}, '1_3_1': {'label': question2, 'descriptions': {}}}
assert len(problem.tree.xpath('//{}'.format(tag))) == 0
@ddt.unpack
@ddt.data(
{'question1': 'question 1 label', 'question2': 'question 2 label'},
{'question1': '', 'question2': 'question 2 label'},
{'question1': 'question 1 label', 'question2': ''}
)
def test_correct_question_tag_is_picked(self, question1, question2):
"""
For a problem with multiple questions verify that correct question tag is picked.
"""
self.assert_question_tag(question1, question2, tag='label', label_attr=False)
self.assert_question_tag(question1, question2, tag='p', label_attr=True)
def test_optionresponse_xml_compatibility(self):
"""
Verify that an optionresponse problem with multiple correct answers is not instantiated.
Scenario:
Given an optionresponse/Dropdown problem
If there are multiple correct answers
Then the problem is not instantiated
And Loncapa problem error exception is raised
If the problem is corrected by including only one correct answer
Then the problem is created successfully
"""
xml = """
<problem>
<optionresponse>
<p>You can use this template as a guide to the simple editor markdown and OLX markup to use for dropdown problems. Edit this component to replace this template with your own assessment.</p>
<label>Add the question text, or prompt, here. This text is required.</label>
<description>You can add an optional tip or note related to the prompt like this. </description>
<optioninput>
<option correct="False">an incorrect answer</option>
<option correct="True">the correct answer</option>
<option correct="{correctness}">an incorrect answer</option>
</optioninput>
</optionresponse>
</problem>
"""
with pytest.raises(LoncapaProblemError):
new_loncapa_problem(xml.format(correctness=True))
problem = new_loncapa_problem(xml.format(correctness=False))
assert problem is not None
def test_optionresponse_option_with_empty_text(self):
"""
Verify successful instantiation of an optionresponse problem
with an option with empty text
"""
xml = """
<problem>
<optionresponse>
<label>Select True or False</label>
<optioninput>
<option correct="False">True <optionhint>Not this one</optionhint></option>
<option correct="True">False</option>
<option correct="False"><optionhint>Not this empty one either</optionhint></option>
</optioninput>
</optionresponse>
</problem>
"""
problem = new_loncapa_problem(xml)
assert problem is not None
@ddt.ddt
class CAPAMultiInputProblemTest(unittest.TestCase):
    """ TestCase for CAPA problems with multiple inputtypes """

    def capa_problem(self, xml):
        """
        Create capa problem with the capa render template enabled.
        """
        return new_loncapa_problem(xml, use_capa_render_template=True)

    def assert_problem_data(self, problem_data):
        """Verify problem data is in expected state: every label is Markup-safe."""
        for problem_value in six.viewvalues(problem_data):
            assert isinstance(problem_value['label'], Markup)

    def assert_problem_html(self, problem_html, group_label, *input_labels):
        """
        Verify that correct html is rendered for multiple inputtypes.

        Arguments:
            problem_html (str): problem HTML
            group_label (str or None): multi input group label or None if label is not present
            input_labels (tuple): individual input labels
        """
        html = etree.XML(problem_html)

        # verify that only one multi input group div is present at correct path
        multi_inputs_group = html.xpath(
            '//div[@class="wrapper-problem-response"]/div[@class="multi-inputs-group"]'
        )
        assert len(multi_inputs_group) == 1

        if group_label is None:
            # if multi inputs group label is not present then there shouldn't be `aria-labelledby` attribute
            assert multi_inputs_group[0].attrib.get('aria-labelledby') is None
        else:
            # verify that multi input group label <p> tag exists and its
            # id matches with correct multi input group aria-labelledby
            multi_inputs_group_label_id = multi_inputs_group[0].attrib.get('aria-labelledby')
            multi_inputs_group_label = html.xpath('//p[@id="{}"]'.format(multi_inputs_group_label_id))
            assert len(multi_inputs_group_label) == 1
            assert multi_inputs_group_label[0].text == group_label

        # verify that label for each input comes only once
        for input_label in input_labels:
            # normalize-space is used to remove whitespace around the text
            input_label_element = multi_inputs_group[0].xpath('//*[normalize-space(text())="{}"]'.format(input_label))
            assert len(input_label_element) == 1

    @ddt.unpack
    @ddt.data(
        {'label_html': '<label>Choose the correct color</label>', 'group_label': 'Choose the correct color'},
        {'label_html': '', 'group_label': None}
    )
    def test_optionresponse(self, label_html, group_label):
        """
        Verify that optionresponse problem with multiple inputtypes is rendered correctly.
        """
        input1_label = 'What color is the sky?'
        input2_label = 'What color are pine needles?'
        xml = """
        <problem>
            <optionresponse>
                {label_html}
                <optioninput options="('yellow','blue','green')" correct="blue" label="{input1_label}"/>
                <optioninput options="('yellow','blue','green')" correct="green" label="{input2_label}"/>
            </optionresponse>
        </problem>
        """.format(label_html=label_html, input1_label=input1_label, input2_label=input2_label)
        problem = self.capa_problem(xml)
        self.assert_problem_html(problem.get_html(), group_label, input1_label, input2_label)
        self.assert_problem_data(problem.problem_data)

    @ddt.unpack
    @ddt.data(
        {'inputtype': 'textline'},
        {'inputtype': 'formulaequationinput'}
    )
    def test_customresponse(self, inputtype):
        """
        Verify that customresponse problem with multiple textline
        and formulaequationinput inputtypes is rendered correctly.
        """
        group_label = 'Enter two integers that sum to 10.'
        input1_label = 'Integer 1'
        input2_label = 'Integer 2'
        xml = textwrap.dedent("""
        <problem>
            <customresponse cfn="test_add_to_ten">
        <script type="loncapa/python">
        def test_add_to_ten(expect, ans):
            return test_add(10, ans)
        </script>
                <label>{}</label>
                <{inputtype} size="40" correct_answer="3" label="{}" /><br/>
                <{inputtype} size="40" correct_answer="7" label="{}" />
            </customresponse>
        </problem>
        """.format(group_label, input1_label, input2_label, inputtype=inputtype))
        problem = self.capa_problem(xml)
        self.assert_problem_html(problem.get_html(), group_label, input1_label, input2_label)
        self.assert_problem_data(problem.problem_data)

    @ddt.unpack
    @ddt.data(
        {
            'descriptions': ('desc1', 'desc2'),
            'descriptions_html': '<description>desc1</description><description>desc2</description>'
        },
        {
            'descriptions': (),
            'descriptions_html': ''
        }
    )
    def test_descriptions(self, descriptions, descriptions_html):
        """
        Verify that groups descriptions are rendered correctly.
        """
        xml = """
        <problem>
            <optionresponse>
                <label>group label</label>
                {descriptions_html}
                <optioninput options="('yellow','blue','green')" correct="blue" label="first label"/>
                <optioninput options="('yellow','blue','green')" correct="green" label="second label"/>
            </optionresponse>
        </problem>
        """.format(descriptions_html=descriptions_html)
        problem = self.capa_problem(xml)
        problem_html = etree.XML(problem.get_html())

        multi_inputs_group = problem_html.xpath('//div[@class="multi-inputs-group"]')[0]
        description_ids = multi_inputs_group.attrib.get('aria-describedby', '').split()

        # Verify that number of descriptions matches description_ids
        assert len(description_ids) == len(descriptions)

        # For each description, check its order and text is correct
        for index, description_id in enumerate(description_ids):
            description_element = multi_inputs_group.xpath('//p[@id="{}"]'.format(description_id))
            assert len(description_element) == 1
            assert description_element[0].text == descriptions[index]
@ddt.ddt
class CAPAProblemReportHelpersTest(unittest.TestCase):
    """ TestCase for CAPA methods for finding question labels and answer text """

    @ddt.data(
        ('answerid_2_1', 'label', 'label'),
        ('answerid_2_2', 'label <some>html</some>', 'label html'),
        ('answerid_2_2', '<more html="yes"/>label <some>html</some>', 'label html'),
        ('answerid_2_3', None, 'Question 1'),
        ('answerid_2_3', '', 'Question 1'),
        ('answerid_3_3', '', 'Question 2'),
    )
    @ddt.unpack
    def test_find_question_label(self, answer_id, label, stripped_label):
        # HTML tags are stripped from labels; empty labels fall back to
        # "Question N" derived from the answer_id.
        problem = new_loncapa_problem(
            '<problem><some-problem id="{}"/></problem>'.format(answer_id)
        )
        mock_problem_data = {
            answer_id: {
                'label': HTML(label) if label else ''
            }
        }
        with patch.object(problem, 'problem_data', mock_problem_data):
            assert problem.find_question_label(answer_id) == stripped_label

    @ddt.data(None, dict(), [None])
    def test_find_answer_test_not_implemented(self, current_answer):
        # Unsupported answer payload shapes must raise NotImplementedError.
        problem = new_loncapa_problem('<problem/>')
        self.assertRaises(NotImplementedError, problem.find_answer_text, '', current_answer)

    @ddt.data(
        ('1_2_1', 'choice_0', 'over-suspicious'),
        ('1_2_1', 'choice_1', 'funny'),
        ('1_3_1', 'choice_0', 'The iPad'),
        ('1_3_1', 'choice_2', 'The iPod'),
        ('1_3_1', ['choice_0', 'choice_1'], 'The iPad, Napster'),
        ('1_4_1', 'yellow', 'yellow'),
        ('1_4_1', 'blue', 'blue'),
    )
    @ddt.unpack
    def test_find_answer_text_choices(self, answer_id, choice_id, answer_text):
        problem = new_loncapa_problem(
            """
            <problem>
                <choiceresponse>
                    <checkboxgroup label="Select the correct synonym of paranoid?">
                        <choice correct="true">over-suspicious</choice>
                        <choice correct="false">funny</choice>
                    </checkboxgroup>
                </choiceresponse>
                <multiplechoiceresponse>
                    <choicegroup type="MultipleChoice">
                        <choice correct="false">The iPad</choice>
                        <choice correct="false">Napster</choice>
                        <choice correct="true">The iPod</choice>
                    </choicegroup>
                </multiplechoiceresponse>
                <optionresponse>
                    <optioninput options="('yellow','blue','green')" correct="blue" label="Color_1"/>
                </optionresponse>
            </problem>
            """
        )
        assert problem.find_answer_text(answer_id, choice_id) == answer_text

    @ddt.data(
        # Test for ChoiceResponse
        ('1_2_1', 'choice_0', 'Answer Text Missing'),
        ('1_2_1', 'choice_1', 'funny'),
        # Test for MultipleChoiceResponse
        ('1_3_1', 'choice_0', 'The iPad'),
        ('1_3_1', 'choice_2', 'Answer Text Missing'),
        ('1_3_1', ['choice_0', 'choice_1'], 'The iPad, Answer Text Missing'),
        # Test for OptionResponse
        ('1_4_1', '', 'Answer Text Missing'),
    )
    @ddt.unpack
    def test_find_answer_text_choices_with_missing_text(self, answer_id, choice_id, answer_text):
        # Choices with empty element text are reported as 'Answer Text Missing'.
        problem = new_loncapa_problem(
            """
            <problem>
                <choiceresponse>
                    <checkboxgroup label="Select the correct synonym of paranoid?">
                        <choice correct="true"></choice>
                        <choice correct="false">funny</choice>
                    </checkboxgroup>
                </choiceresponse>
                <multiplechoiceresponse>
                    <choicegroup type="MultipleChoice">
                        <choice correct="false">The iPad</choice>
                        <choice correct="false"></choice>
                        <choice correct="true"></choice>
                    </choicegroup>
                </multiplechoiceresponse>
                <optionresponse>
                    <optioninput options="('yellow','blue','green')" correct="blue" label="Color_1"/>
                </optionresponse>
            </problem>
            """
        )
        assert problem.find_answer_text(answer_id, choice_id) == answer_text

    @ddt.data(
        # Test for ChoiceResponse
        ('1_2_1', 'over-suspicious'),
        # Test for MultipleChoiceResponse
        ('1_3_1', 'The iPad, Napster'),
        # Test for OptionResponse
        ('1_4_1', 'blue'),
    )
    @ddt.unpack
    def test_find_correct_answer_text_choices(self, answer_id, answer_text):
        """
        Verify that ``find_correct_answer_text`` can find the correct answer for
        ChoiceResponse, MultipleChoiceResponse and OptionResponse problems.
        """
        problem = new_loncapa_problem(
            """
            <problem>
                <choiceresponse>
                    <checkboxgroup label="Select the correct synonym of paranoid?">
                        <choice correct="true">over-suspicious</choice>
                        <choice correct="false">funny</choice>
                    </checkboxgroup>
                </choiceresponse>
                <multiplechoiceresponse>
                    <choicegroup type="MultipleChoice">
                        <choice correct="true">The iPad</choice>
                        <choice correct="true">Napster</choice>
                        <choice correct="false">The iPod</choice>
                    </choicegroup>
                </multiplechoiceresponse>
                <optionresponse>
                    <optioninput options="('yellow','blue','green')" correct="blue" label="Color_1"/>
                </optionresponse>
            </problem>
            """
        )
        assert problem.find_correct_answer_text(answer_id) == answer_text

    def test_find_answer_text_textinput(self):
        # For text inputs the submitted answer is returned verbatim.
        problem = new_loncapa_problem(
            """
            <problem>
                <stringresponse answer="hide" type="ci">
                    <textline size="40"/>
                </stringresponse>
            </problem>
            """
        )
        assert problem.find_answer_text('1_2_1', 'hide') == 'hide'

    def test_get_question_answer(self):
        problem = new_loncapa_problem(
            """
            <problem>
                <optionresponse>
                    <optioninput options="('yellow','blue','green')" correct="blue" label="Color_1"/>
                </optionresponse>
                <solution>
                    <div class="detailed-solution">
                        <p>Explanation</p>
                        <p>Blue is the answer.</p>
                    </div>
                </solution>
            </problem>
            """
        )
        # Ensure that the answer is a string so that the dict returned from this
        # function can eventually be serialized to json without issues.
        assert isinstance(problem.get_question_answers()['1_solution_1'], six.text_type)
| """
Test capa problem.
"""
import textwrap
import unittest
import pytest
import ddt
import six
from lxml import etree
from markupsafe import Markup
from mock import patch
from capa.responsetypes import LoncapaProblemError
from capa.tests.helpers import new_loncapa_problem
from openedx.core.djangolib.markup import HTML
@ddt.ddt
class CAPAProblemTest(unittest.TestCase):
""" CAPA problem related tests"""
@ddt.unpack
@ddt.data(
{'question': 'Select the correct synonym of paranoid?'},
{'question': 'Select the correct <em>synonym</em> of <strong>paranoid</strong>?'},
)
def test_label_and_description_inside_responsetype(self, question):
"""
Verify that
* label is extracted
* <label> tag is removed to avoid duplication
This is the case when we have a problem with single question or
problem with multiple-questions separated as per the new format.
"""
xml = """
<problem>
<choiceresponse>
<label>{question}</label>
<description>Only the paranoid survive.</description>
<checkboxgroup>
<choice correct="true">over-suspicious</choice>
<choice correct="false">funny</choice>
</checkboxgroup>
</choiceresponse>
</problem>
""".format(question=question)
problem = new_loncapa_problem(xml)
assert problem.problem_data ==\
{'1_2_1': {'label': question, 'descriptions': {'description_1_1_1': 'Only the paranoid survive.'}}}
assert len(problem.tree.xpath('//label')) == 0
@ddt.unpack
@ddt.data(
{
'question': 'Once we become predictable, we become ______?',
'label_attr': 'Once we become predictable, we become ______?'
},
{
'question': 'Once we become predictable, we become ______?<img src="img/src"/>',
'label_attr': 'Once we become predictable, we become ______?'
},
)
def test_legacy_problem(self, question, label_attr):
"""
Verify that legacy problem is handled correctly.
"""
xml = """
<problem>
<p>Be sure to check your spelling.</p>
<p>{}</p>
<stringresponse answer="vulnerable" type="ci">
<textline label="{}" size="40"/>
</stringresponse>
</problem>
""".format(question, label_attr)
problem = new_loncapa_problem(xml)
assert problem.problem_data == {'1_2_1': {'label': question, 'descriptions': {}}}
assert len(problem.tree.xpath("//*[normalize-space(text())='{}']".format(question))) == 0
@ddt.unpack
@ddt.data(
{
'question1': 'People who say they have nothing to ____ almost always do?',
'question2': 'Select the correct synonym of paranoid?'
},
{
'question1': '<b>People</b> who say they have <mark>nothing</mark> to ____ almost always do?',
'question2': 'Select the <sup>correct</sup> synonym of <mark>paranoid</mark>?'
},
)
def test_neither_label_tag_nor_attribute(self, question1, question2):
"""
Verify that label is extracted correctly.
This is the case when we have a markdown problem with multiple-questions.
In this case when markdown is converted to xml, there will be no label
tag and label attribute inside responsetype. But we have a label tag
before the responsetype.
"""
xml = """
<problem>
<p>Be sure to check your spelling.</p>
<label>{}</label>
<stringresponse answer="hide" type="ci">
<textline size="40"/>
</stringresponse>
<choiceresponse>
<label>{}</label>
<checkboxgroup>
<choice correct="true">over-suspicious</choice>
<choice correct="false">funny</choice>
</checkboxgroup>
</choiceresponse>
</problem>
""".format(question1, question2)
problem = new_loncapa_problem(xml)
assert problem.problem_data ==\
{'1_2_1': {'label': question1, 'descriptions': {}}, '1_3_1': {'label': question2, 'descriptions': {}}}
for question in (question1, question2):
assert len(problem.tree.xpath('//label[text()="{}"]'.format(question))) == 0
def test_multiple_descriptions(self):
"""
Verify that multiple descriptions are handled correctly.
"""
desc1 = "The problem with trying to be the <em>bad guy</em>, there's always someone <strong>worse</strong>."
desc2 = "Anyone who looks the world as if it was a game of chess deserves to lose."
xml = """
<problem>
<p>Be sure to check your spelling.</p>
<stringresponse answer="War" type="ci">
<label>___ requires sacrifices.</label>
<description>{}</description>
<description>{}</description>
<textline size="40"/>
</stringresponse>
</problem>
""".format(desc1, desc2)
problem = new_loncapa_problem(xml)
assert problem.problem_data ==\
{'1_2_1': {'label': '___ requires sacrifices.',
'descriptions': {'description_1_1_1': desc1, 'description_1_1_2': desc2}}}
def test_additional_answer_is_skipped_from_resulting_html(self):
"""Tests that additional_answer element is not present in transformed HTML"""
xml = """
<problem>
<p>Be sure to check your spelling.</p>
<stringresponse answer="War" type="ci">
<label>___ requires sacrifices.</label>
<description>Anyone who looks the world as if it was a game of chess deserves to lose.</description>
<additional_answer answer="optional acceptable variant of the correct answer"/>
<textline size="40"/>
</stringresponse>
</problem>
"""
problem = new_loncapa_problem(xml)
assert len(problem.extracted_tree.xpath('//additional_answer')) == 0
assert 'additional_answer' not in problem.get_html()
def test_non_accessible_inputtype(self):
"""
Verify that tag with question text is not removed when inputtype is not fully accessible.
"""
question = "Click the country which is home to the Pyramids."
# lint-amnesty, pylint: disable=duplicate-string-formatting-argument
xml = """
<problem>
<p>{}</p>
<imageresponse>
<imageinput label="{}"
src="/static/Africa.png" width="600" height="638" rectangle="(338,98)-(412,168)"/>
</imageresponse>
</problem>
""".format(question, question)
problem = new_loncapa_problem(xml)
assert problem.problem_data == {'1_2_1': {'label': question, 'descriptions': {}}}
# <p> tag with question text should not be deleted
assert problem.tree.xpath("string(p[text()='{}'])".format(question)) == question
def test_label_is_empty_if_no_label_attribute(self):
"""
Verify that label in response_data is empty string when label
attribute is missing and responsetype is not fully accessible.
"""
question = "Click the country which is home to the Pyramids."
xml = """
<problem>
<p>{}</p>
<imageresponse>
<imageinput
src="/static/Africa.png" width="600" height="638" rectangle="(338,98)-(412,168)"/>
</imageresponse>
</problem>
""".format(question)
problem = new_loncapa_problem(xml)
assert problem.problem_data == {'1_2_1': {'label': '', 'descriptions': {}}}
def test_multiple_questions_problem(self):
"""
For a problem with multiple questions verify that for each question
* label is extracted
* descriptions info is constructed
* <label> tag is removed to avoid duplication
"""
xml = """
<problem>
<choiceresponse>
<label>Select the correct synonym of paranoid?</label>
<description>Only the paranoid survive.</description>
<checkboxgroup>
<choice correct="true">over-suspicious</choice>
<choice correct="false">funny</choice>
</checkboxgroup>
</choiceresponse>
<multiplechoiceresponse>
<p>one more question</p>
<label>What Apple device competed with the portable CD player?</label>
<description>Device looks like an egg plant.</description>
<choicegroup type="MultipleChoice">
<choice correct="false">The iPad</choice>
<choice correct="false">Napster</choice>
<choice correct="true">The iPod</choice>
<choice correct="false">The vegetable peeler</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
"""
problem = new_loncapa_problem(xml)
assert problem.problem_data ==\
{'1_2_1': {'label': 'Select the correct synonym of paranoid?',
'descriptions': {'description_1_1_1': 'Only the paranoid survive.'}},
'1_3_1': {'label': 'What Apple device competed with the portable CD player?',
'descriptions': {'description_1_2_1': 'Device looks like an egg plant.'}}}
assert len(problem.tree.xpath('//label')) == 0
def test_question_title_not_removed_got_children(self):
"""
Verify that <p> question text before responsetype not deleted when
it contains other children and label is picked from label attribute of inputtype
This is the case when author updated the <p> immediately before
responsetype to contain other elements. We do not want to delete information in that case.
"""
question = 'Is egg plant a fruit?'
xml = """
<problem>
<p>Choose wisely.</p>
<p>Select the correct synonym of paranoid?</p>
<p><img src="" /></p>
<choiceresponse>
<checkboxgroup label="{}">
<choice correct="true">over-suspicious</choice>
<choice correct="false">funny</choice>
</checkboxgroup>
</choiceresponse>
</problem>
""".format(question)
problem = new_loncapa_problem(xml)
assert problem.problem_data == {'1_2_1': {'label': '', 'descriptions': {}}}
assert len(problem.tree.xpath('//p/img')) == 1
@ddt.unpack
@ddt.data(
{'group_label': 'Choose the correct color'},
{'group_label': 'Choose the <b>correct</b> <mark>color</mark>'},
)
def test_multiple_inputtypes(self, group_label):
"""
Verify that group label and labels for individual inputtypes are extracted correctly.
"""
input1_label = 'What color is the sky?'
input2_label = 'What color are pine needles?'
xml = """
<problem>
<optionresponse>
<label>{}</label>
<optioninput options="('yellow','blue','green')" correct="blue" label="{}"/>
<optioninput options="('yellow','blue','green')" correct="green" label="{}"/>
</optionresponse>
</problem>
""".format(group_label, input1_label, input2_label)
problem = new_loncapa_problem(xml)
assert problem.problem_data ==\
{'1_2_1': {'group_label': group_label, 'label': input1_label, 'descriptions': {}},
'1_2_2': {'group_label': group_label, 'label': input2_label, 'descriptions': {}}}
    def test_single_inputtypes(self):
        """
        Verify that HTML is correctly rendered when there is single inputtype.
        """
        question = 'Enter sum of 1+2'
        xml = textwrap.dedent("""
        <problem>
            <customresponse cfn="test_sum" expect="3">
                <script type="loncapa/python">
        def test_sum(expect, ans):
            return int(expect) == int(ans)
                </script>
                <label>{}</label>
                <textline size="20" correct_answer="3" />
            </customresponse>
        </problem>
        """.format(question))
        problem = new_loncapa_problem(xml, use_capa_render_template=True)
        problem_html = etree.XML(problem.get_html())
        # verify that no multi-inputs-group div is present (single input only)
        multi_inputs_group = problem_html.xpath('//div[@class="multi-inputs-group"]')
        assert len(multi_inputs_group) == 0
        # verify that question is rendered only once
        question = problem_html.xpath("//*[normalize-space(text())='{}']".format(question))
        assert len(question) == 1
    def assert_question_tag(self, question1, question2, tag, label_attr=False):
        """
        Verify question tag correctness.

        Arguments:
            question1 (str): question text for the first responsetype ('' to omit the tag)
            question2 (str): question text for the second responsetype ('' to omit the tag)
            tag (str): element name wrapping the question text, e.g. 'label' or 'p'
            label_attr (bool): also mirror the question text into the inputtype's
                ``label`` attribute (legacy markup style)
        """
        question1_tag = '<{tag}>{}</{tag}>'.format(question1, tag=tag) if question1 else ''
        question2_tag = '<{tag}>{}</{tag}>'.format(question2, tag=tag) if question2 else ''
        question1_label_attr = 'label="{}"'.format(question1) if label_attr else ''
        question2_label_attr = 'label="{}"'.format(question2) if label_attr else ''
        xml = """
        <problem>
            {question1_tag}
            <choiceresponse>
                <checkboxgroup {question1_label_attr}>
                    <choice correct="true">choice1</choice>
                    <choice correct="false">choice2</choice>
                </checkboxgroup>
            </choiceresponse>
            {question2_tag}
            <multiplechoiceresponse>
                <choicegroup type="MultipleChoice" {question2_label_attr}>
                    <choice correct="false">choice1</choice>
                    <choice correct="true">choice2</choice>
                </choicegroup>
            </multiplechoiceresponse>
        </problem>
        """.format(
            question1_tag=question1_tag,
            question2_tag=question2_tag,
            question1_label_attr=question1_label_attr,
            question2_label_attr=question2_label_attr,
        )
        problem = new_loncapa_problem(xml)
        # Each question text is extracted into problem_data and the wrapping
        # tags are removed from the tree to avoid duplication.
        assert problem.problem_data ==\
            {'1_2_1': {'label': question1, 'descriptions': {}}, '1_3_1': {'label': question2, 'descriptions': {}}}
        assert len(problem.tree.xpath('//{}'.format(tag))) == 0
    @ddt.unpack
    @ddt.data(
        {'question1': 'question 1 label', 'question2': 'question 2 label'},
        {'question1': '', 'question2': 'question 2 label'},
        {'question1': 'question 1 label', 'question2': ''}
    )
    def test_correct_question_tag_is_picked(self, question1, question2):
        """
        For a problem with multiple questions verify that correct question tag is picked.
        """
        # Modern markup: question text in a <label> element, no label attribute.
        self.assert_question_tag(question1, question2, tag='label', label_attr=False)
        # Legacy markup: question text in a <p> plus a label attribute on the inputtype.
        self.assert_question_tag(question1, question2, tag='p', label_attr=True)
    def test_optionresponse_xml_compatibility(self):
        """
        Verify that an optionresponse problem with multiple correct answers is not instantiated.

        Scenario:
        Given an optionresponse/Dropdown problem
        If there are multiple correct answers
        Then the problem is not instantiated
        And Loncapa problem error exception is raised
        If the problem is corrected by including only one correct answer
        Then the problem is created successfully
        """
        xml = """
        <problem>
            <optionresponse>
                <p>You can use this template as a guide to the simple editor markdown and OLX markup to use for dropdown problems. Edit this component to replace this template with your own assessment.</p>
                <label>Add the question text, or prompt, here. This text is required.</label>
                <description>You can add an optional tip or note related to the prompt like this. </description>
                <optioninput>
                    <option correct="False">an incorrect answer</option>
                    <option correct="True">the correct answer</option>
                    <option correct="{correctness}">an incorrect answer</option>
                </optioninput>
            </optionresponse>
        </problem>
        """
        # Two options marked correct="True" -> the constructor must refuse the problem.
        with pytest.raises(LoncapaProblemError):
            new_loncapa_problem(xml.format(correctness=True))
        # Exactly one correct option -> the problem is created.
        problem = new_loncapa_problem(xml.format(correctness=False))
        assert problem is not None
    def test_optionresponse_option_with_empty_text(self):
        """
        Verify successful instantiation of an optionresponse problem
        with an option with empty text
        """
        xml = """
        <problem>
            <optionresponse>
                <label>Select True or False</label>
                <optioninput>
                    <option correct="False">True <optionhint>Not this one</optionhint></option>
                    <option correct="True">False</option>
                    <option correct="False"><optionhint>Not this empty one either</optionhint></option>
                </optioninput>
            </optionresponse>
        </problem>
        """
        # The third option has no text of its own (only an <optionhint> child);
        # parsing must still succeed rather than failing on the empty text.
        problem = new_loncapa_problem(xml)
        assert problem is not None
@ddt.ddt
class CAPAMultiInputProblemTest(unittest.TestCase):
    """ TestCase for CAPA problems with multiple inputtypes """

    def capa_problem(self, xml):
        """
        Create capa problem.
        """
        # use_capa_render_template=True exercises the real HTML rendering path.
        return new_loncapa_problem(xml, use_capa_render_template=True)

    def assert_problem_data(self, problem_data):
        """Verify problem data is in expected state"""
        for problem_value in six.viewvalues(problem_data):
            # Labels must be Markup instances so embedded HTML is not re-escaped.
            assert isinstance(problem_value['label'], Markup)

    def assert_problem_html(self, problem_html, group_label, *input_labels):
        """
        Verify that correct html is rendered for multiple inputtypes.

        Arguments:
            problem_html (str): problem HTML
            group_label (str or None): multi input group label or None if label is not present
            input_labels (tuple): individual input labels
        """
        html = etree.XML(problem_html)
        # verify that only one multi input group div is present at correct path
        multi_inputs_group = html.xpath(
            '//div[@class="wrapper-problem-response"]/div[@class="multi-inputs-group"]'
        )
        assert len(multi_inputs_group) == 1
        if group_label is None:
            # if multi inputs group label is not present then there shouldn't be `aria-labelledby` attribute
            assert multi_inputs_group[0].attrib.get('aria-labelledby') is None
        else:
            # verify that multi input group label <p> tag exists and its
            # id matches with correct multi input group aria-labelledby
            multi_inputs_group_label_id = multi_inputs_group[0].attrib.get('aria-labelledby')
            multi_inputs_group_label = html.xpath('//p[@id="{}"]'.format(multi_inputs_group_label_id))
            assert len(multi_inputs_group_label) == 1
            assert multi_inputs_group_label[0].text == group_label
        # verify that label for each input comes only once
        for input_label in input_labels:
            # normalize-space is used to remove whitespace around the text
            input_label_element = multi_inputs_group[0].xpath('//*[normalize-space(text())="{}"]'.format(input_label))
            assert len(input_label_element) == 1

    @ddt.unpack
    @ddt.data(
        {'label_html': '<label>Choose the correct color</label>', 'group_label': 'Choose the correct color'},
        {'label_html': '', 'group_label': None}
    )
    def test_optionresponse(self, label_html, group_label):
        """
        Verify that optionresponse problem with multiple inputtypes is rendered correctly.
        """
        input1_label = 'What color is the sky?'
        input2_label = 'What color are pine needles?'
        xml = """
        <problem>
            <optionresponse>
                {label_html}
                <optioninput options="('yellow','blue','green')" correct="blue" label="{input1_label}"/>
                <optioninput options="('yellow','blue','green')" correct="green" label="{input2_label}"/>
            </optionresponse>
        </problem>
        """.format(label_html=label_html, input1_label=input1_label, input2_label=input2_label)
        problem = self.capa_problem(xml)
        self.assert_problem_html(problem.get_html(), group_label, input1_label, input2_label)
        self.assert_problem_data(problem.problem_data)

    @ddt.unpack
    @ddt.data(
        {'inputtype': 'textline'},
        {'inputtype': 'formulaequationinput'}
    )
    def test_customresponse(self, inputtype):
        """
        Verify that customresponse problem with multiple textline
        and formulaequationinput inputtypes is rendered correctly.
        """
        group_label = 'Enter two integers that sum to 10.'
        input1_label = 'Integer 1'
        input2_label = 'Integer 2'
        xml = textwrap.dedent("""
        <problem>
            <customresponse cfn="test_add_to_ten">
                <script type="loncapa/python">
        def test_add_to_ten(expect, ans):
            return test_add(10, ans)
                </script>
                <label>{}</label>
                <{inputtype} size="40" correct_answer="3" label="{}" /><br/>
                <{inputtype} size="40" correct_answer="7" label="{}" />
            </customresponse>
        </problem>
        """.format(group_label, input1_label, input2_label, inputtype=inputtype))
        problem = self.capa_problem(xml)
        self.assert_problem_html(problem.get_html(), group_label, input1_label, input2_label)
        self.assert_problem_data(problem.problem_data)

    @ddt.unpack
    @ddt.data(
        {
            'descriptions': ('desc1', 'desc2'),
            'descriptions_html': '<description>desc1</description><description>desc2</description>'
        },
        {
            'descriptions': (),
            'descriptions_html': ''
        }
    )
    def test_descriptions(self, descriptions, descriptions_html):
        """
        Verify that groups descriptions are rendered correctly.
        """
        xml = """
        <problem>
            <optionresponse>
                <label>group label</label>
                {descriptions_html}
                <optioninput options="('yellow','blue','green')" correct="blue" label="first label"/>
                <optioninput options="('yellow','blue','green')" correct="green" label="second label"/>
            </optionresponse>
        </problem>
        """.format(descriptions_html=descriptions_html)
        problem = self.capa_problem(xml)
        problem_html = etree.XML(problem.get_html())
        multi_inputs_group = problem_html.xpath('//div[@class="multi-inputs-group"]')[0]
        # aria-describedby carries one id per rendered <description>, in order.
        description_ids = multi_inputs_group.attrib.get('aria-describedby', '').split()
        # Verify that number of descriptions matches description_ids
        assert len(description_ids) == len(descriptions)
        # For each description, check its order and text is correct
        for index, description_id in enumerate(description_ids):
            description_element = multi_inputs_group.xpath('//p[@id="{}"]'.format(description_id))
            assert len(description_element) == 1
            assert description_element[0].text == descriptions[index]
@ddt.ddt
class CAPAProblemReportHelpersTest(unittest.TestCase):
    """ TestCase for CAPA methods for finding question labels and answer text """

    @ddt.data(
        ('answerid_2_1', 'label', 'label'),
        ('answerid_2_2', 'label <some>html</some>', 'label html'),
        ('answerid_2_2', '<more html="yes"/>label <some>html</some>', 'label html'),
        ('answerid_2_3', None, 'Question 1'),
        ('answerid_2_3', '', 'Question 1'),
        ('answerid_3_3', '', 'Question 2'),
    )
    @ddt.unpack
    def test_find_question_label(self, answer_id, label, stripped_label):
        # HTML tags are stripped from stored labels; empty/None labels fall back
        # to a positional "Question N" name derived from the answer id.
        problem = new_loncapa_problem(
            '<problem><some-problem id="{}"/></problem>'.format(answer_id)
        )
        mock_problem_data = {
            answer_id: {
                'label': HTML(label) if label else ''
            }
        }
        with patch.object(problem, 'problem_data', mock_problem_data):
            assert problem.find_question_label(answer_id) == stripped_label

    @ddt.data(None, dict(), [None])
    def test_find_answer_test_not_implemented(self, current_answer):
        # Unsupported answer payload shapes must raise NotImplementedError.
        problem = new_loncapa_problem('<problem/>')
        self.assertRaises(NotImplementedError, problem.find_answer_text, '', current_answer)

    @ddt.data(
        ('1_2_1', 'choice_0', 'over-suspicious'),
        ('1_2_1', 'choice_1', 'funny'),
        ('1_3_1', 'choice_0', 'The iPad'),
        ('1_3_1', 'choice_2', 'The iPod'),
        ('1_3_1', ['choice_0', 'choice_1'], 'The iPad, Napster'),
        ('1_4_1', 'yellow', 'yellow'),
        ('1_4_1', 'blue', 'blue'),
    )
    @ddt.unpack
    def test_find_answer_text_choices(self, answer_id, choice_id, answer_text):
        # find_answer_text maps internal choice ids (or option values) back to
        # the human-readable choice text; a list of ids is joined with ", ".
        problem = new_loncapa_problem(
            """
            <problem>
                <choiceresponse>
                    <checkboxgroup label="Select the correct synonym of paranoid?">
                        <choice correct="true">over-suspicious</choice>
                        <choice correct="false">funny</choice>
                    </checkboxgroup>
                </choiceresponse>
                <multiplechoiceresponse>
                    <choicegroup type="MultipleChoice">
                        <choice correct="false">The iPad</choice>
                        <choice correct="false">Napster</choice>
                        <choice correct="true">The iPod</choice>
                    </choicegroup>
                </multiplechoiceresponse>
                <optionresponse>
                    <optioninput options="('yellow','blue','green')" correct="blue" label="Color_1"/>
                </optionresponse>
            </problem>
            """
        )
        assert problem.find_answer_text(answer_id, choice_id) == answer_text

    @ddt.data(
        # Test for ChoiceResponse
        ('1_2_1', 'choice_0', 'Answer Text Missing'),
        ('1_2_1', 'choice_1', 'funny'),
        # Test for MultipleChoiceResponse
        ('1_3_1', 'choice_0', 'The iPad'),
        ('1_3_1', 'choice_2', 'Answer Text Missing'),
        ('1_3_1', ['choice_0', 'choice_1'], 'The iPad, Answer Text Missing'),
        # Test for OptionResponse
        ('1_4_1', '', 'Answer Text Missing'),
    )
    @ddt.unpack
    def test_find_answer_text_choices_with_missing_text(self, answer_id, choice_id, answer_text):
        # Choices whose bodies are empty resolve to the "Answer Text Missing"
        # placeholder instead of empty strings.
        problem = new_loncapa_problem(
            """
            <problem>
                <choiceresponse>
                    <checkboxgroup label="Select the correct synonym of paranoid?">
                        <choice correct="true"></choice>
                        <choice correct="false">funny</choice>
                    </checkboxgroup>
                </choiceresponse>
                <multiplechoiceresponse>
                    <choicegroup type="MultipleChoice">
                        <choice correct="false">The iPad</choice>
                        <choice correct="false"></choice>
                        <choice correct="true"></choice>
                    </choicegroup>
                </multiplechoiceresponse>
                <optionresponse>
                    <optioninput options="('yellow','blue','green')" correct="blue" label="Color_1"/>
                </optionresponse>
            </problem>
            """
        )
        assert problem.find_answer_text(answer_id, choice_id) == answer_text

    @ddt.data(
        # Test for ChoiceResponse
        ('1_2_1', 'over-suspicious'),
        # Test for MultipleChoiceResponse
        ('1_3_1', 'The iPad, Napster'),
        # Test for OptionResponse
        ('1_4_1', 'blue'),
    )
    @ddt.unpack
    def test_find_correct_answer_text_choices(self, answer_id, answer_text):
        """
        Verify that ``find_correct_answer_text`` can find the correct answer for
        ChoiceResponse, MultipleChoiceResponse and OptionResponse problems.
        """
        problem = new_loncapa_problem(
            """
            <problem>
                <choiceresponse>
                    <checkboxgroup label="Select the correct synonym of paranoid?">
                        <choice correct="true">over-suspicious</choice>
                        <choice correct="false">funny</choice>
                    </checkboxgroup>
                </choiceresponse>
                <multiplechoiceresponse>
                    <choicegroup type="MultipleChoice">
                        <choice correct="true">The iPad</choice>
                        <choice correct="true">Napster</choice>
                        <choice correct="false">The iPod</choice>
                    </choicegroup>
                </multiplechoiceresponse>
                <optionresponse>
                    <optioninput options="('yellow','blue','green')" correct="blue" label="Color_1"/>
                </optionresponse>
            </problem>
            """
        )
        assert problem.find_correct_answer_text(answer_id) == answer_text

    def test_find_answer_text_textinput(self):
        # For text inputs the submitted answer is already display text, so it
        # is returned unchanged.
        problem = new_loncapa_problem(
            """
            <problem>
                <stringresponse answer="hide" type="ci">
                    <textline size="40"/>
                </stringresponse>
            </problem>
            """
        )
        assert problem.find_answer_text('1_2_1', 'hide') == 'hide'

    def test_get_question_answer(self):
        problem = new_loncapa_problem(
            """
            <problem>
                <optionresponse>
                    <optioninput options="('yellow','blue','green')" correct="blue" label="Color_1"/>
                </optionresponse>
                <solution>
                    <div class="detailed-solution">
                        <p>Explanation</p>
                        <p>Blue is the answer.</p>
                    </div>
                </solution>
            </problem>
            """
        )
        # Ensure that the answer is a string so that the dict returned from this
        # function can eventualy be serialized to json without issues.
        assert isinstance(problem.get_question_answers()['1_solution_1'], six.text_type)
| en | 0.523939 | Test capa problem. CAPA problem related tests Verify that * label is extracted * <label> tag is removed to avoid duplication This is the case when we have a problem with single question or problem with multiple-questions separated as per the new format. <problem> <choiceresponse> <label>{question}</label> <description>Only the paranoid survive.</description> <checkboxgroup> <choice correct="true">over-suspicious</choice> <choice correct="false">funny</choice> </checkboxgroup> </choiceresponse> </problem> Verify that legacy problem is handled correctly. <problem> <p>Be sure to check your spelling.</p> <p>{}</p> <stringresponse answer="vulnerable" type="ci"> <textline label="{}" size="40"/> </stringresponse> </problem> Verify that label is extracted correctly. This is the case when we have a markdown problem with multiple-questions. In this case when markdown is converted to xml, there will be no label tag and label attribute inside responsetype. But we have a label tag before the responsetype. <problem> <p>Be sure to check your spelling.</p> <label>{}</label> <stringresponse answer="hide" type="ci"> <textline size="40"/> </stringresponse> <choiceresponse> <label>{}</label> <checkboxgroup> <choice correct="true">over-suspicious</choice> <choice correct="false">funny</choice> </checkboxgroup> </choiceresponse> </problem> Verify that multiple descriptions are handled correctly. 
<problem> <p>Be sure to check your spelling.</p> <stringresponse answer="War" type="ci"> <label>___ requires sacrifices.</label> <description>{}</description> <description>{}</description> <textline size="40"/> </stringresponse> </problem> Tests that additional_answer element is not present in transformed HTML <problem> <p>Be sure to check your spelling.</p> <stringresponse answer="War" type="ci"> <label>___ requires sacrifices.</label> <description>Anyone who looks the world as if it was a game of chess deserves to lose.</description> <additional_answer answer="optional acceptable variant of the correct answer"/> <textline size="40"/> </stringresponse> </problem> Verify that tag with question text is not removed when inputtype is not fully accessible. # lint-amnesty, pylint: disable=duplicate-string-formatting-argument <problem> <p>{}</p> <imageresponse> <imageinput label="{}" src="/static/Africa.png" width="600" height="638" rectangle="(338,98)-(412,168)"/> </imageresponse> </problem> # <p> tag with question text should not be deleted Verify that label in response_data is empty string when label attribute is missing and responsetype is not fully accessible. 
<problem> <p>{}</p> <imageresponse> <imageinput src="/static/Africa.png" width="600" height="638" rectangle="(338,98)-(412,168)"/> </imageresponse> </problem> For a problem with multiple questions verify that for each question * label is extracted * descriptions info is constructed * <label> tag is removed to avoid duplication <problem> <choiceresponse> <label>Select the correct synonym of paranoid?</label> <description>Only the paranoid survive.</description> <checkboxgroup> <choice correct="true">over-suspicious</choice> <choice correct="false">funny</choice> </checkboxgroup> </choiceresponse> <multiplechoiceresponse> <p>one more question</p> <label>What Apple device competed with the portable CD player?</label> <description>Device looks like an egg plant.</description> <choicegroup type="MultipleChoice"> <choice correct="false">The iPad</choice> <choice correct="false">Napster</choice> <choice correct="true">The iPod</choice> <choice correct="false">The vegetable peeler</choice> </choicegroup> </multiplechoiceresponse> </problem> Verify that <p> question text before responsetype not deleted when it contains other children and label is picked from label attribute of inputtype This is the case when author updated the <p> immediately before responsetype to contain other elements. We do not want to delete information in that case. <problem> <p>Choose wisely.</p> <p>Select the correct synonym of paranoid?</p> <p><img src="" /></p> <choiceresponse> <checkboxgroup label="{}"> <choice correct="true">over-suspicious</choice> <choice correct="false">funny</choice> </checkboxgroup> </choiceresponse> </problem> Verify that group label and labels for individual inputtypes are extracted correctly. 
<problem> <optionresponse> <label>{}</label> <optioninput options="('yellow','blue','green')" correct="blue" label="{}"/> <optioninput options="('yellow','blue','green')" correct="green" label="{}"/> </optionresponse> </problem> Verify that HTML is correctly rendered when there is single inputtype. <problem> <customresponse cfn="test_sum" expect="3"> <script type="loncapa/python"> def test_sum(expect, ans): return int(expect) == int(ans) </script> <label>{}</label> <textline size="20" correct_answer="3" /> </customresponse> </problem> # verify that only no multi input group div is present # verify that question is rendered only once Verify question tag correctness. <problem> {question1_tag} <choiceresponse> <checkboxgroup {question1_label_attr}> <choice correct="true">choice1</choice> <choice correct="false">choice2</choice> </checkboxgroup> </choiceresponse> {question2_tag} <multiplechoiceresponse> <choicegroup type="MultipleChoice" {question2_label_attr}> <choice correct="false">choice1</choice> <choice correct="true">choice2</choice> </choicegroup> </multiplechoiceresponse> </problem> For a problem with multiple questions verify that correct question tag is picked. Verify that an optionresponse problem with multiple correct answers is not instantiated. Scenario: Given an optionresponse/Dropdown problem If there are multiple correct answers Then the problem is not instantiated And Loncapa problem error exception is raised If the problem is corrected by including only one correct answer Then the problem is created successfully <problem> <optionresponse> <p>You can use this template as a guide to the simple editor markdown and OLX markup to use for dropdown problems. Edit this component to replace this template with your own assessment.</p> <label>Add the question text, or prompt, here. This text is required.</label> <description>You can add an optional tip or note related to the prompt like this. 
</description> <optioninput> <option correct="False">an incorrect answer</option> <option correct="True">the correct answer</option> <option correct="{correctness}">an incorrect answer</option> </optioninput> </optionresponse> </problem> Verify successful instantiation of an optionresponse problem with an option with empty text <problem> <optionresponse> <label>Select True or False</label> <optioninput> <option correct="False">True <optionhint>Not this one</optionhint></option> <option correct="True">False</option> <option correct="False"><optionhint>Not this empty one either</optionhint></option> </optioninput> </optionresponse> </problem> TestCase for CAPA problems with multiple inputtypes Create capa problem. Verify problem data is in expected state Verify that correct html is rendered for multiple inputtypes. Arguments: problem_html (str): problem HTML group_label (str or None): multi input group label or None if label is not present input_labels (tuple): individual input labels # verify that only one multi input group div is present at correct path # if multi inputs group label is not present then there shouldn't be `aria-labelledby` attribute # verify that multi input group label <p> tag exists and its # id matches with correct multi input group aria-labelledby # verify that label for each input comes only once # normalize-space is used to remove whitespace around the text Verify that optionresponse problem with multiple inputtypes is rendered correctly. <problem> <optionresponse> {label_html} <optioninput options="('yellow','blue','green')" correct="blue" label="{input1_label}"/> <optioninput options="('yellow','blue','green')" correct="green" label="{input2_label}"/> </optionresponse> </problem> Verify that customresponse problem with multiple textline and formulaequationinput inputtypes is rendered correctly. 
<problem> <customresponse cfn="test_add_to_ten"> <script type="loncapa/python"> def test_add_to_ten(expect, ans): return test_add(10, ans) </script> <label>{}</label> <{inputtype} size="40" correct_answer="3" label="{}" /><br/> <{inputtype} size="40" correct_answer="7" label="{}" /> </customresponse> </problem> Verify that groups descriptions are rendered correctly. <problem> <optionresponse> <label>group label</label> {descriptions_html} <optioninput options="('yellow','blue','green')" correct="blue" label="first label"/> <optioninput options="('yellow','blue','green')" correct="green" label="second label"/> </optionresponse> </problem> # Verify that number of descriptions matches description_ids # For each description, check its order and text is correct TestCase for CAPA methods for finding question labels and answer text <problem> <choiceresponse> <checkboxgroup label="Select the correct synonym of paranoid?"> <choice correct="true">over-suspicious</choice> <choice correct="false">funny</choice> </checkboxgroup> </choiceresponse> <multiplechoiceresponse> <choicegroup type="MultipleChoice"> <choice correct="false">The iPad</choice> <choice correct="false">Napster</choice> <choice correct="true">The iPod</choice> </choicegroup> </multiplechoiceresponse> <optionresponse> <optioninput options="('yellow','blue','green')" correct="blue" label="Color_1"/> </optionresponse> </problem> # Test for ChoiceResponse # Test for MultipleChoiceResponse # Test for OptionResponse <problem> <choiceresponse> <checkboxgroup label="Select the correct synonym of paranoid?"> <choice correct="true"></choice> <choice correct="false">funny</choice> </checkboxgroup> </choiceresponse> <multiplechoiceresponse> <choicegroup type="MultipleChoice"> <choice correct="false">The iPad</choice> <choice correct="false"></choice> <choice correct="true"></choice> </choicegroup> </multiplechoiceresponse> <optionresponse> <optioninput options="('yellow','blue','green')" correct="blue" label="Color_1"/> 
</optionresponse> </problem> # Test for ChoiceResponse # Test for MultipleChoiceResponse # Test for OptionResponse Verify that ``find_correct_answer_text`` can find the correct answer for ChoiceResponse, MultipleChoiceResponse and OptionResponse problems. <problem> <choiceresponse> <checkboxgroup label="Select the correct synonym of paranoid?"> <choice correct="true">over-suspicious</choice> <choice correct="false">funny</choice> </checkboxgroup> </choiceresponse> <multiplechoiceresponse> <choicegroup type="MultipleChoice"> <choice correct="true">The iPad</choice> <choice correct="true">Napster</choice> <choice correct="false">The iPod</choice> </choicegroup> </multiplechoiceresponse> <optionresponse> <optioninput options="('yellow','blue','green')" correct="blue" label="Color_1"/> </optionresponse> </problem> <problem> <stringresponse answer="hide" type="ci"> <textline size="40"/> </stringresponse> </problem> <problem> <optionresponse> <optioninput options="('yellow','blue','green')" correct="blue" label="Color_1"/> </optionresponse> <solution> <div class="detailed-solution"> <p>Explanation</p> <p>Blue is the answer.</p> </div> </solution> </problem> # Ensure that the answer is a string so that the dict returned from this # function can eventualy be serialized to json without issues. | 2.453664 | 2 |
sw/hsv.py | shnayder/moabian | 13 | 6614671 | <reponame>shnayder/moabian
import colorsys
def hsv_to_rgb(h, s, v):
    """Convert HSV (each component in [0, 1]) to an (r, g, b) tuple in [0, 1]."""
    if s == 0.0:
        # Zero saturation is achromatic grey: every channel equals the value.
        return (v, v, v)
    sector = int(h * 6.0)  # which 60-degree sector of the hue wheel (truncates)
    frac = (h * 6.0) - sector  # fractional position within that sector
    p = v * (1.0 - s)
    q = v * (1.0 - s * frac)
    t = v * (1.0 - s * (1.0 - frac))
    sector %= 6
    # One (r, g, b) arrangement of v/t/p/q per sector.
    arrangements = (
        (v, t, p),
        (q, v, p),
        (p, v, t),
        (p, q, v),
        (t, p, v),
        (v, p, q),
    )
    return arrangements[sector]
def rgb_to_bgr(rgb):
    """Reverse the channel order (RGB -> BGR); the input sequence type is preserved."""
    reversed_channels = rgb[::-1]
    return reversed_channels
def hue_to_bgr(hue, s=0.75, v=0.75):
    """Map a hue in degrees [0, 360] to a BGR list of ints in [0, 255]."""
    assert hue >= 0 and hue <= 360
    normalized = hsv_to_rgb(hue / 360.0, s, v)
    byte_channels = [int(channel * 255) for channel in normalized]
    return rgb_to_bgr(byte_channels)
def hsv_normalized_to_bgr(h, s, v):
    """Convert normalized HSV (each component in [0, 1]) to a BGR tuple of ints, via colorsys."""
    assert 0 <= h <= 1.0
    assert 0 <= s <= 1.0
    assert 0 <= v <= 1.0
    # round() here (vs int() truncation in hue_to_bgr) can differ by one step.
    rgb_bytes = tuple(round(channel * 255) for channel in colorsys.hsv_to_rgb(h, s, v))
    return rgb_to_bgr(rgb_bytes)
# HSV was invented by <NAME> (cool!)
def test_code(t, e):
    """Print the byte triple computed from HSV tuple *t* next to expected *e*; return whether they match."""
    computed = hsv_to_rgb(*t)
    scaled = [int(channel * 255) for channel in computed]
    print(f"f({t}) = {scaled} ~= expected: {e}")
    return scaled == e
if __name__ == "__main__":
    # Expected triples are what hsv_to_rgb + int() truncation actually produce.
    # The previous values were rounded by hand and never matched, e.g.
    # 0.5 * 255 -> 127 (not 128) and h == 1.0 -> [255, 0, 0] (not [255, 4, 0]).
    # 45 deg = orange, full saturation, half value
    test_code((45 / 360.0, 1.0, 0.5), [127, 95, 0])
    # 45 deg = orange, 75% saturation
    test_code((45 / 360.0, 0.75, 0.5), [127, 103, 31])
    # repeat of the 45 deg case (was mislabelled "157 = green")
    test_code((45 / 360.0, 1.0, 0.5), [127, 95, 0])
    # 211 deg = blue
    test_code((211 / 360.0, 1.0, 0.5), [0, 61, 127])
    # h == 1.0 wraps around the hue wheel to 0 deg = pure red
    test_code((1, 1.0, 1.0), [255, 0, 0])
    # 0 deg, 100% saturation, 100% value = red
    test_code((0 / 360.0, 1.0, 1.0), [255, 0, 0])
    print(hue_to_bgr(45))
| import colorsys
def hsv_to_rgb(h, s, v):
    """Convert HSV (each component in [0, 1]) to an (r, g, b) tuple in [0, 1]."""
    if s == 0.0:
        # achromatic (grey): every channel equals the value
        return (v, v, v)
    i = int(h * 6.0)  # XXX assume int() truncates!
    f = (h * 6.0) - i  # fractional position within the 60-degree hue sector
    p, q, t = v * (1.0 - s), v * (1.0 - s * f), v * (1.0 - s * (1.0 - f))
    i %= 6
    # one channel ordering per hue sector
    if i == 0:
        return (v, t, p)
    if i == 1:
        return (q, v, p)
    if i == 2:
        return (p, v, t)
    if i == 3:
        return (p, q, v)
    if i == 4:
        return (t, p, v)
    if i == 5:
        return (v, p, q)


def rgb_to_bgr(rgb):
    """Reverse channel order (RGB -> BGR); preserves the input sequence type."""
    return rgb[::-1]


def hue_to_bgr(hue, s=0.75, v=0.75):
    """Map a hue in degrees [0, 360] to a BGR list of ints in [0, 255]."""
    assert hue >= 0 and hue <= 360
    rgb = hsv_to_rgb(hue / 360.0, s, v)
    rgb = [int(c * 255) for c in rgb]
    return rgb_to_bgr(rgb)


def hsv_normalized_to_bgr(h, s, v):
    """Convert normalized HSV (each component in [0, 1]) to a BGR tuple of ints, via colorsys."""
    assert 0 <= h <= 1.0
    assert 0 <= s <= 1.0
    assert 0 <= v <= 1.0

    def h2r(h, s, v):
        # round() here (vs int() truncation in hue_to_bgr) can differ by one step
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(h, s, v))

    return rgb_to_bgr(h2r(h, s, v))


# HSV was invented by <NAME> (cool!)
def test_code(t, e):
    """Print computed byte triple vs expected *e*; return whether they match."""
    v = hsv_to_rgb(*t)
    y = [int(s * 255) for s in v]
    print(f"f({t}) = {y} ~= expected: {e}")
    return y == e


if __name__ == "__main__":
    # NOTE(review): several expected triples below do not match what int()
    # truncation actually yields (e.g. 0.5 * 255 -> 127, not 128, and
    # (1, 1.0, 1.0) -> [255, 0, 0], not [255, 4, 0]); test_code only prints,
    # so the mismatches are silent.
    # 45 = orange
    test_code((45 / 360.0, 1.0, 0.5), [128, 96, 0])
    # 45 = orange
    test_code((45 / 360.0, 0.75, 0.5), [218, 165, 32])
    # repeat of the 45-degree case (original comment said "157 = green")
    test_code((45 / 360.0, 1.0, 0.5), [128, 96, 0])
    # 211 = blue
    test_code((211 / 360.0, 1.0, 0.5), [0, 61, 128])
    # 0, 100%, 100% = red
    test_code((1, 1.0, 1.0), [255, 4, 0])
    test_code((0 / 360.0, 1.0, 1.0), [255, 0, 0])
    print(hue_to_bgr(45))
database/scripts.py | binarybottle/mindboggle_sidelined | 3 | 6614672 | import os
from mbdb.upload import *

# set the project
# TODO need to index all projects with K/V pairs for easy search
# proj = list(Project.get_all())[0]
set_db_url(server="http://192.168.127.12:8182/graphs/mindboggle")
db = create_db('MindBoggleDB')
proj = create_project('MDD', db)

# get a list of the files: every file in the current directory is assumed to
# be a per-subject stats file named "<subjectName>_...".
dataList = os.listdir('.')
for file in dataList:  # NOTE(review): `file` shadows the Python 2 builtin
    # subject id is the part of the filename before the first underscore
    subjectName = file.partition('_')[0]
    subject = create_subject(subjectName, proj)
    # read_stats/set_fundus_stats come from mbdb.upload (star import) —
    # presumably parse and attach fundus statistics; confirm in that module.
    stats = read_stats(file)
set_fundus_stats(subject, stats) | import os
from mbdb.upload import *
# set the project
# TODO need to index all projects with K/V pairs for easy search
#proj = list(Project.get_all())[0]
set_db_url(server="http://192.168.127.12:8182/graphs/mindboggle")
db = create_db('MindBoggleDB')
proj = create_project('MDD', db)
# get a list of the files
dataList = os.listdir('.')
for file in dataList:
subjectName = file.partition('_')[0]
subject = create_subject(subjectName, proj)
stats = read_stats(file)
set_fundus_stats(subject, stats) | en | 0.528339 | # set the project # TODO need to index all projects with K/V pairs for easy search #proj = list(Project.get_all())[0] # get a list of the files | 2.187466 | 2 |
temperature/Raw Python/temperature-solved-NoahBeckerman.py | NoahBeckerman/data-prework-labs | 1 | 6614673 | <reponame>NoahBeckerman/data-prework-labs
import statistics
# assign a variable to the list of temperatures
# 24 hourly readings in degrees Celsius; index 3 is a faulty 0 from the
# sensor and corresponds to the 03:00 reading (see the estimation step below).
temperatures_C = [33,66,65,0,59,60,62,64,70,76,80,81,80,83,90,79,61,53,50,49,53,48,45,39]
temperatures_F = []  # filled later with Fahrenheit conversions
high_temp = []  # filled later with readings >= 70 C
high_temp_hours = []  # reserved for the hours whose temperature is too high
Lowest_temp = min(temperatures_C)  # note: computed before the faulty 0 is replaced
Highest_temp = max(temperatures_C)
# Helper: arithmetic mean of a non-empty sequence of numbers.
def mean(x):
    """Return the arithmetic mean of the non-empty sequence ``x``."""
    total = sum(x)
    size = len(x)
    return total / size
# 1. Calculate the minimum of the list and print the value using print()
print("Lowest Temperature:\n{0}\n".format(Lowest_temp))
# 2. Calculate the maximum of the list and print the value using print()
print("Highest Temperature:\n{0}\n".format(Highest_temp))
# 3. Items in the list that are greater than 70ºC and print the result
print("High temperatures: ")
for temperature in temperatures_C: # for each number in list
if temperature >= 70: # if temp is over or equal to 70ºC
high_temp.append(temperature) # add that temp to a list
for value in high_temp: # print list
print(value, end=' ',)
print("\n")
# 4. Calculate the mean temperature throughout the day and print the result
print("Average Temperature:\n{0}\n".format(mean(temperatures_C)))
# 5.1 Solve the fault in the sensor by estimating a value
Estimated_Temp = (temperatures_C[2]+temperatures_C[4])/2 #List starts at 3:00 according to graph. and to find the estimated avrg, add all and divide by total.
print("Estimated Temp at {0} :\n{1}\n".format('3:00', Estimated_Temp))
# 5.2 Update of the estimated value at 03:00 on the list
print("Updated Temperatures: ")
temperatures_C[3] = Estimated_Temp # update list
for value in temperatures_C: # print list
print(value, end=' ')
print("\n")
# Bonus: convert the list of ºC to ºFarenheit
print("Temperatures in Farenheit: ")
for temp in temperatures_C:
temperatures_F.append((1.8 * temp + 32))# add to list
for value in temperatures_F: # print list
print(value, end=' ')
print("\n")
# Print True or False depending on whether you would change the cooling system or not
if (len(high_temp) > 4 or Highest_temp > 80 or mean(temperatures_C) > 65): # if there is more than 4 hours of overcooling or temp reached over 80, or the avarage temp is past 65 change it.
print("Cooling Status: WARNING!!! CHANGE SYSTEM!!!")
else:
print("Cooling Status: Normal")
print("\n")
# 1. We want the hours (not the temperatures) whose temperature exceeds 70ºC
print("Hours of overheating: ")
for i, t in enumerate(temperatures_C):# for each temp in array
if t>=70: #if temp is over or = to 70
high_temp_hours.append(i) # add to list
for value in high_temp_hours: # print list
print(value, end=' ')
print("\n")
# 2. Condition that those hours are more than 4 consecutive and consecutive, not simply the sum of the whole set. Is this condition met?
hours_overheated_boolean = [True if t>=70 else False for t in temperatures_C] #creates a boolean that acts as a function/array to check if temp is over or = to 70 and sets it to true or false
for i, boolean in enumerate(hours_overheated_boolean): #for each value in boolean loop
Overheat = False # each time it checks set the value to false
if hours_overheated_boolean[i] == True and hours_overheated_boolean[i-1] == True and hours_overheated_boolean[i-2] == True and hours_overheated_boolean[i-3] == True: # if all numbers in a span of 4 are set to true (indicating overheat for more than 4 hours at at a time) output a value to respond
Overheat = True
break
print("Overheating for more that {0} hours: {1}".format(4, Overheat))
print("\n")
# 3. Average of each of the lists (ºC and ºF). How they relate?
print("Average of ºC: {0}\nAverage of ºF: {1}".format(mean(temperatures_C), mean(temperatures_F)))
print("\n")
print("The mean of ºC: {0}\n - (Rounded: {1})\nThe mean of ºF: {1}".format((1.8 * mean(temperatures_C) + 32), mean(temperatures_F), round(1.8 * mean(temperatures_C) + 32)))
# 4. Standard deviation of each of the lists. How they relate?
print("Standard Deviation for ºC: {0}".format(statistics.pstdev(temperatures_C)))
# Using imported statistics library from python to get the standard deviation.
print("Standard Deviation for ºF: {0}".format(statistics.pstdev(temperatures_F)))
# Using imported statistics library from python to get the standard deviation.
print("\n")
#The Relation between them after you multiply ºC by '1.8' (converting to ºF) is the same.
print(" - ºF: {0}\n - ºC: {1}\n - Difference: {2}".format((statistics.pstdev(temperatures_F)), (statistics.pstdev(temperatures_C) * 1.8), (statistics.pstdev(temperatures_F) - (statistics.pstdev(temperatures_C) * 1.8))))
#3/22/19 | import statistics
# assign a variable to the list of temperatures
temperatures_C = [33,66,65,0,59,60,62,64,70,76,80,81,80,83,90,79,61,53,50,49,53,48,45,39]
temperatures_F = []
high_temp = []
high_temp_hours = []
Lowest_temp = min(temperatures_C)
Highest_temp = max(temperatures_C)
#Function for mean
def mean(x):
return sum(x)/(len(x))
# 1. Calculate the minimum of the list and print the value using print()
print("Lowest Temperature:\n{0}\n".format(Lowest_temp))
# 2. Calculate the maximum of the list and print the value using print()
print("Highest Temperature:\n{0}\n".format(Highest_temp))
# 3. Items in the list that are greater than 70ºC and print the result
print("High temperatures: ")
for temperature in temperatures_C: # for each number in list
if temperature >= 70: # if temp is over or equal to 70ºC
high_temp.append(temperature) # add that temp to a list
for value in high_temp: # print list
print(value, end=' ',)
print("\n")
# 4. Calculate the mean temperature throughout the day and print the result
print("Average Temperature:\n{0}\n".format(mean(temperatures_C)))
# 5.1 Solve the fault in the sensor by estimating a value
Estimated_Temp = (temperatures_C[2]+temperatures_C[4])/2 #List starts at 3:00 according to graph. and to find the estimated avrg, add all and divide by total.
print("Estimated Temp at {0} :\n{1}\n".format('3:00', Estimated_Temp))
# 5.2 Update of the estimated value at 03:00 on the list
print("Updated Temperatures: ")
temperatures_C[3] = Estimated_Temp # update list
for value in temperatures_C: # print list
print(value, end=' ')
print("\n")
# Bonus: convert the list of ºC to ºFarenheit
print("Temperatures in Farenheit: ")
for temp in temperatures_C:
temperatures_F.append((1.8 * temp + 32))# add to list
for value in temperatures_F: # print list
print(value, end=' ')
print("\n")
# Print True or False depending on whether you would change the cooling system or not
if (len(high_temp) > 4 or Highest_temp > 80 or mean(temperatures_C) > 65): # if there is more than 4 hours of overcooling or temp reached over 80, or the avarage temp is past 65 change it.
print("Cooling Status: WARNING!!! CHANGE SYSTEM!!!")
else:
print("Cooling Status: Normal")
print("\n")
# 1. We want the hours (not the temperatures) whose temperature exceeds 70ºC
print("Hours of overheating: ")
for i, t in enumerate(temperatures_C):# for each temp in array
if t>=70: #if temp is over or = to 70
high_temp_hours.append(i) # add to list
for value in high_temp_hours: # print list
print(value, end=' ')
print("\n")
# 2. Condition that those hours are more than 4 consecutive and consecutive, not simply the sum of the whole set. Is this condition met?
hours_overheated_boolean = [True if t>=70 else False for t in temperatures_C] #creates a boolean that acts as a function/array to check if temp is over or = to 70 and sets it to true or false
for i, boolean in enumerate(hours_overheated_boolean): #for each value in boolean loop
Overheat = False # each time it checks set the value to false
if hours_overheated_boolean[i] == True and hours_overheated_boolean[i-1] == True and hours_overheated_boolean[i-2] == True and hours_overheated_boolean[i-3] == True: # if all numbers in a span of 4 are set to true (indicating overheat for more than 4 hours at at a time) output a value to respond
Overheat = True
break
print("Overheating for more that {0} hours: {1}".format(4, Overheat))
print("\n")
# 3. Average of each of the lists (ºC and ºF). How they relate?
print("Average of ºC: {0}\nAverage of ºF: {1}".format(mean(temperatures_C), mean(temperatures_F)))
print("\n")
print("The mean of ºC: {0}\n - (Rounded: {1})\nThe mean of ºF: {1}".format((1.8 * mean(temperatures_C) + 32), mean(temperatures_F), round(1.8 * mean(temperatures_C) + 32)))
# 4. Standard deviation of each of the lists. How they relate?
print("Standard Deviation for ºC: {0}".format(statistics.pstdev(temperatures_C)))
# Using imported statistics library from python to get the standard deviation.
print("Standard Deviation for ºF: {0}".format(statistics.pstdev(temperatures_F)))
# Using imported statistics library from python to get the standard deviation.
print("\n")
#The Relation between them after you multiply ºC by '1.8' (converting to ºF) is the same.
print(" - ºF: {0}\n - ºC: {1}\n - Difference: {2}".format((statistics.pstdev(temperatures_F)), (statistics.pstdev(temperatures_C) * 1.8), (statistics.pstdev(temperatures_F) - (statistics.pstdev(temperatures_C) * 1.8))))
#3/22/19 | en | 0.861333 | # assign a variable to the list of temperatures #Function for mean # 1. Calculate the minimum of the list and print the value using print() # 2. Calculate the maximum of the list and print the value using print() # 3. Items in the list that are greater than 70ºC and print the result # for each number in list # if temp is over or equal to 70ºC # add that temp to a list # print list # 4. Calculate the mean temperature throughout the day and print the result # 5.1 Solve the fault in the sensor by estimating a value #List starts at 3:00 according to graph. and to find the estimated avrg, add all and divide by total. # 5.2 Update of the estimated value at 03:00 on the list # update list # print list # Bonus: convert the list of ºC to ºFarenheit # add to list # print list # Print True or False depending on whether you would change the cooling system or not # if there is more than 4 hours of overcooling or temp reached over 80, or the avarage temp is past 65 change it. # 1. We want the hours (not the temperatures) whose temperature exceeds 70ºC # for each temp in array #if temp is over or = to 70 # add to list # print list # 2. Condition that those hours are more than 4 consecutive and consecutive, not simply the sum of the whole set. Is this condition met? #creates a boolean that acts as a function/array to check if temp is over or = to 70 and sets it to true or false #for each value in boolean loop # each time it checks set the value to false # if all numbers in a span of 4 are set to true (indicating overheat for more than 4 hours at at a time) output a value to respond # 3. Average of each of the lists (ºC and ºF). How they relate? # 4. Standard deviation of each of the lists. How they relate? # Using imported statistics library from python to get the standard deviation. # Using imported statistics library from python to get the standard deviation. #The Relation between them after you multiply ºC by '1.8' (converting to ºF) is the same. 
#3/22/19 | 4.16375 | 4 |
aula14/exercicio1.py | ArseniumGX/bluemer-modulo1-python | 0 | 6614674 | <reponame>ArseniumGX/bluemer-modulo1-python
# 1. Faça um programa, com uma função que necessite de três argumentos, e que forneça a
# soma desses três argumentos.
def somaTres(values:list):
return sum(values)
numeros = [int(input('Value 1: ')), int(input('Value 2: ')), int(input('Value 3: '))]
print(somaTres(numeros))
| # 1. Faça um programa, com uma função que necessite de três argumentos, e que forneça a
# soma desses três argumentos.
def somaTres(values:list):
return sum(values)
numeros = [int(input('Value 1: ')), int(input('Value 2: ')), int(input('Value 3: '))]
print(somaTres(numeros)) | pt | 0.99747 | # 1. Faça um programa, com uma função que necessite de três argumentos, e que forneça a # soma desses três argumentos. | 4.10492 | 4 |
Python/crap.py | shujanpannag/Random_Programs | 0 | 6614675 | <gh_stars>0
def word(s,l):
a = []
for x in range((len(s)//l)+1):
a.append(s[x:x+l])
return a
s = []
for i in range(len('bbbbb')):
s.extend(word('bbbbb', i+1))
# s = set(s)
s = sorted(s)
s.pop(0)
print(s)
| def word(s,l):
a = []
for x in range((len(s)//l)+1):
a.append(s[x:x+l])
return a
s = []
for i in range(len('bbbbb')):
s.extend(word('bbbbb', i+1))
# s = set(s)
s = sorted(s)
s.pop(0)
print(s) | it | 0.481465 | # s = set(s) | 3.537061 | 4 |
handlers/connectionRequests.py | GrahamGoudeau/mcg-portal | 1 | 6614676 | <reponame>GrahamGoudeau/mcg-portal
class ConnectionRequestsHandler:
def __init__(self, db, logger):
self.db = db
self.logger = logger
def make_request(self, userID, requesteeID, message):
self.logger.info('User %s is creating request to connect with %s', userID, requesteeID)
self.db.create_request(userID, requesteeID, message)
def mark_resolved(self, connectionRequestId):
self.logger.info('Admin is resolving connection request')
self.db.resolveRequest(connectionRequestId)
def getAllRequests(self):
self.logger.info("Loading all connection requests")
return self.db.getAllConnectionRequests()
| class ConnectionRequestsHandler:
def __init__(self, db, logger):
self.db = db
self.logger = logger
def make_request(self, userID, requesteeID, message):
self.logger.info('User %s is creating request to connect with %s', userID, requesteeID)
self.db.create_request(userID, requesteeID, message)
def mark_resolved(self, connectionRequestId):
self.logger.info('Admin is resolving connection request')
self.db.resolveRequest(connectionRequestId)
def getAllRequests(self):
self.logger.info("Loading all connection requests")
return self.db.getAllConnectionRequests() | none | 1 | 2.688772 | 3 | |
bot/near.py | IgorFroehner/NearBrl_TwitterBot | 3 | 6614677 | from decouple import config
from requests import Session
from requests.exceptions import ConnectionError, Timeout, TooManyRedirects
import json
class Near:
def __init__(self):
self.crypto_symbol = 'NEAR'
self.currency = config('CURRENCY_TO_CONVERT')
def getData(self):
url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/quotes/latest'
parameters = {
'convert': self.currency,
'symbol': self.crypto_symbol
}
headers = {
'Accepts': 'application/json',
'X-CMC_PRO_API_KEY': config('CMC_API_KEY'),
}
session = Session()
session.headers.update(headers)
try:
response = session.get(url, params=parameters)
data = json.loads(response.text)
if data['status']['error_code'] != 0:
raise Exception('Error while retrieving data from the API')
else:
return {
'price': data['data'][self.crypto_symbol]['quote'][self.currency]['price'],
'percent_change_24h': data['data'][self.crypto_symbol]['quote'][self.currency]['percent_change_24h']
}
except (ConnectionError, Timeout, TooManyRedirects) as e:
raise Exception('Error while trying to get the price and percentage: ' + e.reason)
| from decouple import config
from requests import Session
from requests.exceptions import ConnectionError, Timeout, TooManyRedirects
import json
class Near:
def __init__(self):
self.crypto_symbol = 'NEAR'
self.currency = config('CURRENCY_TO_CONVERT')
def getData(self):
url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/quotes/latest'
parameters = {
'convert': self.currency,
'symbol': self.crypto_symbol
}
headers = {
'Accepts': 'application/json',
'X-CMC_PRO_API_KEY': config('CMC_API_KEY'),
}
session = Session()
session.headers.update(headers)
try:
response = session.get(url, params=parameters)
data = json.loads(response.text)
if data['status']['error_code'] != 0:
raise Exception('Error while retrieving data from the API')
else:
return {
'price': data['data'][self.crypto_symbol]['quote'][self.currency]['price'],
'percent_change_24h': data['data'][self.crypto_symbol]['quote'][self.currency]['percent_change_24h']
}
except (ConnectionError, Timeout, TooManyRedirects) as e:
raise Exception('Error while trying to get the price and percentage: ' + e.reason)
| none | 1 | 2.747519 | 3 | |
test/test_digester.py | flrt/ref-rpps-ne | 5 | 6614678 | import logging
import unittest
import digester
from easy_atom import helpers
class TestDigester(unittest.TestCase):
def setUp(self):
self.logger = logging.getLogger("utest")
def test_equal_data(self):
self.logger.info(" TEST test_equal_data")
d = digester.Digester()
dig1 = d.digest("test/files/PS_LibreAcces_Dipl_AutExerc_201807300827.txt")
self.logger.info(len(dig1))
dig2 = d.load_digest(
"test/files/PS_LibreAcces_Dipl_AutExerc_201807300827.txt.sha"
)
self.logger.info(len(dig2))
self.assertEqual(len(dig1), len(dig2))
if __name__ == "__main__":
loggers = helpers.stdout_logger(["utest", "digester"], logging.INFO)
unittest.main()
| import logging
import unittest
import digester
from easy_atom import helpers
class TestDigester(unittest.TestCase):
def setUp(self):
self.logger = logging.getLogger("utest")
def test_equal_data(self):
self.logger.info(" TEST test_equal_data")
d = digester.Digester()
dig1 = d.digest("test/files/PS_LibreAcces_Dipl_AutExerc_201807300827.txt")
self.logger.info(len(dig1))
dig2 = d.load_digest(
"test/files/PS_LibreAcces_Dipl_AutExerc_201807300827.txt.sha"
)
self.logger.info(len(dig2))
self.assertEqual(len(dig1), len(dig2))
if __name__ == "__main__":
loggers = helpers.stdout_logger(["utest", "digester"], logging.INFO)
unittest.main()
| none | 1 | 2.870964 | 3 | |
ValveAnnulusAnalysis/HeartValveLib/__init__.py | SlicerHeart/SlicerHeart | 48 | 6614679 | # For relative imports to work in Python 3.6
import os, sys; sys.path.append(os.path.dirname(os.path.realpath(__file__)))
from LeafletModel import *
from CoaptationModel import *
from PapillaryModel import *
from SmoothCurve import *
from ValveModel import *
from HeartValves import *
from ValveRoi import *
from Constants import * | # For relative imports to work in Python 3.6
import os, sys; sys.path.append(os.path.dirname(os.path.realpath(__file__)))
from LeafletModel import *
from CoaptationModel import *
from PapillaryModel import *
from SmoothCurve import *
from ValveModel import *
from HeartValves import *
from ValveRoi import *
from Constants import * | en | 0.755525 | # For relative imports to work in Python 3.6 | 1.473965 | 1 |
datacheck5_morningStar.py | SamhooXee/k | 0 | 6614680 |
def dataCheck_morningStar(datalist):
m = datalist[0]
m1 = datalist[1]
m2 = datalist[2]
m3 = datalist[3]
m4 = datalist[4]
# if m['color']=='red' and m1['color']=='green' and m2['color']=='green':
if m2['instance_low'] > m1['instance_high'] and m2['instance_low'] < m['instance_high']:
if m['instance_low'] > m1['instance_high']:
if m2['instance_low'] < m3['instance_low'] and m['color'] == 'red':
if m3['instance_low'] < m4['instance_low']:
return (True, 'BOTTOM,%f,' % (m['Close']))
return (False, 'NULL') |
def dataCheck_morningStar(datalist):
m = datalist[0]
m1 = datalist[1]
m2 = datalist[2]
m3 = datalist[3]
m4 = datalist[4]
# if m['color']=='red' and m1['color']=='green' and m2['color']=='green':
if m2['instance_low'] > m1['instance_high'] and m2['instance_low'] < m['instance_high']:
if m['instance_low'] > m1['instance_high']:
if m2['instance_low'] < m3['instance_low'] and m['color'] == 'red':
if m3['instance_low'] < m4['instance_low']:
return (True, 'BOTTOM,%f,' % (m['Close']))
return (False, 'NULL') | en | 0.276601 | # if m['color']=='red' and m1['color']=='green' and m2['color']=='green': | 3.004167 | 3 |
tx_salaries/utils/transformers/ut_brownsville.py | texastribune/tx_salaries | 6 | 6614681 | from datetime import date
from . import base
from . import mixins
# http://raw.texastribune.org.s3.amazonaws.com/ut_brownsville/salaries/2014-01/PIR%20662.xlsx
class TransformedRecord(mixins.GenericCompensationMixin,
mixins.GenericIdentifierMixin, mixins.GenericPersonMixin,
mixins.MembershipMixin, mixins.OrganizationMixin, mixins.PostMixin,
mixins.RaceMixin, mixins.LinkMixin, base.BaseTransformedRecord):
MAP = {
'last_name': '<NAME>',
'first_name': '<NAME>',
'middle_name': '<NAME>',
'department': 'Department',
'job_title': 'Title',
'hire_date': 'Hire Date',
'compensation': 'Annualized',
'race': 'Race',
'gender': 'Gender'
}
NAME_FIELDS = ('first_name', 'last_name', )
ORGANIZATION_NAME = 'University of Texas at Brownsville'
ORGANIZATION_CLASSIFICATION = 'University'
# TODO not given on spreadsheet, but they appear to give part time
compensation_type = 'FT'
description = 'Annual compensation'
DATE_PROVIDED = date(2014, 1, 24)
URL = 'http://raw.texastribune.org.s3.amazonaws.com/ut_brownsville/salaries/2014-01/PIR%20662.xlsx'
@property
def is_valid(self):
# Adjust to return False on invalid fields. For example:
return self.last_name.strip() != ''
@property
def identifier(self):
"""
Identifier for UT Brownsville
"""
excluded = [self.department_key, self.job_title_key,
self.hire_date_key, self.compensation_key]
return {
'scheme': 'tx_salaries_hash',
'identifier': base.create_hash_for_record(self.data,
exclude=excluded)
}
transform = base.transform_factory(TransformedRecord)
| from datetime import date
from . import base
from . import mixins
# http://raw.texastribune.org.s3.amazonaws.com/ut_brownsville/salaries/2014-01/PIR%20662.xlsx
class TransformedRecord(mixins.GenericCompensationMixin,
mixins.GenericIdentifierMixin, mixins.GenericPersonMixin,
mixins.MembershipMixin, mixins.OrganizationMixin, mixins.PostMixin,
mixins.RaceMixin, mixins.LinkMixin, base.BaseTransformedRecord):
MAP = {
'last_name': '<NAME>',
'first_name': '<NAME>',
'middle_name': '<NAME>',
'department': 'Department',
'job_title': 'Title',
'hire_date': 'Hire Date',
'compensation': 'Annualized',
'race': 'Race',
'gender': 'Gender'
}
NAME_FIELDS = ('first_name', 'last_name', )
ORGANIZATION_NAME = 'University of Texas at Brownsville'
ORGANIZATION_CLASSIFICATION = 'University'
# TODO not given on spreadsheet, but they appear to give part time
compensation_type = 'FT'
description = 'Annual compensation'
DATE_PROVIDED = date(2014, 1, 24)
URL = 'http://raw.texastribune.org.s3.amazonaws.com/ut_brownsville/salaries/2014-01/PIR%20662.xlsx'
@property
def is_valid(self):
# Adjust to return False on invalid fields. For example:
return self.last_name.strip() != ''
@property
def identifier(self):
"""
Identifier for UT Brownsville
"""
excluded = [self.department_key, self.job_title_key,
self.hire_date_key, self.compensation_key]
return {
'scheme': 'tx_salaries_hash',
'identifier': base.create_hash_for_record(self.data,
exclude=excluded)
}
transform = base.transform_factory(TransformedRecord)
| en | 0.762383 | # http://raw.texastribune.org.s3.amazonaws.com/ut_brownsville/salaries/2014-01/PIR%20662.xlsx # TODO not given on spreadsheet, but they appear to give part time # Adjust to return False on invalid fields. For example: Identifier for UT Brownsville | 2.547866 | 3 |
neural_network/load_dataset.py | carlatt/Cart-Pole-NN | 0 | 6614682 | import numpy as np
import pandas as pd
def load_data_k_plus(u_file, y_file, i):
x, y = load_data(u_file, y_file)
x = x[:len(x) - i]
y = y[i:]
return x, y
def load_data(u_file, y_file):
u = pd.read_csv(u_file)
y = pd.read_csv(y_file)
u = u.values
y = y.values
y.transpose()
u = np.reshape(u, (1, -1)).transpose()
y = y.transpose()
y = np.reshape(y, (4, -1)).transpose()
x = np.concatenate((u, y), axis=1)
x = np.delete(x, len(x) - 1, axis=0)
y = np.delete(y, 0, axis=0)
return x, y
| import numpy as np
import pandas as pd
def load_data_k_plus(u_file, y_file, i):
x, y = load_data(u_file, y_file)
x = x[:len(x) - i]
y = y[i:]
return x, y
def load_data(u_file, y_file):
u = pd.read_csv(u_file)
y = pd.read_csv(y_file)
u = u.values
y = y.values
y.transpose()
u = np.reshape(u, (1, -1)).transpose()
y = y.transpose()
y = np.reshape(y, (4, -1)).transpose()
x = np.concatenate((u, y), axis=1)
x = np.delete(x, len(x) - 1, axis=0)
y = np.delete(y, 0, axis=0)
return x, y
| none | 1 | 2.874939 | 3 | |
tekstovni_vmesnik.py | abramlaura/Vislice1 | 0 | 6614683 | def izpis_igre(igra):
return """===================================================================
{geslo}
Napacne crke : {napacne_crke}
Ugibaš še : {število} -krat.
==============================================""".format(
geslo=igra.pravilni_del_gesla(),
crke=igra.nepravilni_ugibi(),
stevilo=model.STEVILO_DOVOLJENIH_NAPAK - igra.stevilo_napak())
def izpis_zmaga(igra):
return 'Čestitam, uganil/a si geslo {}.'.format(igra)
def izpis_poraza(igra):
return 'Več sreče prihodnjič.'
def zahtevaj_vnost():
return input('Ugibaj:')
def pozeni_vmesnik():
igra = model.nova_igra() #poklicemo funkcijo iz datoteke model
while True: #neskoncna zanka
print(izpis_igre(igra))
crka = zahtevaj_vnos()
stanje =igra.ugibaj(crka)
if stanje == model.ZMAGA:
print(izpis_zmage(igra))
break
elif stanje == model.PORAZ:
print(izpis_poraza(igra))
break
pozeni_vmesnik()
| def izpis_igre(igra):
return """===================================================================
{geslo}
Napacne crke : {napacne_crke}
Ugibaš še : {število} -krat.
==============================================""".format(
geslo=igra.pravilni_del_gesla(),
crke=igra.nepravilni_ugibi(),
stevilo=model.STEVILO_DOVOLJENIH_NAPAK - igra.stevilo_napak())
def izpis_zmaga(igra):
return 'Čestitam, uganil/a si geslo {}.'.format(igra)
def izpis_poraza(igra):
return 'Več sreče prihodnjič.'
def zahtevaj_vnost():
return input('Ugibaj:')
def pozeni_vmesnik():
igra = model.nova_igra() #poklicemo funkcijo iz datoteke model
while True: #neskoncna zanka
print(izpis_igre(igra))
crka = zahtevaj_vnos()
stanje =igra.ugibaj(crka)
if stanje == model.ZMAGA:
print(izpis_zmage(igra))
break
elif stanje == model.PORAZ:
print(izpis_poraza(igra))
break
pozeni_vmesnik()
| fr | 0.21457 | =================================================================== {geslo} Napacne crke : {napacne_crke} Ugibaš še : {število} -krat. ============================================== #poklicemo funkcijo iz datoteke model #neskoncna zanka | 2.668477 | 3 |
taxes/views.py | wsoliveira/borsocontrole | 0 | 6614684 | <reponame>wsoliveira/borsocontrole
from django.shortcuts import render, redirect, get_object_or_404, get_list_or_404
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from decouple import config
from .models import bc_tax, bc_tax_negotiation
from .forms import TaxForm
from administrators.models import bc_admin_type_negotiation
# Create your views here.
def genericCalculates(type_negotiation, id_negotiation, gross_total_price, type_investiment):
"""
Se type_negotiation == SELL
Se existir outra negociacao de compra no mesmo dia para o (company_code e type_investiment=Stock or Options):
Se existir rentabilidade calcular aliquota sob a rentabilidade.
como vou conseguir o calculo de rentabilidade ?
"""
lst_taxes_names = bc_admin_type_negotiation.objects.filter(name=type_negotiation)
sum_results = 0
for name in lst_taxes_names:
try:
lst_taxs = bc_tax.objects.filter(bc_admin_type_negotiation=name, is_active=True, bc_admin_type_investiment__name=type_investiment)
except ObjectDoesNotExist:
lst_taxs = []
for tax in lst_taxs:
discounted_price = (float(gross_total_price) * (float(tax.value) / 100))
bc_tax_negotiation.objects.update_or_create(
bc_tax=tax,
bc_negotiation=id_negotiation,
defaults={
'discounted_price':float(discounted_price)
}
)
sum_results += discounted_price
return float(sum_results)
@login_required
def tax_list(request):
name = request.GET.get("search", None)
page = request.GET.get('page', 1)
if name:
tb_values = bc_tax.objects.filter(name__icontains=name)
else:
tb_values = bc_tax.objects.all()
paginator = Paginator(tb_values, config('LIMIT_PAGINATION',default=15,cast=int))
try:
tb_values = paginator.page(page)
except PageNotAnInteger:
tb_values = paginator.page(1)
except EmptyPage:
tb_values = paginator.page(paginator.num_pages)
return render(request, 'tax.html', {'tb_values': tb_values})
@login_required
def tax_new(request):
form = TaxForm(request.POST or None, request.FILES or None)
if form.is_valid():
is_active = False
if request.POST.get('is_active') == "on":
is_active = True
tb_values = bc_tax.objects.create(
name=request.POST.get('name'),
value = request.POST.get('value'),
description = request.POST.get('description'),
is_active = is_active,
)
tb_values.save()
return redirect('tax_list')
return render(request, 'tax_form.html', {'form': form})
@login_required
def tax_update(request, id):
tb_values = get_object_or_404(bc_tax, pk=id)
form = TaxForm(request.POST or None, request.FILES or None, instance=tb_values)
if form.is_valid():
form.save()
return redirect('tax_list')
return render(request, 'tax_form.html', {'form': form})
@login_required
def tax_delete(request, id):
tb_values = get_object_or_404(bc_tax, pk=id)
if request.method == "POST":
tb_values.delete()
return redirect('tax_list')
return render(request, 'tax_delete_confirm.html', {'tb_values': tb_values})
@login_required
def tax_negotiation_list(request, id_negotiation):
tb_values = bc_tax_negotiation.objects.filter(bc_negotiation__id=id_negotiation)
return render(request, 'tax_negotiation.html', {'tb_values': tb_values}) | from django.shortcuts import render, redirect, get_object_or_404, get_list_or_404
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from decouple import config
from .models import bc_tax, bc_tax_negotiation
from .forms import TaxForm
from administrators.models import bc_admin_type_negotiation
# Create your views here.
def genericCalculates(type_negotiation, id_negotiation, gross_total_price, type_investiment):
"""
Se type_negotiation == SELL
Se existir outra negociacao de compra no mesmo dia para o (company_code e type_investiment=Stock or Options):
Se existir rentabilidade calcular aliquota sob a rentabilidade.
como vou conseguir o calculo de rentabilidade ?
"""
lst_taxes_names = bc_admin_type_negotiation.objects.filter(name=type_negotiation)
sum_results = 0
for name in lst_taxes_names:
try:
lst_taxs = bc_tax.objects.filter(bc_admin_type_negotiation=name, is_active=True, bc_admin_type_investiment__name=type_investiment)
except ObjectDoesNotExist:
lst_taxs = []
for tax in lst_taxs:
discounted_price = (float(gross_total_price) * (float(tax.value) / 100))
bc_tax_negotiation.objects.update_or_create(
bc_tax=tax,
bc_negotiation=id_negotiation,
defaults={
'discounted_price':float(discounted_price)
}
)
sum_results += discounted_price
return float(sum_results)
@login_required
def tax_list(request):
name = request.GET.get("search", None)
page = request.GET.get('page', 1)
if name:
tb_values = bc_tax.objects.filter(name__icontains=name)
else:
tb_values = bc_tax.objects.all()
paginator = Paginator(tb_values, config('LIMIT_PAGINATION',default=15,cast=int))
try:
tb_values = paginator.page(page)
except PageNotAnInteger:
tb_values = paginator.page(1)
except EmptyPage:
tb_values = paginator.page(paginator.num_pages)
return render(request, 'tax.html', {'tb_values': tb_values})
@login_required
def tax_new(request):
form = TaxForm(request.POST or None, request.FILES or None)
if form.is_valid():
is_active = False
if request.POST.get('is_active') == "on":
is_active = True
tb_values = bc_tax.objects.create(
name=request.POST.get('name'),
value = request.POST.get('value'),
description = request.POST.get('description'),
is_active = is_active,
)
tb_values.save()
return redirect('tax_list')
return render(request, 'tax_form.html', {'form': form})
@login_required
def tax_update(request, id):
tb_values = get_object_or_404(bc_tax, pk=id)
form = TaxForm(request.POST or None, request.FILES or None, instance=tb_values)
if form.is_valid():
form.save()
return redirect('tax_list')
return render(request, 'tax_form.html', {'form': form})
@login_required
def tax_delete(request, id):
tb_values = get_object_or_404(bc_tax, pk=id)
if request.method == "POST":
tb_values.delete()
return redirect('tax_list')
return render(request, 'tax_delete_confirm.html', {'tb_values': tb_values})
@login_required
def tax_negotiation_list(request, id_negotiation):
tb_values = bc_tax_negotiation.objects.filter(bc_negotiation__id=id_negotiation)
return render(request, 'tax_negotiation.html', {'tb_values': tb_values}) | pt | 0.75699 | # Create your views here. Se type_negotiation == SELL Se existir outra negociacao de compra no mesmo dia para o (company_code e type_investiment=Stock or Options): Se existir rentabilidade calcular aliquota sob a rentabilidade. como vou conseguir o calculo de rentabilidade ? | 2.085634 | 2 |
test cases/common/69 configure file in custom target/src/mycompiler.py | kira78/meson | 4,047 | 6614685 | <filename>test cases/common/69 configure file in custom target/src/mycompiler.py<gh_stars>1000+
#!/usr/bin/env python3
# Toy "compiler" used by a build-system test case: reads the configured
# input file (argv[1]) and writes the output file (argv[2]).
import sys
# The configured input is expected to contain '42' on its first line;
# a mismatch is only reported, deliberately not treated as fatal.
with open(sys.argv[1]) as ifile:
    if ifile.readline().strip() != '42':
        print('Incorrect input')
# The "compiled" artifact is always produced, regardless of the check.
with open(sys.argv[2], 'w') as ofile:
    ofile.write('Success\n')
| <filename>test cases/common/69 configure file in custom target/src/mycompiler.py<gh_stars>1000+
#!/usr/bin/env python3
# Toy "compiler" used by a build-system test case: reads the configured
# input file (argv[1]) and writes the output file (argv[2]).
import sys
# The configured input is expected to contain '42' on its first line;
# a mismatch is only reported, deliberately not treated as fatal.
with open(sys.argv[1]) as ifile:
    if ifile.readline().strip() != '42':
        print('Incorrect input')
# The "compiled" artifact is always produced, regardless of the check.
with open(sys.argv[2], 'w') as ofile:
    ofile.write('Success\n')
| fr | 0.221828 | #!/usr/bin/env python3 | 2.345969 | 2 |
multi_affine/utils.py | wapbastiaansen/multi-atlas-seg-reg | 0 | 6614686 | import numpy as np
import glob
import os
import nibabel as nib
from shutil import copyfile
def load_multi_atlas(atlas_dir, atlas_list, output_atlas, output_age, output_seg):
    """
    Loads for given directory all atlas files, their segmentations and
    corresponding GA (gestational age) in the same order.
    Args:
        atlas_dir: directory with all atlas images
        atlas_list: list of predictnr of atlases to include
        output_atlas: if True, load and stack the atlas volumes
        output_age: if True, return the GA of each atlas
        output_seg: if True, load and stack the segmentations
    Returns:
        atlasses: numpy array containing all atlases (empty list if not requested)
        segs: numpy array containing all segmentations (empty list if not requested)
        Age: list containing all GA
        atlas_files: list of filenames, sorted by GA
        A_t: ground truth top landmark
        A_b: ground truth bottom landmark
    """
    # Gather every .npz atlas in the directory, then keep only the
    # requested subjects, sorted by gestational age.
    atlas_files = glob.glob(os.path.join(atlas_dir, '*.npz'))
    atlas_files, Age = sort_and_select_atlas(atlas_files, atlas_list, output_age)
    i=0
    for file in atlas_files:
        i+=1
        if output_atlas == True:
            # Add leading batch and trailing channel axes: (1, ..., 1).
            atlas_vol = np.load(file)['vol'][np.newaxis, ..., np.newaxis]
            atlas_vol=atlas_vol.astype('float32')
            # First volume starts the stack; later ones are concatenated
            # along the batch axis.
            if i==1:
                atlasses=atlas_vol
            else:
                atlasses=np.concatenate([atlasses,atlas_vol],axis=0)
        else:
            atlasses=[]
        if output_seg == True:
            # Segmentations live under <atlas_dir>/seg/ as
            # seg_<predictnr>.nii.gz, matching the atlas file name.
            seg_vol = nib.load(atlas_dir+'/seg/seg_'+file.split(atlas_dir+'/atlas_')[1].split('.npz')[0]+'.nii.gz').get_fdata()[np.newaxis,...,np.newaxis]
            if i==1:
                segs=seg_vol
            else:
                segs=np.concatenate([segs,seg_vol],axis=0)
        else:
            segs=[]
    # Ground-truth landmarks are shared across the whole atlas set.
    A_t = np.load(atlas_dir+'/landmark/ground_truth_landmark.npz')['A_t']
    A_b = np.load(atlas_dir+'/landmark/ground_truth_landmark.npz')['A_b']
    return atlasses, segs, Age, atlas_files, A_t, A_b
def select_on_GA(vol_names, week_nr):
    """Return the file names whose ultrasound tag matches *week_nr*.

    Args:
        vol_names: iterable of file-name strings.
        week_nr: the week number (string) we wish to select.
    Returns:
        list of the names containing the ``_US_<week_nr>`` marker.
    """
    marker = '_US_' + week_nr
    return [name for name in vol_names if marker in name]
def summary_experiment(directory, parameter_values, parameter_names):
    """Write a ``summary_experiment.txt`` file describing an experiment.

    Args:
        directory: directory where to save the summary.
        parameter_values: values of the parameters used for the experiment.
        parameter_names: names of the parameters, aligned with the values.
    Raises:
        AssertionError: if names and values differ in length.

    The file contains one ``<name>: <value>`` line per parameter.
    """
    assert len(parameter_values) == len(parameter_names)
    # Pair names with values directly (same mapping the old
    # create_dictionairy helper produced).
    params = dict(zip(parameter_names, parameter_values))
    # The original implementation leaked the file handle; a context
    # manager guarantees the summary is flushed and closed.
    with open(os.path.join(directory, 'summary_experiment.txt'), 'w') as text_file:
        for name, value in params.items():
            text_file.write(str(name) + ': ' + str(value) + '\n')
def create_dictionairy(variables, names):
    """Build a dictionary mapping each name to its matching variable.

    Keys come from ``names``, values from ``variables``, paired by index.
    """
    return {names[i]: variables[i] for i in range(len(variables))}
def get_predict_nr(file_ext):
    """Extract the 5-character predict number from a file path.

    Plain file names start with the number; atlas files carry it right
    after the ``atlas_`` prefix.
    """
    base = os.path.basename(file_ext)
    if 'atlas' in base:
        return base.split('atlas_')[1][:5]
    return base[:5]
def sort_and_select_atlas(atlas_files, atlas_list, output_age):
    """
    Function to sort list of atlas files based on GA and select based on atlas_list.
    Args:
        atlas_files: list of all available atlases
        atlas_list: list of predict numbers we will use
        output_age: bool variable if we will output the age
    Returns:
        atlas_files: list of all selected atlases, sorted based on GA
        Age: (n,1) np array with gestational ages, sorted (empty list when
             output_age is False).
    """
    Age = []
    select_files = []
    # Keep only the requested subjects, reading each one's GA from the
    # .npz file as we go.
    for file in atlas_files:
        predictnr = get_predict_nr(file)
        if predictnr in atlas_list:
            age=np.load(file)['GA']
            Age.append(int(age))
            select_files.append(file)
    # Sort files and ages together, ascending by GA.
    sort_index=np.argsort(Age)
    Age = np.take_along_axis(np.array(Age), sort_index, axis=0)
    Age = Age.reshape((len(Age),1))
    atlas_files = list(np.take_along_axis(np.array(select_files), sort_index, axis=0))
    if output_age == False:
        # Caller did not ask for ages; return an empty list instead.
        Age = []
    return atlas_files, Age
def copy_anno_files(old_dir, new_dir):
    """Copy every ``*_annotation.npz`` file from *old_dir* into *new_dir*."""
    for entry in os.listdir(old_dir):
        if '_annotation.npz' not in entry:
            continue
        copyfile(old_dir + '/' + entry, new_dir + '/' + entry)
| import numpy as np
import glob
import os
import nibabel as nib
from shutil import copyfile
def load_multi_atlas(atlas_dir, atlas_list, output_atlas, output_age, output_seg):
"""
Loads for given directory all atlas files, their segmentations and
corresponding GA in the same order.
Args:
atlas_dir: directory with all atlas images
atlas_list: list of predictnr of atlases to include
output_atlas: output atlas images
output_age: output GA
output_seg: output segmentations
Returns:
atlasses: numpy array containing all atlases
Age: list containing all GA
segs: numpy array containing all segmenations
atlas_files: list of filenames
A_t: ground truth top landmark
A_b: ground truth bottom landmark
"""
atlas_files = glob.glob(os.path.join(atlas_dir, '*.npz'))
atlas_files, Age = sort_and_select_atlas(atlas_files, atlas_list, output_age)
i=0
for file in atlas_files:
i+=1
if output_atlas == True:
atlas_vol = np.load(file)['vol'][np.newaxis, ..., np.newaxis]
atlas_vol=atlas_vol.astype('float32')
if i==1:
atlasses=atlas_vol
else:
atlasses=np.concatenate([atlasses,atlas_vol],axis=0)
else:
atlasses=[]
if output_seg == True:
seg_vol = nib.load(atlas_dir+'/seg/seg_'+file.split(atlas_dir+'/atlas_')[1].split('.npz')[0]+'.nii.gz').get_fdata()[np.newaxis,...,np.newaxis]
if i==1:
segs=seg_vol
else:
segs=np.concatenate([segs,seg_vol],axis=0)
else:
segs=[]
A_t = np.load(atlas_dir+'/landmark/ground_truth_landmark.npz')['A_t']
A_b = np.load(atlas_dir+'/landmark/ground_truth_landmark.npz')['A_b']
return atlasses, segs, Age, atlas_files, A_t, A_b
def select_on_GA(vol_names, week_nr):
    """Return the file names whose ultrasound tag matches *week_nr*.

    Args:
        vol_names: iterable of file-name strings.
        week_nr: the week number (string) we wish to select.
    Returns:
        list of the names containing the ``_US_<week_nr>`` marker.
    """
    marker = '_US_' + week_nr
    return [name for name in vol_names if marker in name]
def summary_experiment(directory, parameter_values, parameter_names):
    """Write a ``summary_experiment.txt`` file describing an experiment.

    Args:
        directory: directory where to save the summary.
        parameter_values: values of the parameters used for the experiment.
        parameter_names: names of the parameters, aligned with the values.
    Raises:
        AssertionError: if names and values differ in length.

    The file contains one ``<name>: <value>`` line per parameter.
    """
    assert len(parameter_values) == len(parameter_names)
    # Pair names with values directly (same mapping the old
    # create_dictionairy helper produced).
    params = dict(zip(parameter_names, parameter_values))
    # The original implementation leaked the file handle; a context
    # manager guarantees the summary is flushed and closed.
    with open(os.path.join(directory, 'summary_experiment.txt'), 'w') as text_file:
        for name, value in params.items():
            text_file.write(str(name) + ': ' + str(value) + '\n')
def create_dictionairy(variables, names):
    """Build a dictionary mapping each name to its matching variable.

    Keys come from ``names``, values from ``variables``, paired by index.
    """
    return {names[i]: variables[i] for i in range(len(variables))}
def get_predict_nr(file_ext):
    """Extract the 5-character predict number from a file path.

    Plain file names start with the number; atlas files carry it right
    after the ``atlas_`` prefix.
    """
    base = os.path.basename(file_ext)
    if 'atlas' in base:
        return base.split('atlas_')[1][:5]
    return base[:5]
def sort_and_select_atlas(atlas_files, atlas_list, output_age):
"""
Function to sort list of atlas files based on GA and select based on atlas_list.
Args:
atlas_files: list of all available atlases
atlas_list: list of predict numbers we will use
output_age: bool variable if we will output the age
Returns:
atlas_files: list of all select atlases that is sorted based on GA
Age: (n,1) np array with gestational ages, sorted.
"""
Age = []
select_files = []
for file in atlas_files:
predictnr = get_predict_nr(file)
if predictnr in atlas_list:
age=np.load(file)['GA']
Age.append(int(age))
select_files.append(file)
sort_index=np.argsort(Age)
Age = np.take_along_axis(np.array(Age), sort_index, axis=0)
Age = Age.reshape((len(Age),1))
atlas_files = list(np.take_along_axis(np.array(select_files), sort_index, axis=0))
if output_age == False:
Age = []
return atlas_files, Age
def copy_anno_files(old_dir, new_dir):
    """Copy every ``*_annotation.npz`` file from *old_dir* into *new_dir*."""
    for entry in os.listdir(old_dir):
        if '_annotation.npz' not in entry:
            continue
        copyfile(old_dir + '/' + entry, new_dir + '/' + entry)
| en | 0.671699 | Loads for given directory all atlas files, their segmentations and corresponding GA in the same order. Args: atlas_dir: directory with all atlas images atlas_list: list of predictnr of atlases to include output_atlas: output atlas images output_age: output GA output_seg: output segmentations Returns: atlasses: numpy array containing all atlases Age: list containing all GA segs: numpy array containing all segmenations atlas_files: list of filenames A_t: ground truth top landmark A_b: ground truth bottom landmark Selects from list of files the files with the right week number. Args: vol_names: list with all files week_nr: the week number we wish to select Returns: matching: list with selected images Function that creates a text file with information about an experiment. Args: directory: directory where to save the summary parameter_values: values of the parameters used for the experiment parameter_names: names of the parameters summarized Returns: summary_experiment.txt file with format: parameter_name[i]: parameter_value[i] *new_line* Function to create a dictionary with as keys: names and as values: variables. Function to get the predictnumber out of a file name. Function to sort list of atlas files based on GA and select based on atlas_list. Args: atlas_files: list of all available atlases atlas_list: list of predict numbers we will use output_age: bool variable if we will output the age Returns: atlas_files: list of all select atlases that is sorted based on GA Age: (n,1) np array with gestational ages, sorted. Function to copy annotation files from old_dir to new_dir | 2.560976 | 3 |
array/height_checker.py | elenaborisova/LeetCode-Solutions | 0 | 6614687 | <reponame>elenaborisova/LeetCode-Solutions
def height_checker(heights):
    """Count the positions where *heights* differs from its sorted order."""
    return sum(
        1
        for actual, expected in zip(heights, sorted(heights))
        if actual != expected
    )
# Sample runs (expected output: 3, 5, 0).
print(height_checker([1, 1, 4, 2, 1, 3]))
print(height_checker([5, 1, 2, 3, 4]))
print(height_checker([1, 2, 3, 4, 5]))
def height_checker(heights):
    """Count the positions where *heights* differs from its sorted order."""
    return sum(
        1
        for actual, expected in zip(heights, sorted(heights))
        if actual != expected
    )
print(height_checker([1, 1, 4, 2, 1, 3]))
print(height_checker([5, 1, 2, 3, 4]))
print(height_checker([1, 2, 3, 4, 5])) | none | 1 | 3.781784 | 4 | |
scripts/parse_dad.py | malaterre/dicom-private-dicts | 6 | 6614688 | #!/usr/bin/env python
""" parse """
# $ ./parse_dad.py re/pms/merge119_120.dad re/pms/output_016.dad
import sys,re,json,string
from collections import defaultdict
# http://stackoverflow.com/questions/3728655/python-titlecase-a-string-with-exceptions
# @(#)EVMLegacy.dad
def parse_dad_file(filename):
    """Parse a .dad dictionary file into a list of 4-element records.

    Each record block looks like::

        NAME {
          <tag>
          dicomVR = <vr>
          dicomVM = <vm>
        }

    ``/* ... */`` comment blocks and blank lines are skipped.

    Args:
        filename: path of the .dad file to read.
    Returns:
        list of ``[name, tag, vr, vm]`` lists, in file order.
    """
    array = []
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(filename, 'r') as aFile:
        lineIter = iter(aFile)
        for linews in lineIter:
            line = linews.strip()
            if not line:
                continue
            if line.startswith("/*"):
                # Skip everything up to the closing comment marker.
                for comws in lineIter:
                    if comws.strip().startswith("*/"):
                        break
            else:
                # Collect the block: header line plus body lines until '}'.
                buf = [line]
                for piimws in lineIter:
                    piim = piimws.strip()
                    if piim.startswith("}"):
                        break
                    buf.append(piim)
                # A record always holds: name, tag, dicomVR, dicomVM.
                assert len(buf) == 4
                clean = [None] * 4
                clean[0] = buf[0].split('{')[0].strip()   # name (before '{')
                clean[1] = buf[1].strip()                 # tag line
                clean[2] = buf[2].split('=')[1].strip()   # dicomVR value
                clean[3] = buf[3].split('=')[1].strip()   # dicomVM value
                array.append(clean)
    return array
if __name__ == "__main__":
filename = sys.argv[1] # dict
filename2 = sys.argv[2]
res = None
with open(filename,'r') as f:
my = re.compile(r'^group = (.+) {\r\n reservation = (.+)\r\n recognition = "(.+)"\r\n}', re.MULTILINE)
#content = f.readlines()
content = f.read()
#print content
res = my.findall( content )
#print res
#print len(res)
#print res
#print len(res)
#for it in res:
# print 'group = %s {\n reservation = %s\n recognition = "%s"\n}' % it
md = defaultdict(list)
for it in res:
# md[1].append('a')
group, elem, name = it
assert elem[0:2] == '00'
key = "%s,%s" % (group, elem[2:4])
#print key
md[key].append( name )
#print md
#print len(md) # seems to be at least one duplicate !
"""
res2 = None
with open(filename2,'r') as f:
my = re.compile(r'^(.+) {\r\n (.+)\r\n dicomVR = (.+)\r\n dicomVM = (.+)\r\n}', re.MULTILINE)
content = f.read()
res2 = my.findall( content )
#print res2
"""
res2 = parse_dad_file(filename2)
#print len(res2)
#print res2[20]
array = []
for it in res2:
name, tag, vr, vm = it
key = tag[:-2]
#print key
creators = md[ key ]
#print name
assert name.startswith( 'DICOM_' ) or name.startswith( 'SPI_' ) or name.startswith( 'ICS_' ) or name.startswith( 'VOL_' ) or name.startswith( 'PIIM_' )
vnames = name.split('_')
vclean = [string.capwords(it) for it in vnames]
if name.startswith( 'DICOM_' ):
gr,ele = tag.split(',')
vgr = int( '0x%s' % gr, 16)
if vgr % 2 == 0:
assert not creators
continue
if not creators:
#print name
assert "RESERVATION_OF_GROUP" in name or "LENGTH_OF_GROUP" in name
continue
# private attribute
assert creators
clean = " ".join(vclean[1:])
elif name.startswith( 'SPI_' ) or name.startswith( 'ICS_' ):
if not creators:
#print tag, name
assert "RESERVATION_OF_GROUP" in name or "LENGTH_OF_GROUP" in name
continue
clean = " ".join(vclean[1:])
elif name.startswith('VOL_'):
assert creators
clean = " ".join(vclean)
elif name.startswith('PIIM_'):
print tag
assert creators
clean = " ".join(vclean[1:])
else:
assert False
el = {}
for creator in creators:
el[ 'owner' ] = creator
el[ 'name' ] = clean
el[ 'keyword' ] = name
el[ 'group' ] = tag[:4]
el[ 'element' ] = "xx%s" % tag[7:]
el[ 'vr' ] = vr
el[ 'vm' ] = vm
array.append( el )
#print array
#for it in array:
# #print it
# if it['group' ] == '2001' or it['group' ] == '2005':
# #print it
# print '(%(group)s,%(element)s)\t%(vr)s\t%(keyword)s\t%(vm)s' % it
print json.dumps(array, sort_keys=True, indent=4)
| #!/usr/bin/env python
""" parse """
# $ ./parse_dad.py re/pms/merge119_120.dad re/pms/output_016.dad
import sys,re,json,string
from collections import defaultdict
# http://stackoverflow.com/questions/3728655/python-titlecase-a-string-with-exceptions
# @(#)EVMLegacy.dad
def parse_dad_file(filename):
    """Parse a .dad dictionary file into a list of 4-element records.

    Each record block looks like::

        NAME {
          <tag>
          dicomVR = <vr>
          dicomVM = <vm>
        }

    ``/* ... */`` comment blocks and blank lines are skipped.

    Args:
        filename: path of the .dad file to read.
    Returns:
        list of ``[name, tag, vr, vm]`` lists, in file order.
    """
    array = []
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(filename, 'r') as aFile:
        lineIter = iter(aFile)
        for linews in lineIter:
            line = linews.strip()
            if not line:
                continue
            if line.startswith("/*"):
                # Skip everything up to the closing comment marker.
                for comws in lineIter:
                    if comws.strip().startswith("*/"):
                        break
            else:
                # Collect the block: header line plus body lines until '}'.
                buf = [line]
                for piimws in lineIter:
                    piim = piimws.strip()
                    if piim.startswith("}"):
                        break
                    buf.append(piim)
                # A record always holds: name, tag, dicomVR, dicomVM.
                assert len(buf) == 4
                clean = [None] * 4
                clean[0] = buf[0].split('{')[0].strip()   # name (before '{')
                clean[1] = buf[1].strip()                 # tag line
                clean[2] = buf[2].split('=')[1].strip()   # dicomVR value
                clean[3] = buf[3].split('=')[1].strip()   # dicomVM value
                array.append(clean)
    return array
if __name__ == "__main__":
filename = sys.argv[1] # dict
filename2 = sys.argv[2]
res = None
with open(filename,'r') as f:
my = re.compile(r'^group = (.+) {\r\n reservation = (.+)\r\n recognition = "(.+)"\r\n}', re.MULTILINE)
#content = f.readlines()
content = f.read()
#print content
res = my.findall( content )
#print res
#print len(res)
#print res
#print len(res)
#for it in res:
# print 'group = %s {\n reservation = %s\n recognition = "%s"\n}' % it
md = defaultdict(list)
for it in res:
# md[1].append('a')
group, elem, name = it
assert elem[0:2] == '00'
key = "%s,%s" % (group, elem[2:4])
#print key
md[key].append( name )
#print md
#print len(md) # seems to be at least one duplicate !
"""
res2 = None
with open(filename2,'r') as f:
my = re.compile(r'^(.+) {\r\n (.+)\r\n dicomVR = (.+)\r\n dicomVM = (.+)\r\n}', re.MULTILINE)
content = f.read()
res2 = my.findall( content )
#print res2
"""
res2 = parse_dad_file(filename2)
#print len(res2)
#print res2[20]
array = []
for it in res2:
name, tag, vr, vm = it
key = tag[:-2]
#print key
creators = md[ key ]
#print name
assert name.startswith( 'DICOM_' ) or name.startswith( 'SPI_' ) or name.startswith( 'ICS_' ) or name.startswith( 'VOL_' ) or name.startswith( 'PIIM_' )
vnames = name.split('_')
vclean = [string.capwords(it) for it in vnames]
if name.startswith( 'DICOM_' ):
gr,ele = tag.split(',')
vgr = int( '0x%s' % gr, 16)
if vgr % 2 == 0:
assert not creators
continue
if not creators:
#print name
assert "RESERVATION_OF_GROUP" in name or "LENGTH_OF_GROUP" in name
continue
# private attribute
assert creators
clean = " ".join(vclean[1:])
elif name.startswith( 'SPI_' ) or name.startswith( 'ICS_' ):
if not creators:
#print tag, name
assert "RESERVATION_OF_GROUP" in name or "LENGTH_OF_GROUP" in name
continue
clean = " ".join(vclean[1:])
elif name.startswith('VOL_'):
assert creators
clean = " ".join(vclean)
elif name.startswith('PIIM_'):
print tag
assert creators
clean = " ".join(vclean[1:])
else:
assert False
el = {}
for creator in creators:
el[ 'owner' ] = creator
el[ 'name' ] = clean
el[ 'keyword' ] = name
el[ 'group' ] = tag[:4]
el[ 'element' ] = "xx%s" % tag[7:]
el[ 'vr' ] = vr
el[ 'vm' ] = vm
array.append( el )
#print array
#for it in array:
# #print it
# if it['group' ] == '2001' or it['group' ] == '2005':
# #print it
# print '(%(group)s,%(element)s)\t%(vr)s\t%(keyword)s\t%(vm)s' % it
print json.dumps(array, sort_keys=True, indent=4)
| en | 0.297396 | #!/usr/bin/env python parse # $ ./parse_dad.py re/pms/merge119_120.dad re/pms/output_016.dad # http://stackoverflow.com/questions/3728655/python-titlecase-a-string-with-exceptions # @(#)EVMLegacy.dad #name,junk = line.split('{') #print buf # process buf: # dict #content = f.readlines() #print content #print res #print len(res) #print res #print len(res) #for it in res: # print 'group = %s {\n reservation = %s\n recognition = "%s"\n}' % it # md[1].append('a') #print key #print md #print len(md) # seems to be at least one duplicate ! res2 = None with open(filename2,'r') as f: my = re.compile(r'^(.+) {\r\n (.+)\r\n dicomVR = (.+)\r\n dicomVM = (.+)\r\n}', re.MULTILINE) content = f.read() res2 = my.findall( content ) #print res2 #print len(res2) #print res2[20] #print key #print name #print name # private attribute #print tag, name #print array #for it in array: # #print it # if it['group' ] == '2001' or it['group' ] == '2005': # #print it # print '(%(group)s,%(element)s)\t%(vr)s\t%(keyword)s\t%(vm)s' % it | 2.926116 | 3 |
madness/route.py | Waffles32/madness | 0 | 6614689 |
from dataclasses import dataclass, field
from typing import Callable, List, Tuple
from more_itertools import collapse
from werkzeug.routing import Rule
from .context import bind
@dataclass
class Route():
    """A single URL route.

    ``path`` is stored without a leading slash; the slash is prepended
    when the werkzeug rule is built.
    """
    path: str
    endpoint: Callable
    methods: List[str] = field(default_factory=list)
    context: List = field(default_factory=list)

    @property
    def rule(self) -> Rule:
        """Werkzeug Rule wrapping the endpoint in its context decorators."""
        bound = bind(self.endpoint, self.context_decorators)
        return Rule(f'/{self.path}', endpoint=bound, methods=self.methods or None)

    @property
    def context_decorators(self) -> Tuple:
        """The context entries flattened into a single tuple of decorators."""
        return tuple(collapse(self.context))

    def __repr__(self):
        decorator_names = ','.join(func.__qualname__ for func in self.context_decorators)
        pieces = [self.__class__.__name__, f'/{self.path}']
        if self.methods:
            pieces.append('[' + ','.join(self.methods) + ']')
        if decorator_names:
            pieces.append(f'-> @({decorator_names})')
        pieces.append(f'-> {self.endpoint.__qualname__}')
        return '<' + ' '.join(pieces) + '>'
|
from dataclasses import dataclass, field
from typing import Callable, List, Tuple
from more_itertools import collapse
from werkzeug.routing import Rule
from .context import bind
@dataclass
class Route():
    """A single URL route.

    ``path`` is stored without a leading slash; the slash is prepended
    when the werkzeug rule is built.
    """
    path: str
    endpoint: Callable
    methods: List[str] = field(default_factory=list)
    context: List = field(default_factory=list)

    @property
    def rule(self) -> Rule:
        """Werkzeug Rule wrapping the endpoint in its context decorators."""
        bound = bind(self.endpoint, self.context_decorators)
        return Rule(f'/{self.path}', endpoint=bound, methods=self.methods or None)

    @property
    def context_decorators(self) -> Tuple:
        """The context entries flattened into a single tuple of decorators."""
        return tuple(collapse(self.context))

    def __repr__(self):
        decorator_names = ','.join(func.__qualname__ for func in self.context_decorators)
        pieces = [self.__class__.__name__, f'/{self.path}']
        if self.methods:
            pieces.append('[' + ','.join(self.methods) + ']')
        if decorator_names:
            pieces.append(f'-> @({decorator_names})')
        pieces.append(f'-> {self.endpoint.__qualname__}')
        return '<' + ' '.join(pieces) + '>'
| en | 0.956381 | path does not begin with a leading slash | 2.504367 | 3 |
core_script/__pycache__/menu.py | andhra21231/mathway-bot | 1 | 6614690 | import sys
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtWidgets import QMainWindow, QLabel, QGridLayout, QWidget, QMenu
from PyQt5.QtWidgets import QPushButton
from PyQt5.QtCore import QSize, QTimer
import os
class Example(QMainWindow):
    """Main menu window that launches the individual calculator scripts."""

    def __init__(self):
        QMainWindow.__init__(self)
        self.setMinimumSize(QSize(320, 200))
        self.setWindowTitle("MathBot - VHCID.TECH")
        # One launcher button per calculator script.
        self.bt1 = QPushButton('Start Basic Calculation', self)
        self.bt2 = QPushButton('Start Areas Calculation', self)
        self.bt3 = QPushButton('Start Volume Calculation', self)
        self.bt4 = QPushButton('Start Surface Calculation', self)
        self.bt5 = QPushButton('Start Hypotenuse', self)
        self.bt6 = QPushButton('Turn Off', self)
        # Fixed two-column layout.
        self.bt1.move(50, 50)
        self.bt2.move(50, 100)
        self.bt3.move(170, 100)
        self.bt4.move(170, 50)
        self.bt5.move(50, 150)
        self.bt6.move(170, 150)
        self.bt1.clicked.connect(self.Button1)
        self.bt2.clicked.connect(self.Button2)
        self.bt3.clicked.connect(self.Button3)
        self.bt4.clicked.connect(self.Button4)
        self.bt5.clicked.connect(self.Button5)
        self.bt6.clicked.connect(self.Button6)
        # The original re-assigned this after every connect; assigning
        # it once preserves the attribute without the duplication.
        self.count = 10

    def Button1(self):
        """Launch the basic-arithmetic calculator."""
        os.system('python basic.py')

    def Button2(self):
        """Launch the area calculator."""
        os.system('python areas.py')

    def Button3(self):
        """Launch the volume calculator."""
        os.system('python volume.py')

    def Button4(self):
        """Launch the surface-area calculator."""
        os.system('python surface-area.py')

    def Button5(self):
        """Launch the hypotenuse calculator."""
        os.system('python hypotenus.py')

    def Button6(self):
        """Quit the application."""
        exit()
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
mainWin = Example()
mainWin.show()
sys.exit(app.exec_())
| import sys
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtWidgets import QMainWindow, QLabel, QGridLayout, QWidget, QMenu
from PyQt5.QtWidgets import QPushButton
from PyQt5.QtCore import QSize, QTimer
import os
class Example(QMainWindow):
    """Main menu window that launches the individual calculator scripts."""

    def __init__(self):
        QMainWindow.__init__(self)
        self.setMinimumSize(QSize(320, 200))
        self.setWindowTitle("MathBot - VHCID.TECH")
        # One launcher button per calculator script.
        self.bt1 = QPushButton('Start Basic Calculation', self)
        self.bt2 = QPushButton('Start Areas Calculation', self)
        self.bt3 = QPushButton('Start Volume Calculation', self)
        self.bt4 = QPushButton('Start Surface Calculation', self)
        self.bt5 = QPushButton('Start Hypotenuse', self)
        self.bt6 = QPushButton('Turn Off', self)
        # Fixed two-column layout.
        self.bt1.move(50, 50)
        self.bt2.move(50, 100)
        self.bt3.move(170, 100)
        self.bt4.move(170, 50)
        self.bt5.move(50, 150)
        self.bt6.move(170, 150)
        self.bt1.clicked.connect(self.Button1)
        self.bt2.clicked.connect(self.Button2)
        self.bt3.clicked.connect(self.Button3)
        self.bt4.clicked.connect(self.Button4)
        self.bt5.clicked.connect(self.Button5)
        self.bt6.clicked.connect(self.Button6)
        # The original re-assigned this after every connect; assigning
        # it once preserves the attribute without the duplication.
        self.count = 10

    def Button1(self):
        """Launch the basic-arithmetic calculator."""
        os.system('python basic.py')

    def Button2(self):
        """Launch the area calculator."""
        os.system('python areas.py')

    def Button3(self):
        """Launch the volume calculator."""
        os.system('python volume.py')

    def Button4(self):
        """Launch the surface-area calculator."""
        os.system('python surface-area.py')

    def Button5(self):
        """Launch the hypotenuse calculator."""
        os.system('python hypotenus.py')

    def Button6(self):
        """Quit the application."""
        exit()
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
mainWin = Example()
mainWin.show()
sys.exit(app.exec_())
| none | 1 | 2.859818 | 3 | |
hubs/models.py | moileretour/joatu | 1 | 6614691 | from django.db import models
from django.db.models.signals import post_save
from hubs.extras.coordinates import coordinates_calculation
class Hub(models.Model):
    """A community hub with its postal address and contact details."""
    hub_name = models.CharField(max_length=20, blank=False, null=False)
    # Street address parts; the house number is text (may contain
    # suffixes such as "12b").
    number = models.CharField(max_length=10, blank=True)
    street = models.CharField(max_length=200, blank=False)
    postal_code = models.CharField(max_length=10, blank=False)
    city = models.CharField(max_length=50, blank=False)
    state = models.CharField(max_length=50)
    country = models.CharField(max_length=50, blank=False)
    ## description of the user
    description = models.CharField(max_length=1000)
    ## site web
    website = models.URLField(blank=True)
    ## Email
    email = models.EmailField(blank=True)
    def __str__(self):
        # Shown wherever the hub is listed: "<name> - <city>".
        display = self.hub_name + ' - ' + self.city
        return display
class HubGeolocation(models.Model):
    """Cached latitude/longitude for a Hub, kept in sync via post_save."""
    hub = models.OneToOneField(Hub, on_delete=models.CASCADE)
    # Lat = latitude of the user
    lat = models.DecimalField(max_digits=9, decimal_places=6, blank=True, null=True)
    # Lng = longinitude of the user
    lng = models.DecimalField(max_digits=9, decimal_places=6, blank=True, null=True)
def Hub_created_or_updated(sender, update_fields, **kwargs):
    """post_save hook: (re)compute a Hub's coordinates from its address.

    On create, a HubGeolocation row is created; on update the existing
    row is refreshed (or created if it is somehow missing).
    """
    instance = kwargs['instance']
    # Both branches need the geocoded coordinates, so compute them once.
    lat_cal, lng_cal = coordinates_calculation(
        instance.number, instance.street, instance.postal_code,
        instance.city, instance.country,
    )
    if kwargs['created']:
        HubGeolocation.objects.create(hub=instance, lat=lat_cal, lng=lng_cal)
    else:
        geoloc = HubGeolocation.objects.filter(hub=instance)
        if geoloc.exists():
            geoloc.update(lat=lat_cal, lng=lng_cal)
        else:
            # BUG FIX: this branch referenced ProfileGeolocation, a model
            # from another app that is undefined here (NameError at runtime).
            HubGeolocation.objects.create(hub=instance, lat=lat_cal, lng=lng_cal)
post_save.connect(Hub_created_or_updated, sender=Hub)
class HubDiscussion(models.Model):
    """A single discussion message posted on a hub."""
    hub=models.ForeignKey(Hub, on_delete=models.CASCADE)
    text = models.CharField(max_length=1500, null=False, blank=False)
    # Author; the message is kept (with a NULL author) if the profile
    # is deleted.
    profile = models.ForeignKey('profiles.Profile', on_delete=models.SET_NULL, null=True)
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)
from django.db.models.signals import post_save
from hubs.extras.coordinates import coordinates_calculation
class Hub(models.Model):
hub_name = models.CharField(max_length=20, blank=False, null=False)
number = models.CharField(max_length=10, blank=True)
street = models.CharField(max_length=200, blank=False)
postal_code = models.CharField(max_length=10, blank=False)
city = models.CharField(max_length=50, blank=False)
state = models.CharField(max_length=50)
country = models.CharField(max_length=50, blank=False)
## description of the user
description = models.CharField(max_length=1000)
## site web
website = models.URLField(blank=True)
## Email
email = models.EmailField(blank=True)
def __str__(self):
display = self.hub_name + ' - ' + self.city
return display
class HubGeolocation(models.Model):
hub = models.OneToOneField(Hub, on_delete=models.CASCADE)
# Lat = latitude of the user
lat = models.DecimalField(max_digits=9, decimal_places=6, blank=True, null=True)
# Lng = longinitude of the user
lng = models.DecimalField(max_digits=9, decimal_places=6, blank=True, null=True)
def Hub_created_or_updated(sender, update_fields, **kwargs):
    """post_save hook: (re)compute a Hub's coordinates from its address.

    On create, a HubGeolocation row is created; on update the existing
    row is refreshed (or created if it is somehow missing).
    """
    instance = kwargs['instance']
    # Both branches need the geocoded coordinates, so compute them once.
    lat_cal, lng_cal = coordinates_calculation(
        instance.number, instance.street, instance.postal_code,
        instance.city, instance.country,
    )
    if kwargs['created']:
        HubGeolocation.objects.create(hub=instance, lat=lat_cal, lng=lng_cal)
    else:
        geoloc = HubGeolocation.objects.filter(hub=instance)
        if geoloc.exists():
            geoloc.update(lat=lat_cal, lng=lng_cal)
        else:
            # BUG FIX: this branch referenced ProfileGeolocation, a model
            # from another app that is undefined here (NameError at runtime).
            HubGeolocation.objects.create(hub=instance, lat=lat_cal, lng=lng_cal)
post_save.connect(Hub_created_or_updated, sender=Hub)
class HubDiscussion(models.Model):
hub=models.ForeignKey(Hub, on_delete=models.CASCADE)
text = models.CharField(max_length=1500, null=False, blank=False)
profile = models.ForeignKey('profiles.Profile', on_delete=models.SET_NULL, null=True)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True) | en | 0.477999 | ## description of the user ## site web ## Email # Lat = latitude of the user # Lng = longinitude of the user #if 'postal_code' in update_fields or 'city' in update_fields or 'country' in update_fields: | 2.334657 | 2 |
tests/test_dipdup/types/tezotop/storage.py | dipdup-net/dipdup-py | 39 | 6614692 | <filename>tests/test_dipdup/types/tezotop/storage.py
# generated by datamodel-codegen:
# filename: storage.json
from __future__ import annotations
from typing import Dict
from typing import List
from typing import Optional
from pydantic import BaseModel
from pydantic import Extra
class ResourceMap(BaseModel):
    """One resource entry: its identifier and generation rate."""
    class Config:
        # Reject unknown fields when parsing contract storage.
        extra = Extra.forbid
    id: str
    rate: str
class ResourceCollectorStorage(BaseModel):
    """Typed view of the resource-collector contract's storage."""
    class Config:
        # Reject unknown fields when parsing contract storage.
        extra = Extra.forbid
    administrator: str
    current_user: Optional[str]
    default_start_time: str
    generation_rate: str
    managers: List[str]
    metadata: Dict[str, str]
    nft_registry: str
    paused: bool
    resource_map: Dict[str, ResourceMap]
    resource_registry: str
    tezotop_collection: Dict[str, str]
| <filename>tests/test_dipdup/types/tezotop/storage.py
# generated by datamodel-codegen:
# filename: storage.json
from __future__ import annotations
from typing import Dict
from typing import List
from typing import Optional
from pydantic import BaseModel
from pydantic import Extra
class ResourceMap(BaseModel):
    """One resource entry: its identifier and generation rate."""
    class Config:
        # Reject unknown fields when parsing contract storage.
        extra = Extra.forbid
    id: str
    rate: str
class ResourceCollectorStorage(BaseModel):
    """Typed view of the resource-collector contract's storage."""
    class Config:
        # Reject unknown fields when parsing contract storage.
        extra = Extra.forbid
    administrator: str
    current_user: Optional[str]
    default_start_time: str
    generation_rate: str
    managers: List[str]
    metadata: Dict[str, str]
    nft_registry: str
    paused: bool
    resource_map: Dict[str, ResourceMap]
    resource_registry: str
    tezotop_collection: Dict[str, str]
| en | 0.580911 | # generated by datamodel-codegen: # filename: storage.json | 1.998134 | 2 |
build/lib/MapReduceWIW/shuffler.py | BhairavValera/WIW_Coding_Challenge | 0 | 6614693 | def shuffle(user_map):
'''
Sorts the outputs from user_map first by user_id and then
sorts each inidividual path string dictionary
Args:
user_map: map of user_ids to their respective paths
'''
sorted_user_map = dict(sorted(user_map.items(), key=lambda item: item[0])) #sort by user_ids
for user_id, pathMap in sorted_user_map.items():
sorted_pathMap = dict(sorted(pathMap.items(), key=lambda item: item[0])) #sort each path map alphabetically
sorted_user_map[user_id] = sorted_pathMap
return sorted_user_map | def shuffle(user_map):
'''
Sorts the outputs from user_map first by user_id and then
sorts each inidividual path string dictionary
Args:
user_map: map of user_ids to their respective paths
'''
sorted_user_map = dict(sorted(user_map.items(), key=lambda item: item[0])) #sort by user_ids
for user_id, pathMap in sorted_user_map.items():
sorted_pathMap = dict(sorted(pathMap.items(), key=lambda item: item[0])) #sort each path map alphabetically
sorted_user_map[user_id] = sorted_pathMap
return sorted_user_map | en | 0.662336 | Sorts the outputs from user_map first by user_id and then sorts each inidividual path string dictionary Args: user_map: map of user_ids to their respective paths #sort by user_ids #sort each path map alphabetically | 3.562479 | 4 |
python_tutorial/CircleArea.py | MiracleWong/PythonBasic | 0 | 6614694 | #!/usr/local/bin/python
import math
# radius
r = 2
area = r**2*math.pi
print area
print("{:2.10f}".format(area)) | #!/usr/local/bin/python
import math
# radius
r = 2
area = r**2*math.pi
print area
print("{:2.10f}".format(area)) | en | 0.4571 | #!/usr/local/bin/python # radius | 3.90326 | 4 |
scripts/bonus.py | lamproot/telegramh5 | 1 | 6614695 | #encoding:utf-8
import mysql
import datetime
import sys
import urllib, urllib2, json
import datetime
default_encoding = 'utf-8'
if sys.getdefaultencoding() != default_encoding:
reload(sys)
sys.setdefaultencoding(default_encoding)
conn = mysql.db()
def isdate():
content = 1
now = datetime.datetime.now().strftime('%Y%m%d')
url = 'http://apis.baidu.com/xiaogg/holiday/holiday?d=%s' % (now)
req = urllib2.Request(url)
req.add_header("apikey", "9c1081f2f42cce41ad92dad6d8552902")
resp = urllib2.urlopen(req)
content = resp.read()
if(content):
return int(content)
return content
def rate():
rate_sql = """
select category, value from zx_bonus_rule where category in ('rongzidun', 'jiangjinbi', 'lovemoney', 'platmoney', 'taxmoney')
"""
rates = conn.query(rate_sql)
if rates:
rates = rates
else:
rates = (
{'category': 'rongzidun', 'value': 25},
{'category': 'jiangjinbi', 'value': 55},
{'category': 'lovemoney', 'value': 1},
{'category': 'platmoney', 'value': 2},
{'category': 'taxmoney', 'value': 17}
)
return rates
# 最大分红
def maxcash(userrank):
value = 0
sql = """
select value from zx_bonus_rule where category = 'maxcash' and `key` = %s
""" % (userrank)
result = conn.query(sql)
if result:
value = result[0]['value']
return value
# 通过级别查看对应的金额
def cash(userrank):
# 会员
sql = """
select value from zx_bonus_rule where category = 'userrank' and `key` = %s
""" % (userrank)
result = conn.query(sql)
if result:
value = result[0]['value']
return value
return result
# 分红
def fenhong():
now = datetime.datetime.now()
now_second = datetime.datetime.now().strftime('%s')
yes_second = (now + datetime.timedelta(days=-1)).strftime('%s')
# 比率配比
rates = rate()
sql = "select value from zx_bonus_rule where category = 'UserCash'"
result = conn.query(sql)
if result:
fenghong_scale = result[0]['value'] / 100
else:
fenghong_scale = 1.1 / 100
# 会员
member_sql = """
select m.uid, m.usernumber, m.realname, m.userrank, m.jiangjinbi, m.rongzidun, m.max_bonus, m.upgrade_level, m.upgrade_status, m.packages, r.value from zx_member as m left join zx_bonus_rule as r
on m.userrank = r.key
where m.userrank != 1 and m.status = 1 and m.proxy_state = 1 and r.category = 'userrank' and m.uid != 1
"""
members = conn.query(member_sql)
if members:
for member in members:
uid = member['uid']
usernumber = member['usernumber']
realname = member['realname']
userrank = int(member['userrank'])
value = int(member['value'])
max_bonus = float(member['max_bonus'])
upgrade_status = int(member['upgrade_status'])
upgrade_level = int(member['upgrade_level'])
packages = int(member['packages'])
max_cash = 0
fenhong = fenghong_scale * value
# 普通套餐
if packages == 1:
# 升级的分红模式
if upgrade_status == 1:
current_cash = cash(userrank)
ago_cash = cash(upgrade_level)
# 升级差值的最大分红奖金
max_cash = maxcash(userrank) * (current_cash - ago_cash) + maxcash(upgrade_level) * ago_cash
elif upgrade_status == 0:
# 最大分红的奖金
max_cash = maxcash(userrank) * value
# 金卡、钻卡等额价值套餐
elif packages == 2:
max_cash = maxcash(userrank) * value - value
if max_bonus < max_cash:
if fenhong + max_bonus > max_cash:
fenhong = max_cash - max_bonus
sql = """
update zx_member set proxy_state = 0 where uid = %s
""" % (uid)
conn.dml(sql, 'update')
else:
fenhong = fenhong
jiangjinbi_award, rongzidun_award, lovemoney_award, platmoney_award, taxmoney_award = 0, 0, 0, 0, 0
for r in rates:
if r['category'] == 'jiangjinbi':
jiangjinbi_rate = r['value'] / 100
jiangjinbi_award = fenhong * jiangjinbi_rate
elif r['category'] == 'rongzidun':
rongzidun_rate = r['value'] / 100
rongzidun_award = fenhong * rongzidun_rate
elif r['category'] == 'lovemoney':
lovemoney_rate = r['value'] / 100
lovemoney_award = fenhong * lovemoney_rate
elif r['category'] == 'platmoney':
platmoney_rate = r['value'] / 100
platmoney_award = fenhong * platmoney_rate
elif r['category'] == 'taxmoney':
taxmoney_rate = r['value'] / 100
taxmoney_award = fenhong * taxmoney_rate
# real_total 实发奖金
real_total = fenhong - lovemoney_award - platmoney_award - taxmoney_award
# 销费商虚拟币增加
zx_member_sql = """
update zx_member set jiangjinbi = jiangjinbi + %s, rongzidun = rongzidun + %s where usernumber = %s
""" % (jiangjinbi_award, rongzidun_award, usernumber)
zx_member = conn.dml(zx_member_sql, 'update')
if zx_member:
max_bonus_sql = """
update zx_member set max_bonus = max_bonus + %s where uid = %s
""" % (fenhong, uid)
conn.dml(max_bonus_sql, 'update')
# 分红奖金支出
zx_finance_sql = """
update zx_finance set expend = expend + %s, createtime = %s
""" % (fenhong, now_second)
conn.dml(zx_finance_sql, 'update')
# 明细
zx_bonus_detail_sql = """
insert into zx_bonus_detail (touserid, tousernumber, torealname, moneytype, jiangjinbi, rongzidun, lovemoney, platmoney, taxmoney, total, real_total, createdate)
values (%s, %s, '%s', %s, %s, %s, %s, %s, %s, %s, %s, %s)
""" % (uid, usernumber, realname, 1, jiangjinbi_award, rongzidun_award, lovemoney_award, platmoney_award, taxmoney_award, fenhong, real_total, yes_second)
# 插入明细表
conn.dml(zx_bonus_detail_sql, 'insert')
# 奖金币流水
jiangjinbi_change_sql = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (1, 1, uid, usernumber, realname, 1, 1, '戎子', 3, 1, jiangjinbi_award, now_second)
conn.dml(jiangjinbi_change_sql, 'insert')
jiangjinbi_change_sql_1 = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (1, 1, 1, 1, '戎子', uid, usernumber, realname, 3, 0, jiangjinbi_award, now_second)
conn.dml(jiangjinbi_change_sql_1, 'insert')
# 戎子盾流水
rongzidun_change_sql = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (3, 3, uid, usernumber, realname, 1, 1, '戎子', 3, 1, rongzidun_award, now_second)
conn.dml(rongzidun_change_sql, 'insert')
# 戎子盾流水
rongzidun_change_sql_1 = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (3, 3, 1, 1, '戎子', uid, usernumber, realname, 3, 0, rongzidun_award, now_second)
conn.dml(rongzidun_change_sql_1, 'insert')
# 爱心基金流水
lovemoney_change_sql = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (6, 6, uid, usernumber, realname, 1, 1, '戎子', 3, 0, lovemoney_award, now_second)
conn.dml(lovemoney_change_sql, 'insert')
lovemoney_change_sql_1 = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (6, 6, 1, 1, '戎子', uid, usernumber, realname, 3, 1, lovemoney_award, now_second)
conn.dml(lovemoney_change_sql_1, 'insert')
# 平台管理费流水
platmoney_change_sql = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (7, 7, uid, usernumber, realname, 1, 1, '戎子', 3, 0, platmoney_award, now_second)
conn.dml(platmoney_change_sql, 'insert')
platmoney_change_sql_1 = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (7, 7, 1, 1, '戎子', uid, usernumber, realname, 3, 1, platmoney_award, now_second)
conn.dml(platmoney_change_sql_1, 'insert')
# 税费流水
taxmoney_change_sql = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (8, 8, uid, usernumber, realname, 1, 1, '戎子', 3, 0, taxmoney_award, now_second)
conn.dml(taxmoney_change_sql, 'insert')
taxmoney_change_sql_1 = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (8, 8, 1, 1, '戎子', uid, usernumber, realname, 3, 1, taxmoney_award, now_second)
conn.dml(taxmoney_change_sql_1, 'insert')
conn.close()
print "ok"
def main():
status = isdate()
if status == 0:
fenhong()
if __name__ == '__main__':
main() | #encoding:utf-8
import mysql
import datetime
import sys
import urllib, urllib2, json
import datetime
default_encoding = 'utf-8'
if sys.getdefaultencoding() != default_encoding:
reload(sys)
sys.setdefaultencoding(default_encoding)
conn = mysql.db()
def isdate():
content = 1
now = datetime.datetime.now().strftime('%Y%m%d')
url = 'http://apis.baidu.com/xiaogg/holiday/holiday?d=%s' % (now)
req = urllib2.Request(url)
req.add_header("apikey", "9c1081f2f42cce41ad92dad6d8552902")
resp = urllib2.urlopen(req)
content = resp.read()
if(content):
return int(content)
return content
def rate():
rate_sql = """
select category, value from zx_bonus_rule where category in ('rongzidun', 'jiangjinbi', 'lovemoney', 'platmoney', 'taxmoney')
"""
rates = conn.query(rate_sql)
if rates:
rates = rates
else:
rates = (
{'category': 'rongzidun', 'value': 25},
{'category': 'jiangjinbi', 'value': 55},
{'category': 'lovemoney', 'value': 1},
{'category': 'platmoney', 'value': 2},
{'category': 'taxmoney', 'value': 17}
)
return rates
# 最大分红
def maxcash(userrank):
value = 0
sql = """
select value from zx_bonus_rule where category = 'maxcash' and `key` = %s
""" % (userrank)
result = conn.query(sql)
if result:
value = result[0]['value']
return value
# 通过级别查看对应的金额
def cash(userrank):
# 会员
sql = """
select value from zx_bonus_rule where category = 'userrank' and `key` = %s
""" % (userrank)
result = conn.query(sql)
if result:
value = result[0]['value']
return value
return result
# 分红
def fenhong():
now = datetime.datetime.now()
now_second = datetime.datetime.now().strftime('%s')
yes_second = (now + datetime.timedelta(days=-1)).strftime('%s')
# 比率配比
rates = rate()
sql = "select value from zx_bonus_rule where category = 'UserCash'"
result = conn.query(sql)
if result:
fenghong_scale = result[0]['value'] / 100
else:
fenghong_scale = 1.1 / 100
# 会员
member_sql = """
select m.uid, m.usernumber, m.realname, m.userrank, m.jiangjinbi, m.rongzidun, m.max_bonus, m.upgrade_level, m.upgrade_status, m.packages, r.value from zx_member as m left join zx_bonus_rule as r
on m.userrank = r.key
where m.userrank != 1 and m.status = 1 and m.proxy_state = 1 and r.category = 'userrank' and m.uid != 1
"""
members = conn.query(member_sql)
if members:
for member in members:
uid = member['uid']
usernumber = member['usernumber']
realname = member['realname']
userrank = int(member['userrank'])
value = int(member['value'])
max_bonus = float(member['max_bonus'])
upgrade_status = int(member['upgrade_status'])
upgrade_level = int(member['upgrade_level'])
packages = int(member['packages'])
max_cash = 0
fenhong = fenghong_scale * value
# 普通套餐
if packages == 1:
# 升级的分红模式
if upgrade_status == 1:
current_cash = cash(userrank)
ago_cash = cash(upgrade_level)
# 升级差值的最大分红奖金
max_cash = maxcash(userrank) * (current_cash - ago_cash) + maxcash(upgrade_level) * ago_cash
elif upgrade_status == 0:
# 最大分红的奖金
max_cash = maxcash(userrank) * value
# 金卡、钻卡等额价值套餐
elif packages == 2:
max_cash = maxcash(userrank) * value - value
if max_bonus < max_cash:
if fenhong + max_bonus > max_cash:
fenhong = max_cash - max_bonus
sql = """
update zx_member set proxy_state = 0 where uid = %s
""" % (uid)
conn.dml(sql, 'update')
else:
fenhong = fenhong
jiangjinbi_award, rongzidun_award, lovemoney_award, platmoney_award, taxmoney_award = 0, 0, 0, 0, 0
for r in rates:
if r['category'] == 'jiangjinbi':
jiangjinbi_rate = r['value'] / 100
jiangjinbi_award = fenhong * jiangjinbi_rate
elif r['category'] == 'rongzidun':
rongzidun_rate = r['value'] / 100
rongzidun_award = fenhong * rongzidun_rate
elif r['category'] == 'lovemoney':
lovemoney_rate = r['value'] / 100
lovemoney_award = fenhong * lovemoney_rate
elif r['category'] == 'platmoney':
platmoney_rate = r['value'] / 100
platmoney_award = fenhong * platmoney_rate
elif r['category'] == 'taxmoney':
taxmoney_rate = r['value'] / 100
taxmoney_award = fenhong * taxmoney_rate
# real_total 实发奖金
real_total = fenhong - lovemoney_award - platmoney_award - taxmoney_award
# 销费商虚拟币增加
zx_member_sql = """
update zx_member set jiangjinbi = jiangjinbi + %s, rongzidun = rongzidun + %s where usernumber = %s
""" % (jiangjinbi_award, rongzidun_award, usernumber)
zx_member = conn.dml(zx_member_sql, 'update')
if zx_member:
max_bonus_sql = """
update zx_member set max_bonus = max_bonus + %s where uid = %s
""" % (fenhong, uid)
conn.dml(max_bonus_sql, 'update')
# 分红奖金支出
zx_finance_sql = """
update zx_finance set expend = expend + %s, createtime = %s
""" % (fenhong, now_second)
conn.dml(zx_finance_sql, 'update')
# 明细
zx_bonus_detail_sql = """
insert into zx_bonus_detail (touserid, tousernumber, torealname, moneytype, jiangjinbi, rongzidun, lovemoney, platmoney, taxmoney, total, real_total, createdate)
values (%s, %s, '%s', %s, %s, %s, %s, %s, %s, %s, %s, %s)
""" % (uid, usernumber, realname, 1, jiangjinbi_award, rongzidun_award, lovemoney_award, platmoney_award, taxmoney_award, fenhong, real_total, yes_second)
# 插入明细表
conn.dml(zx_bonus_detail_sql, 'insert')
# 奖金币流水
jiangjinbi_change_sql = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (1, 1, uid, usernumber, realname, 1, 1, '戎子', 3, 1, jiangjinbi_award, now_second)
conn.dml(jiangjinbi_change_sql, 'insert')
jiangjinbi_change_sql_1 = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (1, 1, 1, 1, '戎子', uid, usernumber, realname, 3, 0, jiangjinbi_award, now_second)
conn.dml(jiangjinbi_change_sql_1, 'insert')
# 戎子盾流水
rongzidun_change_sql = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (3, 3, uid, usernumber, realname, 1, 1, '戎子', 3, 1, rongzidun_award, now_second)
conn.dml(rongzidun_change_sql, 'insert')
# 戎子盾流水
rongzidun_change_sql_1 = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (3, 3, 1, 1, '戎子', uid, usernumber, realname, 3, 0, rongzidun_award, now_second)
conn.dml(rongzidun_change_sql_1, 'insert')
# 爱心基金流水
lovemoney_change_sql = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (6, 6, uid, usernumber, realname, 1, 1, '戎子', 3, 0, lovemoney_award, now_second)
conn.dml(lovemoney_change_sql, 'insert')
lovemoney_change_sql_1 = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (6, 6, 1, 1, '戎子', uid, usernumber, realname, 3, 1, lovemoney_award, now_second)
conn.dml(lovemoney_change_sql_1, 'insert')
# 平台管理费流水
platmoney_change_sql = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (7, 7, uid, usernumber, realname, 1, 1, '戎子', 3, 0, platmoney_award, now_second)
conn.dml(platmoney_change_sql, 'insert')
platmoney_change_sql_1 = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (7, 7, 1, 1, '戎子', uid, usernumber, realname, 3, 1, platmoney_award, now_second)
conn.dml(platmoney_change_sql_1, 'insert')
# 税费流水
taxmoney_change_sql = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (8, 8, uid, usernumber, realname, 1, 1, '戎子', 3, 0, taxmoney_award, now_second)
conn.dml(taxmoney_change_sql, 'insert')
taxmoney_change_sql_1 = """
insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime)
values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s)
""" % (8, 8, 1, 1, '戎子', uid, usernumber, realname, 3, 1, taxmoney_award, now_second)
conn.dml(taxmoney_change_sql_1, 'insert')
conn.close()
print "ok"
def main():
status = isdate()
if status == 0:
fenhong()
if __name__ == '__main__':
main() | en | 0.223586 | #encoding:utf-8 select category, value from zx_bonus_rule where category in ('rongzidun', 'jiangjinbi', 'lovemoney', 'platmoney', 'taxmoney') # 最大分红 select value from zx_bonus_rule where category = 'maxcash' and `key` = %s # 通过级别查看对应的金额 # 会员 select value from zx_bonus_rule where category = 'userrank' and `key` = %s # 分红 # 比率配比 # 会员 select m.uid, m.usernumber, m.realname, m.userrank, m.jiangjinbi, m.rongzidun, m.max_bonus, m.upgrade_level, m.upgrade_status, m.packages, r.value from zx_member as m left join zx_bonus_rule as r on m.userrank = r.key where m.userrank != 1 and m.status = 1 and m.proxy_state = 1 and r.category = 'userrank' and m.uid != 1 # 普通套餐 # 升级的分红模式 # 升级差值的最大分红奖金 # 最大分红的奖金 # 金卡、钻卡等额价值套餐 update zx_member set proxy_state = 0 where uid = %s # real_total 实发奖金 # 销费商虚拟币增加 update zx_member set jiangjinbi = jiangjinbi + %s, rongzidun = rongzidun + %s where usernumber = %s update zx_member set max_bonus = max_bonus + %s where uid = %s # 分红奖金支出 update zx_finance set expend = expend + %s, createtime = %s # 明细 insert into zx_bonus_detail (touserid, tousernumber, torealname, moneytype, jiangjinbi, rongzidun, lovemoney, platmoney, taxmoney, total, real_total, createdate) values (%s, %s, '%s', %s, %s, %s, %s, %s, %s, %s, %s, %s) # 插入明细表 # 奖金币流水 insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime) values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s) insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime) values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s) # 戎子盾流水 insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime) values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s) # 戎子盾流水 insert 
into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime) values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s) # 爱心基金流水 insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime) values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s) insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime) values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s) # 平台管理费流水 insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime) values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s) insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime) values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s) # 税费流水 insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime) values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s) insert into zx_money_change (moneytype, status, targetuserid, targetusernumber, targetrealname, userid, usernumber, realname, changetype, recordtype, money, createtime) values (%s, %s, %s, %s, '%s', %s, %s, '%s', %s, %s, %s, %s) | 2.719642 | 3 |
tests/parsers/test_parser_create_project.py | tableau/tabcmd | 3 | 6614696 | <gh_stars>1-10
import unittest
from tabcmd.commands.project.create_project_command import CreateProjectCommand
from .common_setup import *
commandname = "createproject"
class CreateProjectParserTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.parser_under_test = initialize_test_pieces(commandname, CreateProjectCommand)
def test_create_project_parser_optional_arguments(self):
mock_args = [
commandname,
"--name",
"testproject",
"--parent-project-path",
"abcdef",
"--description",
"desc",
]
args = self.parser_under_test.parse_args(mock_args)
assert args.project_name == "testproject"
assert args.parent_project_path == "abcdef"
def test_create_project_parser_required_arguments_name(self):
mock_args = [
commandname,
"-n",
"project-name",
"--parent-project-path",
"abcdef",
"--description",
"desc",
]
args = self.parser_under_test.parse_args(mock_args)
assert args.project_name == "project-name"
assert args.parent_project_path == "abcdef"
def test_create_project_parser_required_arguments_missing_name(self):
mock_args = [
commandname,
"--parent-project-path",
"abcdef",
"--description",
"desc",
]
with self.assertRaises(SystemExit):
self.parser_under_test.parse_args(mock_args)
def test_create_project_parser_optional_arguments_missing_project_path(self):
mock_args = [
commandname,
"-n",
"project-name",
"--parent-project-path",
"--description",
"desc",
]
with self.assertRaises(SystemExit):
args = self.parser_under_test.parse_args(mock_args)
| import unittest
from tabcmd.commands.project.create_project_command import CreateProjectCommand
from .common_setup import *
commandname = "createproject"
class CreateProjectParserTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.parser_under_test = initialize_test_pieces(commandname, CreateProjectCommand)
def test_create_project_parser_optional_arguments(self):
mock_args = [
commandname,
"--name",
"testproject",
"--parent-project-path",
"abcdef",
"--description",
"desc",
]
args = self.parser_under_test.parse_args(mock_args)
assert args.project_name == "testproject"
assert args.parent_project_path == "abcdef"
def test_create_project_parser_required_arguments_name(self):
mock_args = [
commandname,
"-n",
"project-name",
"--parent-project-path",
"abcdef",
"--description",
"desc",
]
args = self.parser_under_test.parse_args(mock_args)
assert args.project_name == "project-name"
assert args.parent_project_path == "abcdef"
def test_create_project_parser_required_arguments_missing_name(self):
mock_args = [
commandname,
"--parent-project-path",
"abcdef",
"--description",
"desc",
]
with self.assertRaises(SystemExit):
self.parser_under_test.parse_args(mock_args)
def test_create_project_parser_optional_arguments_missing_project_path(self):
mock_args = [
commandname,
"-n",
"project-name",
"--parent-project-path",
"--description",
"desc",
]
with self.assertRaises(SystemExit):
args = self.parser_under_test.parse_args(mock_args) | none | 1 | 3.116542 | 3 | |
blowtorch/run.py | alebeck/blowtorch | 3 | 6614697 | from datetime import datetime
from typing import Optional, List, Union
from pathlib import Path
import random
import functools
import warnings
from contextlib import nullcontext
import numpy as np
import torch
from torch.utils.data import DataLoader
from coolname import generate_slug, replace_random
from . import _writer as writer
from .backends.cpu_backend import CPUBackend
from .backends.gpu_backend import GPUBackend
from .bound_functions import call
from .utils import get_highest_run, std_round, seed_all, set_deterministic
from .config import TrainingConfig
from .bound_functions import BoundFunctions
from .loggers import BaseLogger, LoggerSet, StandardLogger
class Run:
"""
Represents an individual training run.
"""
def __init__(self, config_files: Optional[List] = None, random_seed: int = None):
self._bound_functions = BoundFunctions()
self._config = None
self._backend = None
self._logger = None
self.train_loader = None
self.val_loader = None
self._loggers = None
self._max_epochs = None
self._use_gpu = None
self._resume_checkpoint = None
self._save_path = None
self.checkpoints_path = None
self._run_name = None
self._optimize_metric = None
self._checkpoint_metric = None
self._checkpoint_every = None
self._smaller_is_better = None # TODO state which to minimize/checkpoint on in result dict
self._optimize_first = None
self._enable_amp = None
self._detect_anomalies = None
self._is_validate = None
self._start_epoch = 0
self._is_main_node = None
self._config = TrainingConfig([] if config_files is None else config_files)
self.random_seed = random_seed
if random_seed:
seed_all(random_seed)
# TODO types, docstrings
# TODO pin_memory
# todo clear cache before start
# todo hooks
# todo cleanup code (extra files for optim, devices etc.)
# todo save on ctrl-C
    # todo look at pl GPUBackend (amp optimization etc.)
    def run(self,
            model: torch.nn.Module,
            train_loader: DataLoader,
            val_loader: DataLoader,
            *,
            loggers: Optional[List[BaseLogger]] = None,
            max_epochs=1,
            use_gpu=True,
            num_nodes=1,
            num_gpus_per_node=1,
            node_rank=0,
            ddp_backend='nccl',
            ddp_init_method='env://',
            ddp_find_unused_parameters=False,
            resume_checkpoint: Optional[Union[str, Path]] = None,
            save_path='train_logs',
            run_name=None,
            optimize_metric=None,
            checkpoint_metric=None,
            checkpoint_every=1,
            smaller_is_better=True,
            optimize_first=False,
            enable_amp=False,
            detect_anomalies=False
            ):
        """
        Starts the training run.

        Creates the run directory (``<save_path>/<timestamp>_<run_name>/checkpoints``),
        instantiates the CPU or GPU backend, optionally loads a resume checkpoint on the
        main node, and dispatches ``_train_fn`` through the backend.

        Args:
            model: the ``torch.nn.Module`` to train; handed to the backend's ``dispatch``
            train_loader: ``DataLoader`` yielding training batches
            val_loader: ``DataLoader`` yielding validation batches
            loggers: list of loggers that subscribe to various logging events
            max_epochs: number of epochs to run (counted from the resumed start epoch, if any)
            use_gpu: if ``True`` a ``GPUBackend`` is created, otherwise a ``CPUBackend``
            num_nodes: number of machines participating in (distributed) training
            num_gpus_per_node: number of GPUs used on each node (GPU backend only)
            node_rank: when num_nodes > 1, this specifies the ordinal number of the current node within all nodes
            ddp_backend: distributed process-group backend name forwarded to the GPU backend
                (e.g. ``'nccl'``)
            ddp_init_method: process-group init method URL forwarded to the GPU backend
            ddp_find_unused_parameters: forwarded to the GPU backend
                (presumably passed on to ``DistributedDataParallel`` — verify in GPUBackend)
            resume_checkpoint: path to checkpoint to resume training from; must be the parent
                directory of a "checkpoints" folder, and is mutually exclusive with ``run_name``
            save_path: path to directory that blowtorch will save logs and checkpoints to
            run_name: name associated with this run, will be randomly created if None; must
                not contain ``"_"`` (it is used as a path separator in the directory name)
            optimize_metric: train metric that will be used for optimization, will pick the first returned one if None
            checkpoint_metric: validation metric that will be used for checkpointing, will pick the first returned one
                if None
            checkpoint_every: every checkpoint_every epochs a checkpoint is saved, disregarding performance of the
                current model. This way it's always possible to resume the run from the latest (or near-latest) state
            smaller_is_better: ``True`` if we want to minimize, ``False`` if maximize
            optimize_first: whether optimization should occur during the first epoch
            enable_amp: enable automatic mixed precision (forwarded to the GPU backend)
            detect_anomalies: enable autograd anomaly detection

        Raises:
            ValueError: if both ``resume_checkpoint`` and ``run_name`` are given, if the
                resume path does not contain a "checkpoints" folder, or if ``run_name``
                contains an underscore.
        """
        # stash all run parameters on the instance so _train_fn (executed possibly in
        # other processes via the backend) can access them
        self.train_loader = train_loader
        self.val_loader = val_loader
        self._loggers = loggers
        self._max_epochs = max_epochs
        self._use_gpu = use_gpu
        self._resume_checkpoint = resume_checkpoint
        self._run_name = run_name
        self._save_path = save_path
        self._optimize_metric = optimize_metric
        self._checkpoint_metric = checkpoint_metric
        self._checkpoint_every = checkpoint_every
        self._smaller_is_better = smaller_is_better
        self._optimize_first = optimize_first
        self._enable_amp = enable_amp
        self._detect_anomalies = detect_anomalies
        self._save_path = Path(self._save_path)
        self._save_path.mkdir(parents=True, exist_ok=True)
        # only the main node loads resume weights and reports checkpoint info
        self._is_main_node = num_nodes == 1 or node_rank == 0
        # assign new random.Random() instance to coolname, such that slugs are different even though we have seeded
        replace_random(random.Random())
        if self._resume_checkpoint:
            if self._run_name is not None:
                raise ValueError('A run name cannot be specified when resuming from a previous run.')
            self._resume_checkpoint = Path(self._resume_checkpoint)
            if not self._resume_checkpoint.is_dir() or not (self._resume_checkpoint / 'checkpoints').exists():
                raise ValueError('Path to resume from should be the parent directory of the "checkpoints" folder.')
            # run directory names are "<timestamp>_<run_name>", so the name is the part after "_"
            self._run_name = self._resume_checkpoint.stem.split('_')[-1]
            self._save_path = self._resume_checkpoint
        else:
            if self._run_name is None:
                self._run_name = generate_slug(2)
                assert '_' not in self._run_name
            elif '_' in self._run_name:
                raise ValueError('Run name cannot contain "_".')
            # append consecutive number to run name
            self._run_name += f'-{get_highest_run(self._save_path) + 1}'
            self._save_path = self._save_path / (datetime.now().strftime("%y-%m-%d_%H-%M-%S") + '_' + self._run_name)
            self._save_path.mkdir(parents=True, exist_ok=False)
        self.checkpoints_path = self._save_path / 'checkpoints'
        self.checkpoints_path.mkdir(exist_ok=True)
        # backend initialization
        try:
            if self._use_gpu:
                self._backend = GPUBackend(num_nodes, num_gpus_per_node, node_rank, ddp_backend,
                                           ddp_find_unused_parameters, ddp_init_method, enable_amp)
            else:
                self._backend = CPUBackend()
        except Exception as e:
            writer.error(str(e))
            raise
        writer.info(f'Using {self._backend}')
        checkpoint = None
        if self._is_main_node:
            if self._resume_checkpoint:
                # only need to pass weights on main process, for it is distributed to the other nodes automatically
                writer.info(f'Resuming training from checkpoint {self._resume_checkpoint}')
                checkpoint = torch.load(self.checkpoints_path / 'latest', map_location='cpu')
                self._start_epoch = checkpoint['epoch']
        if not self._optimize_first and self._start_epoch == 0:
            writer.info('Not optimizing during first epoch')
        # hand control to the backend, which runs _train_fn (per process/node on DDP)
        self._backend.dispatch(model, self._train_fn, self._bound_functions['configure_optimizers'], checkpoint)
    def _train_fn(self, model, rank):
        """Per-process training loop: alternating train/validation passes.

        Runs ``self._max_epochs`` epochs starting at ``self._start_epoch``,
        invoking the user-registered ``train_step``/``val_step`` hooks per
        batch, and checkpoints on the main process only.

        Args:
            model: the (backend-prepared) model to train.
            rank: process rank; rank 0 is the main process and owns anomaly
                detection, progress display and checkpointing.
        """
        if self.random_seed:
            # we want every training process to have a different, but deterministic random seed
            seed_all(self.random_seed + rank)
        is_main = rank == 0
        # best_val tracks the best checkpoint metric seen so far; its initial
        # value depends on the optimization direction
        best_val = float('inf') if self._smaller_is_better else 0.
        did_warn_train_metrics = False
        self._init_loggers(is_main)
        self._logger.before_training_start(self._config.get_raw_config(), model, self._bound_functions)
        # give backend the chance to wrap dataloaders, e.g. with samplers for multi-process training
        train_loader, val_loader = self._backend.prepare_data_loaders(self.train_loader, self.val_loader)
        for epoch in range(self._start_epoch, self._start_epoch + self._max_epochs):
            # stores metrics of current epoch
            metrics = {}
            # ===== TRAINING ==== #
            model.train()
            torch.set_grad_enabled(True)
            with writer.task(f'Training epoch {epoch}') as t:
                step_metrics = []
                for batch in t.tqdm(train_loader):
                    batch = self._backend.to_device(batch)
                    # anomaly detection only on the main process (it is expensive)
                    with torch.autograd.set_detect_anomaly(self._detect_anomalies) if is_main else nullcontext():
                        # don't calculate grads if we're in epoch zero and not optimizing
                        torch.set_grad_enabled(self._optimize_first or epoch > 0)
                        with self._backend.get_train_step_context():
                            train_metrics = call(
                                self._bound_functions['train_step'],
                                batch=batch,
                                model=model,
                                is_validate=False,
                                device=self._backend.device,
                                epoch=epoch
                            )
                        # a bare tensor return is interpreted as the loss (warn once)
                        if not isinstance(train_metrics, dict):
                            if not did_warn_train_metrics:
                                writer.warning('Received a single return value from `train_step`, assuming '
                                               '"loss". Return a dict to explicitly name the metric(s).')
                                did_warn_train_metrics = True
                            train_metrics = {'loss': train_metrics}
                        # first returned metric becomes the optimization target by default
                        if self._optimize_metric is None:
                            metric = list(train_metrics.keys())[0]  # TODO possibility to state which one to optimize
                            writer.info(f'Selected metric "{metric}" for minimization')
                            self._optimize_metric = metric
                        if self._optimize_first or epoch > 0:
                            self._backend.optim_step(train_metrics[self._optimize_metric])
                        t.set_current_metrics({
                            self._optimize_metric: std_round(train_metrics[self._optimize_metric].item())})
                        step_metrics.append({k: float(v) for k, v in train_metrics.items()})
                        if 'after_train_step' in self._bound_functions:
                            call(
                                self._bound_functions['after_train_step'],
                                model=model,
                                is_validate=False,
                                device=self._backend.device,
                                epoch=epoch,
                                metrics=step_metrics[-1]
                            )
                # calculate mean metrics
                metrics['train'] = {
                    metric: np.array([dic[metric] for dic in step_metrics]).mean() for metric in step_metrics[0]
                }
                # give backend the possibility to synchronize metrics across multiple processes, blocking
                self._backend.synchronize_metrics(metrics['train'])
                self._logger.after_pass(metrics['train'], epoch, is_validate=False)
                status_str = f'[Epoch {epoch} / Train] ' \
                             + ' '.join([f'{k}: {std_round(v)}' for k, v in metrics['train'].items()])
                t.success(status_str)
            # ===== VALIDATION ==== #
            model.eval()
            torch.set_grad_enabled(False)
            with writer.task(f'Validating epoch {epoch}') as t:
                step_metrics = []
                for batch in t.tqdm(val_loader):
                    batch = self._backend.to_device(batch)
                    with self._backend.get_val_step_context():
                        val_metrics = call(
                            self._bound_functions['val_step'],
                            batch=batch,
                            model=model,
                            is_validate=True,
                            device=self._backend.device,
                            epoch=epoch
                        )
                    if not isinstance(val_metrics, dict):
                        val_metrics = {'loss': val_metrics}
                    t.set_current_metrics({
                        self._optimize_metric: std_round(val_metrics[self._optimize_metric].item())})
                    step_metrics.append({k: float(v) for k, v in val_metrics.items()})
                    if 'after_val_step' in self._bound_functions:
                        call(
                            self._bound_functions['after_val_step'],
                            model=model,
                            is_validate=True,
                            device=self._backend.device,
                            epoch=epoch,
                            metrics=step_metrics[-1]
                        )
                metrics['val'] = {
                    metric: np.array([dic[metric] for dic in step_metrics]).mean() for metric in step_metrics[0]
                }
                self._backend.synchronize_metrics(metrics['val'])
                # TODO specify metric to do scheduling on
                # if self._optimize_first is False, a warning will be raised by the schedulers which suggests that
                # optim.step() is called after scheduler.step(), which would normally result in the first epoch being
                # skipped from the learning rate scheduler. In our case, however, optim.step() was not called because
                # of self._optimize_first is False, and the epoch counter should indeed be increased.
                if epoch == 0 and not self._optimize_first:
                    warnings.simplefilter(action='ignore', category=UserWarning)
                    self._backend.scheduler_step(metrics['val'][self._optimize_metric])
                    warnings.filterwarnings('default')
                else:
                    self._backend.scheduler_step(metrics['val'][self._optimize_metric])
                self._logger.after_pass(metrics['val'], epoch, is_validate=True)
                status_str = f'[Epoch {epoch} / Val] ' \
                             + ' '.join([f'{k}: {std_round(v)}' for k, v in metrics['val'].items()])
                t.success(status_str)
            # first validation metric becomes the checkpointing criterion by default
            if self._checkpoint_metric is None:
                metric = list(val_metrics.keys())[0]
                writer.info(f'Selected metric "{metric}" for checkpointing')
                self._checkpoint_metric = metric
            is_best = (self._smaller_is_better and metrics['val'][self._checkpoint_metric] < best_val) or \
                      (not self._smaller_is_better and metrics['val'][self._checkpoint_metric] > best_val)
            # do checkpointing
            if is_main and (is_best or epoch % self._checkpoint_every == 0):
                with writer.task(f'Saving checkpoint'):
                    checkpoint = {
                        'model': model.state_dict(),
                        'optimizers': {name: optim.state_dict() for name, optim in self._backend.optimizers.items()},
                        'schedulers': {name: sched.state_dict() for name, sched in self._backend.schedulers.items()},
                        'next_epoch': epoch + 1
                    }
                    path = self.checkpoints_path / f'epoch_{epoch}.pt'
                    torch.save(checkpoint, path)
                    # 'latest' and 'best' are maintained as symlinks into the
                    # per-epoch checkpoint files
                    latest_path = self.checkpoints_path / 'latest'
                    best_path = self.checkpoints_path / 'best'
                    if latest_path.is_symlink():
                        # delete previous latest checkpoint
                        checkpoint_file = latest_path.resolve()
                        latest_path.unlink()
                        if not (best_path.is_symlink() and best_path.resolve() == checkpoint_file):
                            # best_path symlink does not link to this checkpoint, so we can delete it
                            checkpoint_file.unlink()
                    # create new latest symlink
                    latest_path.symlink_to(path.name)
                    if is_best:
                        # delete old best checkpoint and symlink new one
                        if best_path.is_symlink():
                            checkpoint_file = best_path.resolve()
                            best_path.unlink()
                            checkpoint_file.unlink()
                        best_path.symlink_to(path.name)
                        best_val = metrics['val'][self._checkpoint_metric]
        writer.success(f'Training finished')
def _init_loggers(self, is_main):
if self._loggers is None:
self._loggers = []
if not isinstance(self._loggers, (list, tuple)):
self._loggers = [self._loggers]
if is_main:
self._logger = LoggerSet([StandardLogger()] + self._loggers)
else:
self._logger = LoggerSet([])
self._logger.setup(self._save_path, self._run_name, self._resume_checkpoint is not None)
def get_raw_config(self):
return self._config.get_raw_config()
def __getitem__(self, item):
return self._config[item]
    @staticmethod
    def seed_all(seed):
        """Convenience wrapper around the module-level seed_all helper.

        Presumably seeds all relevant RNGs — exact scope is defined in
        utils.seed_all (not visible here); confirm there.
        """
        seed_all(seed)
    @staticmethod
    def set_deterministic(deterministic):
        """Convenience wrapper around the module-level set_deterministic helper."""
        set_deterministic(deterministic)
    @functools.wraps(run)
    def __call__(self, *args, **kwargs):
        """Make the Run instance directly callable; forwards everything to run()."""
        self.run(*args, **kwargs)
# DECORATORS #
# TODO docstrings
    def init(self, f):
        """Decorator: register *f* under the 'init' hook; returns f unchanged."""
        self._bound_functions['init'] = f
        return f
    def train_step(self, f):
        """Decorator: register *f* as the per-batch training step; returns f unchanged."""
        self._bound_functions['train_step'] = f
        return f
    def after_train_step(self, f):
        """Decorator: register *f* to run after each training step; returns f unchanged."""
        self._bound_functions['after_train_step'] = f
        return f
    def validate_step(self, f):
        """Decorator: register *f* as the per-batch validation step (key 'val_step')."""
        self._bound_functions['val_step'] = f
        return f
    def train_epoch(self, f):
        """Decorator: register *f* under the 'train_epoch' hook; returns f unchanged."""
        self._bound_functions['train_epoch'] = f
        return f
    def validate_epoch(self, f):
        """Decorator: register *f* under the 'val_epoch' hook; returns f unchanged."""
        self._bound_functions['val_epoch'] = f
        return f
    def configure_optimizers(self, f):
        """Decorator: register *f* as the optimizer/scheduler factory; returns f unchanged."""
        self._bound_functions['configure_optimizers'] = f
        return f
| from datetime import datetime
from typing import Optional, List, Union
from pathlib import Path
import random
import functools
import warnings
from contextlib import nullcontext
import numpy as np
import torch
from torch.utils.data import DataLoader
from coolname import generate_slug, replace_random
from . import _writer as writer
from .backends.cpu_backend import CPUBackend
from .backends.gpu_backend import GPUBackend
from .bound_functions import call
from .utils import get_highest_run, std_round, seed_all, set_deterministic
from .config import TrainingConfig
from .bound_functions import BoundFunctions
from .loggers import BaseLogger, LoggerSet, StandardLogger
class Run:
"""
Represents an individual training run.
"""
def __init__(self, config_files: Optional[List] = None, random_seed: int = None):
self._bound_functions = BoundFunctions()
self._config = None
self._backend = None
self._logger = None
self.train_loader = None
self.val_loader = None
self._loggers = None
self._max_epochs = None
self._use_gpu = None
self._resume_checkpoint = None
self._save_path = None
self.checkpoints_path = None
self._run_name = None
self._optimize_metric = None
self._checkpoint_metric = None
self._checkpoint_every = None
self._smaller_is_better = None # TODO state which to minimize/checkpoint on in result dict
self._optimize_first = None
self._enable_amp = None
self._detect_anomalies = None
self._is_validate = None
self._start_epoch = 0
self._is_main_node = None
self._config = TrainingConfig([] if config_files is None else config_files)
self.random_seed = random_seed
if random_seed:
seed_all(random_seed)
# TODO types, docstrings
# TODO pin_memory
# todo clear cache before start
# todo hooks
# todo cleanup code (extra files for optim, devices etc.)
# todo save on ctrl-C
# todo look at pl GPUbackend (amp optimizuation etc)
def run(self,
model: torch.nn.Module,
train_loader: DataLoader,
val_loader: DataLoader,
*,
loggers: Optional[List[BaseLogger]] = None,
max_epochs=1,
use_gpu=True,
num_nodes=1,
num_gpus_per_node=1,
node_rank=0,
ddp_backend='nccl',
ddp_init_method='env://',
ddp_find_unused_parameters=False,
resume_checkpoint: Optional[Union[str, Path]] = None,
save_path='train_logs',
run_name=None,
optimize_metric=None,
checkpoint_metric=None,
checkpoint_every=1,
smaller_is_better=True,
optimize_first=False,
enable_amp=False,
detect_anomalies=False
):
"""
Starts the training run.
Args:
model:
train_loader:
val_loader:
loggers: list of loggers that subscribe to various logging events
max_epochs:
use_gpu:
num_nodes:
num_gpus_per_node:
node_rank: when num_nodes > 1, this specifies the ordinal number of the current node within all nodes
ddp_backend:
ddp_init_method:
ddp_find_unused_parameters:
resume_checkpoint: path to checkpoint to resume training from
save_path: path to directory that blowtorch will save logs and checkpoints to
run_name: name associated with this run, will be randomly created if None
optimize_metric: train metric that will be used for optimization, will pick the first returned one if None
checkpoint_metric: validation metric that will be used for checkpointing, will pick the first returned one
if None
checkpoint_every: every checkpoint_every epochs a checkpoint is saved, disregarding performance of the
current model. This way it's always possible to resume the run from the latest (or near-latest) state
smaller_is_better: ``True`` if we want to minimize, ``False`` if maximize
optimize_first: whether optimization should occur during the first epoch
enable_amp:
detect_anomalies: enable autograd anomaly detection
"""
self.train_loader = train_loader
self.val_loader = val_loader
self._loggers = loggers
self._max_epochs = max_epochs
self._use_gpu = use_gpu
self._resume_checkpoint = resume_checkpoint
self._run_name = run_name
self._save_path = save_path
self._optimize_metric = optimize_metric
self._checkpoint_metric = checkpoint_metric
self._checkpoint_every = checkpoint_every
self._smaller_is_better = smaller_is_better
self._optimize_first = optimize_first
self._enable_amp = enable_amp
self._detect_anomalies = detect_anomalies
self._save_path = Path(self._save_path)
self._save_path.mkdir(parents=True, exist_ok=True)
self._is_main_node = num_nodes == 1 or node_rank == 0
# assign new random.Random() instance to coolname, such that slugs are different even though we have seeded
replace_random(random.Random())
if self._resume_checkpoint:
if self._run_name is not None:
raise ValueError('A run name cannot be specified when resuming from a previous run.')
self._resume_checkpoint = Path(self._resume_checkpoint)
if not self._resume_checkpoint.is_dir() or not (self._resume_checkpoint / 'checkpoints').exists():
raise ValueError('Path to resume from should be the parent directory of the "checkpoints" folder.')
self._run_name = self._resume_checkpoint.stem.split('_')[-1]
self._save_path = self._resume_checkpoint
else:
if self._run_name is None:
self._run_name = generate_slug(2)
assert '_' not in self._run_name
elif '_' in self._run_name:
raise ValueError('Run name cannot contain "_".')
# append consecutive number to run name
self._run_name += f'-{get_highest_run(self._save_path) + 1}'
self._save_path = self._save_path / (datetime.now().strftime("%y-%m-%d_%H-%M-%S") + '_' + self._run_name)
self._save_path.mkdir(parents=True, exist_ok=False)
self.checkpoints_path = self._save_path / 'checkpoints'
self.checkpoints_path.mkdir(exist_ok=True)
# backend initialization
try:
if self._use_gpu:
self._backend = GPUBackend(num_nodes, num_gpus_per_node, node_rank, ddp_backend,
ddp_find_unused_parameters, ddp_init_method, enable_amp)
else:
self._backend = CPUBackend()
except Exception as e:
writer.error(str(e))
raise
writer.info(f'Using {self._backend}')
checkpoint = None
if self._is_main_node:
if self._resume_checkpoint:
# only need to pass weights on main process, for it is distributed to the other nodes automatically
writer.info(f'Resuming training from checkpoint {self._resume_checkpoint}')
checkpoint = torch.load(self.checkpoints_path / 'latest', map_location='cpu')
self._start_epoch = checkpoint['epoch']
if not self._optimize_first and self._start_epoch == 0:
writer.info('Not optimizing during first epoch')
self._backend.dispatch(model, self._train_fn, self._bound_functions['configure_optimizers'], checkpoint)
def _train_fn(self, model, rank):
if self.random_seed:
# we want every training process to have a different, but deterministic random seed
seed_all(self.random_seed + rank)
is_main = rank == 0
best_val = float('inf') if self._smaller_is_better else 0.
did_warn_train_metrics = False
self._init_loggers(is_main)
self._logger.before_training_start(self._config.get_raw_config(), model, self._bound_functions)
# give backend the chance to wrap dataloaders, e.g. with samplers for multi-process training
train_loader, val_loader = self._backend.prepare_data_loaders(self.train_loader, self.val_loader)
for epoch in range(self._start_epoch, self._start_epoch + self._max_epochs):
# stores metrics of current epoch
metrics = {}
# ===== TRAINING ==== #
model.train()
torch.set_grad_enabled(True)
with writer.task(f'Training epoch {epoch}') as t:
step_metrics = []
for batch in t.tqdm(train_loader):
batch = self._backend.to_device(batch)
with torch.autograd.set_detect_anomaly(self._detect_anomalies) if is_main else nullcontext():
# don't calculate grads if we're in epoch zero and not optimizing
torch.set_grad_enabled(self._optimize_first or epoch > 0)
with self._backend.get_train_step_context():
train_metrics = call(
self._bound_functions['train_step'],
batch=batch,
model=model,
is_validate=False,
device=self._backend.device,
epoch=epoch
)
if not isinstance(train_metrics, dict):
if not did_warn_train_metrics:
writer.warning('Received a single return value from `train_step`, assuming '
'"loss". Return a dict to explicitly name the metric(s).')
did_warn_train_metrics = True
train_metrics = {'loss': train_metrics}
if self._optimize_metric is None:
metric = list(train_metrics.keys())[0] # TODO possibility to state which one to optimize
writer.info(f'Selected metric "{metric}" for minimization')
self._optimize_metric = metric
if self._optimize_first or epoch > 0:
self._backend.optim_step(train_metrics[self._optimize_metric])
t.set_current_metrics({
self._optimize_metric: std_round(train_metrics[self._optimize_metric].item())})
step_metrics.append({k: float(v) for k, v in train_metrics.items()})
if 'after_train_step' in self._bound_functions:
call(
self._bound_functions['after_train_step'],
model=model,
is_validate=False,
device=self._backend.device,
epoch=epoch,
metrics=step_metrics[-1]
)
# calculate mean metrics
metrics['train'] = {
metric: np.array([dic[metric] for dic in step_metrics]).mean() for metric in step_metrics[0]
}
# give backend the possibility to synchronize metrics across multiple processes, blocking
self._backend.synchronize_metrics(metrics['train'])
self._logger.after_pass(metrics['train'], epoch, is_validate=False)
status_str = f'[Epoch {epoch} / Train] ' \
+ ' '.join([f'{k}: {std_round(v)}' for k, v in metrics['train'].items()])
t.success(status_str)
# ===== VALIDATION ==== #
model.eval()
torch.set_grad_enabled(False)
with writer.task(f'Validating epoch {epoch}') as t:
step_metrics = []
for batch in t.tqdm(val_loader):
batch = self._backend.to_device(batch)
with self._backend.get_val_step_context():
val_metrics = call(
self._bound_functions['val_step'],
batch=batch,
model=model,
is_validate=True,
device=self._backend.device,
epoch=epoch
)
if not isinstance(val_metrics, dict):
val_metrics = {'loss': val_metrics}
t.set_current_metrics({
self._optimize_metric: std_round(val_metrics[self._optimize_metric].item())})
step_metrics.append({k: float(v) for k, v in val_metrics.items()})
if 'after_val_step' in self._bound_functions:
call(
self._bound_functions['after_val_step'],
model=model,
is_validate=True,
device=self._backend.device,
epoch=epoch,
metrics=step_metrics[-1]
)
metrics['val'] = {
metric: np.array([dic[metric] for dic in step_metrics]).mean() for metric in step_metrics[0]
}
self._backend.synchronize_metrics(metrics['val'])
# TODO specify metric to do scheduling on
# if self._optimize_first is False, a warning will be raised by the schedulers which suggests that
# optim.step() is called after scheduler.step(), which would normally result in the first epoch being
# skipped from the learning rate scheduler. In our case, however, optim.step() was not called because
# of self._optimize_first is False, and the epoch counter should indeed be increased.
if epoch == 0 and not self._optimize_first:
warnings.simplefilter(action='ignore', category=UserWarning)
self._backend.scheduler_step(metrics['val'][self._optimize_metric])
warnings.filterwarnings('default')
else:
self._backend.scheduler_step(metrics['val'][self._optimize_metric])
self._logger.after_pass(metrics['val'], epoch, is_validate=True)
status_str = f'[Epoch {epoch} / Val] ' \
+ ' '.join([f'{k}: {std_round(v)}' for k, v in metrics['val'].items()])
t.success(status_str)
if self._checkpoint_metric is None:
metric = list(val_metrics.keys())[0]
writer.info(f'Selected metric "{metric}" for checkpointing')
self._checkpoint_metric = metric
is_best = (self._smaller_is_better and metrics['val'][self._checkpoint_metric] < best_val) or \
(not self._smaller_is_better and metrics['val'][self._checkpoint_metric] > best_val)
# do checkpointing
if is_main and (is_best or epoch % self._checkpoint_every == 0):
with writer.task(f'Saving checkpoint'):
checkpoint = {
'model': model.state_dict(),
'optimizers': {name: optim.state_dict() for name, optim in self._backend.optimizers.items()},
'schedulers': {name: sched.state_dict() for name, sched in self._backend.schedulers.items()},
'next_epoch': epoch + 1
}
path = self.checkpoints_path / f'epoch_{epoch}.pt'
torch.save(checkpoint, path)
latest_path = self.checkpoints_path / 'latest'
best_path = self.checkpoints_path / 'best'
if latest_path.is_symlink():
# delete previous latest checkpoint
checkpoint_file = latest_path.resolve()
latest_path.unlink()
if not (best_path.is_symlink() and best_path.resolve() == checkpoint_file):
# best_path symlink does not link to this checkpoint, so we can delete it
checkpoint_file.unlink()
# create new latest symlink
latest_path.symlink_to(path.name)
if is_best:
# delete old best checkpoint and symlink new one
if best_path.is_symlink():
checkpoint_file = best_path.resolve()
best_path.unlink()
checkpoint_file.unlink()
best_path.symlink_to(path.name)
best_val = metrics['val'][self._checkpoint_metric]
writer.success(f'Training finished')
def _init_loggers(self, is_main):
if self._loggers is None:
self._loggers = []
if not isinstance(self._loggers, (list, tuple)):
self._loggers = [self._loggers]
if is_main:
self._logger = LoggerSet([StandardLogger()] + self._loggers)
else:
self._logger = LoggerSet([])
self._logger.setup(self._save_path, self._run_name, self._resume_checkpoint is not None)
def get_raw_config(self):
return self._config.get_raw_config()
def __getitem__(self, item):
return self._config[item]
@staticmethod
def seed_all(seed):
seed_all(seed)
@staticmethod
def set_deterministic(deterministic):
set_deterministic(deterministic)
@functools.wraps(run)
def __call__(self, *args, **kwargs):
self.run(*args, **kwargs)
# DECORATORS #
# TODO docstrings
def init(self, f):
self._bound_functions['init'] = f
return f
def train_step(self, f):
self._bound_functions['train_step'] = f
return f
def after_train_step(self, f):
self._bound_functions['after_train_step'] = f
return f
def validate_step(self, f):
self._bound_functions['val_step'] = f
return f
def train_epoch(self, f):
self._bound_functions['train_epoch'] = f
return f
def validate_epoch(self, f):
self._bound_functions['val_epoch'] = f
return f
def configure_optimizers(self, f):
self._bound_functions['configure_optimizers'] = f
return f
| en | 0.839595 | Represents an individual training run. # TODO state which to minimize/checkpoint on in result dict # TODO types, docstrings # TODO pin_memory # todo clear cache before start # todo hooks # todo cleanup code (extra files for optim, devices etc.) # todo save on ctrl-C # todo look at pl GPUbackend (amp optimizuation etc) Starts the training run. Args: model: train_loader: val_loader: loggers: list of loggers that subscribe to various logging events max_epochs: use_gpu: num_nodes: num_gpus_per_node: node_rank: when num_nodes > 1, this specifies the ordinal number of the current node within all nodes ddp_backend: ddp_init_method: ddp_find_unused_parameters: resume_checkpoint: path to checkpoint to resume training from save_path: path to directory that blowtorch will save logs and checkpoints to run_name: name associated with this run, will be randomly created if None optimize_metric: train metric that will be used for optimization, will pick the first returned one if None checkpoint_metric: validation metric that will be used for checkpointing, will pick the first returned one if None checkpoint_every: every checkpoint_every epochs a checkpoint is saved, disregarding performance of the current model. This way it's always possible to resume the run from the latest (or near-latest) state smaller_is_better: ``True`` if we want to minimize, ``False`` if maximize optimize_first: whether optimization should occur during the first epoch enable_amp: detect_anomalies: enable autograd anomaly detection # assign new random.Random() instance to coolname, such that slugs are different even though we have seeded # append consecutive number to run name # backend initialization # only need to pass weights on main process, for it is distributed to the other nodes automatically # we want every training process to have a different, but deterministic random seed # give backend the chance to wrap dataloaders, e.g. 
with samplers for multi-process training # stores metrics of current epoch # ===== TRAINING ==== # # don't calculate grads if we're in epoch zero and not optimizing # TODO possibility to state which one to optimize # calculate mean metrics # give backend the possibility to synchronize metrics across multiple processes, blocking # ===== VALIDATION ==== # # TODO specify metric to do scheduling on # if self._optimize_first is False, a warning will be raised by the schedulers which suggests that # optim.step() is called after scheduler.step(), which would normally result in the first epoch being # skipped from the learning rate scheduler. In our case, however, optim.step() was not called because # of self._optimize_first is False, and the epoch counter should indeed be increased. # do checkpointing # delete previous latest checkpoint # best_path symlink does not link to this checkpoint, so we can delete it # create new latest symlink # delete old best checkpoint and symlink new one # DECORATORS # # TODO docstrings | 1.835208 | 2 |
core/exploration.py | htdt/diqn | 5 | 6614698 | <reponame>htdt/diqn<gh_stars>1-10
from dataclasses import dataclass
import numpy as np
@dataclass
class DecayingEpsilon:
    """Linearly decaying exploration-rate schedule.

    The rate starts at 1.0, stays there through the warmup phase, then
    decays linearly over ``decay_period`` iterations down to ``epsilon``.
    """
    epsilon: float       # final exploration rate after the decay finishes
    warmup: int          # iterations before the decay starts counting down
    decay_period: float  # length of the linear decay, in iterations
    n_iter: int = 0      # current training iteration

    def update(self, n_iter):
        """Record the current training iteration."""
        self.n_iter = n_iter

    def __call__(self):
        """Return the exploration rate for the current iteration."""
        remaining = self.decay_period + self.warmup - self.n_iter
        extra = np.clip((1.0 - self.epsilon) * remaining / self.decay_period,
                        0., 1. - self.epsilon)
        return self.epsilon + extra
| from dataclasses import dataclass
import numpy as np
@dataclass
class DecayingEpsilon:
epsilon: float
warmup: int
decay_period: float
n_iter: int = 0
def update(self, n_iter):
self.n_iter = n_iter
def __call__(self):
steps_left = self.decay_period + self.warmup - self.n_iter
bonus = (1.0 - self.epsilon) * steps_left / self.decay_period
bonus = np.clip(bonus, 0., 1. - self.epsilon)
return self.epsilon + bonus | none | 1 | 2.615499 | 3 | |
archive/ma-demo.py | tsherburne/ma_sim | 0 | 6614699 | #!/usr/bin/python3
import simpy
import random
import logging
import logging.handlers
import sys
# Setup Logger
# configure a stdout logger for the simulation's f1/f2/f3 work units
logger = logging.getLogger("SimPy")
logger.setLevel(logging.DEBUG)
ls = logging.StreamHandler(sys.stdout)
ls.setLevel(logging.INFO)  # handler filters to INFO+ even though the logger allows DEBUG
logFormat = logging.Formatter('%(asctime)s : %(name)s : %(levelname)s : %(message)s')
ls.setFormatter(logFormat)
logger.addHandler(ls)
# Initialize SimPy
env = simpy.Environment()
def f0(env, level):
    """Root process: launch b1, b2 and b3 concurrently and wait for all three."""
    print("Start %s at %d" % (level, env.now))
    children = [
        env.process(b1(env, level + ".b1")),
        env.process(b2(env, level + ".b2")),
        env.process(b3(env, level + ".b3")),
    ]
    yield children[0] & children[1] & children[2]
    print("Finish %s at %d" % (level, env.now))
def b1(env, level):
    """Run f1, then f2, then f3 strictly in sequence."""
    print("Start %s at %d" % (level, env.now))
    first = env.process(f1(env, level + ".f1"))
    yield first
    second = env.process(f2(env, level + ".f2"))
    yield second
    third = env.process(f3(env, level + ".f3"))
    # first and second are already done here, so this effectively waits on third
    yield first & second & third
    print("Finish %s at %d" % (level, env.now))
def b2(env, level):
    """Run f3, then f2, then f1 strictly in sequence (reverse of b1)."""
    print("Start %s at %d" % (level, env.now))
    third = env.process(f3(env, level + ".f3"))
    yield third
    second = env.process(f2(env, level + ".f2"))
    yield second
    first = env.process(f1(env, level + ".f1"))
    # second and third are already done here, so this effectively waits on first
    yield first & second & third
    print("Finish %s at %d" % (level, env.now))
def b3(env, level):
    """Pick one of f1/f2/f3 at random (f3 is weighted rare) and run it."""
    print("Start %s at %d" % (level, env.now))
    names = ["f1", "f2", "f3"]
    picked = random.choices(names, weights=[10, 10, 1], k=1)
    chosen = globals()[picked[0]]
    print("Selected: " + str(chosen))
    yield env.process(chosen(env, level + "." + picked[0]))
    print("Finish %s at %d" % (level, env.now))
def f1(env, level):
    """Work unit lasting 3 simulated time units."""
    logger.info("Start %s at %d", level, env.now)
    yield env.timeout(3)
    logger.info("Finish %s at %d", level, env.now)
def f2(env, level):
    """Work unit lasting 2 simulated time units."""
    logger.info("Start %s at %d", level, env.now)
    yield env.timeout(2)
    logger.info("Finish %s at %d", level, env.now)
def f3(env, level):
    """Work unit lasting 1 simulated time unit."""
    logger.info("Start %s at %d", level, env.now)
    yield env.timeout(1)
    logger.info("Finish %s at %d", level, env.now)
# kick off the root process and run the simulation for 10 time units
env.process(f0(env, "f0"))
env.run(until=10)
import simpy
import random
import logging
import logging.handlers
import sys
# Setup Logger
logger = logging.getLogger("SimPy")
logger.setLevel(logging.DEBUG)
ls = logging.StreamHandler(sys.stdout)
ls.setLevel(logging.INFO)
logFormat = logging.Formatter('%(asctime)s : %(name)s : %(levelname)s : %(message)s')
ls.setFormatter(logFormat)
logger.addHandler(ls)
# Initialize SimPy
env = simpy.Environment()
def f0(env, level):
print("Start %s at %d" % (level, env.now))
eb1 = env.process(b1(env, level + ".b1"))
eb2 = env.process(b2(env, level + ".b2"))
eb3 = env.process(b3(env, level + ".b3"))
yield eb1 & eb2 & eb3
print("Finish %s at %d" % (level, env.now))
def b1(env, level):
print("Start %s at %d" % (level, env.now))
e1 = env.process(f1(env, level + ".f1"))
yield e1
e2 = env.process(f2(env, level + ".f2"))
yield e2
e3 = env.process(f3(env, level + ".f3"))
yield e1 & e2 & e3
print("Finish %s at %d" % (level, env.now))
def b2(env, level):
print("Start %s at %d" % (level, env.now))
e3 = env.process(f3(env, level + ".f3"))
yield e3
e2 = env.process(f2(env, level + ".f2"))
yield e2
e1 = env.process(f1(env, level + ".f1"))
yield e1 & e2 & e3
print("Finish %s at %d" % (level, env.now))
def b3(env, level):
print("Start %s at %d" % (level, env.now))
funcList = ["f1", "f2", "f3"]
selected = random.choices(funcList, weights = [10, 10, 1], k = 1)
selectedFunc = globals()[selected[0]]
print("Selected: " + str(selectedFunc))
e = env.process(selectedFunc(env, level + "." + selected[0]))
yield e
print("Finish %s at %d" % (level, env.now))
def f1(env, level):
logger.info("Start %s at %d" % (level, env.now))
yield env.timeout(3)
logger.info("Finish %s at %d" % (level, env.now))
def f2(env, level):
logger.info("Start %s at %d" % (level, env.now))
yield env.timeout(2)
logger.info("Finish %s at %d" % (level, env.now))
def f3(env, level):
logger.info("Start %s at %d" % (level, env.now))
yield env.timeout(1)
logger.info("Finish %s at %d" % (level, env.now))
env.process(f0(env, "f0"))
env.run(until=10) | en | 0.220974 | #!/usr/bin/python3 # Setup Logger # Initialize SimPy | 2.35415 | 2 |
setup.py | Tal-Leibman/scrapy-selenium-middleware | 6 | 6614700 | from setuptools import setup, find_packages
with open("README.md") as readme_file:
README = readme_file.read()
setup_args = dict(
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/Tal-Leibman/scrapy-selenium-middleware",
name="scrapy_selenium_middleware",
version="0.0.5",
description="""Scrapy middleware for downloading a page html source using selenium,
and interacting with the web driver in the request context
eventually returning an HtmlResponse to the spider
""",
long_description=README,
keywords=[
"scrapy",
"selenium",
"middleware",
"proxy",
"web scraping",
"render javascript",
"selenium-wire",
"headless browser",
],
long_description_content_type="text/markdown",
packages=find_packages(),
)
install_requires = [
"scrapy==2.4.0",
"selenium-wire==2.1.1",
"selenium==3.141.0",
]
if __name__ == "__main__":
setup(**setup_args, install_requires=install_requires)
| from setuptools import setup, find_packages
with open("README.md") as readme_file:
README = readme_file.read()
setup_args = dict(
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/Tal-Leibman/scrapy-selenium-middleware",
name="scrapy_selenium_middleware",
version="0.0.5",
description="""Scrapy middleware for downloading a page html source using selenium,
and interacting with the web driver in the request context
eventually returning an HtmlResponse to the spider
""",
long_description=README,
keywords=[
"scrapy",
"selenium",
"middleware",
"proxy",
"web scraping",
"render javascript",
"selenium-wire",
"headless browser",
],
long_description_content_type="text/markdown",
packages=find_packages(),
)
install_requires = [
"scrapy==2.4.0",
"selenium-wire==2.1.1",
"selenium==3.141.0",
]
if __name__ == "__main__":
setup(**setup_args, install_requires=install_requires)
| en | 0.815873 | Scrapy middleware for downloading a page html source using selenium, and interacting with the web driver in the request context eventually returning an HtmlResponse to the spider | 1.618094 | 2 |
runPlannerSummary.py | asa-leholland/planner-daily-summary | 0 | 6614701 | <gh_stars>0
from openpyxl import Workbook
from openpyxl import load_workbook
from openpyxl.styles import Font
from openpyxl.utils import get_column_letter
import sys
import pandas as pd
from datetime import date, datetime, timedelta
pd.set_option('display.max_columns', None)
def summarize_planner_export(sample_filepath):
    """Full pipeline: filter the Planner export, clean it, then format the summary."""
    filtered = pre_process(sample_filepath)
    cleaned = post_processing(filtered)
    format_final_result(cleaned)
    return
def printCols(df):
print(df.columns)
def pre_process(infile):
# default columns
# ['Task ID', 'Task Name', 'Bucket Name', 'Progress', 'Priority',
# 'Assigned To', 'Created By', 'Created Date', 'Start Date', 'Due Date',
# 'Late', 'Completed Date', 'Completed By', 'Description',
# 'Completed Checklist Items', 'Checklist Items', 'Labels']
# ['Task ID', 'Task Name', 'Bucket Name', 'Progress', 'Priority',
# 'Assigned To', 'Created By', 'Created Date', 'Start Date', 'Due Date',
# 'Late', 'Completed Date', 'Completed By', 'Description',
# 'Completed Checklist Items', 'Checklist Items', 'Labels']
# TODO: add functionality for Late items
# check if file is open
while True: # repeat until the try statement succeeds
try:
myfile = open(infile, "r+") # or "a+", whatever you need
myfile.close()
break # exit the loop
except IOError:
input("Could not open file! Please close Excel. Press Enter to retry.")
# restart the loop
required_columns = ['Task Name', 'Priority', 'Assigned To', 'Due Date', 'Description']
df = pd.read_excel(infile, usecols=required_columns)
# remove items with no due date
df = df[df['Due Date'].notna()]
# convert due date from sting to datetime
df['Due Date'] = df['Due Date'].astype(str)
# filter to only the work items due today
today = str(date.today().strftime("%m/%d/%Y"))
due_today = df["Due Date"] == today
pre_processed_df = df.loc[due_today]
# tomorrow = date.today() + timedelta(days=1)
# tomorrow = tomorrow.strftime("%m/%d/%Y")
# due_tomorrow = df["Due Date"] == tomorrow
return pre_processed_df
def post_processing(dataframe):
# Add Categories column
post_processed_dataframe = dataframe
# Remove Due Date Columns
post_processed_dataframe.drop(columns='Due Date')
# Clean up Assigned To column to only first names
post_processed_dataframe['Assigned To'] = post_processed_dataframe['Assigned To'].str.replace(' [\w]*;', ', ', regex=True)
post_processed_dataframe['Assigned To'] = post_processed_dataframe['Assigned To'].str.replace(' [\w]*$', '', regex=True)
# TODO: remove populate Category column and drop Description column
# Create custom sort order
df_urgency_order = pd.DataFrame({
'urgency': ['Urgent', 'Important', 'Medium', 'Low'],
})
sort_urgency = df_urgency_order.reset_index().set_index('urgency')
# Create new column for sort order
post_processed_dataframe['urgency_order'] = post_processed_dataframe['Priority'].map(sort_urgency['index'])
# Sort by urgency_order
post_processed_dataframe = post_processed_dataframe.sort_values('urgency_order')
# then by Priority using custom sort 'Urgent', 'Important', 'Medium', 'Low'
post_processed_dataframe = post_processed_dataframe.sort_values('Priority')
return post_processed_dataframe
def format_final_result(dataframe):
today = str(date.today().strftime("%m_%d_%Y"))
# export to excel file
filename = f'Planner Daily Summary {today}.xlsx'
ordered_columns = ['Task Name', 'Priority', 'Assigned To', 'Due Date', 'Description']
dataframe.to_excel(filename, index=False, columns=ordered_columns)
# bold top row
wb = load_workbook(filename=filename)
ws = wb['Sheet1']
bold_font = Font(bold=True)
# Enumerate the cells in the first row
for cell in ws["1:1"]:
cell.font = bold_font
# update column widths
column_widths = []
for row in ws.iter_rows():
for i, cell in enumerate(row):
if len(column_widths) > i:
if cell.value is not None:
if len(cell.value) > column_widths[i]:
column_widths[i] = len(cell.value)
else:
column_widths += [len(cell.value)]
for i, column_width in enumerate(column_widths, 1):
if i == 1:
ws.column_dimensions[get_column_letter(i)].width = round(column_width)
else:
ws.column_dimensions[get_column_letter(i)].width = round(column_width * 1.2)
wb.save(filename=filename)
today = str(date.today().strftime("%m/%d/%Y"))
count = len(dataframe.index)
print(f'Summary created for {count} Planner tasks dated {today}.')
return
if __name__ == "__main__":
SAMPLE_FILEPATH = sys.argv[1]
summarize_planner_export(SAMPLE_FILEPATH)
| from openpyxl import Workbook
from openpyxl import load_workbook
from openpyxl.styles import Font
from openpyxl.utils import get_column_letter
import sys
import pandas as pd
from datetime import date, datetime, timedelta
pd.set_option('display.max_columns', None)
def summarize_planner_export(sample_filepath):
pre_process_result = pre_process(sample_filepath)
post_process_result = post_processing(pre_process_result)
format_final_result(post_process_result)
return
def printCols(df):
print(df.columns)
def pre_process(infile):
# default columns
# ['Task ID', 'Task Name', 'Bucket Name', 'Progress', 'Priority',
# 'Assigned To', 'Created By', 'Created Date', 'Start Date', 'Due Date',
# 'Late', 'Completed Date', 'Completed By', 'Description',
# 'Completed Checklist Items', 'Checklist Items', 'Labels']
# ['Task ID', 'Task Name', 'Bucket Name', 'Progress', 'Priority',
# 'Assigned To', 'Created By', 'Created Date', 'Start Date', 'Due Date',
# 'Late', 'Completed Date', 'Completed By', 'Description',
# 'Completed Checklist Items', 'Checklist Items', 'Labels']
# TODO: add functionality for Late items
# check if file is open
while True: # repeat until the try statement succeeds
try:
myfile = open(infile, "r+") # or "a+", whatever you need
myfile.close()
break # exit the loop
except IOError:
input("Could not open file! Please close Excel. Press Enter to retry.")
# restart the loop
required_columns = ['Task Name', 'Priority', 'Assigned To', 'Due Date', 'Description']
df = pd.read_excel(infile, usecols=required_columns)
# remove items with no due date
df = df[df['Due Date'].notna()]
# convert due date from sting to datetime
df['Due Date'] = df['Due Date'].astype(str)
# filter to only the work items due today
today = str(date.today().strftime("%m/%d/%Y"))
due_today = df["Due Date"] == today
pre_processed_df = df.loc[due_today]
# tomorrow = date.today() + timedelta(days=1)
# tomorrow = tomorrow.strftime("%m/%d/%Y")
# due_tomorrow = df["Due Date"] == tomorrow
return pre_processed_df
def post_processing(dataframe):
# Add Categories column
post_processed_dataframe = dataframe
# Remove Due Date Columns
post_processed_dataframe.drop(columns='Due Date')
# Clean up Assigned To column to only first names
post_processed_dataframe['Assigned To'] = post_processed_dataframe['Assigned To'].str.replace(' [\w]*;', ', ', regex=True)
post_processed_dataframe['Assigned To'] = post_processed_dataframe['Assigned To'].str.replace(' [\w]*$', '', regex=True)
# TODO: remove populate Category column and drop Description column
# Create custom sort order
df_urgency_order = pd.DataFrame({
'urgency': ['Urgent', 'Important', 'Medium', 'Low'],
})
sort_urgency = df_urgency_order.reset_index().set_index('urgency')
# Create new column for sort order
post_processed_dataframe['urgency_order'] = post_processed_dataframe['Priority'].map(sort_urgency['index'])
# Sort by urgency_order
post_processed_dataframe = post_processed_dataframe.sort_values('urgency_order')
# then by Priority using custom sort 'Urgent', 'Important', 'Medium', 'Low'
post_processed_dataframe = post_processed_dataframe.sort_values('Priority')
return post_processed_dataframe
def format_final_result(dataframe):
today = str(date.today().strftime("%m_%d_%Y"))
# export to excel file
filename = f'Planner Daily Summary {today}.xlsx'
ordered_columns = ['Task Name', 'Priority', 'Assigned To', 'Due Date', 'Description']
dataframe.to_excel(filename, index=False, columns=ordered_columns)
# bold top row
wb = load_workbook(filename=filename)
ws = wb['Sheet1']
bold_font = Font(bold=True)
# Enumerate the cells in the first row
for cell in ws["1:1"]:
cell.font = bold_font
# update column widths
column_widths = []
for row in ws.iter_rows():
for i, cell in enumerate(row):
if len(column_widths) > i:
if cell.value is not None:
if len(cell.value) > column_widths[i]:
column_widths[i] = len(cell.value)
else:
column_widths += [len(cell.value)]
for i, column_width in enumerate(column_widths, 1):
if i == 1:
ws.column_dimensions[get_column_letter(i)].width = round(column_width)
else:
ws.column_dimensions[get_column_letter(i)].width = round(column_width * 1.2)
wb.save(filename=filename)
today = str(date.today().strftime("%m/%d/%Y"))
count = len(dataframe.index)
print(f'Summary created for {count} Planner tasks dated {today}.')
return
if __name__ == "__main__":
SAMPLE_FILEPATH = sys.argv[1]
summarize_planner_export(SAMPLE_FILEPATH) | en | 0.573373 | # default columns # ['Task ID', 'Task Name', 'Bucket Name', 'Progress', 'Priority', # 'Assigned To', 'Created By', 'Created Date', 'Start Date', 'Due Date', # 'Late', 'Completed Date', 'Completed By', 'Description', # 'Completed Checklist Items', 'Checklist Items', 'Labels'] # ['Task ID', 'Task Name', 'Bucket Name', 'Progress', 'Priority', # 'Assigned To', 'Created By', 'Created Date', 'Start Date', 'Due Date', # 'Late', 'Completed Date', 'Completed By', 'Description', # 'Completed Checklist Items', 'Checklist Items', 'Labels'] # TODO: add functionality for Late items # check if file is open # repeat until the try statement succeeds # or "a+", whatever you need # exit the loop # restart the loop # remove items with no due date # convert due date from sting to datetime # filter to only the work items due today # tomorrow = date.today() + timedelta(days=1) # tomorrow = tomorrow.strftime("%m/%d/%Y") # due_tomorrow = df["Due Date"] == tomorrow # Add Categories column # Remove Due Date Columns # Clean up Assigned To column to only first names # TODO: remove populate Category column and drop Description column # Create custom sort order # Create new column for sort order # Sort by urgency_order # then by Priority using custom sort 'Urgent', 'Important', 'Medium', 'Low' # export to excel file # bold top row # Enumerate the cells in the first row # update column widths | 2.846823 | 3 |
aiorelax/server.py | hzlmn/aiorelax | 0 | 6614702 | import asyncio
import operator
import warnings
import aiohttp
from yarl import URL
from .client import Client
from .database import Database
from .helpers import match_version
class Server:
couchdb_version = None
def __init__(self, base_url="http://localhost:5984/", auth=None):
self.auth = auth
self.base_url = URL(base_url)
self.client = Client(self.base_url, auth=auth)
async def __aiter__(self):
for db in await self.all_dbs():
resp = await self.client.get(db)
yield (await resp.json())
async def all_dbs(self):
resp = await self.client.get("_all_dbs")
return await resp.json()
async def info(self):
resp = await self.client.get("")
return await resp.json()
async def version(self):
if self.couchdb_version is not None:
return self.couchdb_version
info = await self.info()
self.couchdb_version = info["version"]
return self.couchdb_version
def database(self, name):
return Database(self.client(name))
async def stats(self):
resp = await self.client.get("_stats")
return await resp.json()
async def active_tasks(self):
resp = await self.client.get("_active_tasks")
return await resp.json()
async def uuids(self, count=None):
params = {}
if count is not None:
params["count"] = count
resp = await self.client.get("_uuids", params=params)
return await resp.json()
async def membership(self):
resp = await self.client.get("_membership")
return await resp.json()
@match_version("1.6.1", compare=operator.lt)
async def stats(self):
raise NotImplementedError
@match_version("2.0.0")
async def cluster_setup(self, feed=None, timeout=None, heartbeat=None, since=None):
feed_values = ("normal", "longpool", "continuous", "eventsource")
params = {}
if feed is not None:
if feed not in feed_values:
raise ValueError
params["feed"] = feed
if timeout is not None:
params["timeout"] = timeout
if heartbeat is not None:
params["heartbeat"] = heartbeat
if since is not None:
params["since"] = since
resp = await self.client.get("_db_updates", params=params)
async def close(self):
await self.client.close()
| import asyncio
import operator
import warnings
import aiohttp
from yarl import URL
from .client import Client
from .database import Database
from .helpers import match_version
class Server:
couchdb_version = None
def __init__(self, base_url="http://localhost:5984/", auth=None):
self.auth = auth
self.base_url = URL(base_url)
self.client = Client(self.base_url, auth=auth)
async def __aiter__(self):
for db in await self.all_dbs():
resp = await self.client.get(db)
yield (await resp.json())
async def all_dbs(self):
resp = await self.client.get("_all_dbs")
return await resp.json()
async def info(self):
resp = await self.client.get("")
return await resp.json()
async def version(self):
if self.couchdb_version is not None:
return self.couchdb_version
info = await self.info()
self.couchdb_version = info["version"]
return self.couchdb_version
def database(self, name):
return Database(self.client(name))
async def stats(self):
resp = await self.client.get("_stats")
return await resp.json()
async def active_tasks(self):
resp = await self.client.get("_active_tasks")
return await resp.json()
async def uuids(self, count=None):
params = {}
if count is not None:
params["count"] = count
resp = await self.client.get("_uuids", params=params)
return await resp.json()
async def membership(self):
resp = await self.client.get("_membership")
return await resp.json()
@match_version("1.6.1", compare=operator.lt)
async def stats(self):
raise NotImplementedError
@match_version("2.0.0")
async def cluster_setup(self, feed=None, timeout=None, heartbeat=None, since=None):
feed_values = ("normal", "longpool", "continuous", "eventsource")
params = {}
if feed is not None:
if feed not in feed_values:
raise ValueError
params["feed"] = feed
if timeout is not None:
params["timeout"] = timeout
if heartbeat is not None:
params["heartbeat"] = heartbeat
if since is not None:
params["since"] = since
resp = await self.client.get("_db_updates", params=params)
async def close(self):
await self.client.close()
| none | 1 | 2.171319 | 2 | |
reelLib.py | jethrodew/PyReel | 0 | 6614703 | import time
import sys
import feedparser
import config
#Constants
logo_print_speed = 0.002
# Helper Functions
def delay(t):
time.sleep(t)
# Print Functions
def delay_print(s,t=0.03):
for c in s:
sys.stdout.write('%s' % c)
sys.stdout.flush()
time.sleep(t)
sys.stdout.write('\n')
def print_rss(post):
print('\n')
delay_print(post.title)
print('- '*20)
delay_print(post.summary)
def print_weather(obs,fore):
print('\n')
print('- '*20)
delay_print("Current Temperature ("+ config.region_name +")")
print('- '*10)
delay_print(obs.entries[0].title.replace('°',''))
print('\n')
print('- '*20)
delay_print("3 Day Forecast ("+ config.region_name +")")
print('- '*10)
for post in fore.entries:
delay_print (post.title.replace('°',''))
print('\n')
#Logo functions
def bbc_logo():
print("\n")
print("\n")
delay_print('88888888ba 88888888ba ,ad8888ba, ',logo_print_speed)
delay_print('88 "8b 88 "8b d8"\' `"8b',logo_print_speed)
delay_print('88 ,8P 88 ,8P d8\' ',logo_print_speed)
delay_print('88aaaaaa8P\' 88aaaaaa8P\' 88 ',logo_print_speed)
delay_print('88""""""8b, 88""""""8b, 88 ',logo_print_speed)
delay_print('88 `8b 88 `8b Y8, ',logo_print_speed)
delay_print('88 a8P 88 a8P Y8a. .a8P',logo_print_speed)
delay_print('88888888P" 88888888P" `"Y8888Y"\' ',logo_print_speed)
def bbc_news_logo():
print('\n')
print('\n')
delay_print('88888888ba 88888888ba ,ad8888ba, 888b 88 ',logo_print_speed)
delay_print('88 "8b 88 "8b d8"\' `"8b 8888b 88 ',logo_print_speed)
delay_print('88 ,8P 88 ,8P d8\' 88 `8b 88 ',logo_print_speed)
delay_print('88aaaaaa8P\' 88aaaaaa8P\' 88 88 `8b 88 ,adPPYba, 8b db d8 ,adPPYba, ',logo_print_speed)
delay_print('88""""""8b, 88""""""8b, 88 88 `8b 88 a8P_____88 `8b d88b d8\' I8[ "" ',logo_print_speed)
delay_print('88 `8b 88 `8b Y8, 88 `8b 88 8PP""""""" `8b d8\'`8b d8\' `"Y8ba, ',logo_print_speed)
delay_print('88 a8P 88 a8P Y8a. .a8P 88 `8888 "8b, ,aa `8bd8\' `8bd8\' aa ]8I ',logo_print_speed)
delay_print('88888888P" 88888888P" `"Y8888Y"\' 88 `888 `"Ybbd8"\' YP YP `"YbbdP"\' ',logo_print_speed)
def technology_logo():
print("\n")
delay_print('888888888888 88 88 ',logo_print_speed)
delay_print(' 88 88 88 ',logo_print_speed)
delay_print(' 88 88 88 ',logo_print_speed)
delay_print(' 88 ,adPPYba, ,adPPYba, 88,dPPYba, 8b,dPPYba, ,adPPYba, 88 ,adPPYba, ,adPPYb,d8 8b d8 ',logo_print_speed)
delay_print(' 88 a8P_____88 a8" "" 88P\' "8a 88P\' `"8a a8" "8a 88 a8" "8a a8" `Y88 `8b d8\' ',logo_print_speed)
delay_print(' 88 8PP""""""" 8b 88 88 88 88 8b d8 88 8b d8 8b 88 `8b d8\' ',logo_print_speed)
delay_print(' 88 "8b, ,aa "8a, ,aa 88 88 88 88 "8a, ,a8" 88 "8a, ,a8" "8a, ,d88 `8b,d8\' ',logo_print_speed)
delay_print(' 88 `"Ybbd8"\' `"Ybbd8"\' 88 88 88 88 `"YbbdP"\' 88 `"YbbdP"\' `"YbbdP"Y8 Y88\' ',logo_print_speed)
delay_print(' aa, ,88 d8\' ',logo_print_speed)
delay_print(' "Y8bbdP" d8\' ',logo_print_speed)
def weather_logo():
print('\n')
delay_print('I8, 8 ,8I 88 ',logo_print_speed)
delay_print('`8b d8b d8\' ,d 88 ',logo_print_speed)
delay_print(' "8, ,8"8, ,8" 88 88 ',logo_print_speed)
delay_print(' Y8 8P Y8 8P ,adPPYba, ,adPPYYba, MM88MMM 88,dPPYba, ,adPPYba, 8b,dPPYba, ',logo_print_speed)
delay_print(' `8b d8\' `8b d8\' a8P_____88 "" `Y8 88 88P\' "8a a8P_____88 88P\' "Y8 ',logo_print_speed)
delay_print(' `8a a8\' `8a a8\' 8PP""""""" ,adPPPPP88 88 88 88 8PP""""""" 88 ',logo_print_speed)
delay_print(' `8a8\' `8a8\' "8b, ,aa 88, ,88 88, 88 88 "8b, ,aa 88 ',logo_print_speed)
delay_print(' `8\' `8\' `"Ybbd8"\' `"8bbdP"Y8 "Y888 88 88 `"Ybbd8"\' 88 ',logo_print_speed)
# Run Functions
def bbc_news():
f = feedparser.parse('http://feeds.bbci.co.uk/news/rss.xml?edition='+config.news_region) #BBC News Frontpage
bbc_news_logo()
for post in f.entries[:10]:
print_rss(post)
delay(1)
delay(2)
print('\n')
def bbc_technology_news():
f = feedparser.parse('http://feeds.bbci.co.uk/news/technology/rss.xml?edition='+config.news_region) #BBC Technology News
bbc_logo()
technology_logo()
for post in f.entries[:10]:
print_rss(post)
delay(1)
delay(2)
print('\n')
def bbc_weather():
w = feedparser.parse('http://open.live.bbc.co.uk/weather/feeds/en/' + config.region_code + '/3dayforecast.rss') #BBC Weather 3 Day Forecast (Oxford)
o = feedparser.parse('http://open.live.bbc.co.uk/weather/feeds/en/' + config.region_code + '/observations.rss') #BBC Weather Observations (Oxford)
bbc_logo()
weather_logo()
print_weather(o,w)
delay(2)
print('\n')
| import time
import sys
import feedparser
import config
#Constants
logo_print_speed = 0.002
# Helper Functions
def delay(t):
time.sleep(t)
# Print Functions
def delay_print(s,t=0.03):
for c in s:
sys.stdout.write('%s' % c)
sys.stdout.flush()
time.sleep(t)
sys.stdout.write('\n')
def print_rss(post):
print('\n')
delay_print(post.title)
print('- '*20)
delay_print(post.summary)
def print_weather(obs,fore):
print('\n')
print('- '*20)
delay_print("Current Temperature ("+ config.region_name +")")
print('- '*10)
delay_print(obs.entries[0].title.replace('°',''))
print('\n')
print('- '*20)
delay_print("3 Day Forecast ("+ config.region_name +")")
print('- '*10)
for post in fore.entries:
delay_print (post.title.replace('°',''))
print('\n')
#Logo functions
def bbc_logo():
print("\n")
print("\n")
delay_print('88888888ba 88888888ba ,ad8888ba, ',logo_print_speed)
delay_print('88 "8b 88 "8b d8"\' `"8b',logo_print_speed)
delay_print('88 ,8P 88 ,8P d8\' ',logo_print_speed)
delay_print('88aaaaaa8P\' 88aaaaaa8P\' 88 ',logo_print_speed)
delay_print('88""""""8b, 88""""""8b, 88 ',logo_print_speed)
delay_print('88 `8b 88 `8b Y8, ',logo_print_speed)
delay_print('88 a8P 88 a8P Y8a. .a8P',logo_print_speed)
delay_print('88888888P" 88888888P" `"Y8888Y"\' ',logo_print_speed)
def bbc_news_logo():
print('\n')
print('\n')
delay_print('88888888ba 88888888ba ,ad8888ba, 888b 88 ',logo_print_speed)
delay_print('88 "8b 88 "8b d8"\' `"8b 8888b 88 ',logo_print_speed)
delay_print('88 ,8P 88 ,8P d8\' 88 `8b 88 ',logo_print_speed)
delay_print('88aaaaaa8P\' 88aaaaaa8P\' 88 88 `8b 88 ,adPPYba, 8b db d8 ,adPPYba, ',logo_print_speed)
delay_print('88""""""8b, 88""""""8b, 88 88 `8b 88 a8P_____88 `8b d88b d8\' I8[ "" ',logo_print_speed)
delay_print('88 `8b 88 `8b Y8, 88 `8b 88 8PP""""""" `8b d8\'`8b d8\' `"Y8ba, ',logo_print_speed)
delay_print('88 a8P 88 a8P Y8a. .a8P 88 `8888 "8b, ,aa `8bd8\' `8bd8\' aa ]8I ',logo_print_speed)
delay_print('88888888P" 88888888P" `"Y8888Y"\' 88 `888 `"Ybbd8"\' YP YP `"YbbdP"\' ',logo_print_speed)
def technology_logo():
print("\n")
delay_print('888888888888 88 88 ',logo_print_speed)
delay_print(' 88 88 88 ',logo_print_speed)
delay_print(' 88 88 88 ',logo_print_speed)
delay_print(' 88 ,adPPYba, ,adPPYba, 88,dPPYba, 8b,dPPYba, ,adPPYba, 88 ,adPPYba, ,adPPYb,d8 8b d8 ',logo_print_speed)
delay_print(' 88 a8P_____88 a8" "" 88P\' "8a 88P\' `"8a a8" "8a 88 a8" "8a a8" `Y88 `8b d8\' ',logo_print_speed)
delay_print(' 88 8PP""""""" 8b 88 88 88 88 8b d8 88 8b d8 8b 88 `8b d8\' ',logo_print_speed)
delay_print(' 88 "8b, ,aa "8a, ,aa 88 88 88 88 "8a, ,a8" 88 "8a, ,a8" "8a, ,d88 `8b,d8\' ',logo_print_speed)
delay_print(' 88 `"Ybbd8"\' `"Ybbd8"\' 88 88 88 88 `"YbbdP"\' 88 `"YbbdP"\' `"YbbdP"Y8 Y88\' ',logo_print_speed)
delay_print(' aa, ,88 d8\' ',logo_print_speed)
delay_print(' "Y8bbdP" d8\' ',logo_print_speed)
def weather_logo():
print('\n')
delay_print('I8, 8 ,8I 88 ',logo_print_speed)
delay_print('`8b d8b d8\' ,d 88 ',logo_print_speed)
delay_print(' "8, ,8"8, ,8" 88 88 ',logo_print_speed)
delay_print(' Y8 8P Y8 8P ,adPPYba, ,adPPYYba, MM88MMM 88,dPPYba, ,adPPYba, 8b,dPPYba, ',logo_print_speed)
delay_print(' `8b d8\' `8b d8\' a8P_____88 "" `Y8 88 88P\' "8a a8P_____88 88P\' "Y8 ',logo_print_speed)
delay_print(' `8a a8\' `8a a8\' 8PP""""""" ,adPPPPP88 88 88 88 8PP""""""" 88 ',logo_print_speed)
delay_print(' `8a8\' `8a8\' "8b, ,aa 88, ,88 88, 88 88 "8b, ,aa 88 ',logo_print_speed)
delay_print(' `8\' `8\' `"Ybbd8"\' `"8bbdP"Y8 "Y888 88 88 `"Ybbd8"\' 88 ',logo_print_speed)
# Run Functions
def bbc_news():
f = feedparser.parse('http://feeds.bbci.co.uk/news/rss.xml?edition='+config.news_region) #BBC News Frontpage
bbc_news_logo()
for post in f.entries[:10]:
print_rss(post)
delay(1)
delay(2)
print('\n')
def bbc_technology_news():
f = feedparser.parse('http://feeds.bbci.co.uk/news/technology/rss.xml?edition='+config.news_region) #BBC Technology News
bbc_logo()
technology_logo()
for post in f.entries[:10]:
print_rss(post)
delay(1)
delay(2)
print('\n')
def bbc_weather():
w = feedparser.parse('http://open.live.bbc.co.uk/weather/feeds/en/' + config.region_code + '/3dayforecast.rss') #BBC Weather 3 Day Forecast (Oxford)
o = feedparser.parse('http://open.live.bbc.co.uk/weather/feeds/en/' + config.region_code + '/observations.rss') #BBC Weather Observations (Oxford)
bbc_logo()
weather_logo()
print_weather(o,w)
delay(2)
print('\n')
| en | 0.594341 | #Constants # Helper Functions # Print Functions #Logo functions # Run Functions #BBC News Frontpage #BBC Technology News #BBC Weather 3 Day Forecast (Oxford) #BBC Weather Observations (Oxford) | 3.048436 | 3 |
supriya/ugens/CompanderD.py | deeuu/supriya | 0 | 6614704 | from supriya.ugens.PseudoUGen import PseudoUGen
class CompanderD(PseudoUGen):
"""
A convenience constructor for Compander.
"""
### CLASS VARIABLES ###
__documentation_section__ = "Dynamics UGens"
### PUBLIC METHODS ###
@classmethod
def ar(
cls,
source=None,
threshold=0.5,
clamp_time=0.01,
relax_time=0.1,
slope_above=1.0,
slope_below=1.0,
):
"""
Constructs an audio-rate dynamics processor.
.. container:: example
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> compander_d = supriya.ugens.CompanderD.ar(
... source=source,
... )
>>> supriya.graph(compander_d) # doctest: +SKIP
::
>>> print(compander_d)
synthdef:
name: d4e7b88df56af5070a88f09b0f8c633e
ugens:
- In.ar:
bus: 0.0
- DelayN.ar:
delay_time: 0.01
maximum_delay_time: 0.01
source: In.ar[0]
- Compander.ar:
clamp_time: 0.01
control: DelayN.ar[0]
relax_time: 0.1
slope_above: 1.0
slope_below: 1.0
source: In.ar[0]
threshold: 0.5
Returns ugen graph.
"""
import supriya.synthdefs
import supriya.ugens
calculation_rate = supriya.CalculationRate.AUDIO
control = supriya.ugens.DelayN.ar(
source=source, maximum_delay_time=clamp_time, delay_time=clamp_time
)
ugen = supriya.ugens.Compander._new_expanded(
clamp_time=clamp_time,
calculation_rate=calculation_rate,
relax_time=relax_time,
slope_above=slope_above,
slope_below=slope_below,
source=source,
control=control,
threshold=threshold,
)
return ugen
| from supriya.ugens.PseudoUGen import PseudoUGen
class CompanderD(PseudoUGen):
"""
A convenience constructor for Compander.
"""
### CLASS VARIABLES ###
__documentation_section__ = "Dynamics UGens"
### PUBLIC METHODS ###
@classmethod
def ar(
cls,
source=None,
threshold=0.5,
clamp_time=0.01,
relax_time=0.1,
slope_above=1.0,
slope_below=1.0,
):
"""
Constructs an audio-rate dynamics processor.
.. container:: example
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> compander_d = supriya.ugens.CompanderD.ar(
... source=source,
... )
>>> supriya.graph(compander_d) # doctest: +SKIP
::
>>> print(compander_d)
synthdef:
name: d4e7b88df56af5070a88f09b0f8c633e
ugens:
- In.ar:
bus: 0.0
- DelayN.ar:
delay_time: 0.01
maximum_delay_time: 0.01
source: In.ar[0]
- Compander.ar:
clamp_time: 0.01
control: DelayN.ar[0]
relax_time: 0.1
slope_above: 1.0
slope_below: 1.0
source: In.ar[0]
threshold: 0.5
Returns ugen graph.
"""
import supriya.synthdefs
import supriya.ugens
calculation_rate = supriya.CalculationRate.AUDIO
control = supriya.ugens.DelayN.ar(
source=source, maximum_delay_time=clamp_time, delay_time=clamp_time
)
ugen = supriya.ugens.Compander._new_expanded(
clamp_time=clamp_time,
calculation_rate=calculation_rate,
relax_time=relax_time,
slope_above=slope_above,
slope_below=slope_below,
source=source,
control=control,
threshold=threshold,
)
return ugen
| en | 0.489862 | A convenience constructor for Compander. ### CLASS VARIABLES ### ### PUBLIC METHODS ### Constructs an audio-rate dynamics processor. .. container:: example :: >>> source = supriya.ugens.In.ar(bus=0) >>> compander_d = supriya.ugens.CompanderD.ar( ... source=source, ... ) >>> supriya.graph(compander_d) # doctest: +SKIP :: >>> print(compander_d) synthdef: name: d4e7b88df56af5070a88f09b0f8c633e ugens: - In.ar: bus: 0.0 - DelayN.ar: delay_time: 0.01 maximum_delay_time: 0.01 source: In.ar[0] - Compander.ar: clamp_time: 0.01 control: DelayN.ar[0] relax_time: 0.1 slope_above: 1.0 slope_below: 1.0 source: In.ar[0] threshold: 0.5 Returns ugen graph. | 2.533903 | 3 |
examples/other/animation1.py | charliekind/vtkplotter | 0 | 6614705 | <filename>examples/other/animation1.py
"""
This example shows how to animate simultaneously various objects
by specifying event times and durations of the effects
"""
from vedo import *
from vedo.applications import Animation
sp = Sphere(r=0.5).cutWithPlane(origin=(0.15,0,0)).lw(0.1)
cu = Cube().pos(-2,0,0)
tr = Torus().pos(1,0,0).rotateY(80)
plt = Animation()
plt.showProgressBar = True
plt.timeResolution = 0.025 # secs
plt.totalDuration = 4 # can shrink/expand total duration
plt.fadeIn([cu, tr], t=0, duration=0.2)
plt.fadeIn(sp, t=1, duration=2)
plt.move(sp, (2,0,0), style="linear")
plt.rotate(sp, axis="y", angle=180)
plt.fadeOut(sp, t=3, duration=2)
plt.fadeOut(tr, t=4, duration=1)
plt.scale(cu, 0.1, t=5, duration=1)
plt.play()
| <filename>examples/other/animation1.py
"""
This example shows how to animate simultaneously various objects
by specifying event times and durations of the effects
"""
from vedo import *
from vedo.applications import Animation
sp = Sphere(r=0.5).cutWithPlane(origin=(0.15,0,0)).lw(0.1)
cu = Cube().pos(-2,0,0)
tr = Torus().pos(1,0,0).rotateY(80)
plt = Animation()
plt.showProgressBar = True
plt.timeResolution = 0.025 # secs
plt.totalDuration = 4 # can shrink/expand total duration
plt.fadeIn([cu, tr], t=0, duration=0.2)
plt.fadeIn(sp, t=1, duration=2)
plt.move(sp, (2,0,0), style="linear")
plt.rotate(sp, axis="y", angle=180)
plt.fadeOut(sp, t=3, duration=2)
plt.fadeOut(tr, t=4, duration=1)
plt.scale(cu, 0.1, t=5, duration=1)
plt.play()
| en | 0.919967 | This example shows how to animate simultaneously various objects by specifying event times and durations of the effects # secs # can shrink/expand total duration | 3.116632 | 3 |
ct/model/layers/embedding.py | ViktorStagge/CompressiveTransformer | 2 | 6614706 | <reponame>ViktorStagge/CompressiveTransformer
import numpy as np
import itertools
from typing import Tuple, \
Union, \
List
from keras import layers
from keras import activations
from keras import backend as K
from keras.layers import Layer
class ReverseEmbedding(Layer):
def __init__(self,
embedding_layer=None,
activation=None,
embedding_layer_input_dim=None,
**kwargs):
super().__init__(**kwargs)
self.embedding_layer = embedding_layer
self.vocab_size = embedding_layer.get_config()['input_dim']
self.activation = activations.get(activation)
self.trainable = False
def build(self, input_shape):
super().build(input_shape)
def call(self, inputs, **kwargs):
assert len(inputs.shape) == 3, \
'expected 3 dimensions'
if self.embedding_layer is None:
return inputs
input_emb = inputs[:, -1, :]
w_transpose = K.transpose(self.embedding_layer.embeddings)
y = K.dot(input_emb, w_transpose)
if self.activation is not None:
y = self.activation(y)
return y
def compute_output_shape(self, input_shape):
return input_shape[0], self.embedding_layer.input_dim
def get_config(self):
config = super().get_config()
config.update(activation=self.activation)
return config
class RelativeEncoding(Layer):
def __init__(self,
batch_size: int,
verbose: bool = False,
**kwargs):
super().__init__(**kwargs)
self.batch_size = batch_size
self.verbose = verbose
self.sequence_length = None
self.d_model = None
self.encodings = None
self.W_kr = None
def build(self,
input_shape: Tuple):
assert isinstance(input_shape, tuple), \
f'received input_shape={input_shape}. Expected a tuple (i.e. single input).'
assert len(input_shape) == 3, \
f'expected shape with 3 dimensions: (batch_size, sequence_length, dimensions), ' \
f'received shape with {len(input_shape)} dimensions: {input_shape}'
self.sequence_length = input_shape[1]
self.d_model = input_shape[2]
self.W_kr = self.add_weight(name='W_k,r',
shape=input_shape[1:],
initializer='uniform',
trainable=True)
self.encodings = self.create_relative_encodings()
super().build(input_shape)
def call(self, inputs, **kwargs):
y = self.encodings * self.W_kr
if self.verbose:
print(f'{self.__class__.__name__} call:')
print(f' encodings: {self.encodings.shape}')
print(f' W_kr: {self.W_kr.shape}')
print(f' inputs: {inputs.shape}')
# print(f' z: {z.shape}')
print(f' y: {y.shape}')
assert len(inputs.shape) == len(y.shape), \
f'unexpected length for produced output: ' \
f'expected {inputs.shape}, ' \
f'produced {y.shape}'
assert inputs.shape[1:] == y.shape[1:], \
f'unexpected shape for produced output: ' \
f'expected {inputs.shape[1:]}, ' \
f'produced {y.shape[1:]}'
return y
def compute_output_shape(self,
input_shape: Tuple):
return input_shape
def create_positional_encodings(self):
encoding = [PE(pos, l, self.d_model) for pos, l in itertools.product(range(self.sequence_length),
range(self.d_model))]
encoding = np.array(encoding)
encoding = encoding.reshape((self.sequence_length, self.d_model))
return encoding
def create_relative_encodings(self):
encoding = self.create_positional_encodings()
encoding = np.tile(encoding, (self.batch_size, 1, 1))
# encoding = K.variable(encoding)
# encoding._trainable = False
return encoding
def relative_encoding(self,
i: int,
j: int):
assert self.encodings is not None, \
'build the Positional Encoding layer before using it'
delta = i - j
delta = max(0, min(self.sequence_length, delta))
return self.encodings[delta]
def get_config(self):
config = super().get_config()
config.update(batch_size=self.batch_size,
# d_model=self.d_model,
# sequence_length=self.sequence_length,
# encodings=self.encodings,
# W_kr=self.W_kr.numpy() if self.W_kr is not None else None,
verbose=self.verbose)
return config
@staticmethod
def load(path, compile=True):
from keras.models import load_model
ct = load_model(path, custom_objects={}, compile=compile)
return ct
def PE(pos, l, max_dimension):
    """Positional Encoding

    Arguments:
        pos: position in the sequence
        l: dimension, referred to in the paper as "i".
            Changed due to duplicated variable name
        max_dimension: maximum amount of dimensions used
    """
    exponent = (2 * l) / max_dimension
    angle = pos / (10000 ** exponent)
    # Even dimensions use sine, odd dimensions use cosine.
    trig = np.sin if l % 2 == 0 else np.cos
    return trig(angle)
| import numpy as np
import itertools
from typing import Tuple, \
Union, \
List
from keras import layers
from keras import activations
from keras import backend as K
from keras.layers import Layer
class ReverseEmbedding(Layer):
    """Output projection tied to an Embedding layer's weights (weight tying).

    Multiplies the embedding of the final timestep by the transpose of the
    tied Embedding matrix, producing vocabulary-sized logits.
    """

    def __init__(self,
                 embedding_layer=None,
                 activation=None,
                 embedding_layer_input_dim=None,
                 **kwargs):
        super().__init__(**kwargs)
        self.embedding_layer = embedding_layer
        # BUGFIX: the declared default embedding_layer=None previously crashed
        # here (get_config() called on None); fall back to the explicitly
        # supplied vocabulary size instead.
        if embedding_layer is not None:
            self.vocab_size = embedding_layer.get_config()['input_dim']
        else:
            self.vocab_size = embedding_layer_input_dim
        self.activation = activations.get(activation)
        # The tied weights are trained through the Embedding layer itself.
        self.trainable = False

    def build(self, input_shape):
        super().build(input_shape)

    def call(self, inputs, **kwargs):
        # Expects (batch, timesteps, embedding_dim).
        assert len(inputs.shape) == 3, \
            'expected 3 dimensions'
        if self.embedding_layer is None:
            # No tied embedding available: act as identity.
            return inputs
        # Project only the final timestep onto the vocabulary.
        input_emb = inputs[:, -1, :]
        w_transpose = K.transpose(self.embedding_layer.embeddings)
        y = K.dot(input_emb, w_transpose)
        if self.activation is not None:
            y = self.activation(y)
        return y

    def compute_output_shape(self, input_shape):
        # (batch, vocab_size); vocab_size mirrors the Embedding's input_dim.
        return input_shape[0], self.vocab_size

    def get_config(self):
        config = super().get_config()
        # FIX: serialize the activation by name instead of storing the raw
        # function object, so the config stays JSON-serializable.
        config.update(activation=activations.serialize(self.activation))
        return config
class RelativeEncoding(Layer):
    """Positional/relative encoding layer.

    build() precomputes a fixed sinusoidal table of shape
    (batch_size, sequence_length, d_model); call() returns that table scaled
    element-wise by the trainable matrix W_kr.
    """

    def __init__(self,
                 batch_size: int,
                 verbose: bool = False,
                 **kwargs):
        super().__init__(**kwargs)
        self.batch_size = batch_size
        self.verbose = verbose
        # Filled in by build() once the input shape is known.
        self.sequence_length = None
        self.d_model = None
        self.encodings = None
        self.W_kr = None

    def build(self,
              input_shape: Tuple):
        assert isinstance(input_shape, tuple), \
            f'received input_shape={input_shape}. Expected a tuple (i.e. single input).'
        assert len(input_shape) == 3, \
            f'expected shape with 3 dimensions: (batch_size, sequence_length, dimensions), ' \
            f'received shape with {len(input_shape)} dimensions: {input_shape}'
        self.sequence_length = input_shape[1]
        self.d_model = input_shape[2]
        # Trainable scaling applied on top of the fixed sinusoidal table.
        self.W_kr = self.add_weight(name='W_k,r',
                                    shape=input_shape[1:],
                                    initializer='uniform',
                                    trainable=True)
        self.encodings = self.create_relative_encodings()
        super().build(input_shape)

    def call(self, inputs, **kwargs):
        # Element-wise product of the fixed table with the trainable matrix;
        # `inputs` is only used for shape validation below.
        y = self.encodings * self.W_kr
        if self.verbose:
            print(f'{self.__class__.__name__} call:')
            print(f' encodings: {self.encodings.shape}')
            print(f' W_kr: {self.W_kr.shape}')
            print(f' inputs: {inputs.shape}')
            # print(f' z: {z.shape}')
            print(f' y: {y.shape}')
        # Output must keep the input's trailing (sequence_length, d_model) shape.
        assert len(inputs.shape) == len(y.shape), \
            f'unexpected length for produced output: ' \
            f'expected {inputs.shape}, ' \
            f'produced {y.shape}'
        assert inputs.shape[1:] == y.shape[1:], \
            f'unexpected shape for produced output: ' \
            f'expected {inputs.shape[1:]}, ' \
            f'produced {y.shape[1:]}'
        return y

    def compute_output_shape(self,
                             input_shape: Tuple):
        # Shape-preserving layer.
        return input_shape

    def create_positional_encodings(self):
        # Evaluate PE(pos, dim) on the full grid, then fold the flat list
        # into a (sequence_length, d_model) matrix.
        encoding = [PE(pos, l, self.d_model) for pos, l in itertools.product(range(self.sequence_length),
                                                                             range(self.d_model))]
        encoding = np.array(encoding)
        encoding = encoding.reshape((self.sequence_length, self.d_model))
        return encoding

    def create_relative_encodings(self):
        # Replicate the per-position table once per batch element.
        encoding = self.create_positional_encodings()
        encoding = np.tile(encoding, (self.batch_size, 1, 1))
        # encoding = K.variable(encoding)
        # encoding._trainable = False
        return encoding

    def relative_encoding(self,
                          i: int,
                          j: int):
        # Clamped distance between positions i and j.
        # NOTE(review): clamp upper bound is sequence_length (not
        # sequence_length - 1), and self.encodings is indexed on its first
        # (batch) axis here, not the position axis -- confirm intended semantics.
        assert self.encodings is not None, \
            'build the Positional Encoding layer before using it'
        delta = i - j
        delta = max(0, min(self.sequence_length, delta))
        return self.encodings[delta]

    def get_config(self):
        # Serialize constructor arguments only; encodings/W_kr are rebuilt by
        # build(), so they are deliberately left out (see commented entries).
        config = super().get_config()
        config.update(batch_size=self.batch_size,
                      # d_model=self.d_model,
                      # sequence_length=self.sequence_length,
                      # encodings=self.encodings,
                      # W_kr=self.W_kr.numpy() if self.W_kr is not None else None,
                      verbose=self.verbose)
        return config

    @staticmethod
    def load(path, compile=True):
        # Load a saved Keras model from `path`.
        # NOTE(review): custom_objects is empty -- a model containing this
        # custom layer likely needs it registered here; confirm.
        from keras.models import load_model
        ct = load_model(path, custom_objects={}, compile=compile)
        return ct
def PE(pos, l, max_dimension):
    """Positional Encoding

    Arguments:
        pos: position in the sequence
        l: dimension, referred to in the paper as "i".
            Changed due to duplicated variable name
        max_dimension: maximum amount of dimensions used
    """
    exponent = (2 * l) / max_dimension
    angle = pos / (10000 ** exponent)
    # Even dimensions use sine, odd dimensions use cosine.
    trig = np.sin if l % 2 == 0 else np.cos
    return trig(angle)
chapter_03/6_more_guests.py | UgRoss/learn-python | 0 | 6614707 | # -*- coding: utf-8 -*-
# More Guests: You just found a bigger dinner table, so now more space is available. Think of three more guests to invite to dinner.
# • Start with your program from Exercise 3-4 or Exercise 3-5. Add a print statement
#   to the end of your program informing people that you found a bigger dinner table.
# • Use insert() to add one new guest to the beginning of your list.
# • Use insert() to add one new guest to the middle of your list.
# • Use append() to add one new guest to the end of your list.
# • Print a new set of invitation messages, one for each person in your list.

guests = ['Harper', 'Theresa', 'Owen', 'Edwin']

print('Hello, dear friends! Party becomes bigger! 🎉 \nNew invites:')

guests.insert(0, 'Kira')
# BUGFIX: len(guests) / 2 is a float in Python 3 and list.insert() raises
# TypeError on a float index; use integer (floor) division instead.
guests.insert(len(guests) // 2, 'Bob')
guests.append('Victoria')

for guest in guests:
    print('Dear, %s come and join us at a dinner party with cocktails, dance and music!' % guest)
| # -*- coding: utf-8 -*-
# More Guests: You just found a bigger dinner table, so now more space is available. Think of three more guests to invite to dinner.
# • Start with your program from Exercise 3-4 or Exercise 3-5. Add a print statement
#   to the end of your program informing people that you found a bigger dinner table.
# • Use insert() to add one new guest to the beginning of your list.
# • Use insert() to add one new guest to the middle of your list.
# • Use append() to add one new guest to the end of your list.
# • Print a new set of invitation messages, one for each person in your list.

guests = ['Harper', 'Theresa', 'Owen', 'Edwin']

print('Hello, dear friends! Party becomes bigger! 🎉 \nNew invites:')

guests.insert(0, 'Kira')
# BUGFIX: len(guests) / 2 is a float in Python 3 and list.insert() raises
# TypeError on a float index; use integer (floor) division instead.
guests.insert(len(guests) // 2, 'Bob')
guests.append('Victoria')

for guest in guests:
    print('Dear, %s come and join us at a dinner party with cocktails, dance and music!' % guest)
| en | 0.878589 | # -*- coding: utf-8 -*- # More Guests: You just found a bigger dinner table, so now more space is available. Think of three more guests to invite to dinner. # • Start with your program from Exercise 3-4 or Exercise 3-5. Add a print statement # to the end of your program informing people that you found a bigger dinner table. # • Use insert() to add one new guest to the beginning of your list. # • Use insert() to add one new guest to the middle of your list. # • Use append() to add one new guest to the end of your list. # • Print a new set of invitation messages, one for each person in your list. | 4.494371 | 4 |
pypy/_cache/pyopcode_85bad43c1b652dafe97b3ab4412af822.py | woodrow/pyoac | 1 | 6614708 | <reponame>woodrow/pyoac
# Auto-generated PyPy "geninterp" cache module for pyopcode.import_all_from.
# NOTE: Python 2 source (old `except E, e:` syntax); do not edit by hand --
# it is regenerated from pypy/interpreter/pyopcode.py.
# self-destruct on double-click:
# Running this file directly deletes its own .py/.pyc/.pyo from pypy/_cache.
if __name__ == "__main__":
    from pypy import _cache
    import os
    namestart = os.path.join(os.path.split(_cache.__file__)[0], 'pyopcode_85bad43c1b652dafe97b3ab4412af822')
    for ending in ('.py', '.pyc', '.pyo'):
        try:
            os.unlink(namestart+ending)
        except os.error:
            pass

#!/bin/env python
# -*- coding: LATIN-1 -*-

#*************************************************************

__name__ = "_geninterp_"+'__builtin__'
_geninterp_ = True

def init__builtin__(space):
    """NOT_RPYTHON"""

    ##SECTION##
    ## filename    'interpreter/pyopcode.py'
    ## function    'import_all_from'
    ## firstlineno 1330
    ##SECTION##
    # global declarations
    # global object g4dict
    # global object gs___name__
    # global object gs___builtin__
    # global object gs___file__
    # global object gs__Users_steve_Documents_MIT_TPP_2
    # global object gs_import_all_from
    # global object gfunc_import_all_from
    # global object gs___all__
    # global object gs___dict__
    # global object gs_from_import___object_has_no___di
    # global object gs_keys
    # global object gi_0
    # global object gs__

    # Flow-graph rendering of import_all_from: `goto` holds the current
    # basic-block number; each `if goto == N:` arm is one block of the
    # original control-flow graph.
    def import_all_from(space, w_module, w_into_locals):

        goto = 1 # startblock
        while True:

            if goto == 1:
                try:
                    w_0 = space.getattr(w_module, gs___all__)
                    w_skip_leading_underscores, w_1 = space.w_False, w_0
                    goto = 7
                except gOperationError, e:
                    e.normalize_exception(space)
                    if e.match(space, space.w_AttributeError):
                        goto = 2
                    else:raise # unhandled case, should not happen

            if goto == 2:
                try:
                    w_2 = space.getattr(w_module, gs___dict__)
                    goto = 5
                except gOperationError, e:
                    e.normalize_exception(space)
                    if e.match(space, space.w_AttributeError):
                        goto = 3
                    else:raise # unhandled case, should not happen

            if goto == 3:
                w_3 = space.call_function(space.w_ImportError, gs_from_import___object_has_no___di)
                w_4 = space.type(w_3)
                w_5 = space.issubtype(w_4, space.w_type)
                v0 = space.is_true(w_5)
                if v0 == True:
                    goto = 4
                else:
                    goto = 6

            if goto == 4:
                w_6 = space.call_function(w_3, )
                w_7 = space.type(w_6)
                w_etype, w_evalue = w_7, w_6
                goto = 12

            if goto == 5:
                w_8 = space.getattr(w_2, gs_keys)
                w_9 = space.call_function(w_8, )
                w_skip_leading_underscores, w_1 = space.w_True, w_9
                goto = 7

            if goto == 6:
                w_10 = space.type(w_3)
                w_etype, w_evalue = w_10, w_3
                goto = 12

            if goto == 7:
                w_11 = space.iter(w_1)
                goto = 8

            if goto == 8:
                try:
                    w_name = space.next(w_11)
                    goto = 9
                except gOperationError, e:
                    e.normalize_exception(space)
                    if e.match(space, space.w_StopIteration):
                        w_12 = space.w_None
                        goto = 13
                    else:raise # unhandled case, should not happen

            if goto == 9:
                v1 = space.is_true(w_skip_leading_underscores)
                if v1 == True:
                    goto = 10
                else:
                    goto = 11

            if goto == 10:
                w_13 = space.getitem(w_name, gi_0)
                w_14 = space.eq(w_13, gs__)
                v2 = space.is_true(w_14)
                if v2 == True:
                    goto = 8
                    continue
                else:
                    goto = 11

            if goto == 11:
                w_15 = space.getattr(w_module, w_name)
                w_16 = space.setitem(w_into_locals, w_name, w_15)
                goto = 8
                continue

            if goto == 12:
                raise gOperationError(w_etype, w_evalue)

            if goto == 13:
                return w_12

    fastf_import_all_from = import_all_from
    fastf_import_all_from.__name__ = 'fastf_import_all_from'

    ##SECTION##
    # Build the module dict (g4dict) and intern the constants used above.
    g4dict = space.newdict()
    gs___name__ = space.new_interned_str('__name__')
    gs___builtin__ = space.new_interned_str('__builtin__')
    space.setitem(g4dict, gs___name__, gs___builtin__)
    gs___file__ = space.new_interned_str('__file__')
    gs__Users_steve_Documents_MIT_TPP_2 = space.new_interned_str(
"""/Users/steve/Documents/MIT TPP/2009-2010/6.893/project/pypy-dist/pypy/interpreter/pyopcode.py""")
    space.setitem(g4dict, gs___file__, gs__Users_steve_Documents_MIT_TPP_2)
    gs_import_all_from = space.new_interned_str('import_all_from')
    from pypy.interpreter import gateway
    gfunc_import_all_from = space.wrap(gateway.interp2app(fastf_import_all_from, unwrap_spec=[gateway.ObjSpace, gateway.W_Root, gateway.W_Root]))
    space.setitem(g4dict, gs_import_all_from, gfunc_import_all_from)
    gs___all__ = space.new_interned_str('__all__')
    from pypy.interpreter.error import OperationError as gOperationError
    gs___dict__ = space.new_interned_str('__dict__')
    gs_from_import___object_has_no___di = space.new_interned_str(
"""from-import-* object has no __dict__ and no __all__""")
    gs_keys = space.new_interned_str('keys')
    gi_0 = space.wrap(0)
    gs__ = space.new_interned_str('_')
    return g4dict

from pypy._cache import known_code
known_code['85bad43c1b652dafe97b3ab4412af822'] = init__builtin__
| # self-destruct on double-click:
# Auto-generated PyPy "geninterp" cache module for pyopcode.import_all_from.
# NOTE: Python 2 source (old `except E, e:` syntax); do not edit by hand --
# it is regenerated from pypy/interpreter/pyopcode.py.
# Running this file directly deletes its own .py/.pyc/.pyo from pypy/_cache.
if __name__ == "__main__":
    from pypy import _cache
    import os
    namestart = os.path.join(os.path.split(_cache.__file__)[0], 'pyopcode_85bad43c1b652dafe97b3ab4412af822')
    for ending in ('.py', '.pyc', '.pyo'):
        try:
            os.unlink(namestart+ending)
        except os.error:
            pass

#!/bin/env python
# -*- coding: LATIN-1 -*-

#*************************************************************

__name__ = "_geninterp_"+'__builtin__'
_geninterp_ = True

def init__builtin__(space):
    """NOT_RPYTHON"""

    ##SECTION##
    ## filename    'interpreter/pyopcode.py'
    ## function    'import_all_from'
    ## firstlineno 1330
    ##SECTION##
    # global declarations
    # global object g4dict
    # global object gs___name__
    # global object gs___builtin__
    # global object gs___file__
    # global object gs__Users_steve_Documents_MIT_TPP_2
    # global object gs_import_all_from
    # global object gfunc_import_all_from
    # global object gs___all__
    # global object gs___dict__
    # global object gs_from_import___object_has_no___di
    # global object gs_keys
    # global object gi_0
    # global object gs__

    # Flow-graph rendering of import_all_from: `goto` holds the current
    # basic-block number; each `if goto == N:` arm is one block of the
    # original control-flow graph.
    def import_all_from(space, w_module, w_into_locals):

        goto = 1 # startblock
        while True:

            if goto == 1:
                try:
                    w_0 = space.getattr(w_module, gs___all__)
                    w_skip_leading_underscores, w_1 = space.w_False, w_0
                    goto = 7
                except gOperationError, e:
                    e.normalize_exception(space)
                    if e.match(space, space.w_AttributeError):
                        goto = 2
                    else:raise # unhandled case, should not happen

            if goto == 2:
                try:
                    w_2 = space.getattr(w_module, gs___dict__)
                    goto = 5
                except gOperationError, e:
                    e.normalize_exception(space)
                    if e.match(space, space.w_AttributeError):
                        goto = 3
                    else:raise # unhandled case, should not happen

            if goto == 3:
                w_3 = space.call_function(space.w_ImportError, gs_from_import___object_has_no___di)
                w_4 = space.type(w_3)
                w_5 = space.issubtype(w_4, space.w_type)
                v0 = space.is_true(w_5)
                if v0 == True:
                    goto = 4
                else:
                    goto = 6

            if goto == 4:
                w_6 = space.call_function(w_3, )
                w_7 = space.type(w_6)
                w_etype, w_evalue = w_7, w_6
                goto = 12

            if goto == 5:
                w_8 = space.getattr(w_2, gs_keys)
                w_9 = space.call_function(w_8, )
                w_skip_leading_underscores, w_1 = space.w_True, w_9
                goto = 7

            if goto == 6:
                w_10 = space.type(w_3)
                w_etype, w_evalue = w_10, w_3
                goto = 12

            if goto == 7:
                w_11 = space.iter(w_1)
                goto = 8

            if goto == 8:
                try:
                    w_name = space.next(w_11)
                    goto = 9
                except gOperationError, e:
                    e.normalize_exception(space)
                    if e.match(space, space.w_StopIteration):
                        w_12 = space.w_None
                        goto = 13
                    else:raise # unhandled case, should not happen

            if goto == 9:
                v1 = space.is_true(w_skip_leading_underscores)
                if v1 == True:
                    goto = 10
                else:
                    goto = 11

            if goto == 10:
                w_13 = space.getitem(w_name, gi_0)
                w_14 = space.eq(w_13, gs__)
                v2 = space.is_true(w_14)
                if v2 == True:
                    goto = 8
                    continue
                else:
                    goto = 11

            if goto == 11:
                w_15 = space.getattr(w_module, w_name)
                w_16 = space.setitem(w_into_locals, w_name, w_15)
                goto = 8
                continue

            if goto == 12:
                raise gOperationError(w_etype, w_evalue)

            if goto == 13:
                return w_12

    fastf_import_all_from = import_all_from
    fastf_import_all_from.__name__ = 'fastf_import_all_from'

    ##SECTION##
    # Build the module dict (g4dict) and intern the constants used above.
    g4dict = space.newdict()
    gs___name__ = space.new_interned_str('__name__')
    gs___builtin__ = space.new_interned_str('__builtin__')
    space.setitem(g4dict, gs___name__, gs___builtin__)
    gs___file__ = space.new_interned_str('__file__')
    gs__Users_steve_Documents_MIT_TPP_2 = space.new_interned_str(
"""/Users/steve/Documents/MIT TPP/2009-2010/6.893/project/pypy-dist/pypy/interpreter/pyopcode.py""")
    space.setitem(g4dict, gs___file__, gs__Users_steve_Documents_MIT_TPP_2)
    gs_import_all_from = space.new_interned_str('import_all_from')
    from pypy.interpreter import gateway
    gfunc_import_all_from = space.wrap(gateway.interp2app(fastf_import_all_from, unwrap_spec=[gateway.ObjSpace, gateway.W_Root, gateway.W_Root]))
    space.setitem(g4dict, gs_import_all_from, gfunc_import_all_from)
    gs___all__ = space.new_interned_str('__all__')
    from pypy.interpreter.error import OperationError as gOperationError
    gs___dict__ = space.new_interned_str('__dict__')
    gs_from_import___object_has_no___di = space.new_interned_str(
"""from-import-* object has no __dict__ and no __all__""")
    gs_keys = space.new_interned_str('keys')
    gi_0 = space.wrap(0)
    gs__ = space.new_interned_str('_')
    return g4dict

from pypy._cache import known_code
known_code['85bad43c1b652dafe97b3ab4412af822'] = init__builtin__
bar_graphs.py | kethan1/Scipy-Python | 0 | 6614709 | import random
import matplotlib.pyplot as plt
import numpy as np

# NOTE: `random` is imported earlier in this file (line mangled by extraction).
# Bar labels "0".."10" as a NumPy array of strings.
bar_names = np.array([str(v) for v in range(0, 11)])
# Then, make an array holding numerical indices of these bars:
bar_indexes = np.arange(len(bar_names))
# Random height (1-20) per bar; a plain Python list, which Matplotlib accepts.
bar_heights = [random.randint(1, 20) for _ in range(11)]
# Pass the data to Matplotlib:
plt.bar(bar_indexes, bar_heights, align="center")
plt.xticks(bar_indexes, bar_names)
plt.ylabel("Random Numbers")
plt.xlabel("Numbers 0-10")
plt.title("Random Stuff")
plt.show()
| import random
import matplotlib.pyplot as plt
import numpy as np

# NOTE: `random` is imported earlier in this file (line mangled by extraction).
# Bar labels "0".."10" as a NumPy array of strings.
bar_names = np.array([str(v) for v in range(0, 11)])
# Then, make an array holding numerical indices of these bars:
bar_indexes = np.arange(len(bar_names))
# Random height (1-20) per bar; a plain Python list, which Matplotlib accepts.
bar_heights = [random.randint(1, 20) for _ in range(11)]
# Pass the data to Matplotlib:
plt.bar(bar_indexes, bar_heights, align="center")
plt.xticks(bar_indexes, bar_names)
plt.ylabel("Random Numbers")
plt.xlabel("Numbers 0-10")
plt.title("Random Stuff")
plt.show()
| en | 0.677914 | # Then, make an array holding numerical indices of these bars: # Define the bar heights as a NumPy array: # Pass the data to Matplotlib: | 3.77659 | 4 |
celescope/fusion/__init__.py | susucy/CeleScope | 0 | 6614710 | __STEPS__ = ['sample', 'barcode', 'cutadapt', "STAR_fusion", "count_fusion"]
# Assay identifier used by the CeleScope framework to locate this sub-package.
__ASSAY__ = 'fusion'
| __STEPS__ = ['sample', 'barcode', 'cutadapt', "STAR_fusion", "count_fusion"]
# Assay identifier used by the CeleScope framework to locate this sub-package.
__ASSAY__ = 'fusion'
| none | 1 | 0.881534 | 1 | |
src/sqlalchemy/User.py | ptphp/PyLib | 1 | 6614711 | <reponame>ptphp/PyLib
#!/usr/bin/env python
# -*- coding=utf-8 -*-
'''
Created on 2013-2-2

@author: Joseph
'''
# NOTE: Python 2 source (print statements); this module will not run on Python 3.
import sqlalchemy
from sqlalchemy.ext.declarative import declarative_base #@UnresolvedImport
from sqlalchemy import Column, Integer, String #@UnresolvedImport
from sqlalchemy import create_engine #@UnresolvedImport
from sqlalchemy import Sequence#@UnresolvedImport

# In-memory SQLite engine; echo=True logs every emitted SQL statement.
engine = create_engine('sqlite:///:memory:', echo=True)
Base = declarative_base()

class User(Base):
    # Declarative ORM model mapped to the "users" table.
    __tablename__ = 'users'

    id = Column(Integer, Sequence('user_id_seq'),primary_key=True)
    name = Column(String(50))
    fullname = Column(String(50))
    password = Column(String(50))

    def __init__(self, name, fullname, password):
        self.name = name
        self.fullname = fullname
        self.password = password

    def __repr__(self):
        return "<User('%s','%s', '%s')>" % (self.name, self.fullname, self.password)

if __name__ == '__main__':
    # Smoke-test: print version, run a trivial query, and inspect the mapping.
    print sqlalchemy.__version__ #@UndefinedVariable
    print engine.execute("select 1").scalar()
    user = User('joseph','zhou','<PASSWORD>')
    print user
    print user.name
    # id is None until the row is flushed to the database.
    print user.id
    print User
    print User.__table__
    print User.__mapper__
Base.metadata.create_all(engine) | #!/usr/bin/env python
# -*- coding=utf-8 -*-
'''
Created on 2013-2-2

@author: Joseph
'''
# NOTE: Python 2 source (print statements); this module will not run on Python 3.
import sqlalchemy
from sqlalchemy.ext.declarative import declarative_base #@UnresolvedImport
from sqlalchemy import Column, Integer, String #@UnresolvedImport
from sqlalchemy import create_engine #@UnresolvedImport
from sqlalchemy import Sequence#@UnresolvedImport

# In-memory SQLite engine; echo=True logs every emitted SQL statement.
engine = create_engine('sqlite:///:memory:', echo=True)
Base = declarative_base()

class User(Base):
    # Declarative ORM model mapped to the "users" table.
    __tablename__ = 'users'

    id = Column(Integer, Sequence('user_id_seq'),primary_key=True)
    name = Column(String(50))
    fullname = Column(String(50))
    password = Column(String(50))

    def __init__(self, name, fullname, password):
        self.name = name
        self.fullname = fullname
        self.password = password

    def __repr__(self):
        return "<User('%s','%s', '%s')>" % (self.name, self.fullname, self.password)

if __name__ == '__main__':
    # Smoke-test: print version, run a trivial query, and inspect the mapping.
    print sqlalchemy.__version__ #@UndefinedVariable
    print engine.execute("select 1").scalar()
    user = User('joseph','zhou','<PASSWORD>')
    print user
    print user.name
    # id is None until the row is flushed to the database.
    print user.id
    print User
    print User.__table__
    print User.__mapper__
    Base.metadata.create_all(engine)
oplab/filename_to_date.py | ocean-perception/oplab_pipeline | 5 | 6614712 | # -*- coding: utf-8 -*-
"""
Copyright (c) 2020, University of Southampton
All rights reserved.
Licensed under the BSD 3-Clause License.
See LICENSE.md file in the project root for full license information.
"""
import calendar
import os
from datetime import datetime
from pathlib import Path
import pandas as pd
from .console import Console
from .folder_structure import get_raw_folder
def resolve(filename, folder):
    """Resolve a (possibly glob-style) filename inside the raw-data folder.

    Returns the last glob match; exits via Console if nothing matches.
    """
    workdir = get_raw_folder(folder)
    resolved_filename = ""
    # Iterate all matches; the last one wins when the glob is ambiguous.
    for x in workdir.glob(filename):
        resolved_filename = x
    if resolved_filename == "":
        Console.error("The file: ", filename, " could not be found.")
        Console.quit("Invalid timestamp file or format")
    return resolved_filename
class FilenameToDate:
    """Convert image filenames (or a timestamp file) to UNIX epoch seconds.

    The stamp format uses one flag character per filename character:
    Y/M/D (date), h/m/s (time), f (milliseconds), u (microseconds),
    i (row index into a separate timestamp file); any other character is a
    positional literal.  The special format "m" uses the file's mtime.
    """

    def __init__(
        self, stamp_format: str, filename=None, columns=None, path=None
    ):
        self.stamp_format = stamp_format
        self.df = None  # populated by read_timestamp_file when "i" indexing is used
        if path is not None:
            self.path = Path(path)
        else:
            self.path = Path.cwd()
        if filename is not None and columns is not None:
            self.filename = resolve(filename, self.path)
            self.read_timestamp_file(self.filename, columns)

    # Make the object callable (e.g. operator() )
    def __call__(self, filename: str):
        """Return the epoch timestamp (float seconds) for one filename."""
        filename = Path(filename)
        if self.stamp_format == "m":
            # Special format "m": use the file's modification time.
            modification_time = os.stat(str(filename)).st_mtime
            return modification_time
        else:
            # Match against the extension-free filename.  The full
            # self.stamp_format (extension included) is passed on; zip()
            # truncation makes the extra format characters harmless.
            # NOTE(review): the original also computed (and discarded) the
            # format's stem -- confirm the full format is intended here.
            filename = filename.stem
            return self.string_to_epoch(filename, self.stamp_format)

    def string_to_epoch(self, filename, stamp_format):
        """Parse `filename` against `stamp_format` flags into epoch seconds."""
        year = ""
        month = ""
        day = ""
        hour = ""
        minute = ""
        second = ""
        msecond = ""
        usecond = ""
        index = ""
        # Walk filename and format in lockstep; zip() stops at the shorter.
        for n, f in zip(filename, stamp_format):
            if f == "Y":
                year += n
            if f == "M":
                month += n
            if f == "D":
                day += n
            if f == "h":
                hour += n
            if f == "m":
                minute += n
            if f == "s":
                second += n
            if f == "f":
                msecond += n
            if f == "u":
                usecond += n
            if f == "i":
                index += n
        if not index:
            assert len(year) == 4, "Year in filename should have a length of 4"
            assert len(month) == 2, "Month in filename should have a length of 2"
            assert len(day) == 2, "Day in filename should have a length of 2"
            assert len(hour) == 2, "Hour in filename should have a length of 2"
            assert len(minute) <= 2, "Minute in filename should have a length of 2"
            assert len(second) <= 2, "Second in filename should have a length of 2"
            if msecond:
                assert len(msecond) <= 3, \
                    "Milliseconds in filename should have a maximum length of 3"
            else:
                msecond = "0"
            if usecond:
                assert len(usecond) <= 3, \
                    "Microseconds in filename should have a length of 3"
            else:
                usecond = "0"
            microsecond = int(msecond) * 1000 + int(usecond)
            date = datetime(
                int(year),
                int(month),
                int(day),
                int(hour),
                int(minute),
                int(second),
                microsecond,
            )
            # timetuple() drops sub-second precision; timegm treats it as UTC,
            # so the microseconds are added back afterwards.
            stamp = float(calendar.timegm(date.timetuple()))
            return stamp + microsecond * 1e-6
        else:
            if self.df is None:
                Console.error(
                    "FilenameToDate specified using indexing, but no "
                    "timestamp file has been provided or read."
                )
                Console.quit("Invalid timestamp format")
            stamp = self.df["epoch_timestamp"][int(index)]
            return stamp

    def read_timestamp_file(self, filename, columns):
        """Load a timestamp CSV and compute an epoch column, indexed by 'i'."""
        filename = Path(filename)
        # BUGFIX: the original left the file handle open; use a context manager.
        with filename.open("r") as filestream:
            lines = filestream.readlines()
        header = lines[0]
        first_row = lines[1]
        headers = header.split(",")
        hn = len(headers)
        ln = len(first_row.split(","))
        if ln > hn:
            # Data rows carry more fields than the header: pad with placeholders.
            for i in range(ln - hn):
                headers.append("unknown" + str(i))
            df = pd.read_csv(
                filename, dtype=str, header=None, names=headers, skiprows=[0]
            )
        else:
            df = pd.read_csv(filename, dtype=str)
        df["combined"] = ""
        df["combined_format"] = ""
        df["epoch_timestamp"] = ""
        df_index_name = None
        for c in columns:
            name = c["name"]
            content = c["content"]
            # If it is not the index column, concatenate all columns into one
            # combined string plus the matching combined format string.
            if "i" not in content:
                df["combined"] += df[name].astype(str)
                df["combined_format"] += content
                # NOTE(review): this drop() result is discarded (no inplace /
                # reassignment), so the column is kept -- confirm intent.
                df.drop(name, axis=1)
            else:
                if df_index_name is None:
                    df_index_name = name
                else:
                    Console.error("There should only be one Index column")
                    Console.quit("Invalid timestamp format")
        # NOTE(review): assumes a column literally named "index" exists.
        last_idx = int(df["index"].tail(1))
        Console.info("Found", last_idx, "timestamp records in", filename)
        for idx, row in df.iterrows():
            # BUGFIX: iterrows() yields copies, so the original assignment into
            # `row` never reached the DataFrame and epoch_timestamp stayed "".
            # Write back through df.at instead.
            df.at[idx, "epoch_timestamp"] = self.string_to_epoch(
                row["combined"], row["combined_format"]
            )
        df = df.drop("combined", axis=1)
        df = df.drop("combined_format", axis=1)
        df[df_index_name] = df[df_index_name].astype(int)
        self.df = df.set_index(df_index_name)
| # -*- coding: utf-8 -*-
"""
Copyright (c) 2020, University of Southampton
All rights reserved.
Licensed under the BSD 3-Clause License.
See LICENSE.md file in the project root for full license information.
"""
import calendar
import os
from datetime import datetime
from pathlib import Path
import pandas as pd
from .console import Console
from .folder_structure import get_raw_folder
def resolve(filename, folder):
    """Resolve a (possibly glob-style) filename inside the raw-data folder.

    Returns the last glob match; exits via Console if nothing matches.
    """
    workdir = get_raw_folder(folder)
    resolved_filename = ""
    # Iterate all matches; the last one wins when the glob is ambiguous.
    for x in workdir.glob(filename):
        resolved_filename = x
    if resolved_filename == "":
        Console.error("The file: ", filename, " could not be found.")
        Console.quit("Invalid timestamp file or format")
    return resolved_filename
class FilenameToDate:
    """Convert image filenames (or a timestamp file) to UNIX epoch seconds.

    The stamp format uses one flag character per filename character:
    Y/M/D (date), h/m/s (time), f (milliseconds), u (microseconds),
    i (row index into a separate timestamp file); any other character is a
    positional literal.  The special format "m" uses the file's mtime.
    """

    def __init__(
        self, stamp_format: str, filename=None, columns=None, path=None
    ):
        self.stamp_format = stamp_format
        self.df = None  # populated by read_timestamp_file when "i" indexing is used
        if path is not None:
            self.path = Path(path)
        else:
            self.path = Path.cwd()
        if filename is not None and columns is not None:
            self.filename = resolve(filename, self.path)
            self.read_timestamp_file(self.filename, columns)

    # Make the object callable (e.g. operator() )
    def __call__(self, filename: str):
        """Return the epoch timestamp (float seconds) for one filename."""
        filename = Path(filename)
        if self.stamp_format == "m":
            # Special format "m": use the file's modification time.
            modification_time = os.stat(str(filename)).st_mtime
            return modification_time
        else:
            # Match against the extension-free filename.  The full
            # self.stamp_format (extension included) is passed on; zip()
            # truncation makes the extra format characters harmless.
            # NOTE(review): the original also computed (and discarded) the
            # format's stem -- confirm the full format is intended here.
            filename = filename.stem
            return self.string_to_epoch(filename, self.stamp_format)

    def string_to_epoch(self, filename, stamp_format):
        """Parse `filename` against `stamp_format` flags into epoch seconds."""
        year = ""
        month = ""
        day = ""
        hour = ""
        minute = ""
        second = ""
        msecond = ""
        usecond = ""
        index = ""
        # Walk filename and format in lockstep; zip() stops at the shorter.
        for n, f in zip(filename, stamp_format):
            if f == "Y":
                year += n
            if f == "M":
                month += n
            if f == "D":
                day += n
            if f == "h":
                hour += n
            if f == "m":
                minute += n
            if f == "s":
                second += n
            if f == "f":
                msecond += n
            if f == "u":
                usecond += n
            if f == "i":
                index += n
        if not index:
            assert len(year) == 4, "Year in filename should have a length of 4"
            assert len(month) == 2, "Month in filename should have a length of 2"
            assert len(day) == 2, "Day in filename should have a length of 2"
            assert len(hour) == 2, "Hour in filename should have a length of 2"
            assert len(minute) <= 2, "Minute in filename should have a length of 2"
            assert len(second) <= 2, "Second in filename should have a length of 2"
            if msecond:
                assert len(msecond) <= 3, \
                    "Milliseconds in filename should have a maximum length of 3"
            else:
                msecond = "0"
            if usecond:
                assert len(usecond) <= 3, \
                    "Microseconds in filename should have a length of 3"
            else:
                usecond = "0"
            microsecond = int(msecond) * 1000 + int(usecond)
            date = datetime(
                int(year),
                int(month),
                int(day),
                int(hour),
                int(minute),
                int(second),
                microsecond,
            )
            # timetuple() drops sub-second precision; timegm treats it as UTC,
            # so the microseconds are added back afterwards.
            stamp = float(calendar.timegm(date.timetuple()))
            return stamp + microsecond * 1e-6
        else:
            if self.df is None:
                Console.error(
                    "FilenameToDate specified using indexing, but no "
                    "timestamp file has been provided or read."
                )
                Console.quit("Invalid timestamp format")
            stamp = self.df["epoch_timestamp"][int(index)]
            return stamp

    def read_timestamp_file(self, filename, columns):
        """Load a timestamp CSV and compute an epoch column, indexed by 'i'."""
        filename = Path(filename)
        # BUGFIX: the original left the file handle open; use a context manager.
        with filename.open("r") as filestream:
            lines = filestream.readlines()
        header = lines[0]
        first_row = lines[1]
        headers = header.split(",")
        hn = len(headers)
        ln = len(first_row.split(","))
        if ln > hn:
            # Data rows carry more fields than the header: pad with placeholders.
            for i in range(ln - hn):
                headers.append("unknown" + str(i))
            df = pd.read_csv(
                filename, dtype=str, header=None, names=headers, skiprows=[0]
            )
        else:
            df = pd.read_csv(filename, dtype=str)
        df["combined"] = ""
        df["combined_format"] = ""
        df["epoch_timestamp"] = ""
        df_index_name = None
        for c in columns:
            name = c["name"]
            content = c["content"]
            # If it is not the index column, concatenate all columns into one
            # combined string plus the matching combined format string.
            if "i" not in content:
                df["combined"] += df[name].astype(str)
                df["combined_format"] += content
                # NOTE(review): this drop() result is discarded (no inplace /
                # reassignment), so the column is kept -- confirm intent.
                df.drop(name, axis=1)
            else:
                if df_index_name is None:
                    df_index_name = name
                else:
                    Console.error("There should only be one Index column")
                    Console.quit("Invalid timestamp format")
        # NOTE(review): assumes a column literally named "index" exists.
        last_idx = int(df["index"].tail(1))
        Console.info("Found", last_idx, "timestamp records in", filename)
        for idx, row in df.iterrows():
            # BUGFIX: iterrows() yields copies, so the original assignment into
            # `row` never reached the DataFrame and epoch_timestamp stayed "".
            # Write back through df.at instead.
            df.at[idx, "epoch_timestamp"] = self.string_to_epoch(
                row["combined"], row["combined_format"]
            )
        df = df.drop("combined", axis=1)
        df = df.drop("combined_format", axis=1)
        df[df_index_name] = df[df_index_name].astype(int)
        self.df = df.set_index(df_index_name)
| en | 0.756748 | # -*- coding: utf-8 -*- Copyright (c) 2020, University of Southampton All rights reserved. Licensed under the BSD 3-Clause License. See LICENSE.md file in the project root for full license information. # Make the object callable (e.g. operator() ) # Get the name without extension # If it is not index columns, concatenate all columns into one | 2.939476 | 3 |
setlistspy/app/models.py | coreybobco/setlistspy-api | 6 | 6614713 | <reponame>coreybobco/setlistspy-api<filename>setlistspy/app/models.py
import os
from django.db import models
from setlistspy.app.base_model import BaseSetSpyModel
from playhouse.postgres_ext import *
# def get_db():
# return PostgresqlExtDatabase(
# os.getenv('POSTGRES_HOST'),
# user=os.getenv('POSTGRES_USER'),
# password=os.getenv('<PASSWORD>PASSWORD'),
# host="localhost",
# port=os.getenv('POSTGRES_PORT'),
# register_hstore=False
# )
class DJ(BaseSetSpyModel):
    # A DJ scraped from MixesDB.
    name = models.CharField(max_length=255)
    # Canonical MixesDB URL for the DJ (unique natural key).
    url = models.CharField(max_length=255, unique=True)
    # MD5 of the last-fetched XML payload, used to skip unchanged pages.
    xml_md5 = models.CharField(max_length=32, default='')
    # When this DJ's page was last crawled; null until the first check.
    last_check_time = models.DateTimeField(null=True, blank=True)

    class Meta:
        indexes = [
            models.Index(fields=['name']),
            models.Index(fields=['last_check_time']),
            models.Index(fields=['name', 'last_check_time'])
        ]

    def __str__(self):
        return f'{self.name}'
class Setlist(BaseSetSpyModel):
    # One setlist (tracklist page) on MixesDB, owned by a DJ.
    dj = models.ForeignKey(DJ, on_delete=models.PROTECT, related_name='setlists')
    title = models.CharField(max_length=255)
    # MixesDB page id; unique together with dj (see Meta).
    mixesdb_id = models.IntegerField()
    # Page modification time as reported by MixesDB.
    mixesdb_mod_time = models.DateTimeField()
    # NOTE(review): SHA-1 hex digests are 40 chars; max_length=31 looks short -- confirm.
    xml_sha1 = models.CharField(max_length=31, null=True)
    # FIX: NullBooleanField is deprecated and removed in Django 4.0;
    # BooleanField(null=True) is the documented drop-in replacement.
    b2b = models.BooleanField('Other DJs on deck', null=True)

    class Meta:
        indexes = [
            models.Index(fields=['dj']),
            models.Index(fields=['mixesdb_mod_time']),
            models.Index(fields=['dj', 'mixesdb_mod_time'])
        ]
        unique_together = (
            ('dj', 'mixesdb_id'),
        )

    def __str__(self):
        return f'{self.title}'
class Artist(BaseSetSpyModel):
    """An artist whose tracks appear in setlists; names are unique."""
    name = models.CharField(max_length=255, unique=True)

    def __str__(self):
        return f'{self.name}'
class Label(BaseSetSpyModel):
    """A record label a track play can be attributed to."""
    name = models.CharField(max_length=255, unique=True)
    # Optional cross-reference id on Discogs; null when unmatched.
    discogs_id = models.IntegerField(null=True)

    def __str__(self):
        return f'{self.name}'
class Track(BaseSetSpyModel):
    """A track (unique per artist+title), linked to setlists via TrackPlay."""
    artist = models.ForeignKey(Artist, on_delete=models.PROTECT, related_name="tracks")
    title = models.CharField(max_length=255)
    # Many-to-many through TrackPlay, which records ordering and label.
    setlists = models.ManyToManyField(Setlist, through="TrackPlay", related_name="tracks")

    def __str__(self):
        return f'{self.artist.name} - {self.title}'

    class Meta:
        indexes = [
            models.Index(fields=['artist']),
            models.Index(fields=['title']),
            models.Index(fields=['artist', 'title']),
        ]
        unique_together = (
            ('artist', 'title'),
        )
class TrackPlay(BaseSetSpyModel):
    """Join model: one play of a track within a setlist, at a given position."""
    track = models.ForeignKey(Track, related_name='plays', on_delete=models.PROTECT)
    setlist = models.ForeignKey(Setlist, related_name='track_plays', on_delete=models.PROTECT)
    # 1-based (presumably) position within the set; unique per setlist.
    set_order = models.IntegerField()
    # Label credited for this particular play; null when unknown.
    label = models.ForeignKey(Label, null=True, related_name='track_plays', on_delete=models.PROTECT)

    class Meta:
        indexes = [
            models.Index(fields=['track']),
            models.Index(fields=['setlist']),
            models.Index(fields=['track', 'setlist']),
        ]
        unique_together = (
            ('setlist', 'set_order'),
        )

    def __str__(self):
        return f'{self.setlist.title} - {self.set_order}. {self.track.artist.name} - {self.track.title}'
from django.db import models
from setlistspy.app.base_model import BaseSetSpyModel
from playhouse.postgres_ext import *
# def get_db():
# return PostgresqlExtDatabase(
# os.getenv('POSTGRES_HOST'),
# user=os.getenv('POSTGRES_USER'),
# password=os.getenv('<PASSWORD>PASSWORD'),
# host="localhost",
# port=os.getenv('POSTGRES_PORT'),
# register_hstore=False
# )
class DJ(BaseSetSpyModel):
    """A DJ, uniquely identified by URL, whose setlists are tracked."""
    name = models.CharField(max_length=255)
    url = models.CharField(max_length=255, unique=True)
    # MD5 digest (32 hex chars) of the last XML payload seen for this DJ;
    # empty string until first fetch.
    xml_md5 = models.CharField(max_length=32, default='')
    # When this DJ was last checked; null/blank if never checked.
    last_check_time = models.DateTimeField(null=True, blank=True)

    class Meta:
        indexes = [
            models.Index(fields=['name']),
            models.Index(fields=['last_check_time']),
            models.Index(fields=['name', 'last_check_time'])
        ]

    def __str__(self):
        return f'{self.name}'
class Setlist(BaseSetSpyModel):
    """A single set played by a DJ, sourced from MixesDB.

    Unique per (dj, mixesdb_id); ``mixesdb_mod_time`` tracks the upstream
    modification time so unchanged setlists can be skipped.
    """
    dj = models.ForeignKey(DJ, on_delete=models.PROTECT, related_name='setlists')
    title = models.CharField(max_length=255)
    mixesdb_id = models.IntegerField()
    mixesdb_mod_time = models.DateTimeField()
    # Bugfix: a hex-encoded SHA-1 digest is 40 characters; max_length=31 would
    # truncate/reject every digest (compare xml_md5 above, correctly 32 for MD5).
    # NOTE: this change requires a schema migration.
    xml_sha1 = models.CharField(max_length=40, null=True)
    # NOTE(review): NullBooleanField is deprecated since Django 2.1 in favor of
    # BooleanField(null=True); left as-is to avoid a migration/behavior change.
    b2b = models.NullBooleanField('Other DJs on deck', null=True)

    class Meta:
        indexes = [
            models.Index(fields=['dj']),
            models.Index(fields=['mixesdb_mod_time']),
            models.Index(fields=['dj', 'mixesdb_mod_time'])
        ]
        unique_together = (
            ('dj', 'mixesdb_id'),
        )

    def __str__(self):
        return f'{self.title}'
class Artist(BaseSetSpyModel):
    """An artist whose tracks appear in setlists; names are unique."""
    name = models.CharField(max_length=255, unique=True)

    def __str__(self):
        return f'{self.name}'
class Label(BaseSetSpyModel):
    """A record label a track play can be attributed to."""
    name = models.CharField(max_length=255, unique=True)
    # Optional cross-reference id on Discogs; null when unmatched.
    discogs_id = models.IntegerField(null=True)

    def __str__(self):
        return f'{self.name}'
class Track(BaseSetSpyModel):
    """A track (unique per artist+title), linked to setlists via TrackPlay."""
    artist = models.ForeignKey(Artist, on_delete=models.PROTECT, related_name="tracks")
    title = models.CharField(max_length=255)
    # Many-to-many through TrackPlay, which records ordering and label.
    setlists = models.ManyToManyField(Setlist, through="TrackPlay", related_name="tracks")

    def __str__(self):
        return f'{self.artist.name} - {self.title}'

    class Meta:
        indexes = [
            models.Index(fields=['artist']),
            models.Index(fields=['title']),
            models.Index(fields=['artist', 'title']),
        ]
        unique_together = (
            ('artist', 'title'),
        )
class TrackPlay(BaseSetSpyModel):
    """Join model: one play of a track within a setlist, at a given position."""
    track = models.ForeignKey(Track, related_name='plays', on_delete=models.PROTECT)
    setlist = models.ForeignKey(Setlist, related_name='track_plays', on_delete=models.PROTECT)
    # 1-based (presumably) position within the set; unique per setlist.
    set_order = models.IntegerField()
    # Label credited for this particular play; null when unknown.
    label = models.ForeignKey(Label, null=True, related_name='track_plays', on_delete=models.PROTECT)

    class Meta:
        indexes = [
            models.Index(fields=['track']),
            models.Index(fields=['setlist']),
            models.Index(fields=['track', 'setlist']),
        ]
        unique_together = (
            ('setlist', 'set_order'),
        )

    def __str__(self):
        return f'{self.setlist.title} - {self.set_order}. {self.track.artist.name} - {self.track.title}'
patchworks/patch/ebpatcher.py | meunierd/patchworks | 0 | 6614714 | import codecs
import json
from .ips import IPSParser
class EBPatcher(IPSParser):
    """IPS-style patcher for ``.ebp`` files, whose metadata is a JSON document."""

    EXTENSION = 'ebp'

    def parse_metadata(self):
        """Decode the underlying patch file as UTF-8 and load its JSON metadata."""
        utf8_reader = codecs.getreader('utf-8')
        self.metadata = json.load(utf8_reader(self._file))
import json
from .ips import IPSParser
class EBPatcher(IPSParser):
    """IPS-style patcher for ``.ebp`` files, whose metadata is a JSON document."""

    EXTENSION = 'ebp'

    def parse_metadata(self):
        """Decode the underlying patch file as UTF-8 and load its JSON metadata."""
        utf8_reader = codecs.getreader('utf-8')
        self.metadata = json.load(utf8_reader(self._file))
xhorizon/evap/evap.py | jcschindler01/xhorizon | 1 | 6614715 |
"""
This module provides methods for constructing black hole (BH) formation and evaporation diagrams.
This module imports the entire xhorizon package.
It is meant for a higher level usage than the other subpackages, none of the
guts of xhorizon rely on this.
"""
import numpy as np
import matplotlib.pyplot as plt
import copy, pprint
import scipy.optimize as opt
import xhorizon as xh
from helpers import *
###############################################################################################################3
def funclist_chain(funclist, seed=0, du=None, dv=None, r0p=None, r0f=None, u0=None, v0=None, ps_matchmode=None, fs_matchmode=None):
"""
Create a chain of matched regions, starting at seed region which is unmodified.
Each region except ends has two slices through it, a future slice fslice and past slice pslice.
Each fslice and pslice can be either active or passive, but there can only be one active slice per region.
The index i refers to each region in the sequence for all variables.
Inputs:
funclist = list of func objects, in order, to chain together
seed = index value for seed region (seed region has trivial transforms to target coords)
du = list of du values so that du[i] will always be size of region[i]
dv = list of du values so that du[i] will always be size of region[i]
r0p = list of r0 values for past slice so that r0p will always be ps_r0 when pslice is active
r0f = list of r0 values for future slice so that r0f will always be fs_r0 when fslice is active
u0 = list of offset values for range of u values in slice, defaults to zero
v0 = list of offset values for range of v values in slice, defaults to zero
ps_matchmode = list of strings, each either 'ru' or 'rv', to determine how past slice is sliced when pslice is active
ps_matchmode = list of strings, each either 'ru' or 'rv', to determine how future slice is sliced when fslice is active
"""
print "du funclist_chain"
print repr(du)
print "dv funclist_chain"
print repr(dv)
## init default values
if u0==None:
u0 = np.zeros(len(funclist))
if v0==None:
v0 = np.zeros(len(funclist))
if ps_matchmode==None:
ps_matchmode = ['rv' for func in funclist]
if fs_matchmode==None:
fs_matchmode = ['rv' for func in funclist]
## set irrelevant first and last du and dv values to zero
du[0], du[-1] = 0., 0.
dv[0], dv[-1] = 0., 0.
## init internal variables
reglist = [xh.reg.EFreg(funcx, boundary=False, rlines=False) for funcx in funclist]
pslice = [None for funcx in funclist]
fslice = [None for funcx in funclist]
Rh = [funcx.rj[-2] for funcx in funclist]
ps_r0 = [np.nan for funcx in funclist]
ps_u0 = [np.nan for funcx in funclist]
ps_v0 = [np.nan for funcx in funclist]
fs_r0 = [np.nan for funcx in funclist]
fs_u0 = [np.nan for funcx in funclist]
fs_v0 = [np.nan for funcx in funclist]
i0 = range(len(funclist))[1*seed]
ps_matchpop = [mp(mmm) for mmm in ps_matchmode]
fs_matchpop = [mp(mmm) for mmm in fs_matchmode]
## seed region
i = 1*i0
for i in [1*i0]:
###### past passive slice
## past passive slice input params (not mutually consistent)
ps_u0[i] = u0[i] - 0.5*du[i]
ps_v0[i] = v0[i] - 0.5*dv[i]
ps_r0[i] = 1.*r0p[i]
## get past passive slice location from inputs and matchpop
sliceloc = dict(u0=ps_u0[i], v0=ps_v0[i], r0=ps_r0[i])
sliceloc.pop(ps_matchpop[i])
print "i=%s pslice loc: %s"%(i,sliceloc)
## execute past passive slice at sliceloc
pslice[i] = xh.junc.pslice(reglist[i], ublocks=[-1], vblocks=range(len(reglist[i].blocks)), **sliceloc)
## update past passive slice location to true values
ps_u0[i], ps_v0[i], ps_r0[i] = 1.*pslice[i].u0, 1.*pslice[i].v0, 1.*pslice[i].r0
#### future passive slice
## future passive slice input params (not mutually consistent)
fs_u0[i] = 1.*ps_u0[i] + 1.*du[i]
fs_v0[i] = 1.*ps_v0[i] + 1.*dv[i]
fs_r0[i] = 1.*r0f[i]
## get future passive slice location from inputs and matchpop
sliceloc = dict(u0=fs_u0[i], v0=fs_v0[i], r0=fs_r0[i])
sliceloc.pop(fs_matchpop[i])
print "i=%s fslice loc: %s"%(i,sliceloc)
## execute future passive slice at sliceloc
fslice[i] = xh.junc.pslice(reglist[i], ublocks=[-1], vblocks=range(len(reglist[i].blocks)), **sliceloc)
## update future passive slice location to true values
fs_u0[i], fs_v0[i], fs_r0[i] = 1.*fslice[i].u0, 1.*fslice[i].v0, 1.*fslice[i].r0
## forward regions
i = 1*i0 + 1
while i < len(reglist):
###### past active slice
## past active slice input params (not mutually consistent)
ps_u0[i] = u0[i] - 0.5*du[i]
ps_v0[i] = v0[i] - 0.5*dv[i]
ps_r0[i] = 1.*fs_r0[i-1]
## get past active slice location from inputs and matchpop
sliceloc = dict(u0=ps_u0[i], v0=ps_v0[i], r0=ps_r0[i])
sliceloc.pop(ps_matchpop[i])
print "i=%s pslice loc: %s"%(i,sliceloc)
## execute past active slice at sliceloc
pslice[i] = xh.junc.aslice(reglist[i], ublocks=[-1], vblocks=range(len(reglist[i].blocks)), U0=fslice[i-1].U_of_r_at_v0, V0=fslice[i-1].V_of_r_at_u0, r_refs=[fslice[i-1].reg.metfunc.r_ref], **sliceloc)
## update past active slice location to true values
ps_u0[i], ps_v0[i], ps_r0[i] = 1.*pslice[i].u0, 1.*pslice[i].v0, 1.*pslice[i].r0
#### modify transformations
## adjust transformations
reglist[i].U_of_udl = pslice[i].U_of_udl_at_v0
reglist[i].V_of_vdl = pslice[i].V_of_vdl_at_u0
#### future passive slice
## future passive slice input params (not mutually consistent)
fs_u0[i] = 1.*ps_u0[i] + 1.*du[i]
fs_v0[i] = 1.*ps_v0[i] + 1.*dv[i]
fs_r0[i] = 1.*r0f[i]
## get past active slice location from inputs and matchpop
sliceloc = dict(u0=fs_u0[i], v0=fs_v0[i], r0=fs_r0[i])
sliceloc.pop(fs_matchpop[i])
print "i=%s fslice loc: %s"%(i,sliceloc)
## execute future passive slice at sliceloc
fslice[i] = xh.junc.pslice(reglist[i], ublocks=[-1], vblocks=range(len(reglist[i].blocks)), **sliceloc)
## update future passive slice location to true values
fs_u0[i], fs_v0[i], fs_r0[i] = 1.*fslice[i].u0, 1.*fslice[i].v0, 1.*fslice[i].r0
##### iterate
## iterate
i += 1
## backward regions
i = 1*i0 - 1
while i>=0:
###### future active slice
## past active slice input params (not mutually consistent)
fs_u0[i] = u0[i] - 0.5*du[i]
fs_v0[i] = v0[i] - 0.5*dv[i]
fs_r0[i] = 1.*ps_r0[i+1]
## get future active slice location from inputs and matchpop
sliceloc = dict(u0=fs_u0[i], v0=fs_v0[i], r0=fs_r0[i])
sliceloc.pop(fs_matchpop[i])
print "i=%s fslice loc: %s"%(i,sliceloc)
## execute future active slice at sliceloc
fslice[i] = xh.junc.aslice(reglist[i], ublocks=[-1], vblocks=range(len(reglist[i].blocks)), U0=pslice[i+1].U_of_r_at_v0, V0=pslice[i+1].V_of_r_at_u0, r_refs=[pslice[i+1].reg.metfunc.r_ref], **sliceloc)
## update future active slice location to true values
fs_u0[i], fs_v0[i], fs_r0[i] = 1.*fslice[i].u0, 1.*fslice[i].v0, 1.*fslice[i].r0
#### modify transformations
## adjust transformations
reglist[i].U_of_udl = fslice[i].U_of_udl_at_v0
reglist[i].V_of_vdl = fslice[i].V_of_vdl_at_u0
#### past passive slice
## past passive slice input params (not mutually consistent)
ps_u0[i] = 1.*fs_u0[i] - 1.*du[i]
ps_v0[i] = 1.*fs_v0[i] - 1.*dv[i]
ps_r0[i] = 1.*r0p[i]
## get past passive slice location from inputs and matchpop
sliceloc = dict(u0=ps_u0[i], v0=ps_v0[i], r0=ps_r0[i])
sliceloc.pop(ps_matchpop[i])
print "i=%s pslice loc: %s"%(i,sliceloc)
## execute past passive slice at sliceloc
pslice[i] = xh.junc.pslice(reglist[i], ublocks=[-1], vblocks=range(len(reglist[i].blocks)), **sliceloc)
## update future passive slice location to true values
ps_u0[i], ps_v0[i], ps_r0[i] = 1.*pslice[i].u0, 1.*pslice[i].v0, 1.*pslice[i].r0
##### iterate
## iterate
i -= 1
## make sliceparams dict
chainparams = dict(Rh=1.*np.array(Rh), ps_u0=1.*np.array(ps_u0), ps_v0=1.*np.array(ps_v0), ps_r0=1.*np.array(ps_r0), fs_u0=1.*np.array(fs_u0), fs_v0=1.*np.array(fs_v0), fs_r0=1.*np.array(fs_r0), i0=1*i0, ps_matchmode=ps_matchmode, fs_matchmode=fs_matchmode, funclist=funclist)
##
print "\n"
pprint.pprint(chainparams)
print "\n"
## return
return reglist, chainparams
def chain_masker(reglist, chainparams):
	"""
	Clip each region of a chain (from funclist_chain) to its own uv slab so
	adjacent regions tile the diagram without overlap.

	Interior blocks are clipped to ps_v0 <= v <= fs_v0; the last (outermost)
	block of each region is additionally clipped in u, and up to three extra
	copies of it (top / bottom / right corner pieces) are appended depending
	on whether the horizon radius Rh grows or shrinks across the past and
	future junctions. Also records the block masses into chainparams['m'].
	Returns the mutated (reglist, chainparams).
	"""
	##
	for i in range(len(reglist)):
		## mask interior blocks
		for b in reglist[i].blocks[:-1]:
			## past
			if i>0:
				b.uvbounds.update(dict(vmin=chainparams['ps_v0'][i]))
			## future
			if i<len(reglist)-1:
				b.uvbounds.update(dict(vmax=chainparams['fs_v0'][i]))
		## mask final blocks for part that is always there
		for b in reglist[i].blocks[-1:]:
			## past
			if i>0:
				b.uvbounds.update(dict(vmin=chainparams['ps_v0'][i], umin=chainparams['ps_u0'][i]))
			## future
			if i<len(reglist)-1:
				b.uvbounds.update(dict(vmax=chainparams['fs_v0'][i]))
		## copy final block for parts which depend on radius change values
		for b in reglist[i].blocks[-1:]:
			## copies
			ba = xh.block(b.master, b.j, b.bparams)
			bb = xh.block(b.master, b.j, b.bparams)
			bc = xh.block(b.master, b.j, b.bparams)
			## mask a=top b=bottom c=right
			ba.uvbounds.update(dict(vmin=chainparams['fs_v0'][i], vmax= np.inf, umin=chainparams['ps_u0'][i], umax=chainparams['fs_u0'][i]))
			bb.uvbounds.update(dict(vmin=chainparams['ps_v0'][i], vmax=chainparams['fs_v0'][i], umin=-np.inf, umax=chainparams['ps_u0'][i]))
			bc.uvbounds.update(dict(vmin=chainparams['fs_v0'][i], vmax= np.inf, umin=-np.inf, umax=chainparams['ps_u0'][i]))
			## add bottom if increasing from past
			if i>0 and chainparams['Rh'][i-1]<chainparams['Rh'][i]:
				reglist[i].blocks += [bb]
			## add top if decreasing to future
			if i<len(reglist)-1 and chainparams['Rh'][i+1]<chainparams['Rh'][i]:
				reglist[i].blocks += [ba]
			## add right if both
			if i>0 and i<len(reglist)-1 and chainparams['Rh'][i-1]<chainparams['Rh'][i] and chainparams['Rh'][i+1]<chainparams['Rh'][i]:
				reglist[i].blocks += [bc]
	## add masses to chainparams
	chainparams.update(dict(m=getmm(reglist)))
	## return
	return reglist, chainparams
def shellparams_list(Rmax=1., le=.1, Nevap=5, Tevap=10., Tacc=1., Naccrete=1, functype=xh.mf.schwarzschild, fparams=dict()):
	"""
	Build a list of per-shell parameter dicts for the evaporation sequence.

	Masses and spacings (m, du, dv) come from xh.evap.SSp.SSduvm and are
	reversed so the list runs from smallest to largest mass. Each entry holds
	a deep-copied metric function of radius R=2*m[i] plus its du/dv sizes.
	The last entry's dv is overridden to the accretion spacing
	Tacc/max(Naccrete-1, 1) so it joins onto the accretion phase.
	"""
	## init
	m, du, dv = xh.evap.SSp.SSduvm(Nevap=1*Nevap, Tevap=1.*Tevap, M=0.5*Rmax, le=1.*le)
	m, du, dv = m[::-1], du[::-1], dv[::-1]
	mdudv = [m, du, dv]
	## get shellparams
	sp = []
	for i in range(len(m)):
		func = functype(R=2.*m[i], **fparams)
		sp += [dict(func=copy.deepcopy(func), Rself=1.*func.fparams['R'], du=1.*du[i], dv=1.*dv[i], le=1.*le, Tevap=1.*Tevap, Nevap=1*Nevap, mdudv=mdudv)]
	## edit final one
	sp[-1]['dv'] = 1.*Tacc/float(max(Naccrete-1,1))
	## print
	pprint.pprint(sp)
	## return
	return sp
def cp_from_fdudv(funclist, du=None, dv=None, le=None, uoff=0., voff=0., ueta=1., veta=1.):
	"""
	Derive the chain-parameter dict (cp) fed to funclist_chain from a list of
	metric functions and per-region sizes du, dv.

	le is a small radial offset placing slice radii just outside each horizon
	radius Rh; uoff/voff shift and ueta/veta rescale the cumulative u0/v0
	slice offsets. Past/future slice radii r0p/r0f are corrected per region:
	accretion junctions (Rh non-decreasing) slice at the estimated asymptotic
	radius rinf, evaporation junctions (Rh decreasing) at Rh+le; the chain's
	two outer ends always slice at rinf.
	"""
	## init
	funclist = funclist
	reglist = [xh.reg.EFreg(funcx, boundary=None, rlines=None) for funcx in funclist]
	Rh = np.array([funclist[i].rj[-2] for i in range(len(funclist))])
	du = 1.*du
	dv = 1.*dv
	r0f = 1.*Rh + 1.*le
	r0p = 1.*np.roll(r0f,1)
	u0 = 1.*ueta*np.cumsum(du-du[0]) + 1.*uoff
	v0 = 1.*veta*np.cumsum(dv-dv[0]) + 1.*voff
	ps_matchmode = None #['ru' for i in range(len(funclist))] ### Edit matchmode here
	fs_matchmode = None #['ru' for i in range(len(funclist))] ### Edit matchmode here
	## iterator
	ii = range(len(funclist))
	## get rinf
	rinf = np.nan * Rh
	for i in ii:
		## estimate from a 3-region window centered on i (clipped at ends)
		ia, ib = max(0, i-1), min(i+2, len(ii))
		rinf[i] = get_rinf_uv0(reglist[ia:ib], v0=1.*v0)
	print rinf
	## correct first and last r0 values
	r0p[0] = 1.*rinf[0]
	r0f[-1] = 1.*rinf[-1]
	## correct r0 values for formation and evaporation
	for i in ii:
		## past
		if i>0:
			## accretion
			if Rh[i]>=Rh[i-1]:
				r0p[i] = 1.*rinf[i]
			## evaporation
			if Rh[i]< Rh[i-1]:
				r0p[i] = 1.*Rh[i-1] + 1.*le
		## future
		if i<len(ii)-1:
			## accretion
			if Rh[i]<=Rh[i+1]:
				r0f[i] = 1.*rinf[i]
			## evaporation
			if Rh[i]> Rh[i+1]:
				r0f[i] = 1.*Rh[i] + 1.*le
	## make cp
	cp = dict(du=1.*du, dv=1.*dv, r0p=1.*r0p, r0f=1.*r0f, u0=1.*u0, v0=1.*v0, ps_matchmode=ps_matchmode, fs_matchmode=fs_matchmode)
	# ## return
	return cp.copy()
def formevap_input(Rmax=1., le=.01, Tevap=1., Tacc=1., Nevap=5, Naccrete=5, uoff=0., voff=0., ueta=1., veta=1., functype0=xh.mf.minkowski, fparams0=dict(), functype1=xh.mf.schwarzschild, fparams1=dict()):
	"""
	Build inputs in reverse order starting from far future.

	Assembles the sequence: final functype0 region (e.g. Minkowski), the
	evaporation shells from shellparams_list, the accretion shells of
	decreasing radius, and the initial functype0 region, then reverses it to
	run past-to-future. Returns (funclist, cp) where cp is the keyword dict
	for funclist_chain (du, dv, r0p, r0f, u0, v0, matchmodes) as produced by
	cp_from_fdudv.
	"""
	## init
	funclist = []
	du = []
	dv = []
	## final region
	funclist += [functype0(**fparams0)]
	du += [0.]
	dv += [0.]
	## evap
	sp = shellparams_list(Rmax=1.*Rmax, Nevap=Nevap, le=1.*le, Tevap=1.*Tevap, Naccrete=1*Naccrete, Tacc=1.*Tacc, functype=functype1, fparams=fparams1)
	for i in range(len(sp)):
		funclist += [sp[i]['func']]
		du += [sp[i]['du']]
		dv += [sp[i]['dv']]
	## max radius
	Rmax = sp[-1]['Rself']
	## accrete params
	RR = np.linspace(Rmax,0.5*Rmax, Naccrete)[1:]
	for R in RR:
		funclist += [functype1(R=1.*R, **fparams1)]
		du += [0.]
		dv += [Tacc/float(Naccrete-1)]
	## first region
	funclist += [functype0(**fparams0)]
	du += [0.]
	dv += [0.]
	## prep for output
	funclist = funclist[::-1]
	du = np.array(du[::-1])
	dv = np.array(dv[::-1])
	le = 1.*le
	## get chain params
	cp = cp_from_fdudv(funclist, du=1.*du, dv=1.*dv, le=1.*le, uoff=1.*uoff, voff=1.*voff, ueta=1.*ueta, veta=1.*veta)
	##
	pprint.pprint(cp)
	## return
	return funclist, cp
def create_evap(params, seed=0):
	"""
	Build a complete formation/evaporation diagram in one call.

	params is the keyword dict accepted by formevap_input (Rmax, le, Tevap,
	Tacc, Nevap, Naccrete, offsets, and metric function types); seed selects
	the chain's seed region. Runs formevap_input -> funclist_chain ->
	chain_masker and returns (reglist, chainparams).
	"""
	##
	import pprint
	## print
	pprint.pprint("params = %s"%(params))
	pprint.pprint("seed = %s"%(seed))
	## formevap_input
	print "inputs"
	funclist, cp = xh.evap.formevap_input(**params)
	## funclist_chain
	print "chain"
	reglist, chainparams = xh.evap.funclist_chain(funclist, seed=seed, **cp)
	## chain_masker
	print "mask"
	reglist, chainparams = xh.evap.chain_masker(reglist, chainparams)
	## print
	pprint.pprint(chainparams)
	## return
	return reglist, chainparams
def evapsave(path="temp/temp", params=None, chainparams=None, seed=None, sfp=dict(), temp_only=False, massplot=False):
	"""
	Save figure 1 with a timestamp, plus a txt file recording the input and
	output parameters, then copy (or move, when temp_only=True) the results
	to fixed "<path>_temp.*" names. When massplot=True, also save and copy
	the mass plot (figure 99). sfp is extra keyword args for plt.savefig.
	"""
	##
	import shutil
	import time
	import pprint
	import matplotlib.pyplot as plt
	## get path with timestamp
	ts = str(time.time()).replace(".","")
	## save figure
	print( "save...")
	plt.figure(1)
	sfpp = dict(dpi=400)
	sfpp.update(sfp)
	plt.savefig("%s_%s.png"%(path,ts), **sfpp)
	print( "save done")
	##save text
	print( "save txt")
	ff = open("%s_%s.txt"%(path,ts), 'w')
	ff.write("%s_%s\n"%(path,ts))
	ff.write('\n')
	ff.write('Input:\nparams=\n%s\nseed=\n%s\n'%(pprint.pformat(params),seed))
	ff.write('\n')
	ff.write('Output:\nchainparams=\n%s\n'%(pprint.pformat(chainparams)))
	ff.close()
	##save massplot
	if massplot==True:
		print( "save massplot...")
		xh.evap.massplot.massplotrc()
		plt.figure(99)
		plt.savefig("%s_%s_mass.png"%(path,ts), **sfpp)
		print( "save done")
	## copy to temp
	print( "copy...")
	## copy normally
	if temp_only==False:
		tempsave = shutil.copy
	if temp_only==True:
		tempsave = shutil.move
	## copy or move
	tempsave("%s_%s.png"%(path,ts), path+"_temp.png")
	tempsave("%s_%s.txt"%(path,ts), path+"_temp.txt")
	## bugfix: only copy the mass plot when it was actually created above;
	## previously this ran unconditionally and raised when massplot=False
	if massplot==True:
		tempsave("%s_%s_mass.png"%(path,ts), path+"_temp_mass.png")
	## print
	print( "copy done")
## no standalone behavior: this module is import-only
if __name__=='__main__':
	pass
##################################################################################################################
|
"""
This module provides methods for constructing black hole (BH) formation and evaporation diagrams.
This module imports the entire xhorizon package.
It is meant for a higher level usage than the other subpackages, none of the
guts of xhorizon rely on this.
"""
import numpy as np
import matplotlib.pyplot as plt
import copy, pprint
import scipy.optimize as opt
import xhorizon as xh
from helpers import *
###############################################################################################################3
def funclist_chain(funclist, seed=0, du=None, dv=None, r0p=None, r0f=None, u0=None, v0=None, ps_matchmode=None, fs_matchmode=None):
"""
Create a chain of matched regions, starting at seed region which is unmodified.
Each region except ends has two slices through it, a future slice fslice and past slice pslice.
Each fslice and pslice can be either active or passive, but there can only be one active slice per region.
The index i refers to each region in the sequence for all variables.
Inputs:
funclist = list of func objects, in order, to chain together
seed = index value for seed region (seed region has trivial transforms to target coords)
du = list of du values so that du[i] will always be size of region[i]
dv = list of du values so that du[i] will always be size of region[i]
r0p = list of r0 values for past slice so that r0p will always be ps_r0 when pslice is active
r0f = list of r0 values for future slice so that r0f will always be fs_r0 when fslice is active
u0 = list of offset values for range of u values in slice, defaults to zero
v0 = list of offset values for range of v values in slice, defaults to zero
ps_matchmode = list of strings, each either 'ru' or 'rv', to determine how past slice is sliced when pslice is active
ps_matchmode = list of strings, each either 'ru' or 'rv', to determine how future slice is sliced when fslice is active
"""
print "du funclist_chain"
print repr(du)
print "dv funclist_chain"
print repr(dv)
## init default values
if u0==None:
u0 = np.zeros(len(funclist))
if v0==None:
v0 = np.zeros(len(funclist))
if ps_matchmode==None:
ps_matchmode = ['rv' for func in funclist]
if fs_matchmode==None:
fs_matchmode = ['rv' for func in funclist]
## set irrelevant first and last du and dv values to zero
du[0], du[-1] = 0., 0.
dv[0], dv[-1] = 0., 0.
## init internal variables
reglist = [xh.reg.EFreg(funcx, boundary=False, rlines=False) for funcx in funclist]
pslice = [None for funcx in funclist]
fslice = [None for funcx in funclist]
Rh = [funcx.rj[-2] for funcx in funclist]
ps_r0 = [np.nan for funcx in funclist]
ps_u0 = [np.nan for funcx in funclist]
ps_v0 = [np.nan for funcx in funclist]
fs_r0 = [np.nan for funcx in funclist]
fs_u0 = [np.nan for funcx in funclist]
fs_v0 = [np.nan for funcx in funclist]
i0 = range(len(funclist))[1*seed]
ps_matchpop = [mp(mmm) for mmm in ps_matchmode]
fs_matchpop = [mp(mmm) for mmm in fs_matchmode]
## seed region
i = 1*i0
for i in [1*i0]:
###### past passive slice
## past passive slice input params (not mutually consistent)
ps_u0[i] = u0[i] - 0.5*du[i]
ps_v0[i] = v0[i] - 0.5*dv[i]
ps_r0[i] = 1.*r0p[i]
## get past passive slice location from inputs and matchpop
sliceloc = dict(u0=ps_u0[i], v0=ps_v0[i], r0=ps_r0[i])
sliceloc.pop(ps_matchpop[i])
print "i=%s pslice loc: %s"%(i,sliceloc)
## execute past passive slice at sliceloc
pslice[i] = xh.junc.pslice(reglist[i], ublocks=[-1], vblocks=range(len(reglist[i].blocks)), **sliceloc)
## update past passive slice location to true values
ps_u0[i], ps_v0[i], ps_r0[i] = 1.*pslice[i].u0, 1.*pslice[i].v0, 1.*pslice[i].r0
#### future passive slice
## future passive slice input params (not mutually consistent)
fs_u0[i] = 1.*ps_u0[i] + 1.*du[i]
fs_v0[i] = 1.*ps_v0[i] + 1.*dv[i]
fs_r0[i] = 1.*r0f[i]
## get future passive slice location from inputs and matchpop
sliceloc = dict(u0=fs_u0[i], v0=fs_v0[i], r0=fs_r0[i])
sliceloc.pop(fs_matchpop[i])
print "i=%s fslice loc: %s"%(i,sliceloc)
## execute future passive slice at sliceloc
fslice[i] = xh.junc.pslice(reglist[i], ublocks=[-1], vblocks=range(len(reglist[i].blocks)), **sliceloc)
## update future passive slice location to true values
fs_u0[i], fs_v0[i], fs_r0[i] = 1.*fslice[i].u0, 1.*fslice[i].v0, 1.*fslice[i].r0
## forward regions
i = 1*i0 + 1
while i < len(reglist):
###### past active slice
## past active slice input params (not mutually consistent)
ps_u0[i] = u0[i] - 0.5*du[i]
ps_v0[i] = v0[i] - 0.5*dv[i]
ps_r0[i] = 1.*fs_r0[i-1]
## get past active slice location from inputs and matchpop
sliceloc = dict(u0=ps_u0[i], v0=ps_v0[i], r0=ps_r0[i])
sliceloc.pop(ps_matchpop[i])
print "i=%s pslice loc: %s"%(i,sliceloc)
## execute past active slice at sliceloc
pslice[i] = xh.junc.aslice(reglist[i], ublocks=[-1], vblocks=range(len(reglist[i].blocks)), U0=fslice[i-1].U_of_r_at_v0, V0=fslice[i-1].V_of_r_at_u0, r_refs=[fslice[i-1].reg.metfunc.r_ref], **sliceloc)
## update past active slice location to true values
ps_u0[i], ps_v0[i], ps_r0[i] = 1.*pslice[i].u0, 1.*pslice[i].v0, 1.*pslice[i].r0
#### modify transformations
## adjust transformations
reglist[i].U_of_udl = pslice[i].U_of_udl_at_v0
reglist[i].V_of_vdl = pslice[i].V_of_vdl_at_u0
#### future passive slice
## future passive slice input params (not mutually consistent)
fs_u0[i] = 1.*ps_u0[i] + 1.*du[i]
fs_v0[i] = 1.*ps_v0[i] + 1.*dv[i]
fs_r0[i] = 1.*r0f[i]
## get past active slice location from inputs and matchpop
sliceloc = dict(u0=fs_u0[i], v0=fs_v0[i], r0=fs_r0[i])
sliceloc.pop(fs_matchpop[i])
print "i=%s fslice loc: %s"%(i,sliceloc)
## execute future passive slice at sliceloc
fslice[i] = xh.junc.pslice(reglist[i], ublocks=[-1], vblocks=range(len(reglist[i].blocks)), **sliceloc)
## update future passive slice location to true values
fs_u0[i], fs_v0[i], fs_r0[i] = 1.*fslice[i].u0, 1.*fslice[i].v0, 1.*fslice[i].r0
##### iterate
## iterate
i += 1
## backward regions
i = 1*i0 - 1
while i>=0:
###### future active slice
## past active slice input params (not mutually consistent)
fs_u0[i] = u0[i] - 0.5*du[i]
fs_v0[i] = v0[i] - 0.5*dv[i]
fs_r0[i] = 1.*ps_r0[i+1]
## get future active slice location from inputs and matchpop
sliceloc = dict(u0=fs_u0[i], v0=fs_v0[i], r0=fs_r0[i])
sliceloc.pop(fs_matchpop[i])
print "i=%s fslice loc: %s"%(i,sliceloc)
## execute future active slice at sliceloc
fslice[i] = xh.junc.aslice(reglist[i], ublocks=[-1], vblocks=range(len(reglist[i].blocks)), U0=pslice[i+1].U_of_r_at_v0, V0=pslice[i+1].V_of_r_at_u0, r_refs=[pslice[i+1].reg.metfunc.r_ref], **sliceloc)
## update future active slice location to true values
fs_u0[i], fs_v0[i], fs_r0[i] = 1.*fslice[i].u0, 1.*fslice[i].v0, 1.*fslice[i].r0
#### modify transformations
## adjust transformations
reglist[i].U_of_udl = fslice[i].U_of_udl_at_v0
reglist[i].V_of_vdl = fslice[i].V_of_vdl_at_u0
#### past passive slice
## past passive slice input params (not mutually consistent)
ps_u0[i] = 1.*fs_u0[i] - 1.*du[i]
ps_v0[i] = 1.*fs_v0[i] - 1.*dv[i]
ps_r0[i] = 1.*r0p[i]
## get past passive slice location from inputs and matchpop
sliceloc = dict(u0=ps_u0[i], v0=ps_v0[i], r0=ps_r0[i])
sliceloc.pop(ps_matchpop[i])
print "i=%s pslice loc: %s"%(i,sliceloc)
## execute past passive slice at sliceloc
pslice[i] = xh.junc.pslice(reglist[i], ublocks=[-1], vblocks=range(len(reglist[i].blocks)), **sliceloc)
## update future passive slice location to true values
ps_u0[i], ps_v0[i], ps_r0[i] = 1.*pslice[i].u0, 1.*pslice[i].v0, 1.*pslice[i].r0
##### iterate
## iterate
i -= 1
## make sliceparams dict
chainparams = dict(Rh=1.*np.array(Rh), ps_u0=1.*np.array(ps_u0), ps_v0=1.*np.array(ps_v0), ps_r0=1.*np.array(ps_r0), fs_u0=1.*np.array(fs_u0), fs_v0=1.*np.array(fs_v0), fs_r0=1.*np.array(fs_r0), i0=1*i0, ps_matchmode=ps_matchmode, fs_matchmode=fs_matchmode, funclist=funclist)
##
print "\n"
pprint.pprint(chainparams)
print "\n"
## return
return reglist, chainparams
def chain_masker(reglist, chainparams):
	"""
	Clip each region of a chain (from funclist_chain) to its own uv slab so
	adjacent regions tile the diagram without overlap.

	Interior blocks are clipped to ps_v0 <= v <= fs_v0; the last (outermost)
	block of each region is additionally clipped in u, and up to three extra
	copies of it (top / bottom / right corner pieces) are appended depending
	on whether the horizon radius Rh grows or shrinks across the past and
	future junctions. Also records the block masses into chainparams['m'].
	Returns the mutated (reglist, chainparams).
	"""
	##
	for i in range(len(reglist)):
		## mask interior blocks
		for b in reglist[i].blocks[:-1]:
			## past
			if i>0:
				b.uvbounds.update(dict(vmin=chainparams['ps_v0'][i]))
			## future
			if i<len(reglist)-1:
				b.uvbounds.update(dict(vmax=chainparams['fs_v0'][i]))
		## mask final blocks for part that is always there
		for b in reglist[i].blocks[-1:]:
			## past
			if i>0:
				b.uvbounds.update(dict(vmin=chainparams['ps_v0'][i], umin=chainparams['ps_u0'][i]))
			## future
			if i<len(reglist)-1:
				b.uvbounds.update(dict(vmax=chainparams['fs_v0'][i]))
		## copy final block for parts which depend on radius change values
		for b in reglist[i].blocks[-1:]:
			## copies
			ba = xh.block(b.master, b.j, b.bparams)
			bb = xh.block(b.master, b.j, b.bparams)
			bc = xh.block(b.master, b.j, b.bparams)
			## mask a=top b=bottom c=right
			ba.uvbounds.update(dict(vmin=chainparams['fs_v0'][i], vmax= np.inf, umin=chainparams['ps_u0'][i], umax=chainparams['fs_u0'][i]))
			bb.uvbounds.update(dict(vmin=chainparams['ps_v0'][i], vmax=chainparams['fs_v0'][i], umin=-np.inf, umax=chainparams['ps_u0'][i]))
			bc.uvbounds.update(dict(vmin=chainparams['fs_v0'][i], vmax= np.inf, umin=-np.inf, umax=chainparams['ps_u0'][i]))
			## add bottom if increasing from past
			if i>0 and chainparams['Rh'][i-1]<chainparams['Rh'][i]:
				reglist[i].blocks += [bb]
			## add top if decreasing to future
			if i<len(reglist)-1 and chainparams['Rh'][i+1]<chainparams['Rh'][i]:
				reglist[i].blocks += [ba]
			## add right if both
			if i>0 and i<len(reglist)-1 and chainparams['Rh'][i-1]<chainparams['Rh'][i] and chainparams['Rh'][i+1]<chainparams['Rh'][i]:
				reglist[i].blocks += [bc]
	## add masses to chainparams
	chainparams.update(dict(m=getmm(reglist)))
	## return
	return reglist, chainparams
def shellparams_list(Rmax=1., le=.1, Nevap=5, Tevap=10., Tacc=1., Naccrete=1, functype=xh.mf.schwarzschild, fparams=dict()):
	"""
	Build a list of per-shell parameter dicts for the evaporation sequence.

	Masses and spacings (m, du, dv) come from xh.evap.SSp.SSduvm and are
	reversed so the list runs from smallest to largest mass. Each entry holds
	a deep-copied metric function of radius R=2*m[i] plus its du/dv sizes.
	The last entry's dv is overridden to the accretion spacing
	Tacc/max(Naccrete-1, 1) so it joins onto the accretion phase.
	"""
	## init
	m, du, dv = xh.evap.SSp.SSduvm(Nevap=1*Nevap, Tevap=1.*Tevap, M=0.5*Rmax, le=1.*le)
	m, du, dv = m[::-1], du[::-1], dv[::-1]
	mdudv = [m, du, dv]
	## get shellparams
	sp = []
	for i in range(len(m)):
		func = functype(R=2.*m[i], **fparams)
		sp += [dict(func=copy.deepcopy(func), Rself=1.*func.fparams['R'], du=1.*du[i], dv=1.*dv[i], le=1.*le, Tevap=1.*Tevap, Nevap=1*Nevap, mdudv=mdudv)]
	## edit final one
	sp[-1]['dv'] = 1.*Tacc/float(max(Naccrete-1,1))
	## print
	pprint.pprint(sp)
	## return
	return sp
def cp_from_fdudv(funclist, du=None, dv=None, le=None, uoff=0., voff=0., ueta=1., veta=1.):
	"""
	Derive the chain-parameter dict (cp) fed to funclist_chain from a list of
	metric functions and per-region sizes du, dv.

	le is a small radial offset placing slice radii just outside each horizon
	radius Rh; uoff/voff shift and ueta/veta rescale the cumulative u0/v0
	slice offsets. Past/future slice radii r0p/r0f are corrected per region:
	accretion junctions (Rh non-decreasing) slice at the estimated asymptotic
	radius rinf, evaporation junctions (Rh decreasing) at Rh+le; the chain's
	two outer ends always slice at rinf.
	"""
	## init
	funclist = funclist
	reglist = [xh.reg.EFreg(funcx, boundary=None, rlines=None) for funcx in funclist]
	Rh = np.array([funclist[i].rj[-2] for i in range(len(funclist))])
	du = 1.*du
	dv = 1.*dv
	r0f = 1.*Rh + 1.*le
	r0p = 1.*np.roll(r0f,1)
	u0 = 1.*ueta*np.cumsum(du-du[0]) + 1.*uoff
	v0 = 1.*veta*np.cumsum(dv-dv[0]) + 1.*voff
	ps_matchmode = None #['ru' for i in range(len(funclist))] ### Edit matchmode here
	fs_matchmode = None #['ru' for i in range(len(funclist))] ### Edit matchmode here
	## iterator
	ii = range(len(funclist))
	## get rinf
	rinf = np.nan * Rh
	for i in ii:
		## estimate from a 3-region window centered on i (clipped at ends)
		ia, ib = max(0, i-1), min(i+2, len(ii))
		rinf[i] = get_rinf_uv0(reglist[ia:ib], v0=1.*v0)
	print rinf
	## correct first and last r0 values
	r0p[0] = 1.*rinf[0]
	r0f[-1] = 1.*rinf[-1]
	## correct r0 values for formation and evaporation
	for i in ii:
		## past
		if i>0:
			## accretion
			if Rh[i]>=Rh[i-1]:
				r0p[i] = 1.*rinf[i]
			## evaporation
			if Rh[i]< Rh[i-1]:
				r0p[i] = 1.*Rh[i-1] + 1.*le
		## future
		if i<len(ii)-1:
			## accretion
			if Rh[i]<=Rh[i+1]:
				r0f[i] = 1.*rinf[i]
			## evaporation
			if Rh[i]> Rh[i+1]:
				r0f[i] = 1.*Rh[i] + 1.*le
	## make cp
	cp = dict(du=1.*du, dv=1.*dv, r0p=1.*r0p, r0f=1.*r0f, u0=1.*u0, v0=1.*v0, ps_matchmode=ps_matchmode, fs_matchmode=fs_matchmode)
	# ## return
	return cp.copy()
def formevap_input(Rmax=1., le=.01, Tevap=1., Tacc=1., Nevap=5, Naccrete=5, uoff=0., voff=0., ueta=1., veta=1., functype0=xh.mf.minkowski, fparams0=dict(), functype1=xh.mf.schwarzschild, fparams1=dict()):
"""
Build inputs in reverse order starting from far future.
funclist, seed=0, du=None, dv=None, r0p=None, r0f=None, u0=None, v0=None, ps_matchmode=None, fs_matchmode=None
"""
## init
funclist = []
du = []
dv = []
## final region
funclist += [functype0(**fparams0)]
du += [0.]
dv += [0.]
## evap
sp = shellparams_list(Rmax=1.*Rmax, Nevap=Nevap, le=1.*le, Tevap=1.*Tevap, Naccrete=1*Naccrete, Tacc=1.*Tacc, functype=functype1, fparams=fparams1)
for i in range(len(sp)):
funclist += [sp[i]['func']]
du += [sp[i]['du']]
dv += [sp[i]['dv']]
## max radius
Rmax = sp[-1]['Rself']
## accrete params
RR = np.linspace(Rmax,0.5*Rmax, Naccrete)[1:]
for R in RR:
funclist += [functype1(R=1.*R, **fparams1)]
du += [0.]
dv += [Tacc/float(Naccrete-1)]
## first region
funclist += [functype0(**fparams0)]
du += [0.]
dv += [0.]
## prep for output
funclist = funclist[::-1]
du = np.array(du[::-1])
dv = np.array(dv[::-1])
le = 1.*le
## get chain params
cp = cp_from_fdudv(funclist, du=1.*du, dv=1.*dv, le=1.*le, uoff=1.*uoff, voff=1.*voff, ueta=1.*ueta, veta=1.*veta)
##
pprint.pprint(cp)
## return
return funclist, cp
def create_evap(params, seed=0):
"""
Takes input parameters of the form:
"""
##
import pprint
## print
pprint.pprint("params = %s"%(params))
pprint.pprint("seed = %s"%(seed))
## formevap_input
print "inputs"
funclist, cp = xh.evap.formevap_input(**params)
## funclist_chain
print "chain"
reglist, chainparams = xh.evap.funclist_chain(funclist, seed=seed, **cp)
## chain_masker
print "mask"
reglist, chainparams = xh.evap.chain_masker(reglist, chainparams)
## print
pprint.pprint(chainparams)
## return
return reglist, chainparams
def evapsave(path="temp/temp", params=None, chainparams=None, seed=None, sfp=dict(), temp_only=False, massplot=False):
"""
Save figure with timestamp and txt notes.
"""
##
import shutil
import time
import pprint
import matplotlib.pyplot as plt
## get path with timestamp
ts = str(time.time()).replace(".","")
## save figure
print( "save...")
plt.figure(1)
sfpp = dict(dpi=400)
sfpp.update(sfp)
plt.savefig("%s_%s.png"%(path,ts), **sfpp)
print( "save done")
##save text
print( "save txt")
ff = open("%s_%s.txt"%(path,ts), 'w')
ff.write("%s_%s\n"%(path,ts))
ff.write('\n')
ff.write('Input:\nparams=\n%s\nseed=\n%s\n'%(pprint.pformat(params),seed))
ff.write('\n')
ff.write('Output:\nchainparams=\n%s\n'%(pprint.pformat(chainparams)))
ff.close()
##save massplot
if massplot==True:
print( "save massplot...")
xh.evap.massplot.massplotrc()
plt.figure(99)
plt.savefig("%s_%s_mass.png"%(path,ts), **sfpp)
print( "save done")
## copy to temp
print( "copy...")
## copy normally
if temp_only==False:
tempsave = shutil.copy
if temp_only==True:
tempsave = shutil.move
## copy or move
tempsave("%s_%s.png"%(path,ts), path+"_temp.png")
tempsave("%s_%s.txt"%(path,ts), path+"_temp.txt")
tempsave("%s_%s_mass.png"%(path,ts), path+"_temp_mass.png")
## print
print( "copy done")
if __name__=='__main__':
pass
##################################################################################################################
| en | 0.532633 | This module provides method for making forming and evaporation BH diagrams. This module imports the entire xhorizon package. It is meant for a higher level usage than the other subpackages, none of the guts of xhorizon rely on this. ###############################################################################################################3 Create a chain of matched regions, starting at seed region which is unmodified. Each region except ends has two slices through it, a future slice fslice and past slice pslice. Each fslice and pslice can be either active or passive, but there can only be one active slice per region. The index i refers to each region in the sequence for all variables. Inputs: funclist = list of func objects, in order, to chain together seed = index value for seed region (seed region has trivial transforms to target coords) du = list of du values so that du[i] will always be size of region[i] dv = list of du values so that du[i] will always be size of region[i] r0p = list of r0 values for past slice so that r0p will always be ps_r0 when pslice is active r0f = list of r0 values for future slice so that r0f will always be fs_r0 when fslice is active u0 = list of offset values for range of u values in slice, defaults to zero v0 = list of offset values for range of v values in slice, defaults to zero ps_matchmode = list of strings, each either 'ru' or 'rv', to determine how past slice is sliced when pslice is active ps_matchmode = list of strings, each either 'ru' or 'rv', to determine how future slice is sliced when fslice is active ## init default values ## set irrelevant first and last du and dv values to zero ## init internal variables ## seed region ###### past passive slice ## past passive slice input params (not mutually consistent) ## get past passive slice location from inputs and matchpop ## execute past passive slice at sliceloc ## update past passive slice location to true values #### future passive slice ## future 
passive slice input params (not mutually consistent) ## get future passive slice location from inputs and matchpop ## execute future passive slice at sliceloc ## update future passive slice location to true values ## forward regions ###### past active slice ## past active slice input params (not mutually consistent) ## get past active slice location from inputs and matchpop ## execute past active slice at sliceloc ## update past active slice location to true values #### modify transformations ## adjust transformations #### future passive slice ## future passive slice input params (not mutually consistent) ## get past active slice location from inputs and matchpop ## execute future passive slice at sliceloc ## update future passive slice location to true values ##### iterate ## iterate ## backward regions ###### future active slice ## past active slice input params (not mutually consistent) ## get future active slice location from inputs and matchpop ## execute future active slice at sliceloc ## update future active slice location to true values #### modify transformations ## adjust transformations #### past passive slice ## past passive slice input params (not mutually consistent) ## get past passive slice location from inputs and matchpop ## execute past passive slice at sliceloc ## update future passive slice location to true values ##### iterate ## iterate ## make sliceparams dict ## ## return ## ## mask interior blocks ## past ## future ## mask final blocks for part that is always there ## past ## future ## copy final block for parts which depend on radius change values ## copies ## mask a=top b=bottom c=right ## add bottom if increasing from past ## add top if decreasing to future ## add right if both ## add masses to chainparams ## return ## init ## get shellparams ## edit final one ## print ## return ## init #['ru' for i in range(len(funclist))] ### Edit matchmode here #['ru' for i in range(len(funclist))] ### Edit matchmode here ## iterator ## get rinf ## 
correct first and last r0 values ## correct r0 values for formation and evaporation ## past ## accretion ## evaporation ## future ## accretion ## evaporation ## make cp # ## return Build inputs in reverse order starting from far future. funclist, seed=0, du=None, dv=None, r0p=None, r0f=None, u0=None, v0=None, ps_matchmode=None, fs_matchmode=None ## init ## final region ## evap ## max radius ## accrete params ## first region ## prep for output ## get chain params ## ## return Takes input parameters of the form: ## ## print ## formevap_input ## funclist_chain ## chain_masker ## print ## return Save figure with timestamp and txt notes. ## ## get path with timestamp ## save figure ##save text ##save massplot ## copy to temp ## copy normally ## copy or move ## print ################################################################################################################## | 2.620858 | 3 |
lib/api/permissions.py | jamedadi/jobnet | 3 | 6614716 | from rest_framework.permissions import BasePermission, SAFE_METHODS, IsAuthenticatedOrReadOnly
class IsAdminOrReadOnly(BasePermission):
def has_permission(self, request, view):
if request.method in SAFE_METHODS:
return True
return request.user.is_staff
class IsEmployerOrReadOnly(IsAuthenticatedOrReadOnly):
def has_permission(self, request, view):
return super().has_permission(request, view) and request.user.is_employer
class IsObjectEmployerOrReadOnly(BasePermission):
def has_permission(self, request, view):
if request.method in SAFE_METHODS:
return True
user = request.user
return bool(user.is_authenticated and user.is_employer)
def has_object_permission(self, request, view, obj):
if request.method in SAFE_METHODS:
return True
user = request.user
return bool(user.is_authenticated and user.is_employer and obj.employer == user.employer)
class IsEmployer(IsObjectEmployerOrReadOnly):
def has_object_permission(self, request, view, obj):
if request.method in SAFE_METHODS:
return True
user = request.user
return bool(user.is_authenticated and user.is_employer)
class IsEmployerOwnedEmployeeOrReadOnly(IsObjectEmployerOrReadOnly):
def has_object_permission(self, request, view, obj):
if request.method in SAFE_METHODS:
return True
user = request.user
return bool(user.is_authenticated and user.is_employer and obj.company.employer == user.employer)
| from rest_framework.permissions import BasePermission, SAFE_METHODS, IsAuthenticatedOrReadOnly
class IsAdminOrReadOnly(BasePermission):
def has_permission(self, request, view):
if request.method in SAFE_METHODS:
return True
return request.user.is_staff
class IsEmployerOrReadOnly(IsAuthenticatedOrReadOnly):
def has_permission(self, request, view):
return super().has_permission(request, view) and request.user.is_employer
class IsObjectEmployerOrReadOnly(BasePermission):
def has_permission(self, request, view):
if request.method in SAFE_METHODS:
return True
user = request.user
return bool(user.is_authenticated and user.is_employer)
def has_object_permission(self, request, view, obj):
if request.method in SAFE_METHODS:
return True
user = request.user
return bool(user.is_authenticated and user.is_employer and obj.employer == user.employer)
class IsEmployer(IsObjectEmployerOrReadOnly):
def has_object_permission(self, request, view, obj):
if request.method in SAFE_METHODS:
return True
user = request.user
return bool(user.is_authenticated and user.is_employer)
class IsEmployerOwnedEmployeeOrReadOnly(IsObjectEmployerOrReadOnly):
def has_object_permission(self, request, view, obj):
if request.method in SAFE_METHODS:
return True
user = request.user
return bool(user.is_authenticated and user.is_employer and obj.company.employer == user.employer)
| none | 1 | 2.363668 | 2 | |
tests/parser/checker.20.test.py | veltri/DLV2 | 0 | 6614717 | input = """
% +
% a /
% + PMCok - not b - +
% b / e /
% + PMCf
% c /
% +
% d |
% MCf
a | useless1.
b | useless2.
c | useless3 :- b.
d | useless4 :- b.
e | useless5 :- not b.
:- not mbt1, e.
mbt2 :- mbt1.
:- not mbt3, e.
mbt4 :- mbt3.
mbt1 | mbt2 | mbt3 :- mbt4.
mbt2 | mbt3 | mbt4 :- mbt1.
mbt3 | mbt4 | mbt1 :- mbt2.
:- b, not v.
uf1 :- v.
uf2 :- v.
uf3 :- v.
uf1 | uf3 | uf2.
v :- uf1.
"""
output = """
% +
% a /
% + PMCok - not b - +
% b / e /
% + PMCf
% c /
% +
% d |
% MCf
a | useless1.
b | useless2.
c | useless3 :- b.
d | useless4 :- b.
e | useless5 :- not b.
:- not mbt1, e.
mbt2 :- mbt1.
:- not mbt3, e.
mbt4 :- mbt3.
mbt1 | mbt2 | mbt3 :- mbt4.
mbt2 | mbt3 | mbt4 :- mbt1.
mbt3 | mbt4 | mbt1 :- mbt2.
:- b, not v.
uf1 :- v.
uf2 :- v.
uf3 :- v.
uf1 | uf3 | uf2.
v :- uf1.
"""
| input = """
% +
% a /
% + PMCok - not b - +
% b / e /
% + PMCf
% c /
% +
% d |
% MCf
a | useless1.
b | useless2.
c | useless3 :- b.
d | useless4 :- b.
e | useless5 :- not b.
:- not mbt1, e.
mbt2 :- mbt1.
:- not mbt3, e.
mbt4 :- mbt3.
mbt1 | mbt2 | mbt3 :- mbt4.
mbt2 | mbt3 | mbt4 :- mbt1.
mbt3 | mbt4 | mbt1 :- mbt2.
:- b, not v.
uf1 :- v.
uf2 :- v.
uf3 :- v.
uf1 | uf3 | uf2.
v :- uf1.
"""
output = """
% +
% a /
% + PMCok - not b - +
% b / e /
% + PMCf
% c /
% +
% d |
% MCf
a | useless1.
b | useless2.
c | useless3 :- b.
d | useless4 :- b.
e | useless5 :- not b.
:- not mbt1, e.
mbt2 :- mbt1.
:- not mbt3, e.
mbt4 :- mbt3.
mbt1 | mbt2 | mbt3 :- mbt4.
mbt2 | mbt3 | mbt4 :- mbt1.
mbt3 | mbt4 | mbt1 :- mbt2.
:- b, not v.
uf1 :- v.
uf2 :- v.
uf3 :- v.
uf1 | uf3 | uf2.
v :- uf1.
"""
| en | 0.085285 | % +
% a /
% + PMCok - not b - +
% b / e /
% + PMCf
% c /
% +
% d |
% MCf
a | useless1.
b | useless2.
c | useless3 :- b.
d | useless4 :- b.
e | useless5 :- not b.
:- not mbt1, e.
mbt2 :- mbt1.
:- not mbt3, e.
mbt4 :- mbt3.
mbt1 | mbt2 | mbt3 :- mbt4.
mbt2 | mbt3 | mbt4 :- mbt1.
mbt3 | mbt4 | mbt1 :- mbt2.
:- b, not v.
uf1 :- v.
uf2 :- v.
uf3 :- v.
uf1 | uf3 | uf2.
v :- uf1. % +
% a /
% + PMCok - not b - +
% b / e /
% + PMCf
% c /
% +
% d |
% MCf
a | useless1.
b | useless2.
c | useless3 :- b.
d | useless4 :- b.
e | useless5 :- not b.
:- not mbt1, e.
mbt2 :- mbt1.
:- not mbt3, e.
mbt4 :- mbt3.
mbt1 | mbt2 | mbt3 :- mbt4.
mbt2 | mbt3 | mbt4 :- mbt1.
mbt3 | mbt4 | mbt1 :- mbt2.
:- b, not v.
uf1 :- v.
uf2 :- v.
uf3 :- v.
uf1 | uf3 | uf2.
v :- uf1. | 1.842937 | 2 |
tests/test_date_validation.py | OskaRRRitoS/osu_ranked_score_progress | 0 | 6614718 | import unittest
import date_validation as dv
class EnsureValidDate(unittest.TestCase):
def test_one_valid_date(self):
self.assertEqual(None, dv.ensure_valid_date(2007, 10))
class ValidateYears(unittest.TestCase):
def test_one_valid_year(self):
self.assertEqual(None, dv._validate_years(["2007"]))
self.assertEqual(None, dv._validate_years(["2010"]))
self.assertEqual(None, dv._validate_years(["2020"]))
def test_multiple_valid_years(self):
self.assertEqual(None, dv._validate_years(["2010", "2011", "2012"]))
self.assertEqual(None, dv._validate_years(["2007", "2020"]))
self.assertEqual(None, dv._validate_years(["2010", "2009", "2011"]))
def test_multiple_of_same_valid_year(self):
self.assertEqual(None, dv._validate_years(["2007", "2007"]))
self.assertEqual(None, dv._validate_years(["2007", "2007", "2007", "2007"]))
self.assertEqual(None, dv._validate_years(["2007", "2010", "2010", "2013", "2014", "2010"]))
def test_invalid_year_past(self):
self.assertRaises(ValueError, dv._validate_years, ["2006"])
self.assertRaises(ValueError, dv._validate_years, ["2010", "2008", "2005"])
self.assertRaises(ValueError, dv._validate_years, ["256"])
self.assertRaises(ValueError, dv._validate_years, ["-550"])
def test_invalid_year_future(self):
self.assertRaises(ValueError, dv._validate_years, ["2025"])
self.assertRaises(ValueError, dv._validate_years, ["2010", "2012", "2020", "2040"])
self.assertRaises(ValueError, dv._validate_years, ["25000"])
self.assertRaises(ValueError, dv._validate_years, ["231568624546436737"])
class ValidateYearsMonths(unittest.TestCase):
def test_one_valid_yearmonth(self):
self.assertEqual(None, dv._validate_yearmonth(["200710"]))
if __name__ == '__main__':
unittest.main()
| import unittest
import date_validation as dv
class EnsureValidDate(unittest.TestCase):
def test_one_valid_date(self):
self.assertEqual(None, dv.ensure_valid_date(2007, 10))
class ValidateYears(unittest.TestCase):
def test_one_valid_year(self):
self.assertEqual(None, dv._validate_years(["2007"]))
self.assertEqual(None, dv._validate_years(["2010"]))
self.assertEqual(None, dv._validate_years(["2020"]))
def test_multiple_valid_years(self):
self.assertEqual(None, dv._validate_years(["2010", "2011", "2012"]))
self.assertEqual(None, dv._validate_years(["2007", "2020"]))
self.assertEqual(None, dv._validate_years(["2010", "2009", "2011"]))
def test_multiple_of_same_valid_year(self):
self.assertEqual(None, dv._validate_years(["2007", "2007"]))
self.assertEqual(None, dv._validate_years(["2007", "2007", "2007", "2007"]))
self.assertEqual(None, dv._validate_years(["2007", "2010", "2010", "2013", "2014", "2010"]))
def test_invalid_year_past(self):
self.assertRaises(ValueError, dv._validate_years, ["2006"])
self.assertRaises(ValueError, dv._validate_years, ["2010", "2008", "2005"])
self.assertRaises(ValueError, dv._validate_years, ["256"])
self.assertRaises(ValueError, dv._validate_years, ["-550"])
def test_invalid_year_future(self):
self.assertRaises(ValueError, dv._validate_years, ["2025"])
self.assertRaises(ValueError, dv._validate_years, ["2010", "2012", "2020", "2040"])
self.assertRaises(ValueError, dv._validate_years, ["25000"])
self.assertRaises(ValueError, dv._validate_years, ["231568624546436737"])
class ValidateYearsMonths(unittest.TestCase):
def test_one_valid_yearmonth(self):
self.assertEqual(None, dv._validate_yearmonth(["200710"]))
if __name__ == '__main__':
unittest.main()
| none | 1 | 3.211442 | 3 | |
tests/test_cleanup.py | flavianmissi/django-extreme-tdd | 10 | 6614719 | from unittest import TestCase, skip
from mock import patch, MagicMock
from extreme.cleanup import truncate_tables
class CleanUpTests(TestCase):
@skip("for now")
@patch("extreme.cleanup.connections")
def test_truncate_tables(self, connections_mock):
cursor_mock = MagicMock()
connections_mock.__getitem__.return_value.cursor.return_value = cursor_mock
truncate_tables()
self.assertTrue(cursor_mock.execute.called)
expected_sql = "TRUNCATE myapp_userprofile,myapp_companyprofile RESTART IDENTITY CASCADE;"
cursor_mock.execute.assert_called_once_with(expected_sql)
| from unittest import TestCase, skip
from mock import patch, MagicMock
from extreme.cleanup import truncate_tables
class CleanUpTests(TestCase):
@skip("for now")
@patch("extreme.cleanup.connections")
def test_truncate_tables(self, connections_mock):
cursor_mock = MagicMock()
connections_mock.__getitem__.return_value.cursor.return_value = cursor_mock
truncate_tables()
self.assertTrue(cursor_mock.execute.called)
expected_sql = "TRUNCATE myapp_userprofile,myapp_companyprofile RESTART IDENTITY CASCADE;"
cursor_mock.execute.assert_called_once_with(expected_sql)
| none | 1 | 2.446869 | 2 | |
magic_admin/magic.py | tong181567/magic-admin | 19 | 6614720 | import os
import magic_admin
from magic_admin.config import api_secret_api_key_missing_message
from magic_admin.error import AuthenticationError
from magic_admin.resources.base import ResourceComponent
RETRIES = 3
TIMEOUT = 10
BACKOFF_FACTOR = 0.02
class Magic:
def __getattr__(self, attribute_name):
try:
return getattr(self._resource, attribute_name)
except AttributeError:
pass
return super().__getattribute__(attribute_name)
def __init__(
self,
api_secret_key=None,
retries=RETRIES,
timeout=TIMEOUT,
backoff_factor=BACKOFF_FACTOR,
):
self._resource = ResourceComponent()
self._resource.setup_request_client(retries, timeout, backoff_factor)
self._set_api_secret_key(api_secret_key)
def _set_api_secret_key(self, api_secret_key):
magic_admin.api_secret_key = api_secret_key or os.environ.get(
'MAGIC_API_SECRET_KEY',
)
if magic_admin.api_secret_key is None:
raise AuthenticationError(api_secret_api_key_missing_message)
| import os
import magic_admin
from magic_admin.config import api_secret_api_key_missing_message
from magic_admin.error import AuthenticationError
from magic_admin.resources.base import ResourceComponent
RETRIES = 3
TIMEOUT = 10
BACKOFF_FACTOR = 0.02
class Magic:
def __getattr__(self, attribute_name):
try:
return getattr(self._resource, attribute_name)
except AttributeError:
pass
return super().__getattribute__(attribute_name)
def __init__(
self,
api_secret_key=None,
retries=RETRIES,
timeout=TIMEOUT,
backoff_factor=BACKOFF_FACTOR,
):
self._resource = ResourceComponent()
self._resource.setup_request_client(retries, timeout, backoff_factor)
self._set_api_secret_key(api_secret_key)
def _set_api_secret_key(self, api_secret_key):
magic_admin.api_secret_key = api_secret_key or os.environ.get(
'MAGIC_API_SECRET_KEY',
)
if magic_admin.api_secret_key is None:
raise AuthenticationError(api_secret_api_key_missing_message)
| none | 1 | 2.35178 | 2 | |
zstackwoodpecker/zstackwoodpecker/zstack_test/zstack_test_kvm_host.py | sherry546/zstack-woodpecker | 2 | 6614721 | <gh_stars>1-10
'''
zstack KVM Host class
@author: Youyk
'''
import zstackwoodpecker.header.host as host_header
import zstackwoodpecker.operations.host_operations as host_ops
import zstackwoodpecker.test_util as test_util
MAINTAIN_EVENT = 'maintain'
ENABLE_EVENT = 'enable'
DISABLE_EVENT = 'disable'
PREMAINTAIN_EVENT = 'preMaintain'
state_event_dict = {MAINTAIN_EVENT: host_header.MAINTENANCE,
ENABLE_EVENT: host_header.ENABLED,
DISABLE_EVENT: host_header.DISABLED}
class ZstackTestKvmHost(host_header.TestHost):
def __init__(self):
self.host_creation_option = test_util.HostOption()
super(ZstackTestKvmHost, self).__init__()
def add(self):
self.host = host_ops.add_kvm_host(self.host_creation_option)
super(ZstackTestKvmHost, self).create()
def set_host(self, host_inv):
self.host = host_inv
self.state = host_inv.state
self.connection_state = host_inv.status
def delete(self):
host_ops.delete_host(self.host.uuid)
super(ZstackTestKvmHost, self).delete()
def check(self):
import zstackwoodpecker.zstack_test.checker_factory as checker_factory
checker = checker_factory.CheckerFactory().create_checker(self)
checker.check()
super(ZstackTestKvmHost, self).check()
def set_creation_option(self, host_creation_option):
self.host_creation_option = host_creation_option
def get_creation_option(self):
return self.host_creation_option
def change_state(self, state):
host_ops.change_host_state(self.host.uuid, state)
self.state = state_event_dict[state]
def maintain(self):
self.change_state(MAINTAIN_EVENT)
def enable(self):
self.change_state(ENABLE_EVENT)
def disable(self):
self.change_state(DISABLE_EVENT)
def reconnect(self):
host_ops.reconnect_host(self.host.uuid)
| '''
zstack KVM Host class
@author: Youyk
'''
import zstackwoodpecker.header.host as host_header
import zstackwoodpecker.operations.host_operations as host_ops
import zstackwoodpecker.test_util as test_util
MAINTAIN_EVENT = 'maintain'
ENABLE_EVENT = 'enable'
DISABLE_EVENT = 'disable'
PREMAINTAIN_EVENT = 'preMaintain'
state_event_dict = {MAINTAIN_EVENT: host_header.MAINTENANCE,
ENABLE_EVENT: host_header.ENABLED,
DISABLE_EVENT: host_header.DISABLED}
class ZstackTestKvmHost(host_header.TestHost):
def __init__(self):
self.host_creation_option = test_util.HostOption()
super(ZstackTestKvmHost, self).__init__()
def add(self):
self.host = host_ops.add_kvm_host(self.host_creation_option)
super(ZstackTestKvmHost, self).create()
def set_host(self, host_inv):
self.host = host_inv
self.state = host_inv.state
self.connection_state = host_inv.status
def delete(self):
host_ops.delete_host(self.host.uuid)
super(ZstackTestKvmHost, self).delete()
def check(self):
import zstackwoodpecker.zstack_test.checker_factory as checker_factory
checker = checker_factory.CheckerFactory().create_checker(self)
checker.check()
super(ZstackTestKvmHost, self).check()
def set_creation_option(self, host_creation_option):
self.host_creation_option = host_creation_option
def get_creation_option(self):
return self.host_creation_option
def change_state(self, state):
host_ops.change_host_state(self.host.uuid, state)
self.state = state_event_dict[state]
def maintain(self):
self.change_state(MAINTAIN_EVENT)
def enable(self):
self.change_state(ENABLE_EVENT)
def disable(self):
self.change_state(DISABLE_EVENT)
def reconnect(self):
host_ops.reconnect_host(self.host.uuid) | en | 0.295751 | zstack KVM Host class @author: Youyk | 1.877657 | 2 |
tests/unit/webapi25/test_so2indexparser.py | jpelaezClub/pyowm | 0 | 6614722 | <gh_stars>0
import unittest
from pyowm.webapi25.so2indexparser import SO2IndexParser
from pyowm.exceptions.parse_response_error import ParseResponseError
from tests.unit.webapi25.json_test_responses import (
SO2INDEX_JSON, SO2INDEX_MALFORMED_JSON)
class TestSO2IndexParser(unittest.TestCase):
__instance = SO2IndexParser()
def test_parse_JSON(self):
result = self.__instance.parse_JSON(SO2INDEX_JSON)
self.assertIsNotNone(result)
self.assertIsNotNone(result.get_reference_time())
self.assertIsNotNone(result.get_reference_time())
loc = result.get_location()
self.assertIsNotNone(loc)
self.assertIsNone(loc.get_name())
self.assertIsNone(loc.get_ID())
self.assertIsNotNone(loc.get_lon())
self.assertIsNotNone(loc.get_lat())
self.assertIsNone(result.get_interval())
self.assertNotEquals(0, len(result.get_so2_samples()))
def test_parse_JSON_fails_when_JSON_data_is_None(self):
self.assertRaises(ParseResponseError, SO2IndexParser.parse_JSON,
self.__instance, None)
def test_parse_JSON_fails_with_malformed_JSON_data(self):
self.assertRaises(ParseResponseError, SO2IndexParser.parse_JSON,
self.__instance, SO2INDEX_MALFORMED_JSON)
| import unittest
from pyowm.webapi25.so2indexparser import SO2IndexParser
from pyowm.exceptions.parse_response_error import ParseResponseError
from tests.unit.webapi25.json_test_responses import (
SO2INDEX_JSON, SO2INDEX_MALFORMED_JSON)
class TestSO2IndexParser(unittest.TestCase):
__instance = SO2IndexParser()
def test_parse_JSON(self):
result = self.__instance.parse_JSON(SO2INDEX_JSON)
self.assertIsNotNone(result)
self.assertIsNotNone(result.get_reference_time())
self.assertIsNotNone(result.get_reference_time())
loc = result.get_location()
self.assertIsNotNone(loc)
self.assertIsNone(loc.get_name())
self.assertIsNone(loc.get_ID())
self.assertIsNotNone(loc.get_lon())
self.assertIsNotNone(loc.get_lat())
self.assertIsNone(result.get_interval())
self.assertNotEquals(0, len(result.get_so2_samples()))
def test_parse_JSON_fails_when_JSON_data_is_None(self):
self.assertRaises(ParseResponseError, SO2IndexParser.parse_JSON,
self.__instance, None)
def test_parse_JSON_fails_with_malformed_JSON_data(self):
self.assertRaises(ParseResponseError, SO2IndexParser.parse_JSON,
self.__instance, SO2INDEX_MALFORMED_JSON) | none | 1 | 2.74735 | 3 | |
Contrib/psr/multiply/test/test_sysfunction.py | veekooFIN/gigatron-rom | 172 | 6614723 | """Tests for the implementation of SYS_MultiplyBytes_126"""
import os.path
import pathlib
from importlib import reload
from types import SimpleNamespace
from hypothesis import given
from hypothesis import strategies as st
import asm
from gtemu import RAM, Emulator
MAX_CYCLES = 120
SYS_DIR = (pathlib.Path(__file__).parent / ".." / "sys").resolve()
SCRIPT = SYS_DIR / "ROM.asm.py"
def setup_module():
global vars
"""Load the Emulator from the ROM script"""
reload(asm)
name, _ = os.path.splitext(os.path.basename(SCRIPT))
script_globals = {"__file__": str(SCRIPT.absolute()), "__name__": name}
with SCRIPT.open("rb") as file:
exec(compile(file.read(), SCRIPT, "exec"), script_globals)
Emulator.load_rom_from_asm_module()
vars = SimpleNamespace(**script_globals)
def setup_function():
RAM[vars.sysFn : vars.sysFn + 2] = asm.symbol("SYS_MultiplyBytes_120").to_bytes(
2, "little"
)
RAM[vars.vTicks] = 75
Emulator.next_instruction = "SYS"
Emulator.AC = 270 - max(14, MAX_CYCLES // 2)
def test_timing_both_lt_128():
"""Follow the routine through, checking the timing comments
This follows the case where both values are less than 128
I'm just trying to check that the comments are correct!
"""
RAM[vars.sysArgs : vars.sysArgs + 2] = 3, 5
# fmt: off
cycles = 9 # On entry to SYS, 9 cycles have already elapsed
cycles += Emulator.run_to("SYS_MultiplyBytes_120"); assert 14 == cycles # noqa: E702, E241, E272
cycles += Emulator.run_to("sys_MultiplyBytes.tableEntry"); assert 29 == cycles # noqa: E702, E241, E272
cycles += Emulator.run_to("sys_MultiplyBytes.high-byte-action.store-inverted"); assert 35 == cycles # noqa: E702, E241, E272
cycles += Emulator.run_to("sys_MultiplyBytes.tableExit"); assert 40 == cycles # noqa: E702, E241, E272
cycles += Emulator.run_to("sys_MultiplyBytes#44"); assert 43 == cycles # noqa: E702, E241, E272
cycles += Emulator.run_to("sys_MultiplyBytes.tableEntry"); assert 51 == cycles # noqa: E702, E241, E272
cycles += Emulator.run_to("sys_MultiplyBytes.high-byte-action.restore-and-add"); assert 57 == cycles # noqa: E702, E241, E272
cycles += Emulator.run_to("sys_MultiplyBytes.tableExit"); assert 64 == cycles # noqa: E702, E241, E272
cycles += Emulator.run_to("sys_MultiplyBytes#68"); assert 67 == cycles # noqa: E702, E241, E272
cycles += Emulator.run_to("NEXTY"); assert 90 == cycles # noqa: E702, E241, E272
# fmt: on
def test_timing_neither_lt_128():
"""Follow the routine through, checking the timing comments
This follows the case where neither value is less than 128
"""
RAM[vars.sysArgs : vars.sysArgs + 2] = 172, 160
# fmt: off
cycles = 9 # On entry to SYS, 9 cycles have already elapsed
cycles += Emulator.run_to("sys_MultiplyBytes#68"); assert 67 == cycles # noqa: E702, E241, E272, E221
cycles += Emulator.run_to("sys_MultiplyBytes#92"); assert 91 == cycles # noqa: E702, E241, E272, E221
cycles += Emulator.run_to("sys_MultiplyBytes#114"); assert 113 == cycles # noqa: E702, E241, E272
cycles += Emulator.run_to("NEXTY"); assert 118 == cycles # noqa: E702, E241, E272
# fmt: on
def test_timing_one_lt_128():
"""Follow the routine through, checking the timing comments
This follows the case where one value is less than 128
"""
RAM[vars.sysArgs : vars.sysArgs + 2] = 3, 160
# fmt: off
cycles = 9 # On entry to SYS, 9 cycles have already elapsed
cycles += Emulator.run_to("sys_MultiplyBytes#68"); assert 67 == cycles # noqa: E702, E241, E272
cycles += Emulator.run_to("sys_MultiplyBytes.oneMsbSetCase"); assert 85 == cycles # noqa: E702, E241, E272
cycles += Emulator.run_to("sys_MultiplyBytes#92"); assert 91 == cycles # noqa: E702, E241, E272
# fmt: on
def _sign_extend(byte_):
if byte_ & 0x80:
return ~0xFF | byte_
return byte_
_bytes = st.integers(min_value=0, max_value=255)
@given(a=_bytes, b=_bytes)
def test_multiply_bytes(a, b):
setup_function()
RAM[vars.sysArgs : vars.sysArgs + 2] = a, b
cycles = 10 # Because Next is marked as zero
cycles += Emulator.run_to("NEXT")
assert cycles <= MAX_CYCLES
assert cycles == _sign_extend(Emulator.AC) * -2
assert a * b == Emulator.vAC
| """Tests for the implementation of SYS_MultiplyBytes_126"""
import os.path
import pathlib
from importlib import reload
from types import SimpleNamespace
from hypothesis import given
from hypothesis import strategies as st
import asm
from gtemu import RAM, Emulator
MAX_CYCLES = 120
SYS_DIR = (pathlib.Path(__file__).parent / ".." / "sys").resolve()
SCRIPT = SYS_DIR / "ROM.asm.py"
def setup_module():
global vars
"""Load the Emulator from the ROM script"""
reload(asm)
name, _ = os.path.splitext(os.path.basename(SCRIPT))
script_globals = {"__file__": str(SCRIPT.absolute()), "__name__": name}
with SCRIPT.open("rb") as file:
exec(compile(file.read(), SCRIPT, "exec"), script_globals)
Emulator.load_rom_from_asm_module()
vars = SimpleNamespace(**script_globals)
def setup_function():
RAM[vars.sysFn : vars.sysFn + 2] = asm.symbol("SYS_MultiplyBytes_120").to_bytes(
2, "little"
)
RAM[vars.vTicks] = 75
Emulator.next_instruction = "SYS"
Emulator.AC = 270 - max(14, MAX_CYCLES // 2)
def test_timing_both_lt_128():
"""Follow the routine through, checking the timing comments
This follows the case where both values are less than 128
I'm just trying to check that the comments are correct!
"""
RAM[vars.sysArgs : vars.sysArgs + 2] = 3, 5
# fmt: off
cycles = 9 # On entry to SYS, 9 cycles have already elapsed
cycles += Emulator.run_to("SYS_MultiplyBytes_120"); assert 14 == cycles # noqa: E702, E241, E272
cycles += Emulator.run_to("sys_MultiplyBytes.tableEntry"); assert 29 == cycles # noqa: E702, E241, E272
cycles += Emulator.run_to("sys_MultiplyBytes.high-byte-action.store-inverted"); assert 35 == cycles # noqa: E702, E241, E272
cycles += Emulator.run_to("sys_MultiplyBytes.tableExit"); assert 40 == cycles # noqa: E702, E241, E272
cycles += Emulator.run_to("sys_MultiplyBytes#44"); assert 43 == cycles # noqa: E702, E241, E272
cycles += Emulator.run_to("sys_MultiplyBytes.tableEntry"); assert 51 == cycles # noqa: E702, E241, E272
cycles += Emulator.run_to("sys_MultiplyBytes.high-byte-action.restore-and-add"); assert 57 == cycles # noqa: E702, E241, E272
cycles += Emulator.run_to("sys_MultiplyBytes.tableExit"); assert 64 == cycles # noqa: E702, E241, E272
cycles += Emulator.run_to("sys_MultiplyBytes#68"); assert 67 == cycles # noqa: E702, E241, E272
cycles += Emulator.run_to("NEXTY"); assert 90 == cycles # noqa: E702, E241, E272
# fmt: on
def test_timing_neither_lt_128():
    """Follow the routine through, checking the timing comments.

    This follows the case where neither value is less than 128.
    """
    RAM[vars.sysArgs : vars.sysArgs + 2] = 172, 160
    # (label to run to, cumulative cycle count expected on arrival)
    checkpoints = [
        ("sys_MultiplyBytes#68", 67),
        ("sys_MultiplyBytes#92", 91),
        ("sys_MultiplyBytes#114", 113),
        ("NEXTY", 118),
    ]
    cycles = 9  # On entry to SYS, 9 cycles have already elapsed
    for label, expected in checkpoints:
        cycles += Emulator.run_to(label)
        assert expected == cycles
def test_timing_one_lt_128():
    """Follow the routine through, checking the timing comments.

    This follows the case where one value is less than 128.
    """
    RAM[vars.sysArgs : vars.sysArgs + 2] = 3, 160
    # (label to run to, cumulative cycle count expected on arrival)
    checkpoints = [
        ("sys_MultiplyBytes#68", 67),
        ("sys_MultiplyBytes.oneMsbSetCase", 85),
        ("sys_MultiplyBytes#92", 91),
    ]
    cycles = 9  # On entry to SYS, 9 cycles have already elapsed
    for label, expected in checkpoints:
        cycles += Emulator.run_to(label)
        assert expected == cycles
def _sign_extend(byte_):
if byte_ & 0x80:
return ~0xFF | byte_
return byte_
# Hypothesis strategy covering every unsigned byte value.
_bytes = st.integers(min_value=0, max_value=255)


@given(a=_bytes, b=_bytes)
def test_multiply_bytes(a, b):
    """Property test: product and cycle accounting hold for all byte pairs."""
    setup_function()
    RAM[vars.sysArgs : vars.sysArgs + 2] = a, b
    elapsed = 10  # Because Next is marked as zero
    elapsed += Emulator.run_to("NEXT")
    assert elapsed <= MAX_CYCLES
    # AC holds the (negative, halved) cycle budget remaining on exit.
    assert elapsed == _sign_extend(Emulator.AC) * -2
    assert a * b == Emulator.vAC
| en | 0.740813 | Tests for the implementation of SYS_MultiplyBytes_126 Load the Emulator from the ROM script Follow the routine through, checking the timing comments This follows the case where both values are less than 128 I'm just trying to check that the comments are correct! # fmt: off # On entry to SYS, 9 cycles have already elapsed # noqa: E702, E241, E272 # noqa: E702, E241, E272 # noqa: E702, E241, E272 # noqa: E702, E241, E272 #44"); assert 43 == cycles # noqa: E702, E241, E272 # noqa: E702, E241, E272 # noqa: E702, E241, E272 # noqa: E702, E241, E272 #68"); assert 67 == cycles # noqa: E702, E241, E272 # noqa: E702, E241, E272 # fmt: on Follow the routine through, checking the timing comments This follows the case where neither value is less than 128 # fmt: off # On entry to SYS, 9 cycles have already elapsed #68"); assert 67 == cycles # noqa: E702, E241, E272, E221 #92"); assert 91 == cycles # noqa: E702, E241, E272, E221 #114"); assert 113 == cycles # noqa: E702, E241, E272 # noqa: E702, E241, E272 # fmt: on Follow the routine through, checking the timing comments This follows the case where one value is less than 128 # fmt: off # On entry to SYS, 9 cycles have already elapsed #68"); assert 67 == cycles # noqa: E702, E241, E272 # noqa: E702, E241, E272 #92"); assert 91 == cycles # noqa: E702, E241, E272 # fmt: on # Because Next is marked as zero | 2.20587 | 2 |
pysg/geometry.py | alonblade/pysg | 1 | 6614724 | # -*- coding: utf-8 -*-
""" Create basic geometries which are used to create buffered primitives in vRAM."""
import math
from typing import Tuple
import numpy as np
def create_cube(dtype='float32') -> Tuple[np.array, np.array, np.array]:
    """ Create standard cube of size one.

    Args:
        dtype: Data type of output numpy array.

    Returns:
        Tuple[np.array,np.array,np.array]: Tuple of size 3. First is np array for vertices,
            second for indices, and last for the normals. 24 vertices (4 per face) so each
            face can carry its own flat normal; 36 indices (2 CCW triangles per face).
    """
    w = h = d = 0.5  # half extents
    # Four corners per face, ordered: top right, top left, bottom left, bottom right.
    faces = [
        # front
        [(w, h, d), (-w, h, d), (-w, -h, d), (w, -h, d)],
        # right
        [(w, h, -d), (w, h, d), (w, -h, d), (w, -h, -d)],
        # back
        [(-w, h, -d), (w, h, -d), (w, -h, -d), (-w, -h, -d)],
        # left
        [(-w, h, d), (-w, h, -d), (-w, -h, -d), (-w, -h, d)],
        # top
        [(w, h, -d), (-w, h, -d), (-w, h, d), (w, h, d)],
        # bottom
        [(w, -h, d), (-w, -h, d), (-w, -h, -d), (w, -h, -d)],
    ]
    vertices = np.array([corner for face in faces for corner in face], dtype=dtype)
    # Two counter-clockwise triangles per face:
    # top right -> top left -> bottom left, top right -> bottom left -> bottom right.
    indices = (np.arange(6, dtype='int')[:, None] * 4
               + np.array([0, 1, 2, 0, 2, 3], dtype='int')).reshape(-1)
    face_normals = np.array([
        (0, 0, 1),   # front
        (1, 0, 0),   # right
        (0, 0, -1),  # back
        (-1, 0, 0),  # left
        (0, 1, 0),   # top
        (0, -1, 0),  # bottom
    ], dtype=dtype)
    normals = np.repeat(face_normals, 4, axis=0)
    return vertices, indices, normals
def create_icosahedron(dtype='float32') -> Tuple[np.array, np.array, np.array]:
    """ Create icosahedron geometry with radius one.

    seealso:: http://www.songho.ca/opengl/gl_sphere.html

    Args:
        dtype: Data type of output numpy array.

    Returns:
        Tuple[np.array,np.array,np.array]: Tuple of size 3. First is np array for vertices,
            second for indices, and last for the (per-face, un-normalised) normals.
    """
    # Fixed radius of 1
    RADIUS = 1.
    h_angle_steps = math.pi / 180 * 72  # 72 degree = 360 / 5
    v_angle_steps = math.atan(1. / 2.)  # elevation = 26.565 degree
    vertices = np.zeros((60, 3), dtype=dtype)  # array of 60 vertices (20 triangles)
    h_angle_1st_row = -math.pi / 2. - h_angle_steps / 2.  # start from -126 deg at 1st row
    h_angle_2nd_row = -math.pi / 2.  # start from -90 deg at 2nd row
    normals = np.zeros((60, 3), dtype=dtype)
    # Top vertex at (0, 0, r)
    v_top = np.array([0, 0, RADIUS])
    # 10 vertices at 1st and 2nd rows
    z = RADIUS * math.sin(v_angle_steps)   # elevation
    xy = RADIUS * math.cos(v_angle_steps)  # length on XY plane
    v_1st_row = np.zeros((5, 3))
    v_2nd_row = np.zeros((5, 3))
    for idx in range(0, 5):
        x_1 = xy * math.cos(h_angle_1st_row)
        x_2 = xy * math.cos(h_angle_2nd_row)
        y_1 = xy * math.sin(h_angle_1st_row)
        y_2 = xy * math.sin(h_angle_2nd_row)
        v_1st_row[idx] = np.array([x_1, y_1, z])
        v_2nd_row[idx] = np.array([x_2, y_2, -z])
        # next horizontal angles
        h_angle_1st_row += h_angle_steps
        h_angle_2nd_row += h_angle_steps
    # Bottom vertex at (0, 0, -r)
    v_bottom = np.array([0., 0., -RADIUS])

    # Helper: flat face normal shared by the triangle's three vertices.
    def set_normals(v_idx):
        v1 = vertices[v_idx] - vertices[v_idx + 1]
        v2 = vertices[v_idx] - vertices[v_idx + 2]
        # BUG FIX: the slice previously stopped at v_idx + 2, so every third
        # vertex was left with a zero normal; a triangle has three vertices.
        normals[v_idx: v_idx + 3] = np.cross(v1, v2)

    # Set vertices and normals
    for idx in range(0, 5):
        # Top fan
        v_idx = idx * 3
        next_idx = (idx + 1) % 5
        vertices[v_idx] = v_top
        vertices[v_idx + 1] = v_1st_row[idx]
        vertices[v_idx + 2] = v_1st_row[next_idx]
        set_normals(v_idx)
        # First row band
        v_idx = idx * 3 + (5 * 3)
        vertices[v_idx] = v_1st_row[next_idx]
        vertices[v_idx + 1] = v_1st_row[idx]
        vertices[v_idx + 2] = v_2nd_row[idx]
        set_normals(v_idx)
        # Second row band
        v_idx = idx * 3 + (10 * 3)
        vertices[v_idx] = v_2nd_row[idx]
        vertices[v_idx + 1] = v_2nd_row[next_idx]
        vertices[v_idx + 2] = v_1st_row[next_idx]
        set_normals(v_idx)
        # Bottom fan
        v_idx = idx * 3 + (15 * 3)
        vertices[v_idx] = v_bottom
        vertices[v_idx + 1] = v_2nd_row[next_idx]
        vertices[v_idx + 2] = v_2nd_row[idx]
        set_normals(v_idx)
    indices = np.arange(0, 60, dtype='int')
    return vertices, indices, normals
def create_plane(dtype='float32') -> Tuple[np.array, np.array, np.array]:
    """ Create standard plane of size one, lying in XZ and facing +Y.

    Args:
        dtype: Data type of output numpy array.

    Returns:
        Tuple[np.array,np.array,np.array]: Tuple of size 3. First is np array for vertices,
            second for indices, and last for the normals.
    """
    half = 0.5  # half dimension
    corners = [
        (half, 0.0, -half),   # top right
        (-half, 0.0, -half),  # top left
        (-half, 0.0, half),   # bottom left
        (half, 0.0, half),    # bottom right
    ]
    vertices = np.array(corners, dtype=dtype)
    # Two counter-clockwise triangles:
    # top right -> top left -> bottom left, top right -> bottom left -> bottom right.
    indices = np.array([0, 1, 2, 0, 2, 3], dtype='int')
    normals = np.repeat(np.array([[0, 1, 0]], dtype=dtype), 4, axis=0)
    return vertices, indices, normals
def create_circle(dtype='float32', radius=1., fan_vertices=40) -> Tuple[np.array, np.array, np.array]:
    """ Create standard circle in the XZ plane, rendered as a triangle fan.

    Args:
        dtype: Data type of output numpy array.
        radius: Radius of circle.
        fan_vertices: Number of rim vertices used for the triangle fan.

    Returns:
        Tuple[np.array,np.array,np.array]: Tuple of size 3. First is np array for vertices,
            second for indices, and last for the normals.
    """
    vertices = np.zeros((1 + fan_vertices, 3), dtype=dtype)
    vertices[0] = (0., 0., 0.)  # fan centre
    step = (2 * math.pi) / fan_vertices
    angle = 0
    for rim in range(1, fan_vertices + 1):
        vertices[rim] = (math.cos(angle) * radius, 0., math.sin(angle) * radius)
        angle += step
    # Reversed order makes the fan wind counter-clockwise when viewed from +Y.
    indices = np.arange(fan_vertices, -1, -1, dtype='int')
    normals = np.full((1 + fan_vertices, 3), (0, 1, 0), dtype=dtype)
    return vertices, indices, normals
def create_triangle(dtype='float32') -> Tuple[np.array, np.array, np.array]:
    """ Create standard equilateral triangle with side length one.

    Centred on its incenter, lying in the XZ plane and facing +Y.

    Args:
        dtype: Data type of output numpy array.

    Returns:
        Tuple[np.array,np.array,np.array]: Tuple of size 3. First is np array for vertices,
            second for indices, and last for the normals.
    """
    h = 0.5 * math.sqrt(3)                    # triangle height
    inner_circle_radius = math.sqrt(3) / 6.   # distance from incenter to each side
    vertices = np.array([
        (0, 0, h - inner_circle_radius),      # apex
        (0.5, 0, -inner_circle_radius),
        (-0.5, 0, -inner_circle_radius),
    ], dtype=dtype)
    indices = np.arange(3, dtype='int')
    normals = np.full((3, 3), (0, 1, 0), dtype=dtype)
    return vertices, indices, normals
def create_cylinder(dtype='float32') -> Tuple[np.array, np.array, np.array]:
    """ Create standard cylinder with height two and radius one.

    Args:
        dtype: Data type of output numpy array.

    Returns:
        Tuple[np.array,np.array,np.array]: Tuple of size 3. First is np array for vertices,
            second for indices, and last for the normals.
    """
    height = 2.
    radius = 1.
    sides = 6  # hexagonal approximation of the round wall
    # Top and bottom share one center vertices and the triangles form a fan.
    # Each sides needs two unique triangle to render correct normals
    # Vertices layout: (top (1), upper_circle (sides), middle (4*sides) ,lower_circle (sides), bottom (1).
    vertices = np.zeros((sides * 6 + 2, 3), dtype=dtype)
    normals = np.zeros(vertices.shape, dtype=dtype)
    # Every side has 4 triangles (two for middle, one for top, and one for bottom).
    indices = np.zeros((sides * 4, 3), dtype='int')
    y = height / 2.
    # Cap centres: first and last vertex, facing straight up / down.
    vertices[0] = (0., y, 0.)
    normals[0] = (0, 1, 0)
    vertices[-1] = (0., -y, 0.)
    normals[-1] = (0, -1, 0)
    angle_step = (2 * math.pi) / sides
    angle = 0
    for idx in range(1, sides + 1):
        x = math.cos(angle) * radius
        z = math.sin(angle) * radius
        # Top circle
        vertices[idx] = (x, y, z)
        normals[idx] = (0, 1, 0)
        # Bottom circle (stored after the wall block; negative index mirrors it)
        vertices[idx + (sides * 5)] = (x, -y, z)
        normals[-idx - 1] = (0, -1, 0)
        angle += angle_step
    # Top indices: fan around vertex 0
    indices[0:sides] = [(0, (i + 1) % sides + 1, i + 1) for i in range(sides)]
    # Bottom indices: fan around the last vertex, wound the opposite way
    offset = len(vertices) - 1
    indices[-sides:] = [(offset, offset - sides + i, offset - sides + (i + 1) % sides) for i in range(sides)]
    for idx in range(0, sides):
        # Each wall quad gets 4 dedicated vertices so it can carry a flat normal.
        array_idx = sides + idx * 4 + 1
        top_left = vertices[idx + 1]
        next_idx_top = idx + 2 if idx + 1 < sides else 1  # wrap around to first rim vertex
        top_right = vertices[next_idx_top]
        bottom_left = vertices[idx - sides - 1]
        # wrap on the bottom rim using negative indexing
        next_idx_bottom = idx - sides if idx - sides <= -2 else -sides - 1
        bottom_right = vertices[next_idx_bottom]
        vertices[array_idx] = top_left
        vertices[array_idx + 1] = top_right
        vertices[array_idx + 2] = bottom_left
        vertices[array_idx + 3] = bottom_right
        v1 = top_right - top_left
        v2 = bottom_left - top_left
        # Unit face normal, shared by the quad's four vertices.
        normal = np.cross(v1, v2) / np.linalg.norm(np.cross(v1, v2))
        normals[array_idx: (array_idx + 4)] = normal
        # Two triangles per wall quad.
        indices[sides + idx] = (array_idx, array_idx + 1, array_idx + 2)
        indices[sides * 2 + idx] = (array_idx + 1, array_idx + 3, array_idx + 2)
    indices = indices.flatten()
    return vertices, indices, normals
def create_tetrahedral(dtype='float32') -> Tuple[np.array, np.array, np.array]:
    """ Create tetrahedral geometry with radius one.

    Args:
        dtype: Data type of output numpy array.

    Returns:
        Tuple[np.array,np.array,np.array]: Tuple of size 3. First is np array for vertices
            (12 = 3 per face), second for indices, and last for the (per-vertex,
            un-normalised) normals.
    """
    size = 0.5
    # The four corners of a regular tetrahedron inscribed in a unit cube.
    v1 = np.array((size, size, size))
    v2 = np.array((size, -size, -size))
    v3 = np.array((-size, size, -size))
    v4 = np.array((-size, -size, size))
    vertices = np.array([
        # face 1
        v4, v3, v2,
        # face 2
        v3, v4, v1,
        # face 3
        v1, v4, v2,
        # face 4
        v2, v3, v1,
    ], dtype=dtype)
    # Outward-facing flat normals, one per face.
    norm_1 = tuple(np.cross((v4 - v2), (v3 - v2)))
    norm_2 = tuple(np.cross((v3 - v1), (v4 - v1)))
    norm_3 = tuple(np.cross((v4 - v1), (v2 - v1)))
    norm_4 = tuple(np.cross((v2 - v1), (v3 - v1)))
    # BUG FIX: the original used tuple repetition (`norm_1 * 3`), producing a
    # (4, 9) float64 array; emit one (12, 3) row per vertex in `dtype` so the
    # normals line up with `vertices`, as in the other geometry helpers.
    normals = np.array([norm_1] * 3 + [norm_2] * 3 + [norm_3] * 3 + [norm_4] * 3, dtype=dtype)
    indices = np.arange(0, 12, dtype='int')
    return vertices, indices, normals
def create_pyramid(dtype='float32') -> Tuple[np.array, np.array, np.array]:
    """ Create regular pyramid geometry with square base with base size and height one.

    Args:
        dtype: Data type of output numpy array.

    Returns:
        Tuple[np.array,np.array,np.array]: Tuple of size 3. First is np array for vertices
            (16 = 4 base + 3 per side), second for indices, and last for the flattened,
            un-normalised normals (3 scalars per vertex).
    """
    # Base sits at -1/3 so the centroid is at the origin.
    base_height = -0.333333
    tip_vert = np.array((0, 0.666666, 0))
    base_top_right_vert = np.array((0.5, base_height, 0.5))
    base_top_left_vert = np.array((-0.5, base_height, 0.5))
    base_bottom_right_vert = np.array((0.5, base_height, -0.5))
    base_bottom_left_vert = np.array((-0.5, base_height, -0.5))
    vertices = np.array([
        # Bottom
        base_top_right_vert,
        base_top_left_vert,
        base_bottom_left_vert,
        base_bottom_right_vert,
        # Front
        tip_vert,
        base_bottom_right_vert,
        base_bottom_left_vert,
        # Back
        tip_vert,
        base_top_left_vert,
        base_top_right_vert,
        # Right
        tip_vert,
        base_top_right_vert,
        base_bottom_right_vert,
        # Left
        tip_vert,
        base_bottom_left_vert,
        base_top_left_vert,
    ], dtype=dtype)
    # Outward flat normals for the four slanted faces.
    norm_back = tuple(np.cross((base_top_left_vert - tip_vert), (base_top_right_vert - tip_vert)))
    norm_front = tuple(np.cross((base_bottom_right_vert - tip_vert), (base_bottom_left_vert - tip_vert)))
    norm_right = tuple(np.cross((base_top_right_vert - tip_vert), (base_bottom_right_vert - tip_vert)))
    norm_left = tuple(np.cross((base_bottom_left_vert - tip_vert), (base_top_left_vert - tip_vert)))
    # Tuple repetition fans each face normal out to its 3 (or 4) vertices.
    normals = np.concatenate([
        (0, -1, 0) * 4,   # Bottom
        norm_front * 3,   # Front
        norm_back * 3,    # Back
        norm_right * 3,   # Right
        norm_left * 3,    # Left
    ]).astype(dtype)  # BUG FIX: honour the `dtype` parameter (was always float64)
    bottom_indices = np.array([0, 1, 2, 0, 2, 3])
    indices = np.concatenate([bottom_indices, np.arange(4, 16, dtype='int')])
    return vertices, indices, normals
| # -*- coding: utf-8 -*-
""" Create basic geometries which are used to create buffered primitives in vRAM."""
import math
from typing import Tuple
import numpy as np
def create_cube(dtype='float32') -> Tuple[np.array, np.array, np.array]:
""" Create standard cube of size one.
Args:
dtype: Data type of output numpy array.
Returns:
Tuple[np.array,np.array,np.array]: Tuple of size 3. First is np array for vertices, second for indices,
and last for the normals.
"""
# half dimension
width = 0.5
height = 0.5
depth = 0.5
vertices = np.array([
# front
# top right
(width, height, depth),
# top left
(-width, height, depth),
# bottom left
(-width, -height, depth),
# bottom right
(width, -height, depth),
# right
# top right
(width, height, -depth),
# top left
(width, height, depth),
# bottom left
(width, -height, depth),
# bottom right
(width, -height, -depth),
# back
# top right
(-width, height, -depth),
# top left
(width, height, -depth),
# bottom left
(width, -height, -depth),
# bottom right
(-width, -height, -depth),
# left
# top right
(-width, height, depth),
# top left
(-width, height, -depth),
# bottom left
(-width, -height, -depth),
# bottom right
(-width, -height, depth),
# top
# top right
(width, height, -depth),
# top left
(-width, height, -depth),
# bottom left
(-width, height, depth),
# bottom right
(width, height, depth),
# bottom
# top right
(width, -height, depth),
# top left
(-width, -height, depth),
# bottom left
(-width, -height, -depth),
# bottom right
(width, -height, -depth),
], dtype=dtype)
# For triangle type counter clockwise
# top right -> top left -> bottom left
# top right -> bottom left -> bottom right
indices = np.tile(np.array([0, 1, 2, 0, 2, 3], dtype='int'), (6, 1))
for face in range(6):
indices[face] += (face * 4)
indices.shape = (-1,)
normals = np.array([
# front
(0, 0, 1,),
(0, 0, 1,),
(0, 0, 1,),
(0, 0, 1,),
# right
(1, 0, 0,),
(1, 0, 0,),
(1, 0, 0,),
(1, 0, 0,),
# back
(0, 0, -1,),
(0, 0, -1,),
(0, 0, -1,),
(0, 0, -1,),
# left
(-1, 0, 0,),
(-1, 0, 0,),
(-1, 0, 0,),
(-1, 0, 0,),
# top
(0, 1, 0,),
(0, 1, 0,),
(0, 1, 0,),
(0, 1, 0,),
# bottom
(0, -1, 0,),
(0, -1, 0,),
(0, -1, 0,),
(0, -1, 0,),
], dtype=dtype)
return vertices, indices, normals
def create_icosahedron(dtype='float32') -> Tuple[np.array, np.array, np.array]:
    """ Create icosahedron geometry with radius one.

    seealso:: http://www.songho.ca/opengl/gl_sphere.html

    Args:
        dtype: Data type of output numpy array.

    Returns:
        Tuple[np.array,np.array,np.array]: Tuple of size 3. First is np array for vertices,
            second for indices, and last for the (per-face, un-normalised) normals.
    """
    # Fixed radius of 1
    RADIUS = 1.
    h_angle_steps = math.pi / 180 * 72  # 72 degree = 360 / 5
    v_angle_steps = math.atan(1. / 2.)  # elevation = 26.565 degree
    vertices = np.zeros((60, 3), dtype=dtype)  # array of 60 vertices (20 triangles)
    h_angle_1st_row = -math.pi / 2. - h_angle_steps / 2.  # start from -126 deg at 1st row
    h_angle_2nd_row = -math.pi / 2.  # start from -90 deg at 2nd row
    normals = np.zeros((60, 3), dtype=dtype)
    # Top vertex at (0, 0, r)
    v_top = np.array([0, 0, RADIUS])
    # 10 vertices at 1st and 2nd rows
    z = RADIUS * math.sin(v_angle_steps)   # elevation
    xy = RADIUS * math.cos(v_angle_steps)  # length on XY plane
    v_1st_row = np.zeros((5, 3))
    v_2nd_row = np.zeros((5, 3))
    for idx in range(0, 5):
        x_1 = xy * math.cos(h_angle_1st_row)
        x_2 = xy * math.cos(h_angle_2nd_row)
        y_1 = xy * math.sin(h_angle_1st_row)
        y_2 = xy * math.sin(h_angle_2nd_row)
        v_1st_row[idx] = np.array([x_1, y_1, z])
        v_2nd_row[idx] = np.array([x_2, y_2, -z])
        # next horizontal angles
        h_angle_1st_row += h_angle_steps
        h_angle_2nd_row += h_angle_steps
    # Bottom vertex at (0, 0, -r)
    v_bottom = np.array([0., 0., -RADIUS])

    # Helper: flat face normal shared by the triangle's three vertices.
    def set_normals(v_idx):
        v1 = vertices[v_idx] - vertices[v_idx + 1]
        v2 = vertices[v_idx] - vertices[v_idx + 2]
        # BUG FIX: the slice previously stopped at v_idx + 2, so every third
        # vertex was left with a zero normal; a triangle has three vertices.
        normals[v_idx: v_idx + 3] = np.cross(v1, v2)

    # Set vertices and normals
    for idx in range(0, 5):
        # Top fan
        v_idx = idx * 3
        next_idx = (idx + 1) % 5
        vertices[v_idx] = v_top
        vertices[v_idx + 1] = v_1st_row[idx]
        vertices[v_idx + 2] = v_1st_row[next_idx]
        set_normals(v_idx)
        # First row band
        v_idx = idx * 3 + (5 * 3)
        vertices[v_idx] = v_1st_row[next_idx]
        vertices[v_idx + 1] = v_1st_row[idx]
        vertices[v_idx + 2] = v_2nd_row[idx]
        set_normals(v_idx)
        # Second row band
        v_idx = idx * 3 + (10 * 3)
        vertices[v_idx] = v_2nd_row[idx]
        vertices[v_idx + 1] = v_2nd_row[next_idx]
        vertices[v_idx + 2] = v_1st_row[next_idx]
        set_normals(v_idx)
        # Bottom fan
        v_idx = idx * 3 + (15 * 3)
        vertices[v_idx] = v_bottom
        vertices[v_idx + 1] = v_2nd_row[next_idx]
        vertices[v_idx + 2] = v_2nd_row[idx]
        set_normals(v_idx)
    indices = np.arange(0, 60, dtype='int')
    return vertices, indices, normals
def create_plane(dtype='float32') -> Tuple[np.array, np.array, np.array]:
""" Create standard plane of size one.
Args:
dtype: Data type of output numpy array.
Returns:
Tuple[np.array,np.array,np.array]: Tuple of size 3. First is np array for vertices, second for indices,
and last for the normals.
"""
# half dimension
width = 0.5
height = 0.5
vertices = np.array([
# top right
(width, 0.0, -height),
# top left
(-width, 0.0, -height),
# bottom left
(-width, 0.0, height),
# bottom right
(width, 0.0, height),
], dtype=dtype)
# For triangle type counter clockwise
# top right -> top left -> bottom left
# top right -> bottom left -> bottom right
indices = np.array([0, 1, 2, 0, 2, 3], dtype='int')
normals = np.array([
(0, 1, 0,),
(0, 1, 0,),
(0, 1, 0,),
(0, 1, 0,)
], dtype=dtype)
return vertices, indices, normals
def create_circle(dtype='float32', radius=1., fan_vertices=40) -> Tuple[np.array, np.array, np.array]:
""" Create standard circle with radius one.
Args:
radius: Radius of circle.
fan_vertices: Number of vertices used for triangle fan.
dtype: Data type of output numpy array.
Returns:
Tuple[np.array,np.array,np.array]: Tuple of size 3. First is np array for vertices, second for indices,
and last for the normals.
"""
vertices = np.zeros((1 + fan_vertices, 3), dtype=dtype)
vertices[0] = (0., 0., 0.)
angle_step = (2 * math.pi) / fan_vertices
angle = 0
for idx in range(1, fan_vertices + 1):
x = math.cos(angle) * radius
y = math.sin(angle) * radius
vertices[idx] = (x, 0., y)
angle += angle_step
indices = np.arange(0, 1 + fan_vertices, dtype='int')[::-1]
normals = np.array([(0, 1, 0,), ] * (fan_vertices + 1), dtype=dtype)
return vertices, indices, normals
def create_triangle(dtype='float32') -> Tuple[np.array, np.array, np.array]:
""" Create standard triangle with side length one.
Args:
dtype: Data type of output numpy array.
Returns:
Tuple[np.array,np.array,np.array]: Tuple of size 3. First is np array for vertices, second for indices,
and last for the normals.
"""
h = 0.5 * math.sqrt(3)
inner_circle_radius = math.sqrt(3) / 6.
vertices = np.array([
(0, 0, h - inner_circle_radius),
(0.5, 0, -inner_circle_radius),
(-0.5, 0, -inner_circle_radius),
], dtype=dtype)
indices = np.arange(0, 3, dtype='int')
normals = np.array([(0, 1, 0,), ] * 3, dtype=dtype)
return vertices, indices, normals
def create_cylinder(dtype='float32') -> Tuple[np.array, np.array, np.array]:
""" Create standard cylinder with height two and radius one.
Args:
dtype: Data type of output numpy array.
Returns:
Tuple[np.array,np.array,np.array]: Tuple of size 3. First is np array for vertices, second for indices,
and last for the normals.
"""
height = 2.
radius = 1.
sides = 6
# Top and bottom share one center vertices and the triangles form a fan.
# Each sides needs two unique triangle to render correct normals
# Vertices layout: (top (1), upper_circle (sides), middle (4*sides) ,lower_circle (sides), bottom (1).
vertices = np.zeros((sides * 6 + 2, 3), dtype=dtype)
normals = np.zeros(vertices.shape, dtype=dtype)
# Every side has 4 triangles (two for middle, one for top, and one for bottom).
indices = np.zeros((sides * 4, 3), dtype='int')
y = height / 2.
vertices[0] = (0., y, 0.)
normals[0] = (0, 1, 0)
vertices[-1] = (0., -y, 0.)
normals[-1] = (0, -1, 0)
angle_step = (2 * math.pi) / sides
angle = 0
for idx in range(1, sides + 1):
x = math.cos(angle) * radius
z = math.sin(angle) * radius
# Top circle
vertices[idx] = (x, y, z)
normals[idx] = (0, 1, 0)
# Bottom circle
vertices[idx + (sides * 5)] = (x, -y, z)
normals[-idx - 1] = (0, -1, 0)
angle += angle_step
# Top indices
indices[0:sides] = [(0, (i + 1) % sides + 1, i + 1) for i in range(sides)]
# Bottom indices
offset = len(vertices) - 1
indices[-sides:] = [(offset, offset - sides + i, offset - sides + (i + 1) % sides) for i in range(sides)]
for idx in range(0, sides):
array_idx = sides + idx * 4 + 1
top_left = vertices[idx + 1]
next_idx_top = idx + 2 if idx + 1 < sides else 1
top_right = vertices[next_idx_top]
bottom_left = vertices[idx - sides - 1]
next_idx_bottom = idx - sides if idx - sides <= -2 else -sides - 1
bottom_right = vertices[next_idx_bottom]
vertices[array_idx] = top_left
vertices[array_idx + 1] = top_right
vertices[array_idx + 2] = bottom_left
vertices[array_idx + 3] = bottom_right
v1 = top_right - top_left
v2 = bottom_left - top_left
normal = np.cross(v1, v2) / np.linalg.norm(np.cross(v1, v2))
normals[array_idx: (array_idx + 4)] = normal
indices[sides + idx] = (array_idx, array_idx + 1, array_idx + 2)
indices[sides * 2 + idx] = (array_idx + 1, array_idx + 3, array_idx + 2)
indices = indices.flatten()
return vertices, indices, normals
def create_tetrahedral(dtype='float32') -> Tuple[np.array, np.array, np.array]:
    """ Create tetrahedral geometry with radius one.

    Args:
        dtype: Data type of output numpy array.

    Returns:
        Tuple[np.array,np.array,np.array]: Tuple of size 3. First is np array for vertices
            (12 = 3 per face), second for indices, and last for the (per-vertex,
            un-normalised) normals.
    """
    size = 0.5
    # The four corners of a regular tetrahedron inscribed in a unit cube.
    v1 = np.array((size, size, size))
    v2 = np.array((size, -size, -size))
    v3 = np.array((-size, size, -size))
    v4 = np.array((-size, -size, size))
    vertices = np.array([
        # face 1
        v4, v3, v2,
        # face 2
        v3, v4, v1,
        # face 3
        v1, v4, v2,
        # face 4
        v2, v3, v1,
    ], dtype=dtype)
    # Outward-facing flat normals, one per face.
    norm_1 = tuple(np.cross((v4 - v2), (v3 - v2)))
    norm_2 = tuple(np.cross((v3 - v1), (v4 - v1)))
    norm_3 = tuple(np.cross((v4 - v1), (v2 - v1)))
    norm_4 = tuple(np.cross((v2 - v1), (v3 - v1)))
    # BUG FIX: the original used tuple repetition (`norm_1 * 3`), producing a
    # (4, 9) float64 array; emit one (12, 3) row per vertex in `dtype` so the
    # normals line up with `vertices`, as in the other geometry helpers.
    normals = np.array([norm_1] * 3 + [norm_2] * 3 + [norm_3] * 3 + [norm_4] * 3, dtype=dtype)
    indices = np.arange(0, 12, dtype='int')
    return vertices, indices, normals
def create_pyramid(dtype='float32') -> Tuple[np.array, np.array, np.array]:
    """ Create regular pyramid geometry with square base with base size and height one.

    Args:
        dtype: Data type of output numpy array.

    Returns:
        Tuple[np.array,np.array,np.array]: Tuple of size 3. First is np array for vertices
            (16 = 4 base + 3 per side), second for indices, and last for the flattened,
            un-normalised normals (3 scalars per vertex).
    """
    # Base sits at -1/3 so the centroid is at the origin.
    base_height = -0.333333
    tip_vert = np.array((0, 0.666666, 0))
    base_top_right_vert = np.array((0.5, base_height, 0.5))
    base_top_left_vert = np.array((-0.5, base_height, 0.5))
    base_bottom_right_vert = np.array((0.5, base_height, -0.5))
    base_bottom_left_vert = np.array((-0.5, base_height, -0.5))
    vertices = np.array([
        # Bottom
        base_top_right_vert,
        base_top_left_vert,
        base_bottom_left_vert,
        base_bottom_right_vert,
        # Front
        tip_vert,
        base_bottom_right_vert,
        base_bottom_left_vert,
        # Back
        tip_vert,
        base_top_left_vert,
        base_top_right_vert,
        # Right
        tip_vert,
        base_top_right_vert,
        base_bottom_right_vert,
        # Left
        tip_vert,
        base_bottom_left_vert,
        base_top_left_vert,
    ], dtype=dtype)
    # Outward flat normals for the four slanted faces.
    norm_back = tuple(np.cross((base_top_left_vert - tip_vert), (base_top_right_vert - tip_vert)))
    norm_front = tuple(np.cross((base_bottom_right_vert - tip_vert), (base_bottom_left_vert - tip_vert)))
    norm_right = tuple(np.cross((base_top_right_vert - tip_vert), (base_bottom_right_vert - tip_vert)))
    norm_left = tuple(np.cross((base_bottom_left_vert - tip_vert), (base_top_left_vert - tip_vert)))
    # Tuple repetition fans each face normal out to its 3 (or 4) vertices.
    normals = np.concatenate([
        (0, -1, 0) * 4,   # Bottom
        norm_front * 3,   # Front
        norm_back * 3,    # Back
        norm_right * 3,   # Right
        norm_left * 3,    # Left
    ]).astype(dtype)  # BUG FIX: honour the `dtype` parameter (was always float64)
    bottom_indices = np.array([0, 1, 2, 0, 2, 3])
    indices = np.concatenate([bottom_indices, np.arange(4, 16, dtype='int')])
    return vertices, indices, normals
| en | 0.578573 | # -*- coding: utf-8 -*- Create basic geometries which are used to create buffered primitives in vRAM. Create standard cube of size one. Args: dtype: Data type of output numpy array. Returns: Tuple[np.array,np.array,np.array]: Tuple of size 3. First is np array for vertices, second for indices, and last for the normals. # half dimension # front # top right # top left # bottom left # bottom right # right # top right # top left # bottom left # bottom right # back # top right # top left # bottom left # bottom right # left # top right # top left # bottom left # bottom right # top # top right # top left # bottom left # bottom right # bottom # top right # top left # bottom left # bottom right # For triangle type counter clockwise # top right -> top left -> bottom left # top right -> bottom left -> bottom right # front # right # back # left # top # bottom Create icosahedron geometry with radius one. seealso:: http://www.songho.ca/opengl/gl_sphere.html Args: dtype: Data type of output numpy array. Returns: Tuple[np.array,np.array,np.array]: Tuple of size 3. First is np array for vertices, second for indices, and last for the normals. # Fixed radius of 1 # 72 degree = 360 / 5 # elevation = 26.565 degree # array of 60 vertices (20 triangles) # start from -126 deg at 1st row # start from -90 deg at 2nd row # Top vertex at(0, 0, r) # 10 vertices at 1st and 2nd rows # elevation # length on XY plane # next horizontal angles # Bottom vertex at (0, 0, -r) # Helper function # Set vertices and normals # Top # First row # Second row # Bottom Create standard plane of size one. Args: dtype: Data type of output numpy array. Returns: Tuple[np.array,np.array,np.array]: Tuple of size 3. First is np array for vertices, second for indices, and last for the normals. 
# half dimension # top right # top left # bottom left # bottom right # For triangle type counter clockwise # top right -> top left -> bottom left # top right -> bottom left -> bottom right Create standard circle with radius one. Args: radius: Radius of circle. fan_vertices: Number of vertices used for triangle fan. dtype: Data type of output numpy array. Returns: Tuple[np.array,np.array,np.array]: Tuple of size 3. First is np array for vertices, second for indices, and last for the normals. Create standard triangle with side length one. Args: dtype: Data type of output numpy array. Returns: Tuple[np.array,np.array,np.array]: Tuple of size 3. First is np array for vertices, second for indices, and last for the normals. Create standard cylinder with height two and radius one. Args: dtype: Data type of output numpy array. Returns: Tuple[np.array,np.array,np.array]: Tuple of size 3. First is np array for vertices, second for indices, and last for the normals. # Top and bottom share one center vertices and the triangles form a fan. # Each sides needs two unique triangle to render correct normals # Vertices layout: (top (1), upper_circle (sides), middle (4*sides) ,lower_circle (sides), bottom (1). # Every side has 4 triangles (two for middle, one for top, and one for bottom). # Top circle # Bottom circle # Top indices # Bottom indices Create tetrahedral geometry with radius one. Args: dtype: Data type of output numpy array. Returns: Tuple[np.array,np.array,np.array]: Tuple of size 3. First is np array for vertices, second for indices, and last for the normals. # 1 # 2 # 3 # 4 Create regular pyramid geometry with square base with base size and height one. Args: dtype: Data type of output numpy array. Returns: Tuple[np.array,np.array,np.array]: Tuple of size 3. First is np array for vertices, second for indices, and last for the normals. # Bottom # Front # Back # Right # Left # Bottom # Front # Back # Right # Left | 3.544977 | 4 |
tests/conftest.py | Yelp/pidtree-bcc | 20 | 6614725 | import sys
from unittest.mock import MagicMock
# Globally mock bcc module
# Stand-in for the compiled BCC extension, which may not be importable in the
# test environment — TODO confirm this matches how production imports `bcc`.
bcc = MagicMock()
# Pre-register the mock so any later `import bcc` resolves to it instead of failing.
sys.modules.setdefault('bcc', bcc)
| import sys
from unittest.mock import MagicMock
# Globally mock bcc module
bcc = MagicMock()
sys.modules.setdefault('bcc', bcc)
| en | 0.411492 | # Globally mock bcc module | 1.884083 | 2 |
roast/testlibs/linux/sysdevices.py | Xilinx/roast-xilinx | 1 | 6614726 | <gh_stars>1-10
#
# Copyright (c) 2020 Xilinx, Inc. All rights reserved.
# SPDX-License-Identifier: MIT
#
import logging
log = logging.getLogger(__name__)
class SysDevices:
    """Helpers for querying sysfs class devices on a target board via a console."""

    def get_channels(self, dts_list, peripheral):
        """Return channel (device) names whose sysfs symlink targets match a DT node.

        Lists ``self.sys_class_dev[peripheral]`` on the target, greps each entry's
        symlink target for every fragment in *dts_list*, and collects the basename
        of each match.

        Args:
            dts_list: iterable of device-tree node name fragments to grep for.
            peripheral: key into ``self.sys_class_dev`` giving the sysfs class path.

        Returns:
            List of matching channel names (also stored on ``self.channels``).
        """
        self.console.sync()
        self.channels = []
        for dt_node in dts_list:
            self.console.runcmd(
                f"ls {self.sys_class_dev[peripheral]} -l | awk '{{print $NF}}'"
                f" | grep {dt_node}",
                expected="\r\n",
            )
            # Capture the command output once; the original re-read it in a
            # redundant nested `if self.console.output():` inside the else branch.
            output = self.console.output()
            if not output:
                log.info(f"No channels found for {dt_node}")
            else:
                self.channels.extend(output.split("\n"))
        # Reduce each symlink target path to its basename, trimming trailing whitespace.
        self.channels = [s.split("/")[-1].rstrip() for s in self.channels]
        return self.channels
| #
# Copyright (c) 2020 Xilinx, Inc. All rights reserved.
# SPDX-License-Identifier: MIT
#
import logging
log = logging.getLogger(__name__)
class SysDevices:
def get_channels(self, dts_list, peripheral):
self.console.sync()
self.channels = []
for dt_node in dts_list:
self.console.runcmd(
f"ls {self.sys_class_dev[peripheral]} -l | awk '{{print $NF}}'"
f" | grep {dt_node}",
expected="\r\n",
)
if not self.console.output():
log.info(f"No channels found for {dt_node}")
else:
if self.console.output():
self.channels.extend(self.console.output().split("\n"))
self.channels = [s.split("/")[-1].rstrip() for s in self.channels]
return self.channels | en | 0.497328 | # # Copyright (c) 2020 Xilinx, Inc. All rights reserved. # SPDX-License-Identifier: MIT # | 2.331048 | 2 |
seisspark.py | keshava/kampa | 1 | 6614727 | """
Copyright 2016 <NAME>
Licensed under the Apache License, Version 2.0 (the License);
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
httpwww.apache.orglicensesLICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an AS IS BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import segypy
import seisspark_config
from seisspark_config import dprint
def KV_flatList(kv):
    """Flatten one (key, value) record into a list of keyless (None, trace) pairs.

    The value may be a single trace (bytearray) or a gather (list of
    bytearrays); either way each trace is emitted as its own pair.
    """
    assert type(kv) is tuple
    payload = kv[1]
    if type(payload) is list:
        # Gather: one output pair per contained trace.
        return [(None, trace) for trace in payload]
    if type(payload) is not bytearray:
        dprint("KV_flatList error: ", type(payload))
    assert type(payload) is bytearray
    return [(None, payload)]
def KV_concatenateByteArray(kvlist):
    """Concatenate the bytearray values of a list of (key, value) pairs."""
    assert type(kvlist) is list
    merged = bytearray()
    for kv in kvlist:
        payload = kv[1]
        assert type(payload) is bytearray
        merged += payload
    return merged
def concatenateByteArray(vlist):
    """Join a list of bytearrays into one bytearray, preserving order."""
    assert type(vlist) is list
    merged = bytearray()
    for chunk in vlist:
        assert type(chunk) is bytearray
        merged += chunk
    return merged
def RDD_backToFlat(rdd):
    """Flatten any gathered (key, [trace, ...]) records back into a flat
    RDD of (None, trace) pairs."""
    return rdd.flatMap(KV_flatList)
def RDD_printKeys(rdd, n=1000):
    """Print the key of each of the first *n* records taken from *rdd*."""
    for record in rdd.take(n):
        assert type(record) is tuple
        print('KEY ' + str(record[0]))
def RDD_test(st, rdd):
    """Sanity-check the shape of the first record of *rdd*, printing diagnostics.

    A record must be a (key, value) tuple whose value is either a single
    trace (bytearray) or a gather (list of bytearrays).  On a shape
    mismatch the process is terminated with ``exit(0)``.
    """
    print(st)
    record = rdd.take(1)[0]
    valid = True
    if type(record) is not tuple:
        print('KV type', type(record))
        valid = False
    print("KEY", record[0])
    payload = record[1]
    if type(payload) is list:
        print("GATHER len", len(payload), "trace len", len(payload[0]))
        if type(payload[0]) is not bytearray:
            print('Trace type', type(payload[0]))
            valid = False
    else:
        if type(payload) is not bytearray:
            print('Trace type', type(payload))
            valid = False
        print("Trace len", len(payload))
    if not valid:
        print(st, 'Failed!')
        exit(0)
    print(st, 'Ok')
def RDD_printValue(rdd):
    """Print the key and value(s) of the first record in *rdd*.

    ``rdd.take(1)`` returns a *list* containing one record, so the record
    itself is ``take(1)[0]`` (the sibling ``RDD_test`` does this correctly;
    the previous code asserted on the list and always failed).
    """
    kv = rdd.take(1)[0]
    assert (type(kv) is tuple)
    print(kv[0])
    values = kv[1]
    if type(values) is list:
        # Gather: print the trace count, then each trace.
        print(len(values))
        for value in values:
            print(value)
    else:
        assert (type(values) is bytearray)
        print(values)
def KV_printValue(kv):
    """Print the key and the value(s) of one (key, value) record."""
    assert type(kv) is tuple
    print(kv[0])
    payload = kv[1]
    if type(payload) is not list:
        # Single trace.
        assert type(payload) is bytearray
        print(payload)
        return
    # Gather: print the trace count, then each trace.
    print(len(payload))
    for trace in payload:
        print(trace)
class KV_HeaderAccess:
    """Reads one SEG-Y trace-header field, identified by its segypy header
    name (a key of ``segypy.STH_def``, e.g. ``'ns'`` or ``'dt'``)."""

    def __init__(self, THN):
        # THN: trace-header name used to look up position/format in segypy.
        self._THN = THN

    def getHeaderKV(self, kv):
        """Re-key a (key, trace) pair by the header value: returns (value, trace)."""
        assert (type(kv) is tuple)
        assert (type(kv[1]) is bytearray)
        data = kv[1]
        value = self.getHeaderV(data)
        return (value, data)

    def getHeaderV(self, data):
        """Decode and return this header field's value from raw trace bytes."""
        assert (type(data) is bytearray)
        # Byte offset and binary format come from segypy's trace-header table.
        THpos = segypy.STH_def[self._THN]["pos"]
        THformat = segypy.STH_def[self._THN]["type"]
        segypy.printverbose('THN ' + str(self._THN))
        segypy.printverbose('THpos ' + str(THpos))
        segypy.printverbose('THformat ' + str(THformat))
        segypy.printverbose('data ' + str(data))
        value, index = segypy.getValue(data, THpos, THformat, segypy.endian, 1)
        return value
class KV_HeaderFilter:
    """Predicate keeping traces whose header field value lies in [first, last]."""

    def __init__(self, THN, first, last):
        self._ha = KV_HeaderAccess(THN)
        self._first = first
        self._last = last

    def filtKV(self, kv):
        """Return True when the trace's header value falls inside the range."""
        assert type(kv) is tuple
        assert type(kv[1]) is bytearray
        header_value = self._ha.getHeaderV(kv[1])
        return self._first <= header_value <= self._last
# output is flat
class RDD_SetKeyByHeader:
    """Re-keys every trace of an RDD by the value of one trace-header field.

    Output is a flat RDD of (header_value, trace) pairs.
    """

    def __init__(self, THN):
        # THN: trace-header name (segypy.STH_def key) used as the new key.
        self._ha = KV_HeaderAccess(THN)

    def do(self, rdd):
        """Return *rdd* flattened and re-keyed by the header value."""
        dprint("SetKeyByHeader")
        # Flatten first in case the input is still grouped into gathers.
        rdd = RDD_backToFlat(rdd)
        rdd = rdd.map(self._ha.getHeaderKV)
        if seisspark_config.debug:
            RDD_test("End SetKeyByHeader", rdd)
        return rdd
# output is gather
class RDD_GroupByHeader:
    """Groups traces into gathers keyed by one trace-header field.

    Output is an RDD of (header_value, [trace, ...]) records.
    """

    def __init__(self, THN):
        # THN: trace-header name to group by.
        self._sk = RDD_SetKeyByHeader(THN)

    def do(self, rdd):
        """Return *rdd* regrouped into gathers by the header value."""
        dprint("GroupByHeader")
        rdd = RDD_backToFlat(rdd)
        rdd = self._sk.do(rdd)  # set key
        rdd = rdd.groupByKey().mapValues(list)
        #rdd = rdd.sortByKey()
        if seisspark_config.debug:
            RDD_test("End GroupByHeader", rdd)
        return rdd
# output is flat
class RDD_FilterByHeader:
    """Keeps only traces whose header field value lies in [first, last].

    Output is a flat RDD of (None, trace) pairs.
    """

    def __init__(self, THN, first, last):
        # THN: trace-header name; first/last: inclusive range of kept values.
        self._hf = KV_HeaderFilter(THN, first, last)

    def do(self, rdd):
        """Return *rdd* flattened and filtered by the header-value range."""
        if seisspark_config.debug:
            dprint("FilterByHeader")
        rdd = RDD_backToFlat(rdd)
        rdd = rdd.filter(self._hf.filtKV)
        if seisspark_config.debug:
            RDD_test("End FilterByHeader", rdd)
        return rdd
# input and output are gathers
class RDD_Processing:
    """Runs an external command over each gather via a subprocess pipe.

    Input and output RDDs contain gathers: (key, [trace, ...]) records.
    The external command is expected to read SEG-Y-style traces (240-byte
    header + samples) on stdin and write traces in the same layout to stdout.
    """

    def __init__(self, args):
        # args: argv list of the external command spawned once per gather.
        self._args = args
        self._p = None
        dprint('RDD_Processing', args)
        return

    def pipe(self, kv):
        """Feed one gather through the external command; return (None, traces)."""
        import subprocess
        assert (type(kv) is tuple)
        assert (type(kv[1]) is list)  # should be gather
        assert (type(kv[1][0]) is bytearray)  # should be trace
        # if self._p == None:
        p = subprocess.Popen(
            self._args, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
        # simplest communication: write the whole gather, read all output.
        in_data = concatenateByteArray(kv[1])
        out, err = p.communicate(input=in_data)
        out_data_array = bytearray(out)
        dprint('ERROR STREAM', err)
        # Split the command's output back into traces: 240-byte header plus
        # ns samples of 4 bytes each (ns read from the first output header).
        ns = KV_HeaderAccess('ns').getHeaderV(out_data_array)
        bps = 4
        trace_len = 240+bps*ns
        trace_count = int (len(out_data_array)/trace_len)
        assert (trace_len*trace_count == len(out_data_array))
        out_data = []
        for i in range(trace_count):
            trace = out_data_array[i*trace_len:(i+1)*trace_len]
            out_data.append(trace)
        # Earlier streaming variant, kept for reference:
        # in_data = kv[1]
        # for d in in_data:
        #     self._p.stdin.write(d)  # write one by one
        # self._p.stdin.close()
        #
        # out_data = []
        # while True:
        #     head = bytearray(self._p.stdout.read(240))
        #     if not head:
        #         break
        #
        #     head = bytearray(head)
        #     ns = KV_HeaderAccess('ns').getHeaderV(head)
        #     bps = 4
        #
        #     body = self._p.stdout.read(ns * bps)
        #     if not body:
        #         print('cannot read trace body')
        #         exit(1)
        #     body = bytearray(body)
        #
        #     data = head
        #     head.extend(body)
        #
        #     out_data.append(data)
        # TODO optimization sometimes i can use kv[0] as new key
        return (None, out_data)

    def do(self, rdd):
        """Map every gather of *rdd* through the external command."""
        dprint("RDD_Processing")
        ##
        ## TODO why we do not use spark pipe?!!!!
        ##
        rdd = rdd.map(self.pipe)
        if seisspark_config.debug:
            RDD_test("End RDD_Processing", rdd)
        return rdd
def loadData(sc, filename, sort=None):
    """Load a Hadoop sequence file of traces, optionally grouped into gathers.

    Args:
        sc: SparkContext used to read the file.
        filename: Path of the sequence file.
        sort: Optional trace-header name; when given, the result is grouped
            into gathers keyed by that header's value.

    Returns:
        The loaded (and possibly grouped) RDD.
    """
    rdd = sc.sequenceFile(filename)
    if seisspark_config.debug:
        RDD_test("loadData", rdd)
    if sort != None:
        rdd = RDD_GroupByHeader(sort).do(rdd)
    if seisspark_config.debug:
        RDD_test("End loadData", rdd)
    return rdd
def saveData(rdd, filename):
    """Flatten *rdd* back to (None, trace) pairs and persist it as a
    Hadoop sequence file at *filename*."""
    dprint("saveData")
    RDD_backToFlat(rdd).saveAsSequenceFile(filename)
def prepareRDDtoDraw(rdd, count=None):
    """Collect trace data from *rdd* into a sample matrix suitable for display.

    Args:
        rdd: RDD of (key, trace-bytearray) or (key, [trace, ...]) records.
        count: Optional cap on the number of traces taken; ``None`` collects all.

    Returns:
        tuple: ``(Data, header)`` where ``Data`` is a (ns, ntraces) float
        matrix of samples (one column per trace) and ``header`` is the list
        of raw 240-byte trace headers.
    """
    dprint("prepareRDDtoDraw")
    from numpy import transpose
    from numpy import reshape
    import struct
    rdd = RDD_backToFlat(rdd)
    if count is None:
        DataFromRDD = rdd.collect()
    else:
        DataFromRDD = rdd.take(count)
    assert (type(DataFromRDD) is list)
    assert (type(DataFromRDD[0]) is tuple)
    assert (type(DataFromRDD[0][1]) is bytearray)
    first_trace = DataFromRDD[0][1]
    ns = KV_HeaderAccess('ns').getHeaderV(first_trace)
    dt = KV_HeaderAccess('dt').getHeaderV(first_trace)
    ntraces = len(DataFromRDD)
    bps = 4
    # The 240-byte header is treated as 60 dummy 4-byte "samples" per trace.
    # Integer division is required: in Python 3, 240 / 4 is a float, which
    # would break the struct format count, reshape() shape, and slice below.
    ndummy_samples = 240 // bps
    number = ntraces * (ns + ndummy_samples)
    # concatenate to single bytearray
    Data = KV_concatenateByteArray(DataFromRDD)
    header = []
    for d in DataFromRDD:
        header.append(d[1][0:240])
    # convert to matrix: unpack all floats, then drop the header "samples"
    Data = struct.unpack(segypy.endian + 'f' * number, Data)
    Data = reshape(Data, (ntraces, ns + ndummy_samples))
    Data = Data[:, ndummy_samples:(ns + ndummy_samples)]
    Data = transpose(Data)
    dprint("End prepareRDDtoDraw")
    return Data, header
def drawRDD(rdd, label, count=None):
    """Render up to *count* traces of *rdd* as a seismic image titled *label*."""
    Data, header = prepareRDDtoDraw(rdd, count)
    segypy.imageSegy(Data, label, 'jet')
| """
Copyright 2016 <NAME>
Licensed under the Apache License, Version 2.0 (the License);
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
httpwww.apache.orglicensesLICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an AS IS BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import segypy
import seisspark_config
from seisspark_config import dprint
def KV_flatList(kv):
assert (type(kv) is tuple)
out = list()
values = kv[1]
if type(values) is list:
for value in values:
out.append((None, value))
else:
if type(values) is not bytearray:
dprint("KV_flatList error: ", type(values))
assert (type(values) is bytearray)
out.append((None, values))
return out
def KV_concatenateByteArray(kvlist):
out = bytearray()
assert (type(kvlist) is list)
for kv in kvlist:
value = kv[1]
assert (type(value) is bytearray)
out.extend(value)
return out
def concatenateByteArray(vlist):
out = bytearray()
assert (type(vlist) is list)
for v in vlist:
assert (type(v) is bytearray)
out.extend(v)
return out
def RDD_backToFlat(rdd):
rdd = rdd.flatMap(KV_flatList)
#rdd = rdd.map (lambda x: (None, x[0]))
return rdd
def RDD_printKeys(rdd, n=1000):
# for i in range (1, 10):
# print ('KEYS')
kvlist = rdd.take(n)
for kv in kvlist:
assert (type(kv) is tuple)
print('KEY ' + str(kv[0]))
def RDD_test(st, rdd):
print(st)
kv = rdd.take(1)[0]
ok = True
if type(kv) is not tuple:
print('KV type', type(kv))
ok = False
# assert (type (kv) is tuple)
print("KEY", kv[0])
values = kv[1]
if type(values) is list:
print("GATHER len", len(values), "trace len", len(values[0]))
if type(values[0]) is not bytearray:
print('Trace type', type(values[0]))
ok = False
# assert (type (values[0]) is bytearray)
else:
if type(values) is not bytearray:
print('Trace type', type(values))
ok = False
# assert (type (values) is bytearray)
print("Trace len", len(values))
if ok == False:
print(st, 'Failed!')
exit(0)
print(st, 'Ok')
def RDD_printValue(rdd):
kv = rdd.take(1)
assert (type(kv) is tuple)
print(kv[0])
values = kv[1]
if type(values) is list:
print(len(values))
for value in values:
print(value)
else:
assert (type(values) is bytearray)
print(values)
def KV_printValue(kv):
assert (type(kv) is tuple)
print(kv[0])
values = kv[1]
if type(values) is list:
print(len(values))
for value in values:
print(value)
else:
assert (type(values) is bytearray)
print(values)
class KV_HeaderAccess:
def __init__(self, THN):
self._THN = THN
def getHeaderKV(self, kv):
assert (type(kv) is tuple)
assert (type(kv[1]) is bytearray)
data = kv[1]
value = self.getHeaderV(data)
return (value, data)
def getHeaderV(self, data):
assert (type(data) is bytearray)
THpos = segypy.STH_def[self._THN]["pos"]
THformat = segypy.STH_def[self._THN]["type"]
segypy.printverbose('THN ' + str(self._THN))
segypy.printverbose('THpos ' + str(THpos))
segypy.printverbose('THformat ' + str(THformat))
segypy.printverbose('data ' + str(data))
value, index = segypy.getValue(data, THpos, THformat, segypy.endian, 1)
return value
class KV_HeaderFilter:
def __init__(self, THN, first, last):
self._ha = KV_HeaderAccess(THN)
self._first = first
self._last = last
def filtKV(self, kv):
assert (type(kv) is tuple)
assert (type(kv[1]) is bytearray)
key = kv[0]
data = kv[1]
value = self._ha.getHeaderV(data)
return value >= self._first and value <= self._last
# output is flat
class RDD_SetKeyByHeader:
def __init__(self, THN):
self._ha = KV_HeaderAccess(THN)
def do(self, rdd):
dprint("SetKeyByHeader")
rdd = RDD_backToFlat(rdd)
rdd = rdd.map(self._ha.getHeaderKV)
if seisspark_config.debug:
RDD_test("End SetKeyByHeader", rdd)
return rdd
# output is gather
class RDD_GroupByHeader:
def __init__(self, THN):
self._sk = RDD_SetKeyByHeader(THN)
def do(self, rdd):
dprint("GroupByHeader")
rdd = RDD_backToFlat(rdd)
rdd = self._sk.do(rdd) # set key
rdd = rdd.groupByKey().mapValues(list)
#rdd = rdd.sortByKey()
if seisspark_config.debug:
RDD_test("End GroupByHeader", rdd)
return rdd
# output is flat
class RDD_FilterByHeader:
def __init__(self, THN, first, last):
self._hf = KV_HeaderFilter(THN, first, last)
def do(self, rdd):
if seisspark_config.debug:
dprint("FilterByHeader")
rdd = RDD_backToFlat(rdd)
rdd = rdd.filter(self._hf.filtKV)
if seisspark_config.debug:
RDD_test("End FilterByHeader", rdd)
return rdd
# input and output are gathers
class RDD_Processing:
def __init__(self, args):
self._args = args
self._p = None
dprint('RDD_Processing', args)
return
def pipe(self, kv):
import subprocess
assert (type(kv) is tuple)
assert (type(kv[1]) is list) # should be gather
assert (type(kv[1][0]) is bytearray) # should be trace
# if self._p == None:
p = subprocess.Popen(
self._args, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
# simplest communication
in_data = concatenateByteArray(kv[1])
out, err = p.communicate(input=in_data)
out_data_array = bytearray(out)
dprint('ERROR STREAM', err)
ns = KV_HeaderAccess('ns').getHeaderV(out_data_array)
bps = 4
trace_len = 240+bps*ns
trace_count = int (len(out_data_array)/trace_len)
assert (trace_len*trace_count == len(out_data_array))
out_data = []
for i in range(trace_count):
trace = out_data_array[i*trace_len:(i+1)*trace_len]
out_data.append(trace)
# in_data = kv[1]
# for d in in_data:
# self._p.stdin.write(d) # write one by one
# self._p.stdin.close()
#
# out_data = []
# while True:
# head = bytearray(self._p.stdout.read(240))
# if not head:
# break
#
# head = bytearray(head)
# ns = KV_HeaderAccess('ns').getHeaderV(head)
# bps = 4
#
# body = self._p.stdout.read(ns * bps)
# if not body:
# print('cannot read trace body')
# exit(1)
# body = bytearray(body)
#
# data = head
# head.extend(body)
#
# out_data.append(data)
# TODO optimization sometimes i can use kv[0] as new key
return (None, out_data)
def do(self, rdd):
dprint("RDD_Processing")
##
## TODO why we do not use spark pipe?!!!!
##
rdd = rdd.map(self.pipe)
if seisspark_config.debug:
RDD_test("End RDD_Processing", rdd)
return rdd
def loadData(sc, filename, sort=None):
rdd = sc.sequenceFile(filename)
if seisspark_config.debug:
RDD_test("loadData", rdd)
if sort != None:
rdd = RDD_GroupByHeader(sort).do(rdd)
if seisspark_config.debug:
RDD_test("End loadData", rdd)
return rdd
def saveData(rdd, filename):
dprint("saveData")
rdd = RDD_backToFlat(rdd)
rdd.saveAsSequenceFile(filename)
def prepareRDDtoDraw(rdd, count=None):
dprint("prepareRDDtoDraw")
from numpy import transpose
from numpy import reshape
import struct
rdd = RDD_backToFlat(rdd)
if count == None:
DataFromRDD = rdd.collect()
else:
DataFromRDD = rdd.take(count)
assert (type(DataFromRDD) is list)
assert (type(DataFromRDD[0]) is tuple)
assert (type(DataFromRDD[0][1]) is bytearray)
first_trace = DataFromRDD[0][1]
ns = KV_HeaderAccess('ns').getHeaderV(first_trace)
dt = KV_HeaderAccess('dt').getHeaderV(first_trace)
ntraces = len(DataFromRDD)
bps = 4
ndummy_samples = 240 / bps
number = ntraces * (ns + ndummy_samples)
# concatenate to single bytearray
Data = KV_concatenateByteArray(DataFromRDD)
header = []
for d in DataFromRDD:
header.append(d[1][0:240])
# convert to matrix
Data = struct.unpack(segypy.endian + 'f' * number, Data)
Data = reshape(Data, (ntraces, ns + ndummy_samples))
Data = Data[:, ndummy_samples:(ns + ndummy_samples)]
Data = transpose(Data)
dprint("End prepareRDDtoDraw")
return Data, header
def drawRDD(rdd, label, count=None):
Data, header = prepareRDDtoDraw(rdd, count)
segypy.imageSegy(Data, label, 'jet')
| en | 0.622871 | Copyright 2016 <NAME> Licensed under the Apache License, Version 2.0 (the License); you may not use this file except in compliance with the License. You may obtain a copy of the License at httpwww.apache.orglicensesLICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. #rdd = rdd.map (lambda x: (None, x[0])) # for i in range (1, 10): # print ('KEYS') # assert (type (kv) is tuple) # assert (type (values[0]) is bytearray) # assert (type (values) is bytearray) # output is flat # output is gather # set key #rdd = rdd.sortByKey() # output is flat # input and output are gathers # should be gather # should be trace # if self._p == None: # simplest communication # in_data = kv[1] # for d in in_data: # self._p.stdin.write(d) # write one by one # self._p.stdin.close() # # out_data = [] # while True: # head = bytearray(self._p.stdout.read(240)) # if not head: # break # # head = bytearray(head) # ns = KV_HeaderAccess('ns').getHeaderV(head) # bps = 4 # # body = self._p.stdout.read(ns * bps) # if not body: # print('cannot read trace body') # exit(1) # body = bytearray(body) # # data = head # head.extend(body) # # out_data.append(data) # TODO optimization sometimes i can use kv[0] as new key ## ## TODO why we do not use spark pipe?!!!! ## # concatenate to single bytearray # convert to matrix | 2.067552 | 2 |
webdispatch/tests/test_methoddispatcher.py | aodag/WebDispatch | 1 | 6614728 | """ tests for webdispatch.methoddispatcher"""
import mock
from testfixtures import compare, ShouldRaise
from webdispatch.testing import setup_environ
def dummy_get_app(*dummy):
    """Stand-in GET application that ignores its arguments and
    returns a fixed body."""
    body = ["get"]
    return body
class TestMethodDispatcher(object):
    """ tests for webdispatch.methoddispatcher.MethodDispatcher """

    @staticmethod
    def _get_target():
        """ get class under test """
        from webdispatch.methoddispatcher import MethodDispatcher
        return MethodDispatcher

    def _make_one(self, *args, **kwargs):
        """ create object under test """
        return self._get_target()(*args, **kwargs)

    def test_it(self):
        """ a GET request is dispatched to the registered 'get' handler """
        app = self._make_one(get=dummy_get_app)
        environ = setup_environ()
        start_response = mock.Mock()
        result = app(environ, start_response)
        compare(result, ["get"])

    def test_not_allowed(self):
        """ a request method with no handler yields 405 Method Not Allowed """
        app = self._make_one(get=dummy_get_app)
        environ = setup_environ(REQUEST_METHOD='POST')
        start_response = mock.Mock()
        result = app(environ, start_response)
        compare(result, [b"Method Not Allowed"])
        start_response.assert_called_with(
            '405 Method Not Allowed', [('Content-type', 'text/plain')])

    def test_register_app(self):
        """ handlers registered after construction are dispatched too """
        app = self._make_one()
        app.register_app("get", dummy_get_app)
        environ = setup_environ()
        start_response = mock.Mock()
        result = app(environ, start_response)
        compare(result, ["get"])

    def test_register_app_decorator(self):
        """ register_app without an app returns a decorator that registers
        the controller and hands it back unchanged """
        app = self._make_one()
        dec = app.register_app("get")
        controller = dummy_get_app
        ret = dec(controller)
        compare(ret, controller)
        environ = setup_environ()
        start_response = mock.Mock()
        result = app(environ, start_response)
        compare(result, ["get"])
class TestActionHandlerAdapter(object):
    """ tests for webdispatch.methoddispatcher.action_handler_adapter """

    @staticmethod
    def _call_fut(*args, **kwargs):
        """ call function under test """
        from webdispatch.methoddispatcher import action_handler_adapter
        return action_handler_adapter(*args, **kwargs)

    def test_call(self):
        """ the adapter instantiates the handler class and invokes the
        named action as a WSGI app """
        class DummyAction(object):
            """ dummy action class"""
            def __init__(self):
                self.message = b"Hello"

            def get_message(self):
                """ get message to return body"""
                return self.message

            def action(self, _, start_response):
                """ dummy action """
                start_response("200 OK",
                               [("Content-type", "text/plain")])
                return [self.get_message()]

        target = self._call_fut(DummyAction, "action")
        environ = setup_environ(REQUEST_METHOD='POST')
        start_response = mock.Mock()
        result = target(environ, start_response)
        compare(result, [b"Hello"])
        start_response.assert_called_with(
            '200 OK', [('Content-type', 'text/plain')])

    def test_invalid_name(self):
        """ adapting an attribute name the class lacks raises ValueError """
        with ShouldRaise(ValueError):
            self._call_fut(object, "actionx")
class TestActionDispatcher(object):
    """ tests for webdispatch.methoddispatcher.ActionDispatcher """

    @staticmethod
    def _get_target():
        """ get class under test"""
        from webdispatch.methoddispatcher import ActionDispatcher
        return ActionDispatcher

    def _make_one(self, *args, **kwargs):
        """ create object under test"""
        return self._get_target()(*args, **kwargs)

    def test_it(self):
        """ dispatches to the app registered under the routing 'action' arg """
        app = self._make_one()

        def test_app(*_):
            """ dummy app"""
            return [b'got action']
        app.register_app('test_app', test_app)
        # wsgiorg.routing_args carries (positional, named) routing values;
        # the dispatcher looks up the 'action' named argument.
        routing_args = [(), {'action': 'test_app'}]
        environ = setup_environ()
        environ.update({'wsgiorg.routing_args': routing_args})
        start_response = mock.Mock()
        result = app(environ, start_response)
        compare(result, [b"got action"])

    def test_register_action_handler(self):
        """ registering a handler class exposes its methods as actions """
        app = self._make_one()

        class DummyHandler(object):
            """ dummy handler """
            @staticmethod
            def get_body():
                """ get body to return action """
                return [b"test action"]

            def test_action(self, *_):
                """ dummy action """
                return self.get_body()

        app.register_actionhandler(DummyHandler)
        routing_args = [(), {'action': 'test_action'}]
        environ = setup_environ()
        environ.update({'wsgiorg.routing_args': routing_args})
        start_response = mock.Mock()
        result = app(environ, start_response)
        compare(result, [b"test action"])

    def test_not_found(self):
        """ an action with no registered app yields a 404 response whose
        body echoes the request URL """
        app = self._make_one()
        app.register_app('test_app',
                         None)
        routing_args = [(), {'action': 'no_app'}]
        env = {'wsgiorg.routing_args': routing_args}
        environ = setup_environ()
        environ.update(env)
        start_response = mock.Mock()
        result = app(environ, start_response)
        start_response.assert_called_with(
            '404 Not Found', [('Content-type', 'text/plain')])
        compare(result,
                [b"Not Found ",
                 b"http://127.0.0.1/"])
| """ tests for webdispatch.methoddispatcher"""
import mock
from testfixtures import compare, ShouldRaise
from webdispatch.testing import setup_environ
def dummy_get_app(*dummy):
""" dummy app """
return ["get"]
class TestMethodDispatcher(object):
""" test for webdispatch.methoddispatcher.MethodDispatcher"""
@staticmethod
def _get_target():
""" get class under test """
from webdispatch.methoddispatcher import MethodDispatcher
return MethodDispatcher
def _make_one(self, *args, **kwargs):
""" create object under test """
return self._get_target()(*args, **kwargs)
def test_it(self):
""" test basic using"""
app = self._make_one(get=dummy_get_app)
environ = setup_environ()
start_response = mock.Mock()
result = app(environ, start_response)
compare(result, ["get"])
def test_not_allowed(self):
""" test not found views"""
app = self._make_one(get=dummy_get_app)
environ = setup_environ(REQUEST_METHOD='POST')
start_response = mock.Mock()
result = app(environ, start_response)
compare(result, [b"Method Not Allowed"])
start_response.assert_called_with(
'405 Method Not Allowed', [('Content-type', 'text/plain')])
def test_register_app(self):
""" test registering app"""
app = self._make_one()
app.register_app("get", dummy_get_app)
environ = setup_environ()
start_response = mock.Mock()
result = app(environ, start_response)
compare(result, ["get"])
def test_register_app_decorator(self):
""" test registering app"""
app = self._make_one()
dec = app.register_app("get")
controller = dummy_get_app
ret = dec(controller)
compare(ret, controller)
environ = setup_environ()
start_response = mock.Mock()
result = app(environ, start_response)
compare(result, ["get"])
class TestActionHandlerAdapter(object):
""" test for webdispatch.methoddispatcher.action_handler_adapter"""
@staticmethod
def _call_fut(*args, **kwargs):
""" call function under test """
from webdispatch.methoddispatcher import action_handler_adapter
return action_handler_adapter(*args, **kwargs)
def test_call(self):
""" test basic using """
class DummyAction(object):
""" dummy action class"""
def __init__(self):
self.message = b"Hello"
def get_message(self):
""" get message to return body"""
return self.message
def action(self, _, start_response):
""" dummy action """
start_response("200 OK",
[("Content-type", "text/plain")])
return [self.get_message()]
target = self._call_fut(DummyAction, "action")
environ = setup_environ(REQUEST_METHOD='POST')
start_response = mock.Mock()
result = target(environ, start_response)
compare(result, [b"Hello"])
start_response.assert_called_with(
'200 OK', [('Content-type', 'text/plain')])
def test_invalid_name(self):
""" test using invalid attr name """
with ShouldRaise(ValueError):
self._call_fut(object, "actionx")
class TestActionDispatcher(object):
""" test for webdispatch.methoddispatcher.ActionDispatcher"""
@staticmethod
def _get_target():
""" get class under test"""
from webdispatch.methoddispatcher import ActionDispatcher
return ActionDispatcher
def _make_one(self, *args, **kwargs):
""" create object under test"""
return self._get_target()(*args, **kwargs)
def test_it(self):
""" test for basic usage"""
app = self._make_one()
def test_app(*_):
""" dummy app"""
return [b'got action']
app.register_app('test_app', test_app)
routing_args = [(), {'action': 'test_app'}]
environ = setup_environ()
environ.update({'wsgiorg.routing_args': routing_args})
start_response = mock.Mock()
result = app(environ, start_response)
compare(result, [b"got action"])
def test_register_action_handler(self):
""" test register """
app = self._make_one()
class DummyHandler(object):
""" dummy handler """
@staticmethod
def get_body():
""" get body to return action """
return [b"test action"]
def test_action(self, *_):
""" dummy action """
return self.get_body()
app.register_actionhandler(DummyHandler)
routing_args = [(), {'action': 'test_action'}]
environ = setup_environ()
environ.update({'wsgiorg.routing_args': routing_args})
start_response = mock.Mock()
result = app(environ, start_response)
compare(result, [b"test action"])
def test_not_found(self):
""" test called not registered action """
app = self._make_one()
app.register_app('test_app',
None)
routing_args = [(), {'action': 'no_app'}]
env = {'wsgiorg.routing_args': routing_args}
environ = setup_environ()
environ.update(env)
start_response = mock.Mock()
result = app(environ, start_response)
start_response.assert_called_with(
'404 Not Found', [('Content-type', 'text/plain')])
compare(result,
[b"Not Found ",
b"http://127.0.0.1/"])
| en | 0.542013 | tests for webdispatch.methoddispatcher dummy app test for webdispatch.methoddispatcher.MethodDispatcher get class under test create object under test test basic using test not found views test registering app test registering app test for webdispatch.methoddispatcher.action_handler_adapter call function under test test basic using dummy action class get message to return body dummy action test using invalid attr name test for webdispatch.methoddispatcher.ActionDispatcher get class under test create object under test test for basic usage dummy app test register dummy handler get body to return action dummy action test called not registered action | 2.555187 | 3 |
templates/preproc_column_transformer.py | eric373/ml-py | 0 | 6614729 | <reponame>eric373/ml-py
#exec(open('.\\templates\\preproc_column_transformer.py').read())
import subprocess as sp
import importlib as il
import pickle as pk
import numpy as np
import sklearn.compose as sc
import sklearn.preprocessing as pp
import sklearn.pipeline as pl
import sklearn.ensemble as ensemble
import sklearn.model_selection as ms
import datacfg
if __name__ == '__main__':
    # Demo: preprocess the UCI "Adult" dataset with a ColumnTransformer
    # (scale numeric columns, one-hot encode categoricals) and evaluate a
    # RandomForest pipeline with k-fold cross-validation.
    sp.call('cls', shell = True)
    il.reload(datacfg)
    # Load the pickled dataframe from the path configured in datacfg.
    with open(datacfg.datacfg()['adult']['filepath'], 'rb') as fl:
        df = pk.load(fl)
    # Set feature and target columns.
    ycols = set(['class'])
    xcols = set(df.columns) - ycols
    # Set numeric and non-numeric columns.
    numerics = set(df.select_dtypes([np.number]).columns)
    nonnumerics = xcols - numerics
    # xcols = xcols - set(['native-country'])
    xcols = list(xcols)
    # ColumnTransformer below selects columns by positional index into the
    # xcols ordering, so compute the indices of each column group.
    idxnumerics = [xcols.index(col) for col in numerics]
    idxnonnumerics = [xcols.index(col) for col in nonnumerics]
    # Designate data.
    X = df.loc[:, xcols].values
    y = np.ravel(df.loc[:, ycols].values)
    # Split data.
    Xtrain, Xtest, ytrain, ytest = ms.train_test_split(X, y, test_size = 0.33
        ,random_state = 0)
    # Cross-validation.
    k = 3
    cvsplitter = ms.KFold(n_splits = k, shuffle = True, random_state = 0)
    # Apply a transformation for each column group: standardize numerics and
    # one-hot encode categoricals (drop='first' avoids the dummy-variable trap).
    transformers = list()
    transformers.append(('StandardScaler', pp.StandardScaler(), idxnumerics))
    transformers.append(('OneHotEncoder', pp.OneHotEncoder(sparse = False, drop = 'first', handle_unknown = 'ignore'), idxnonnumerics))
    ct = sc.ColumnTransformer(transformers, remainder = 'passthrough')
    ct.fit(Xtrain)
    Xtrain_transformed = ct.transform(Xtrain)
    print('Feature Names: {0}'.format(ct.get_feature_names_out()))
    # Use the transformer in a pipeline so the fit happens inside each CV fold.
    estimators = list()
    estimators.append(('ColumnTransformer', sc.ColumnTransformer(transformers, remainder = 'passthrough')))
    estimators.append(('RandomForestClassifier', ensemble.RandomForestClassifier(n_estimators = 100, max_features = 3)))
    ppl = pl.Pipeline(estimators)
    accuracy = ms.cross_val_score(ppl, Xtrain, ytrain, cv = cvsplitter)
    print('Accuracy of pipeline: {0:.2f}'.format(accuracy.mean()))
import subprocess as sp
import importlib as il
import pickle as pk
import numpy as np
import sklearn.compose as sc
import sklearn.preprocessing as pp
import sklearn.pipeline as pl
import sklearn.ensemble as ensemble
import sklearn.model_selection as ms
import datacfg
if __name__ == '__main__':
    # Clear the console (Windows `cls`; a harmless no-op elsewhere).
    sp.call('cls', shell = True)
    # Reload the data-config module so edits are picked up between runs.
    il.reload(datacfg)
    # Load the pickled "adult" dataframe described by the data config.
    with open(datacfg.datacfg()['adult']['filepath'], 'rb') as fl:
        df = pk.load(fl)
    # Set feature and target columns.
    ycols = set(['class'])
    xcols = set(df.columns) - ycols
    # Set numeric and non-numeric columns.
    numerics = set(df.select_dtypes([np.number]).columns)
    nonnumerics = xcols - numerics
    # xcols = xcols - set(['native-country'])
    xcols = list(xcols)
    # Column *positions* are needed because ColumnTransformer below receives
    # a bare numpy array (no column names).
    idxnumerics = [xcols.index(col) for col in numerics]
    idxnonnumerics = [xcols.index(col) for col in nonnumerics]
    # Designate data.
    X = df.loc[:, xcols].values
    y = np.ravel(df.loc[:, ycols].values)
    # Split data.
    Xtrain, Xtest, ytrain, ytest = ms.train_test_split(X, y, test_size = 0.33
        ,random_state = 0)
    # Cross-validation.
    k = 3
    cvsplitter = ms.KFold(n_splits = k, shuffle = True, random_state = 0)
    # Apply a transformation for each column: scale the numeric columns,
    # one-hot encode the categoricals (drop='first' avoids collinearity).
    transformers = list()
    transformers.append(('StandardScaler', pp.StandardScaler(), idxnumerics))
    transformers.append(('OneHotEncoder', pp.OneHotEncoder(sparse = False, drop = 'first', handle_unknown = 'ignore'), idxnonnumerics))
    ct = sc.ColumnTransformer(transformers, remainder = 'passthrough')
    ct.fit(Xtrain)
    Xtrain_transformed = ct.transform(Xtrain)
    print('Feature Names: {0}'.format(ct.get_feature_names_out()))
    # Use the transformer in a pipeline.
    estimators = list()
    estimators.append(('ColumnTransformer', sc.ColumnTransformer(transformers, remainder = 'passthrough')))
    estimators.append(('RandomForestClassifier', ensemble.RandomForestClassifier(n_estimators = 100, max_features = 3)))
    ppl = pl.Pipeline(estimators)
    accuracy = ms.cross_val_score(ppl, Xtrain, ytrain, cv = cvsplitter)
    # BUG FIX: this final statement was corrupted by stray text appended
    # after the print call; restored to a clean statement.
    print('Accuracy of pipeline: {0:.2f}'.format(accuracy.mean()))
setup.py | BlizardWizard/Easybot | 0 | 6614730 | from setuptools import setup
# Read the long description from the README so PyPI renders the same text
# as the repository front page.
# BUG FIX: read the file as UTF-8 explicitly; the previous call used the
# platform default encoding and could fail (or mis-decode) on Windows.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

# Package metadata for the easybot Discord helper library.
setup(
    name='easybot',
    version='0.0.6',
    download_url='https://github.com/BlizardWizard/easybot/archive/0.0.6.tar.gz',
    install_requires=[
        'discord',
        'asyncio'
    ],
    description='Easy Discord bot library with Python',
    long_description=long_description,
    long_description_content_type="text/markdown",
    url='https://github.com/BlizardWizard/Easybot',
    author='Blizard_Wizard',
    author_email='<EMAIL>',
    license='MIT',
    packages=['easybot'],
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Programming Language :: Python :: 3.6",
        "License :: OSI Approved :: MIT License",
    ],
    keywords=[
        'Discord', 'Python', 'bot', 'easybot', 'easy'
    ],
    zip_safe=False
)
| from setuptools import setup
# Read the long description from the README so PyPI renders the same text
# as the repository front page.
# BUG FIX: read the file as UTF-8 explicitly; the previous call used the
# platform default encoding and could fail (or mis-decode) on Windows.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

# Package metadata for the easybot Discord helper library.
setup(
    name='easybot',
    version='0.0.6',
    download_url='https://github.com/BlizardWizard/easybot/archive/0.0.6.tar.gz',
    install_requires=[
        'discord',
        'asyncio'
    ],
    description='Easy Discord bot library with Python',
    long_description=long_description,
    long_description_content_type="text/markdown",
    url='https://github.com/BlizardWizard/Easybot',
    author='Blizard_Wizard',
    author_email='<EMAIL>',
    license='MIT',
    packages=['easybot'],
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Programming Language :: Python :: 3.6",
        "License :: OSI Approved :: MIT License",
    ],
    keywords=[
        'Discord', 'Python', 'bot', 'easybot', 'easy'
    ],
    zip_safe=False
)
| none | 1 | 1.364426 | 1 | |
projects/migrations/0001_initial.py | CobwebOrg/cobweb-django | 7 | 6614731 | <filename>projects/migrations/0001_initial.py
# Generated by Django 2.0.5 on 2018-06-04 22:22
import cobweb.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the `projects` app: Claim, Nomination, Project.

    NOTE(review): auto-generated by Django 2.0.5 (`makemigrations`); this
    migration may already be applied in deployed databases, so do not edit
    its operations retroactively.
    """
    # First migration of this app.
    initial = True
    # Must run after languages_plus/core initial migrations and after the
    # (swappable) AUTH_USER_MODEL's app.
    dependencies = [
        ('languages_plus', '0004_auto_20171214_0004'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('core', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Claim',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('active', models.BooleanField(default=True)),
                ('deleted', models.BooleanField(default=False)),
                ('has_holding', models.BooleanField(default=False)),
                ('crawl_scope', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.CrawlScope')),
            ],
            bases=(cobweb.models.CobwebModelMixin, models.Model),
        ),
        migrations.CreateModel(
            name='Nomination',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(blank=True, max_length=200, null=True)),
                ('description', models.TextField(blank=True, null=True)),
                ('deleted', models.BooleanField(default=False)),
                ('rationale', models.TextField(blank=True, null=True)),
                ('suggested_crawl_frequency', models.CharField(blank=True, choices=[('Hourly', 'Hourly'), ('Daily', 'Daily'), ('Weekly', 'Weekly'), ('Monthly', 'Monthly')], max_length=50, null=True)),
                ('suggested_crawl_end_date', models.DateTimeField(blank=True, null=True)),
                ('language', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='languages_plus.Language')),
                ('nominated_by', models.ManyToManyField(blank=True, to=settings.AUTH_USER_MODEL)),
            ],
            bases=(cobweb.models.CobwebModelMixin, models.Model),
        ),
        migrations.CreateModel(
            name='Project',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=500, unique=True)),
                ('description', models.TextField(null=True)),
                # NOTE(review): max_length=10 cannot store the 12-char choice
                # value 'Cobweb Users' -- confirm against current models.py.
                ('nomination_policy', models.CharField(choices=[('Public', "Public: anyone can nominate, even if they're not logged in."), ('Cobweb Users', 'Cobweb Users: anyone with a Cobweb account can nominate.'), ('Restricted', 'Restricted: only selected users and organizations can nominate.')], default='Public', max_length=10)),
                # NOTE(review): default='Active' is not one of the declared
                # choices (Open/Deprecated/Inactive/Deleted) -- verify.
                ('status', models.CharField(choices=[('Open', 'Open for Nomination'), ('Deprecated', 'Deprecated (no further nominations recommended)'), ('Inactive', 'Inactive (closed to nomination)'), ('Deleted', 'Deleted')], default='Active', max_length=8)),
                ('administrators', models.ManyToManyField(related_name='projects_administered', to=settings.AUTH_USER_MODEL, verbose_name='administrators')),
                ('nominator_blacklist', models.ManyToManyField(blank=True, related_name='projects_blacklisted', to=settings.AUTH_USER_MODEL)),
                ('nominator_orgs', models.ManyToManyField(blank=True, related_name='projects_nominating', to='core.Organization')),
                ('nominators', models.ManyToManyField(blank=True, related_name='projects_nominating', to=settings.AUTH_USER_MODEL)),
                ('subject_headings', models.ManyToManyField(blank=True, to='core.SubjectHeading')),
                ('tags', models.ManyToManyField(blank=True, to='core.Tag')),
            ],
            bases=(cobweb.models.CobwebModelMixin, models.Model),
        ),
        # Nomination FKs added separately because Project is created above.
        migrations.AddField(
            model_name='nomination',
            name='project',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='nominations', to='projects.Project'),
        ),
        migrations.AddField(
            model_name='nomination',
            name='resource',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='nominations', to='core.Resource'),
        ),
        migrations.AddField(
            model_name='nomination',
            name='subject_headings',
            field=models.ManyToManyField(blank=True, to='core.SubjectHeading'),
        ),
        migrations.AddField(
            model_name='nomination',
            name='tags',
            field=models.ManyToManyField(blank=True, to='core.Tag'),
        ),
    ]
| <filename>projects/migrations/0001_initial.py
# Generated by Django 2.0.5 on 2018-06-04 22:22
import cobweb.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the `projects` app: Claim, Nomination, Project.

    NOTE(review): auto-generated by Django 2.0.5 (`makemigrations`); this
    migration may already be applied in deployed databases, so do not edit
    its operations retroactively.
    """
    # First migration of this app.
    initial = True
    # Must run after languages_plus/core initial migrations and after the
    # (swappable) AUTH_USER_MODEL's app.
    dependencies = [
        ('languages_plus', '0004_auto_20171214_0004'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('core', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Claim',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('active', models.BooleanField(default=True)),
                ('deleted', models.BooleanField(default=False)),
                ('has_holding', models.BooleanField(default=False)),
                ('crawl_scope', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.CrawlScope')),
            ],
            bases=(cobweb.models.CobwebModelMixin, models.Model),
        ),
        migrations.CreateModel(
            name='Nomination',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(blank=True, max_length=200, null=True)),
                ('description', models.TextField(blank=True, null=True)),
                ('deleted', models.BooleanField(default=False)),
                ('rationale', models.TextField(blank=True, null=True)),
                ('suggested_crawl_frequency', models.CharField(blank=True, choices=[('Hourly', 'Hourly'), ('Daily', 'Daily'), ('Weekly', 'Weekly'), ('Monthly', 'Monthly')], max_length=50, null=True)),
                ('suggested_crawl_end_date', models.DateTimeField(blank=True, null=True)),
                ('language', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='languages_plus.Language')),
                ('nominated_by', models.ManyToManyField(blank=True, to=settings.AUTH_USER_MODEL)),
            ],
            bases=(cobweb.models.CobwebModelMixin, models.Model),
        ),
        migrations.CreateModel(
            name='Project',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=500, unique=True)),
                ('description', models.TextField(null=True)),
                # NOTE(review): max_length=10 cannot store the 12-char choice
                # value 'Cobweb Users' -- confirm against current models.py.
                ('nomination_policy', models.CharField(choices=[('Public', "Public: anyone can nominate, even if they're not logged in."), ('Cobweb Users', 'Cobweb Users: anyone with a Cobweb account can nominate.'), ('Restricted', 'Restricted: only selected users and organizations can nominate.')], default='Public', max_length=10)),
                # NOTE(review): default='Active' is not one of the declared
                # choices (Open/Deprecated/Inactive/Deleted) -- verify.
                ('status', models.CharField(choices=[('Open', 'Open for Nomination'), ('Deprecated', 'Deprecated (no further nominations recommended)'), ('Inactive', 'Inactive (closed to nomination)'), ('Deleted', 'Deleted')], default='Active', max_length=8)),
                ('administrators', models.ManyToManyField(related_name='projects_administered', to=settings.AUTH_USER_MODEL, verbose_name='administrators')),
                ('nominator_blacklist', models.ManyToManyField(blank=True, related_name='projects_blacklisted', to=settings.AUTH_USER_MODEL)),
                ('nominator_orgs', models.ManyToManyField(blank=True, related_name='projects_nominating', to='core.Organization')),
                ('nominators', models.ManyToManyField(blank=True, related_name='projects_nominating', to=settings.AUTH_USER_MODEL)),
                ('subject_headings', models.ManyToManyField(blank=True, to='core.SubjectHeading')),
                ('tags', models.ManyToManyField(blank=True, to='core.Tag')),
            ],
            bases=(cobweb.models.CobwebModelMixin, models.Model),
        ),
        # Nomination FKs added separately because Project is created above.
        migrations.AddField(
            model_name='nomination',
            name='project',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='nominations', to='projects.Project'),
        ),
        migrations.AddField(
            model_name='nomination',
            name='resource',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='nominations', to='core.Resource'),
        ),
        migrations.AddField(
            model_name='nomination',
            name='subject_headings',
            field=models.ManyToManyField(blank=True, to='core.SubjectHeading'),
        ),
        migrations.AddField(
            model_name='nomination',
            name='tags',
            field=models.ManyToManyField(blank=True, to='core.Tag'),
        ),
    ]
| en | 0.720797 | # Generated by Django 2.0.5 on 2018-06-04 22:22 | 1.766299 | 2 |
nisse/models/__init__.py | nexocodecom/nisse.io | 0 | 6614732 | from nisse.models.database import * | from nisse.models.database import * | none | 1 | 1.08705 | 1 | |
pages.py | sloria/ROSIEBot | 1 | 6614733 | <reponame>sloria/ROSIEBot
""" The page superclass and subclasses for verifier"""
from bs4 import BeautifulSoup
from settings import base_urls
import os
MIRROR = 'archive/'
# Superclass for page-specific page instances
class Page:
    """A mirrored page: maps a live URL to its local archive file.

    Attributes:
        url: the original URL.
        path: relative path of the mirrored file under MIRROR.
        file_size: size of the mirrored file in KB.
    """
    def __init__(self, url):
        self.url = url
        self.path = self.get_path_from_url(url)
        # Set size attribute in KB; os.path.getsize raises FileNotFoundError
        # itself if the mirror file does not exist.
        # BUG FIX: the previous try/except re-raised a *bare* FileNotFoundError,
        # discarding the path/errno information carried by the original one.
        self.file_size = os.path.getsize(self.path) / 1000

    def __str__(self):
        return self.path

    # Takes a URL and produces its relative file name.
    def get_path_from_url(self, url):
        """Translate a full URL into the relative path of the mirrored file."""
        # Remove http://domain and point at the page's index.html
        tail = url.replace(base_urls[0], '') + 'index.html'
        path = MIRROR + tail
        return path

    def get_content(self):
        """Parse the mirrored HTML file and return a BeautifulSoup tree.

        BUG FIX: the file handle was previously leaked (open() without close);
        a context manager now closes it deterministically.
        """
        with open(self.path) as fh:
            return BeautifulSoup(fh, 'html.parser')
# Page-specific subclasses
# Page-specific subclasses.
# None of these add behavior yet; they exist so callers can dispatch on
# page type. The boilerplate __init__ overrides (which only forwarded to
# Page.__init__) were dead code and have been removed; the inherited
# constructor has the identical signature.
class ProjectDashboardPage(Page):
    """Project dashboard page."""

class ProjectFilesPage(Page):
    """Project files page."""

class ProjectWikiPage(Page):
    """Project wiki page."""

class ProjectAnalyticsPage(Page):
    """Project analytics page."""

class ProjectRegistrationsPage(Page):
    """Project registrations page."""

class ProjectForksPage(Page):
    """Project forks page."""

class RegistrationDashboardPage(Page):
    """Registration dashboard page."""

class RegistrationFilesPage(Page):
    """Registration files page."""

class RegistrationWikiPage(Page):
    """Registration wiki page."""

class RegistrationAnalyticsPage(Page):
    """Registration analytics page."""

class RegistrationForksPage(Page):
    """Registration forks page."""

class UserProfilePage(Page):
    """User profile page."""

class InstitutionDashboardPage(Page):
    """Institution dashboard page."""
| """ The page superclass and subclasses for verifier"""
from bs4 import BeautifulSoup
from settings import base_urls
import os
MIRROR = 'archive/'
# Superclass for page-specific page instances
class Page:
    """A mirrored page: maps a live URL to its local archive file.

    Attributes:
        url: the original URL.
        path: relative path of the mirrored file under MIRROR.
        file_size: size of the mirrored file in KB.
    """
    def __init__(self, url):
        self.url = url
        self.path = self.get_path_from_url(url)
        # Set size attribute in KB; os.path.getsize raises FileNotFoundError
        # itself if the mirror file does not exist.
        # BUG FIX: the previous try/except re-raised a *bare* FileNotFoundError,
        # discarding the path/errno information carried by the original one.
        self.file_size = os.path.getsize(self.path) / 1000

    def __str__(self):
        return self.path

    # Takes a URL and produces its relative file name.
    def get_path_from_url(self, url):
        """Translate a full URL into the relative path of the mirrored file."""
        # Remove http://domain and point at the page's index.html
        tail = url.replace(base_urls[0], '') + 'index.html'
        path = MIRROR + tail
        return path

    def get_content(self):
        """Parse the mirrored HTML file and return a BeautifulSoup tree.

        BUG FIX: the file handle was previously leaked (open() without close);
        a context manager now closes it deterministically.
        """
        with open(self.path) as fh:
            return BeautifulSoup(fh, 'html.parser')
# Page-specific subclasses
# Page-specific subclasses.
# None of these add behavior yet; they exist so callers can dispatch on
# page type. The boilerplate __init__ overrides (which only forwarded to
# Page.__init__) were dead code and have been removed; the inherited
# constructor has the identical signature. Stray dataset residue that had
# been appended to the final line was also stripped.
class ProjectDashboardPage(Page):
    """Project dashboard page."""

class ProjectFilesPage(Page):
    """Project files page."""

class ProjectWikiPage(Page):
    """Project wiki page."""

class ProjectAnalyticsPage(Page):
    """Project analytics page."""

class ProjectRegistrationsPage(Page):
    """Project registrations page."""

class ProjectForksPage(Page):
    """Project forks page."""

class RegistrationDashboardPage(Page):
    """Registration dashboard page."""

class RegistrationFilesPage(Page):
    """Registration files page."""

class RegistrationWikiPage(Page):
    """Registration wiki page."""

class RegistrationAnalyticsPage(Page):
    """Registration analytics page."""

class RegistrationForksPage(Page):
    """Registration forks page."""

class UserProfilePage(Page):
    """User profile page."""

class InstitutionDashboardPage(Page):
    """Institution dashboard page."""
trx/filters.py | cmariette/trx | 1 | 6614734 | # -*- coding: utf-8 -*-
"""
module that contains filters and outliers removal procedures
most of them return the data array and a dictionary with additional info
(parameters, statistics, etc)
"""
from __future__ import print_function,division,absolute_import
from . import utils
import copy
import logging
import statsmodels.robust
log = logging.getLogger(__name__) # __name__ is "foo.bar" here
import numpy as np
np.seterr(all='ignore')
def applyFilter(data, boolArray):
    """Recursively apply a boolean mask to a (possibly nested) dict of arrays.

    Every ndarray whose first dimension matches ``boolArray`` is replaced by
    its masked selection; nested dicts are filtered recursively, except the
    one stored under the key 'orig', which is deliberately left untouched.
    The container is modified in place and also returned.
    """
    length = boolArray.shape[0]
    for key in list(data.keys()):
        value = data[key]
        if isinstance(value, np.ndarray) and value.shape[0] == length:
            data[key] = value[boolArray]
        elif key != 'orig' and isinstance(value, dict):
            data[key] = applyFilter(value, boolArray)
    return data
def applyFilters(data,funcForAveraging=np.nanmean):
    """Apply every mask stored in ``data.filters`` to the per-scan curves.

    Works on a deep copy, so the caller's object is left untouched.  The
    pristine curves are stashed once under ``data.unfiltered`` so filters can
    always be re-applied from scratch.  ``funcForAveraging`` recomputes the
    per-scan average after filtering (default: NaN-aware mean).

    NOTE(review): `data` appears to be a dot-accessible dict-like container
    (attribute assignment such as `data.unfiltered = ...` and lookup
    `data.unfiltered.diffs_in_scan` on a plain dict would fail) -- confirm
    the container type used by callers.
    """
    # make copy in this way tr1 = trx.filters.applyFilters(tr) does not modify tr
    data = copy.deepcopy(data)
    if not "filters" in data: return data
    # Stash the original (unfiltered) arrays only the first time through.
    if not "unfiltered" in data: data.unfiltered = \
       dict( diffs_in_scan = data.diffs_in_scan,
             chi2_0=data.chi2_0,
             diff=data.diffs )
    # Always restart from the pristine curves before re-applying filters.
    data.diffs_in_scan = data.unfiltered.diffs_in_scan
    filters = data.filters.keys()
    for filt_name in filters:
        filt = data.filters[filt_name]
        # understand what kind of filter (q-by-q or for every image)
        if filt[0].ndim == 1:
            # Per-curve mask: drop the flagged curves entirely.
            for nscan in range(len(data.diffs_in_scan)):
                data.diffs_in_scan[nscan] = data.diffs_in_scan[nscan][~filt[nscan]]
                data.diffs[nscan] = funcForAveraging( data.diffs_in_scan[nscan],axis=0)
        elif filt[0].ndim == 2: # q-by-q kind of filter
            # Point-wise mask: blank flagged points with NaN, keep the curve.
            for nscan in range(len(data.diffs_in_scan)):
                data.diffs_in_scan[nscan][~filt[nscan]] = np.nan
                data.diffs[nscan] = funcForAveraging( data.diffs_in_scan[nscan],axis=0)
    data.diffs_plus_ref = data.diffs+data.ref_average
    return data
def removeZingers(curves,errs=None,norm='auto',threshold=10,useDerivative=False):
    """Blank zinger (spike) points in *curves* with NaN, in place.

    Parameters
    ----------
    curves : 2-d array (ncurves, npoints)
        Curves are normalized internally before comparison.
    errs : array or None
        Per-point noise estimate; if None a robust MAD estimate is used.
    norm : 'auto', scalar or array
        'auto' normalizes every curve by its own mean.
    threshold : float
        Points deviating from the median curve by more than ``threshold``
        noise units are flagged.
    useDerivative : bool
        Work on the derivative instead (useful for data with trends).

    Returns
    -------
    The same ``curves`` array with flagged points set to NaN.
    """
    # normalize
    if norm == 'auto':
        norm = np.nanmean(curves,axis=1)
        norm = utils.reshapeToBroadcast(norm,curves)
    if useDerivative:
        # BUG FIX: was `curves/norn` -> NameError whenever useDerivative=True
        data = np.gradient(curves/norm,axis=0)
    else:
        data = curves/norm
    median = np.median(data,axis=0)
    # calculate or normalize error
    if errs is None:
        errs = statsmodels.robust.mad(data,axis=0)
    else:
        errs = errs/norm
    diff = np.abs(data-median)/errs
    idx = diff > threshold
    log.debug("Removed %d zingers from %d curves"%(idx.sum(),len(curves)))
    print("Removed %d zingers from %d curves"%(idx.sum(),len(curves)))
    if idx.sum()>0:
        curves[idx]=np.nan
    #curves = np.ma.MaskedArray(data=curves,mask=idx)
    return curves
def filterOutlier(curves,errs=None,norm=None,threshold=10):
    """Drop outlier *curves*: keep only those whose reduced chi2 with
    respect to the point-wise median curve is below ``threshold``.

    BUG FIXES:
    * `reshapeToBroadcast(n, curves)` referenced an undefined name `n`
      (NameError on the norm='auto' path); it now uses `norm`.
    * the median and chi2 were previously collapsed to scalars
      (`np.median(curves)` / `np.sum(diff**2)`), which made `idx` a scalar
      boolean and `curves[idx]` meaningless; the statistic is now computed
      per curve, consistent with chi2Filter's "remove entire curves" intent.
    """
    # normalize
    if norm == 'auto':
        norm = np.nanmean(curves,axis=1)
        norm = utils.reshapeToBroadcast(norm,curves)
    elif norm is None:
        norm = 1
    curves = curves/norm
    if errs is None:
        errs = statsmodels.robust.mad(curves,axis=0)
    else:
        errs = errs/norm
    # Point-wise median across curves, then a reduced chi2 per curve.
    median = np.median(curves,axis=0)
    diff = np.abs(curves-median)/errs
    chi2 = np.sum(diff**2,axis=1)/curves.shape[1]
    idx = chi2 < threshold
    return curves[idx]
def chi2Filter(data,threshold='auto'):
    """ Contrary to removeZingers, this removes entire curves.

    Builds one boolean mask per scan point flagging curves whose chi2_0
    exceeds *threshold* ('auto' = 95th percentile over all scans) and
    stores the masks under data.filters.chi2; applyFilters does the
    actual removal.

    NOTE(review): `data` is assumed to be a dot-accessible dict-like
    container (plain dicts do not support `data.filters.chi2 = ...`).
    """
    if threshold == "auto":
        # 95th percentile of all chi2 values pooled across scan points.
        threshold=np.percentile(np.concatenate(data.chi2_0),95)
    idx_mask = []
    for iscan in range(len(data.diffs_in_scan)):
        idx = data.chi2_0[iscan] > threshold
        # expand along other axis (q ...)
        #idx = utils.reshapeToBroadcast(idx,data.diffsInScanPoint[iscan])
        idx_mask.append(idx)
        log.info("Chi2 mask, scanpoint: %s, curves filtereout out %d/%d (%.2f%%)"%\
            (data.scan[iscan],idx.sum(),len(idx),idx.sum()/len(idx)*100) )
    # Record the masks and the threshold actually used (for reproducibility).
    if "filters" not in data: data.filters = dict()
    if "filters_pars" not in data: data.filters_pars = dict()
    data.filters.chi2 = idx_mask
    data.filters_pars.chi2_threshold = threshold
    return data
| # -*- coding: utf-8 -*-
"""
module that contains filters and outliers removal procedures
most of them return the data array and a dictionary with additional info
(parameters, statistics, etc)
"""
from __future__ import print_function,division,absolute_import
from . import utils
import copy
import logging
import statsmodels.robust
log = logging.getLogger(__name__) # __name__ is "foo.bar" here
import numpy as np
np.seterr(all='ignore')
def applyFilter(data, boolArray):
    """Recursively apply a boolean mask to a (possibly nested) dict of arrays.

    Every ndarray whose first dimension matches ``boolArray`` is replaced by
    its masked selection; nested dicts are filtered recursively, except the
    one stored under the key 'orig', which is deliberately left untouched.
    The container is modified in place and also returned.
    """
    length = boolArray.shape[0]
    for key in list(data.keys()):
        value = data[key]
        if isinstance(value, np.ndarray) and value.shape[0] == length:
            data[key] = value[boolArray]
        elif key != 'orig' and isinstance(value, dict):
            data[key] = applyFilter(value, boolArray)
    return data
def applyFilters(data,funcForAveraging=np.nanmean):
    """Apply every mask stored in ``data.filters`` to the per-scan curves.

    Works on a deep copy, so the caller's object is left untouched.  The
    pristine curves are stashed once under ``data.unfiltered`` so filters can
    always be re-applied from scratch.  ``funcForAveraging`` recomputes the
    per-scan average after filtering (default: NaN-aware mean).

    NOTE(review): `data` appears to be a dot-accessible dict-like container
    (attribute assignment such as `data.unfiltered = ...` and lookup
    `data.unfiltered.diffs_in_scan` on a plain dict would fail) -- confirm
    the container type used by callers.
    """
    # make copy in this way tr1 = trx.filters.applyFilters(tr) does not modify tr
    data = copy.deepcopy(data)
    if not "filters" in data: return data
    # Stash the original (unfiltered) arrays only the first time through.
    if not "unfiltered" in data: data.unfiltered = \
       dict( diffs_in_scan = data.diffs_in_scan,
             chi2_0=data.chi2_0,
             diff=data.diffs )
    # Always restart from the pristine curves before re-applying filters.
    data.diffs_in_scan = data.unfiltered.diffs_in_scan
    filters = data.filters.keys()
    for filt_name in filters:
        filt = data.filters[filt_name]
        # understand what kind of filter (q-by-q or for every image)
        if filt[0].ndim == 1:
            # Per-curve mask: drop the flagged curves entirely.
            for nscan in range(len(data.diffs_in_scan)):
                data.diffs_in_scan[nscan] = data.diffs_in_scan[nscan][~filt[nscan]]
                data.diffs[nscan] = funcForAveraging( data.diffs_in_scan[nscan],axis=0)
        elif filt[0].ndim == 2: # q-by-q kind of filter
            # Point-wise mask: blank flagged points with NaN, keep the curve.
            for nscan in range(len(data.diffs_in_scan)):
                data.diffs_in_scan[nscan][~filt[nscan]] = np.nan
                data.diffs[nscan] = funcForAveraging( data.diffs_in_scan[nscan],axis=0)
    data.diffs_plus_ref = data.diffs+data.ref_average
    return data
def removeZingers(curves,errs=None,norm='auto',threshold=10,useDerivative=False):
    """Blank zinger (spike) points in *curves* with NaN, in place.

    Parameters
    ----------
    curves : 2-d array (ncurves, npoints)
        Curves are normalized internally before comparison.
    errs : array or None
        Per-point noise estimate; if None a robust MAD estimate is used.
    norm : 'auto', scalar or array
        'auto' normalizes every curve by its own mean.
    threshold : float
        Points deviating from the median curve by more than ``threshold``
        noise units are flagged.
    useDerivative : bool
        Work on the derivative instead (useful for data with trends).

    Returns
    -------
    The same ``curves`` array with flagged points set to NaN.
    """
    # normalize
    if norm == 'auto':
        norm = np.nanmean(curves,axis=1)
        norm = utils.reshapeToBroadcast(norm,curves)
    if useDerivative:
        # BUG FIX: was `curves/norn` -> NameError whenever useDerivative=True
        data = np.gradient(curves/norm,axis=0)
    else:
        data = curves/norm
    median = np.median(data,axis=0)
    # calculate or normalize error
    if errs is None:
        errs = statsmodels.robust.mad(data,axis=0)
    else:
        errs = errs/norm
    diff = np.abs(data-median)/errs
    idx = diff > threshold
    log.debug("Removed %d zingers from %d curves"%(idx.sum(),len(curves)))
    print("Removed %d zingers from %d curves"%(idx.sum(),len(curves)))
    if idx.sum()>0:
        curves[idx]=np.nan
    #curves = np.ma.MaskedArray(data=curves,mask=idx)
    return curves
def filterOutlier(curves,errs=None,norm=None,threshold=10):
    """Drop outlier *curves*: keep only those whose reduced chi2 with
    respect to the point-wise median curve is below ``threshold``.

    BUG FIXES:
    * `reshapeToBroadcast(n, curves)` referenced an undefined name `n`
      (NameError on the norm='auto' path); it now uses `norm`.
    * the median and chi2 were previously collapsed to scalars
      (`np.median(curves)` / `np.sum(diff**2)`), which made `idx` a scalar
      boolean and `curves[idx]` meaningless; the statistic is now computed
      per curve, consistent with chi2Filter's "remove entire curves" intent.
    """
    # normalize
    if norm == 'auto':
        norm = np.nanmean(curves,axis=1)
        norm = utils.reshapeToBroadcast(norm,curves)
    elif norm is None:
        norm = 1
    curves = curves/norm
    if errs is None:
        errs = statsmodels.robust.mad(curves,axis=0)
    else:
        errs = errs/norm
    # Point-wise median across curves, then a reduced chi2 per curve.
    median = np.median(curves,axis=0)
    diff = np.abs(curves-median)/errs
    chi2 = np.sum(diff**2,axis=1)/curves.shape[1]
    idx = chi2 < threshold
    return curves[idx]
def chi2Filter(data,threshold='auto'):
    """ Contrary to removeZingers, this removes entire curves.

    Builds one boolean mask per scan point flagging curves whose chi2_0
    exceeds *threshold* ('auto' = 95th percentile over all scans) and
    stores the masks under data.filters.chi2; applyFilters does the
    actual removal.

    NOTE(review): `data` is assumed to be a dot-accessible dict-like
    container (plain dicts do not support `data.filters.chi2 = ...`).
    """
    if threshold == "auto":
        # 95th percentile of all chi2 values pooled across scan points.
        threshold=np.percentile(np.concatenate(data.chi2_0),95)
    idx_mask = []
    for iscan in range(len(data.diffs_in_scan)):
        idx = data.chi2_0[iscan] > threshold
        # expand along other axis (q ...)
        #idx = utils.reshapeToBroadcast(idx,data.diffsInScanPoint[iscan])
        idx_mask.append(idx)
        log.info("Chi2 mask, scanpoint: %s, curves filtereout out %d/%d (%.2f%%)"%\
            (data.scan[iscan],idx.sum(),len(idx),idx.sum()/len(idx)*100) )
    # Record the masks and the threshold actually used (for reproducibility).
    if "filters" not in data: data.filters = dict()
    if "filters_pars" not in data: data.filters_pars = dict()
    data.filters.chi2 = idx_mask
    data.filters_pars.chi2_threshold = threshold
    return data
| en | 0.739067 | # -*- coding: utf-8 -*- module that contains filters and outliers removal procedures most of them return the data array and a dictionary with additional info (parameters, statistics, etc) # __name__ is "foo.bar" here # make copy in this way tr1 = trx.filters.applyFilters(tr) does not modity tr # understand what kind of filter (q-by-q or for every image) # q-by-q kind of filter curves will be normalized internally if errs is None, calculate mad based noise useDerivative for data with trends .. # normalize # calculate or normalize error #curves = np.ma.MaskedArray(data=curves,mask=idx) # normalize Contrary to removeZingers, this removes entire curves # expand along other axis (q ...) #idx = utils.reshapeToBroadcast(idx,data.diffsInScanPoint[iscan]) | 2.709489 | 3 |
Python3/126.py | rakhi2001/ecom7 | 854 | 6614735 | __________________________________________________________________________________________________
sample 80 ms submission
#https://leetcode.com/problems/word-ladder-ii/discuss/40482/Python-simple-BFS-layer-by-layer
class Solution:
    """Word Ladder II (LeetCode 126).

    Bidirectional BFS from beginWord and endWord; `parents` records, for
    every word, the words preceding it on some shortest path. Once the
    frontiers meet, a DFS over `parents` enumerates every shortest
    transformation sequence.

    FIX: two earlier, slower implementations that had been kept around as
    dead no-op string literals were removed.
    """

    def findLadders(self, beginWord, endWord, wordList):
        """Return all shortest transformation sequences, or [] if none."""
        if endWord not in wordList or not beginWord or not endWord:
            return []
        words = set(wordList)
        parents = collections.defaultdict(set)
        forward, backward = {beginWord}, {endWord}
        searching_forward = True  # which direction `forward` currently represents
        while forward and backward:
            # Always expand the smaller frontier; swapping flips direction.
            if len(forward) > len(backward):
                forward, backward = backward, forward
                searching_forward = not searching_forward
            next_frontier = set()
            words -= forward
            for word in forward:
                for i in range(len(word)):
                    head, tail = word[:i], word[i + 1:]
                    for ch in string.ascii_lowercase:
                        candidate = head + ch + tail
                        if candidate not in words:
                            continue
                        next_frontier.add(candidate)
                        # Parent edges always point beginWord -> endWord.
                        if searching_forward:
                            parents[candidate].add(word)
                        else:
                            parents[word].add(candidate)
            forward = next_frontier
            if next_frontier & backward:
                # Frontiers met: every shortest path is encoded in `parents`.
                self.res = []
                path = [endWord]
                self.dfs(parents, endWord, beginWord, path)
                return self.res
        return []

    def dfs(self, parents, cur_w, beginWord, path):
        """Backtrack from cur_w to beginWord, appending each completed path."""
        if cur_w == beginWord:
            self.res.append(path[::-1])
            return
        for predecessor in parents[cur_w]:
            path.append(predecessor)
            self.dfs(parents, predecessor, beginWord, path)
            path.pop()
__________________________________________________________________________________________________
sample 14200 kb submission
class Solution:
    """Word Ladder II via bidirectional BFS.

    `tree` maps each word to its successors on some shortest path (edges
    always oriented beginWord -> endWord); `bt` backtracks through the
    tree to enumerate every shortest sequence.
    """

    def findLadders(self, beginWord, endWord, wordList):
        """Return all shortest transformation sequences, or [] if none."""
        words = set(wordList)
        if endWord not in words:
            return []
        tree = collections.defaultdict(set)
        n = len(beginWord)
        bq, eq = {beginWord}, {endWord}
        found = False
        rev = False  # True once bq actually holds the backward frontier
        while bq and not found:
            words -= bq
            nq = set()
            for x in bq:
                for i in range(n):
                    for c in 'abcdefghijklmnopqrstuvwxyz':
                        y = x[:i] + c + x[i + 1:]
                        if y not in words:
                            continue
                        if y in eq:
                            found = True
                        else:
                            nq.add(y)
                        # Keep edges oriented from beginWord toward endWord.
                        if rev:
                            tree[y].add(x)
                        else:
                            tree[x].add(y)
            bq = nq
            # Always expand the smaller frontier next time.
            if len(bq) > len(eq):
                bq, eq, rev = eq, bq, not rev
        def bt(x):
            if x == endWord:
                return [[x]]
            return [[x] + rest for y in tree[x] for rest in bt(y)]
        return bt(beginWord)
__________________________________________________________________________________________________
| __________________________________________________________________________________________________
sample 80 ms submission
#https://leetcode.com/problems/word-ladder-ii/discuss/40482/Python-simple-BFS-layer-by-layer
class Solution:
    """Word Ladder II (LeetCode 126).

    Bidirectional BFS from beginWord and endWord; `parents` records, for
    every word, the words preceding it on some shortest path. Once the
    frontiers meet, a DFS over `parents` enumerates every shortest
    transformation sequence.

    FIX: two earlier, slower implementations that had been kept around as
    dead no-op string literals were removed.
    """

    def findLadders(self, beginWord, endWord, wordList):
        """Return all shortest transformation sequences, or [] if none."""
        if endWord not in wordList or not beginWord or not endWord:
            return []
        words = set(wordList)
        parents = collections.defaultdict(set)
        forward, backward = {beginWord}, {endWord}
        searching_forward = True  # which direction `forward` currently represents
        while forward and backward:
            # Always expand the smaller frontier; swapping flips direction.
            if len(forward) > len(backward):
                forward, backward = backward, forward
                searching_forward = not searching_forward
            next_frontier = set()
            words -= forward
            for word in forward:
                for i in range(len(word)):
                    head, tail = word[:i], word[i + 1:]
                    for ch in string.ascii_lowercase:
                        candidate = head + ch + tail
                        if candidate not in words:
                            continue
                        next_frontier.add(candidate)
                        # Parent edges always point beginWord -> endWord.
                        if searching_forward:
                            parents[candidate].add(word)
                        else:
                            parents[word].add(candidate)
            forward = next_frontier
            if next_frontier & backward:
                # Frontiers met: every shortest path is encoded in `parents`.
                self.res = []
                path = [endWord]
                self.dfs(parents, endWord, beginWord, path)
                return self.res
        return []

    def dfs(self, parents, cur_w, beginWord, path):
        """Backtrack from cur_w to beginWord, appending each completed path."""
        if cur_w == beginWord:
            self.res.append(path[::-1])
            return
        for predecessor in parents[cur_w]:
            path.append(predecessor)
            self.dfs(parents, predecessor, beginWord, path)
            path.pop()
__________________________________________________________________________________________________
sample 14200 kb submission
class Solution:
    """Word Ladder II via bidirectional BFS.

    `tree` maps each word to its successors on some shortest path (edges
    always oriented beginWord -> endWord); `bt` backtracks through the
    tree to enumerate every shortest sequence.
    """

    def findLadders(self, beginWord, endWord, wordList):
        """Return all shortest transformation sequences, or [] if none."""
        words = set(wordList)
        if endWord not in words:
            return []
        tree = collections.defaultdict(set)
        n = len(beginWord)
        bq, eq = {beginWord}, {endWord}
        found = False
        rev = False  # True once bq actually holds the backward frontier
        while bq and not found:
            words -= bq
            nq = set()
            for x in bq:
                for i in range(n):
                    for c in 'abcdefghijklmnopqrstuvwxyz':
                        y = x[:i] + c + x[i + 1:]
                        if y not in words:
                            continue
                        if y in eq:
                            found = True
                        else:
                            nq.add(y)
                        # Keep edges oriented from beginWord toward endWord.
                        if rev:
                            tree[y].add(x)
                        else:
                            tree[x].add(y)
            bq = nq
            # Always expand the smaller frontier next time.
            if len(bq) > len(eq):
                bq, eq, rev = eq, bq, not rev
        def bt(x):
            if x == endWord:
                return [[x]]
            return [[x] + rest for y in tree[x] for rest in bt(y)]
        return bt(beginWord)
__________________________________________________________________________________________________
| en | 0.5458 | #https://leetcode.com/problems/word-ladder-ii/discuss/40482/Python-simple-BFS-layer-by-layer wordList = set(wordList) res = [] layer = {} layer[beginWord] = [[beginWord]] while layer: newlayer = collections.defaultdict(list) for w in layer: if w == endWord: res.extend(k for k in layer[w]) else: for i in range(len(w)): for c in 'abcdefghijklmnopqrstuvwxyz': neww = w[:i]+c+w[i+1:] if neww in wordList: newlayer[neww]+=[j+[neww] for j in layer[w]] wordList -= set(newlayer.keys()) layer = newlayer return res if endWord not in wordList or not endWord or not beginWord: return [] wordList = set(wordList) forward, backward = {beginWord}, {endWord} direction = 1 parents = collections.defaultdict(set) while forward and backward: if len(forward) > len(backward): forward, backward = backward, forward direction *= -1 next_forward = set() wordList -= forward for word in forward: for i in range(len(word)): first, second = word[:i], word[i+1:] for ch in string.ascii_lowercase: combined_word = first + ch + second if combined_word in wordList: next_forward.add(combined_word) if direction == 1: parents[combined_word].add(word) else: parents[word].add(combined_word) if next_forward & backward: self.res = [] path = [endWord] self.dfs(parents, endWord, beginWord,path) return self.res forward = next_forward return [] | 3.677281 | 4 |
processing/src/utils.py | Smart-AniMon/server | 0 | 6614736 | <reponame>Smart-AniMon/server<filename>processing/src/utils.py
from enum import Enum
import binascii
class ReturnCodesMQTT():
    """Human-readable messages for MQTT CONNACK return codes.

    Codes 0-5 come from the MQTT 3.1.1 specification; any code greater
    than 5 is reported as "Currently unused".
    """

    # Keyed by the stringified return code.
    MESSAGES = {
        "0": "Connection successful",
        "1": "Connection refused – incorrect protocol version",
        "2": "Connection refused – invalid client identifier",
        "3": "Connection refused – server unavailable",
        "4": "Connection refused – bad username or password",
        "5": "Connection refused – not authorised",
        "6": "Currently unused"
    }

    @classmethod
    def get_message(cls, code_rc):
        """Return the message for *code_rc*, folding every code above 5 into 6."""
        key = code_rc if code_rc <= 5 else 6
        return cls.MESSAGES[str(key)]
def str64_to_bytes(image_base64: str) -> bytes:
    """Decode a base64-encoded string into its raw bytes."""
    # Turn the text into ASCII-compatible bytes, then strip the base64 layer.
    encoded = image_base64.encode('utf-8')
    return binascii.a2b_base64(encoded)
def get_name(c : object) -> str:
    """Return the qualified name of *c*'s class as 'module.ClassName'."""
    # Keep the original lookups: __module__ via the instance, name via the class.
    return "{}.{}".format(c.__module__, c.__class__.__name__)
def check_labels(label: str, labels: list, strict_compare=False) -> bool:
    """Return True if any entry of *labels*, uppercased, matches *label*.

    With ``strict_compare`` the match must be exact equality; otherwise an
    uppercased entry only needs to occur as a substring of *label*.
    """
    if strict_compare:
        return any(entry.upper() == label for entry in labels)
    return any(entry.upper() in label for entry in labels)
| from enum import Enum
import binascii
class ReturnCodesMQTT():
MESSAGES = {
"0" : "Connection successful",
"1" : "Connection refused – incorrect protocol version",
"2" : "Connection refused – invalid client identifier",
"3" : "Connection refused – server unavailable",
"4" : "Connection refused – bad username or password",
"5" : "Connection refused – not authorised",
"6" : "Currently unused"
}
@classmethod
def get_message(cls, code_rc):
if code_rc > 5:
code_rc = 6
return cls.MESSAGES[str(code_rc)]
def str64_to_bytes(image_base64: str) -> bytes:
image_base64_bytes = image_base64.encode('utf-8') # string to bytes code base64
image_bytes = binascii.a2b_base64(image_base64_bytes) # decode base64
return image_bytes
def get_name(c : object) -> str:
return c.__module__+'.'+c.__class__.__name__
def check_labels(label: str, labels: list, strict_compare=False) -> bool:
for description in labels:
if strict_compare:
if description.upper() == label:
return True
elif description.upper() in label:
return True
return False | en | 0.584152 | # string to bytes code base64 # decode base64 | 2.911484 | 3 |
tools/augment_historic_EUROSTAT.py | WOIDMO/WoMo-FrontEnd-V1 | 0 | 6614737 | <gh_stars>0
#
#Augment EUROSTAT historic stats with data from mortality.org
#
import pandas as pd
from datetime import date, timedelta
import datetime
import numpy as np
#Dictionaries and stuff
# EUROSTAT 2-letter country codes covered by the historic dataset.
ccodes = ['AT', 'BE', 'BG', 'CH', 'CZ', 'DE', 'DK', 'EE', 'ES', 'FI', 'FR', 'HU', 'IS', 'IT', 'LI', 'LT', 'LU', 'LV', 'ME', 'NO', 'PT', 'RS', 'SE', 'SI', 'SK', 'UK', ]
# Maps mortality.org CountryCode values to the jurisdiction names used in the
# EUROSTAT CSV.  Keys mix mortality.org codes (e.g. 'DEUTNP', 'FRATNP',
# 'GBRTENW') with plain ISO-2 codes.  Some values carry a trailing space
# ('Switzerland ', 'Finland ', 'Montenegro ') -- kept as-is because matching
# below compares the exact strings; TODO confirm those spaces are intentional.
ccodes_trans = {
    'AUT': 'Austria',
    'BEL': 'Belgium',
    'BGR': 'Bulgaria',
    'CH': 'Switzerland ',
    'CZE': 'Czechia',
    'DEUTNP': 'Germany',
    'DNK': 'Denmark',
    'EST': 'Estonia',
    'ESP': 'Spain',
    'FIN': 'Finland ',
    'FRATNP': 'France',
    'HUN': 'Hungary',
    'ISL': 'Iceland',
    'ITA': 'Italy',
    'LI': 'Liechtenstein',
    'LT': 'Lithuania',
    'LUX': 'Luxembourg',
    'LV': 'Latvia',
    'ME': 'Montenegro ',
    'NLD': 'Netherlands',
    'NOR': 'Norway',
    'PRT': 'Portugal',
    'RS': 'Serbia',
    'SWE': 'Sweden',
    'SI': 'Slovenia',
    'SVK': 'Slovakia',
    'GBRTENW': 'England',
    'GBR_SCO': 'Scotland',
    'USA': 'United States'
}
def get_start_end_dates(year, week):
    """Return the (Monday, Sunday) dates of week *week* of *year*.

    Week 1 is the week containing Jan 1 when Jan 1 falls Mon-Thu
    (ISO-8601 style); otherwise week 1 starts the following Monday.
    """
    jan_first = date(year, 1, 1)
    offset = jan_first.weekday()  # 0 = Monday ... 6 = Sunday
    if offset <= 3:
        week_one_monday = jan_first - timedelta(days=offset)
    else:
        week_one_monday = jan_first + timedelta(days=7 - offset)
    start = week_one_monday + timedelta(weeks=week - 1)
    return start, start + timedelta(days=6)
#Vars
# Year window of historic data to keep.  NOTE(review): `weeks` is never used
# below -- presumably left over from an earlier version.
start_year = 2010
end_year = 2019
weeks = 52
#Load the historic Eurostat file
eurostat_historic_df = pd.read_csv("../data/EUROSTAT_historic.csv")
#Load the mortality.org file and clean up, Drop the US and everything below 2010
mort_org_df = pd.read_csv("../data/historic-augment/stmf.csv")
mort_org_df.drop(mort_org_df[mort_org_df.CountryCode == 'USA'].index, inplace=True)
# Dropping the per-sex rows ('f' and 'm') keeps only the combined-sex totals.
mort_org_df.drop(mort_org_df[mort_org_df.Sex == 'f'].index, inplace=True)
mort_org_df.drop(mort_org_df[mort_org_df.Sex == 'm'].index, inplace=True)
mort_org_df.drop(mort_org_df[mort_org_df.Year < start_year].index, inplace=True)
mort_org_df.drop(mort_org_df[mort_org_df.Year > end_year].index, inplace=True)
mort_org_df.reset_index(drop=True, inplace=True)
#Translate the country codes to actual names
# Adds a 'jurisdiction' column by mapping each row's CountryCode through
# ccodes_trans; a code missing from that dict would raise KeyError.
totalrows = mort_org_df.shape[0] - 1
row = 0
while (row <= totalrows):
    # Process the dataframe here
    ccode = mort_org_df.at[row, 'CountryCode'] # Get the corresponding country name from the abr
    country = ccodes_trans[ccode]
    #print(country)
    mort_org_df.loc[row,'jurisdiction'] = country
    row +=1
#
# Update United Kingdom, needs combination of Scotland and England
#Combine England and Scotland into United Kingdom
df_scotland = mort_org_df[mort_org_df.jurisdiction == 'Scotland']
df_scotland.reset_index(drop=True, inplace=True)
df_england = mort_org_df[mort_org_df.jurisdiction == 'England']
df_england.reset_index(drop=True, inplace=True)
# Row-wise sum assumes England and Scotland have the same number of rows in
# the same year/week order after reset_index -- TODO confirm that holds for
# the source file.
df_england['natural_cause'] = df_england['DTotal'] + df_scotland['DTotal']
#Add the two entities and append as United Kingdom
print('Processing UK ---------------------')
totalrows = df_england.shape[0] - 1
row = 0
jurisdiction = 'United Kingdom'
while (row <= totalrows):
    row_year = df_england['Year'][row]
    row_week = df_england['Week'][row]
    # True wherever EUROSTAT already has a populated (> 0) natural_cause
    # value for this jurisdiction/year/week.
    condition = ((eurostat_historic_df.jurisdiction == jurisdiction) &
                 (eurostat_historic_df.year == row_year) &
                 (eurostat_historic_df.week == row_week) &
                 (eurostat_historic_df.natural_cause > 0)
                 )
    if (condition.any() == False):
        # No populated row yet: locate the matching row and fill it in.
        query = "jurisdiction == '" + jurisdiction + "' & year == " + str(row_year) + " & week == " + str(row_week)
        index_EU = eurostat_historic_df.query(query).index[0]
        print("INSERT " + str(row_year) +" "+ str(row_week) +" "+ str(condition.any()) +" "+ str(index_EU))
        # NOTE(review): chained indexing (df[col][idx] = ...) can raise
        # SettingWithCopyWarning; df.loc[index_EU, 'natural_cause'] is safer.
        eurostat_historic_df['natural_cause'][index_EU] = df_england['natural_cause'][row]
    row += 1
#
#Update all data from mort_org
#Drop eveything related to UK in mort_org
# England/Scotland were already merged into 'United Kingdom' above, so their
# raw rows must not be processed again here.
mort_org_df.drop(mort_org_df[mort_org_df.CountryCode == 'GBRTENW'].index, inplace=True)
mort_org_df.drop(mort_org_df[mort_org_df.CountryCode == 'GBR_SCO'].index, inplace=True)
mort_org_df.reset_index(drop=True, inplace=True)
print('Processing EU -------------------------')
for index, row in mort_org_df.iterrows():
    print(row['jurisdiction'])
    jurisdiction = row['jurisdiction']
    row_year = row['Year']
    row_week = row['Week']
    row_natural_cause = row['DTotal']
    # True wherever EUROSTAT already carries a populated (> 0) value for
    # this jurisdiction/year/week; only missing cells get filled in.
    condition = ((eurostat_historic_df.jurisdiction == jurisdiction) &
                 (eurostat_historic_df.year == row_year) &
                 (eurostat_historic_df.week == row_week) &
                 (eurostat_historic_df.natural_cause > 0)
                 )
    if (condition.any() == False):
        query = "jurisdiction == '" + jurisdiction + "' & year == " + str(row_year) + " & week == " + str(row_week)
        index_EU = eurostat_historic_df.query(query).index[0]
        print("INSERT " + str(row_year) + " " + str(row_week) + " " + str(condition.any()) + " " + str(index_EU))
        # NOTE(review): chained indexing; df.loc[index_EU, 'natural_cause']
        # would avoid SettingWithCopyWarning.
        eurostat_historic_df['natural_cause'][index_EU] = row_natural_cause
eurostat_historic_df.to_csv (r'../data/EUROSTAT_historic.csv', header=True, index=False) | #
#Augment EUROSTAT historic stats with data from mortality.org
#
import pandas as pd
from datetime import date, timedelta
import datetime
import numpy as np
#Dictionaries and stuff
ccodes = ['AT', 'BE', 'BG', 'CH', 'CZ', 'DE', 'DK', 'EE', 'ES', 'FI', 'FR', 'HU', 'IS', 'IT', 'LI', 'LT', 'LU', 'LV', 'ME', 'NO', 'PT', 'RS', 'SE', 'SI', 'SK', 'UK', ]
ccodes_trans = {
'AUT': 'Austria',
'BEL': 'Belgium',
'BGR': 'Bulgaria',
'CH': 'Switzerland ',
'CZE': 'Czechia',
'DEUTNP': 'Germany',
'DNK': 'Denmark',
'EST': 'Estonia',
'ESP': 'Spain',
'FIN': 'Finland ',
'FRATNP': 'France',
'HUN': 'Hungary',
'ISL': 'Iceland',
'ITA': 'Italy',
'LI': 'Liechtenstein',
'LT': 'Lithuania',
'LUX': 'Luxembourg',
'LV': 'Latvia',
'ME': 'Montenegro ',
'NLD': 'Netherlands',
'NOR': 'Norway',
'PRT': 'Portugal',
'RS': 'Serbia',
'SWE': 'Sweden',
'SI': 'Slovenia',
'SVK': 'Slovakia',
'GBRTENW': 'England',
'GBR_SCO': 'Scotland',
'USA': 'United States'
}
def get_start_end_dates(year, week):
d = date(year, 1, 1)
if (d.weekday() <= 3):
d = d - timedelta(d.weekday())
else:
d = d + timedelta(7 - d.weekday())
dlt = timedelta(days=(week - 1) * 7)
return d + dlt, d + dlt + timedelta(days=6)
#Vars
start_year = 2010
end_year = 2019
weeks = 52
#Load the historic Eurostat file
eurostat_historic_df = pd.read_csv("../data/EUROSTAT_historic.csv")
#Load the mortality.org file and clean up, Drop the US and everything below 2010
mort_org_df = pd.read_csv("../data/historic-augment/stmf.csv")
mort_org_df.drop(mort_org_df[mort_org_df.CountryCode == 'USA'].index, inplace=True)
mort_org_df.drop(mort_org_df[mort_org_df.Sex == 'f'].index, inplace=True)
mort_org_df.drop(mort_org_df[mort_org_df.Sex == 'm'].index, inplace=True)
mort_org_df.drop(mort_org_df[mort_org_df.Year < start_year].index, inplace=True)
mort_org_df.drop(mort_org_df[mort_org_df.Year > end_year].index, inplace=True)
mort_org_df.reset_index(drop=True, inplace=True)
#Translate the country codes to actual names
totalrows = mort_org_df.shape[0] - 1
row = 0
while (row <= totalrows):
# Process the dataframe here
ccode = mort_org_df.at[row, 'CountryCode'] # Get the corresponding country name from the abr
country = ccodes_trans[ccode]
#print(country)
mort_org_df.loc[row,'jurisdiction'] = country
row +=1
#
# Update United Kingdom, needs combination of Scotland and England
#Combine England and Scotland into United Kingdom
df_scotland = mort_org_df[mort_org_df.jurisdiction == 'Scotland']
df_scotland.reset_index(drop=True, inplace=True)
df_england = mort_org_df[mort_org_df.jurisdiction == 'England']
df_england.reset_index(drop=True, inplace=True)
df_england['natural_cause'] = df_england['DTotal'] + df_scotland['DTotal']
#Add the two entities and append as United Kingdom
print('Processing UK ---------------------')
totalrows = df_england.shape[0] - 1
row = 0
jurisdiction = 'United Kingdom'
while (row <= totalrows):
row_year = df_england['Year'][row]
row_week = df_england['Week'][row]
condition = ((eurostat_historic_df.jurisdiction == jurisdiction) &
(eurostat_historic_df.year == row_year) &
(eurostat_historic_df.week == row_week) &
(eurostat_historic_df.natural_cause > 0)
)
if (condition.any() == False):
query = "jurisdiction == '" + jurisdiction + "' & year == " + str(row_year) + " & week == " + str(row_week)
index_EU = eurostat_historic_df.query(query).index[0]
print("INSERT " + str(row_year) +" "+ str(row_week) +" "+ str(condition.any()) +" "+ str(index_EU))
eurostat_historic_df['natural_cause'][index_EU] = df_england['natural_cause'][row]
row += 1
#
#Update all data from mort_org
#Drop eveything related to UK in mort_org
mort_org_df.drop(mort_org_df[mort_org_df.CountryCode == 'GBRTENW'].index, inplace=True)
mort_org_df.drop(mort_org_df[mort_org_df.CountryCode == 'GBR_SCO'].index, inplace=True)
mort_org_df.reset_index(drop=True, inplace=True)
print('Processing EU -------------------------')
for index, row in mort_org_df.iterrows():
print(row['jurisdiction'])
jurisdiction = row['jurisdiction']
row_year = row['Year']
row_week = row['Week']
row_natural_cause = row['DTotal']
condition = ((eurostat_historic_df.jurisdiction == jurisdiction) &
(eurostat_historic_df.year == row_year) &
(eurostat_historic_df.week == row_week) &
(eurostat_historic_df.natural_cause > 0)
)
if (condition.any() == False):
query = "jurisdiction == '" + jurisdiction + "' & year == " + str(row_year) + " & week == " + str(row_week)
index_EU = eurostat_historic_df.query(query).index[0]
print("INSERT " + str(row_year) + " " + str(row_week) + " " + str(condition.any()) + " " + str(index_EU))
eurostat_historic_df['natural_cause'][index_EU] = row_natural_cause
eurostat_historic_df.to_csv (r'../data/EUROSTAT_historic.csv', header=True, index=False) | en | 0.764892 | # #Augment EUROSTAT historic stats with data from mortality.org # #Dictionaries and stuff #Vars #Load the historic Eurostat file #Load the mortality.org file and clean up, Drop the US and everything below 2010 #Translate the country codes to actual names # Process the dataframe here # Get the corresponding country name from the abr #print(country) # # Update United Kingdom, needs combination of Scotland and England #Combine England and Scotland into United Kingdom #Add the two entities and append as United Kingdom # #Update all data from mort_org #Drop eveything related to UK in mort_org | 2.329056 | 2 |
majestic-monolith-django/shipping/urls.py | kokospapa8/majestic-monolith-django | 1 | 6614738 | from django.urls import path
from rest_framework.routers import DefaultRouter
from .views import (
ShippingTransportViewSet,
ShippingBatchViewSet,
ShippingItemViewSet,
TransportBatchesView,
TransportBatchesAddView,
TransportStartView,
TransportCompleteView,
BatchShippingitemsView,
BatchShippingitemsAddView
)
# DRF routers generate the standard CRUD routes for each viewset; their URL
# lists are appended to `urlpatterns` at the bottom of this module.
router_shippingitem = DefaultRouter()
router_shippingitem.register(
    r'shippingitems', ShippingItemViewSet, basename='shippingitem')
router_batch = DefaultRouter()
router_batch.register(r'batches', ShippingBatchViewSet, basename='batch')
router_transport = DefaultRouter()
router_transport.register(r'transports', ShippingTransportViewSet, basename='transport')
# Explicit action endpoints on transports and batches that fall outside the
# routers' CRUD conventions.
urlpatterns = [
    # transport
    path("transports/<uuid:uuid>/batches/",
         TransportBatchesView.as_view(), name="transport_batches"),
    path("transports/<uuid:uuid>/add/",
         TransportBatchesAddView.as_view(), name="transport_batches_add"),
    path("transports/<uuid:uuid>/start/",
         TransportStartView.as_view(), name="transport_batches_start"),
    path("transports/<uuid:uuid>/complete/",
         TransportCompleteView.as_view(), name="transport_batches_complete"),
    # batches
    path("batches/<str:alias>/shippingitems/",
         BatchShippingitemsView.as_view(), name="batch_shippingitems"),
    path("batches/<str:alias>/add/", BatchShippingitemsAddView.as_view(),
         name="batch_shippingitem_add"),
]
urlpatterns += router_shippingitem.urls
urlpatterns += router_batch.urls
urlpatterns += router_transport.urls
| from django.urls import path
from rest_framework.routers import DefaultRouter
from .views import (
ShippingTransportViewSet,
ShippingBatchViewSet,
ShippingItemViewSet,
TransportBatchesView,
TransportBatchesAddView,
TransportStartView,
TransportCompleteView,
BatchShippingitemsView,
BatchShippingitemsAddView
)
router_shippingitem = DefaultRouter()
router_shippingitem.register(
r'shippingitems', ShippingItemViewSet, basename='shippingitem')
router_batch = DefaultRouter()
router_batch.register(r'batches', ShippingBatchViewSet, basename='batch')
router_transport = DefaultRouter()
router_transport.register(r'transports', ShippingTransportViewSet, basename='transport')
urlpatterns = [
# transport
path("transports/<uuid:uuid>/batches/",
TransportBatchesView.as_view(), name="transport_batches"),
path("transports/<uuid:uuid>/add/",
TransportBatchesAddView.as_view(), name="transport_batches_add"),
path("transports/<uuid:uuid>/start/",
TransportStartView.as_view(), name="transport_batches_start"),
path("transports/<uuid:uuid>/complete/",
TransportCompleteView.as_view(), name="transport_batches_complete"),
# batches
path("batches/<str:alias>/shippingitems/",
BatchShippingitemsView.as_view(), name="batch_shippingitems"),
path("batches/<str:alias>/add/", BatchShippingitemsAddView.as_view(),
name="batch_shippingitem_add"),
]
urlpatterns += router_shippingitem.urls
urlpatterns += router_batch.urls
urlpatterns += router_transport.urls
| en | 0.778674 | # transport # batches | 2.16712 | 2 |
setup.py | Fhrozen/locata_python | 2 | 6614739 | """Setuptools for Locata Wrapper.
"""
#!/usr/bin/env python
from distutils.version import LooseVersion
from os import path
import pip
from setuptools import find_packages
from setuptools import setup
import sys
# Directory containing this setup.py; used so README.md resolves regardless
# of the current working directory.
mainpath = path.abspath(path.dirname(__file__))
# The README doubles as the PyPI long description.
with open(path.join(mainpath, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
setup(
    name = 'locata_wrapper',
    version = '0.1.0',
    description = 'Locata Wrapper: Tools for LOCATA Challenge in Python',
    long_description = long_description,
    long_description_content_type = 'text/markdown',
    url = 'http://github.com/audiofhrozen/locata_python',
    author = '<NAME>',
    author_email = '<EMAIL>',
    classifiers = [
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Science/Research',
        'Operating System :: POSIX :: Linux',
        'License :: OSI Approved :: Apache Software License',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    packages = find_packages(include = ['locata_wrapper*']),
    python_requires= '>=3.6',
    # Runtime dependencies.
    install_requires = [
        'librosa',
        'pandas>=0.24.0',
        'pathos>=0.2.0',
        'pymongo>=3.0.0',
        'python_speech_features>=0.6',
        'setuptools>=38.5.1',
        'sacred>=0.7.0',
        'scipy',
        'soundfile>=0.10.2',
        'PyYAML',
    ],
    setup_requires = [
        'numpy', 'pytest-runner'
    ],
    # Test-only extras: install with `pip install .[test]`.
    extras_require = {
        'test': [
            'ipdb',
            'pytest>=3.3.0',
            'pytest-pythonpath>=0.7.3',
            'pytest-cov>=2.7.1',
            'hacking>=1.1.0',
            'mock>=2.0.0',
            'autopep8>=1.3.3',
            'jsondiff'
        ]},
    # package_data={
    #     'sample': ['package_data.dat'],
    # }
    license='Apache Software License',
)
| """Setuptools for Locata Wrapper.
"""
#!/usr/bin/env python
from distutils.version import LooseVersion
from os import path
import pip
from setuptools import find_packages
from setuptools import setup
import sys
mainpath = path.abspath(path.dirname(__file__))
with open(path.join(mainpath, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name = 'locata_wrapper',
version = '0.1.0',
description = 'Locata Wrapper: Tools for LOCATA Challenge in Python',
long_description = long_description,
long_description_content_type = 'text/markdown',
url = 'http://github.com/audiofhrozen/locata_python',
author = '<NAME>',
author_email = '<EMAIL>',
classifiers = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Operating System :: POSIX :: Linux',
'License :: OSI Approved :: Apache Software License',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
packages = find_packages(include = ['locata_wrapper*']),
python_requires= '>=3.6',
install_requires = [
'librosa',
'pandas>=0.24.0',
'pathos>=0.2.0',
'pymongo>=3.0.0',
'python_speech_features>=0.6',
'setuptools>=38.5.1',
'sacred>=0.7.0',
'scipy',
'soundfile>=0.10.2',
'PyYAML',
],
setup_requires = [
'numpy', 'pytest-runner'
],
extras_require = {
'test': [
'ipdb',
'pytest>=3.3.0',
'pytest-pythonpath>=0.7.3',
'pytest-cov>=2.7.1',
'hacking>=1.1.0',
'mock>=2.0.0',
'autopep8>=1.3.3',
'jsondiff'
]},
# package_data={
# 'sample': ['package_data.dat'],
# }
license='Apache Software License',
)
| en | 0.182818 | Setuptools for Locata Wrapper. #!/usr/bin/env python # package_data={ # 'sample': ['package_data.dat'], # } | 1.243001 | 1 |
test/diffmerge/comparator_test.py | Manu343726/biicode-common | 17 | 6614740 | import unittest
import math
from biicode.common.diffmerge.compare import compare
from biicode.common.model.brl.cell_name import CellName
class Int(int):
    """int subclass exposing the similarity metric used by rename deduction."""

    def similarity(self, other):
        """Exponential-decay similarity in (0, 1]; exactly 1.0 for equal values."""
        distance = abs(self - other)
        return math.exp(-distance / 5.0)
class CompareTest(unittest.TestCase):
    """Tests for compare() and Changes.deduce_renames().

    Resources are Int values keyed by CellName; renames are deduced from the
    Int.similarity metric between deleted and created resources.
    """
    def test_deduce_renames(self):
        ''' 2 is modified from 2 to 22
        3 is deleted
        5 is renamed to 6 (5 deleted, 6 created)
        10 is created'''
        base_resources = {CellName('1'): Int(1), CellName('2'): Int(2),
                          CellName('3'): Int(3), CellName('4'): Int(4),
                          CellName('5'): Int(5)}
        other_resources = {CellName('1'): Int(1), CellName('2'): Int(22),
                           CellName('4'): Int(4), CellName('6'): Int(6),
                           CellName('10'): Int(10)}
        #compute changes without renames
        changes = compare(base_resources, other_resources)
        self.assertEqual({CellName('3'): 3, CellName('5'): 5}, changes.deleted)
        self.assertEqual({CellName('6'): Int(6), CellName('10'): 10}, changes.created)
        self.assertEqual({CellName('2'): (2, 22)}, changes.modified)
        self.assertEqual({}, changes.renames)
        #deduce renames
        changes.deduce_renames()
        #nothing changes
        self.assertEqual({CellName('3'): 3, CellName('5'): 5}, changes.deleted)
        self.assertEqual({CellName('6'): Int(6), CellName('10'): 10}, changes.created)
        self.assertEqual({CellName('2'): (2, 22)}, changes.modified)
        #but the renames field
        self.assertEqual({CellName('5'): CellName('6')}, changes.renames)
    def test_deduce_renames_multi_all_equal(self):
        '''2 is deleted
        3 is created with 2's contents
        4 is created with 2's contents
        2 is considered to be renamed to 4
        '''
        #FIXME: Conclusion is arbitrary. Last one to be processed with equal similarty degreee
        # is the one choosen. We might have to inform the user about this
        base_resources = {CellName('1'): Int(1), CellName('2'): Int(2)}
        other_resources = {CellName('1'): Int(1),
                           CellName('3'): Int(2), CellName('4'): Int(2)}
        #compute changes without renames
        changes = compare(base_resources, other_resources)
        changes.deduce_renames()
        #nothing changes
        self.assertEqual({CellName('2'): 2}, changes.deleted)
        self.assertEqual({CellName('3'): Int(2), CellName('4'): 2}, changes.created)
        self.assertEqual({}, changes.modified)
        self.assertEqual({CellName('2'): CellName('4')}, changes.renames)
    def test_deduce_renames_multi_different_values(self):
        '''2 is deleted
        3 is created with 3
        4 is created with 4
        2 is considered to be renamed to 3
        '''
        # 3 is numerically closer to 2 than 4 is, so similarity picks 3.
        base_resources = {CellName('1'): Int(1), CellName('2'): Int(2)}
        other_resources = {CellName('1'): Int(1),
                           CellName('3'): Int(3), CellName('4'): Int(4)}
        #compute changes without renames
        changes = compare(base_resources, other_resources)
        changes.deduce_renames()
        #nothing changes
        self.assertEqual({CellName('2'): 2}, changes.deleted)
        self.assertEqual({CellName('3'): Int(3), CellName('4'): 4}, changes.created)
        self.assertEqual({}, changes.modified)
        self.assertEqual({CellName('2'): CellName('3')}, changes.renames)
if __name__ == "__main__":
    # import sys;sys.argv = ['', 'Test.testName']
    # Run the full suite when this module is executed directly.
    unittest.main()
| import unittest
import math
from biicode.common.diffmerge.compare import compare
from biicode.common.model.brl.cell_name import CellName
class Int(int):
def similarity(self, other):
return math.exp(-abs(self - other) / 5.0)
class CompareTest(unittest.TestCase):
def test_deduce_renames(self):
''' 2 is modified from 2 to 22
3 is deleted
5 is renamed to 6 (5 deleted, 6 created)
10 is created'''
base_resources = {CellName('1'): Int(1), CellName('2'): Int(2),
CellName('3'): Int(3), CellName('4'): Int(4),
CellName('5'): Int(5)}
other_resources = {CellName('1'): Int(1), CellName('2'): Int(22),
CellName('4'): Int(4), CellName('6'): Int(6),
CellName('10'): Int(10)}
#compute changes without renames
changes = compare(base_resources, other_resources)
self.assertEqual({CellName('3'): 3, CellName('5'): 5}, changes.deleted)
self.assertEqual({CellName('6'): Int(6), CellName('10'): 10}, changes.created)
self.assertEqual({CellName('2'): (2, 22)}, changes.modified)
self.assertEqual({}, changes.renames)
#deduce renames
changes.deduce_renames()
#nothing changes
self.assertEqual({CellName('3'): 3, CellName('5'): 5}, changes.deleted)
self.assertEqual({CellName('6'): Int(6), CellName('10'): 10}, changes.created)
self.assertEqual({CellName('2'): (2, 22)}, changes.modified)
#but the renames field
self.assertEqual({CellName('5'): CellName('6')}, changes.renames)
def test_deduce_renames_multi_all_equal(self):
'''2 is deleted
3 is created with 2's contents
4 is created with 2's contents
2 is considered to be renamed to 4
'''
#FIXME: Conclusion is arbitrary. Last one to be processed with equal similarty degreee
# is the one choosen. We might have to inform the user about this
base_resources = {CellName('1'): Int(1), CellName('2'): Int(2)}
other_resources = {CellName('1'): Int(1),
CellName('3'): Int(2), CellName('4'): Int(2)}
#compute changes without renames
changes = compare(base_resources, other_resources)
changes.deduce_renames()
#nothing changes
self.assertEqual({CellName('2'): 2}, changes.deleted)
self.assertEqual({CellName('3'): Int(2), CellName('4'): 2}, changes.created)
self.assertEqual({}, changes.modified)
self.assertEqual({CellName('2'): CellName('4')}, changes.renames)
def test_deduce_renames_multi_different_values(self):
'''2 is deleted
3 is created with 3
4 is created with 4
2 is considered to be renamed to 3
'''
base_resources = {CellName('1'): Int(1), CellName('2'): Int(2)}
other_resources = {CellName('1'): Int(1),
CellName('3'): Int(3), CellName('4'): Int(4)}
#compute changes without renames
changes = compare(base_resources, other_resources)
changes.deduce_renames()
#nothing changes
self.assertEqual({CellName('2'): 2}, changes.deleted)
self.assertEqual({CellName('3'): Int(3), CellName('4'): 4}, changes.created)
self.assertEqual({}, changes.modified)
self.assertEqual({CellName('2'): CellName('3')}, changes.renames)
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| en | 0.961169 | 2 is modified from 2 to 22 3 is deleted 5 is renamed to 6 (5 deleted, 6 created) 10 is created #compute changes without renames #deduce renames #nothing changes #but the renames field 2 is deleted 3 is created with 2's contents 4 is created with 2's contents 2 is considered to be renamed to 4 #FIXME: Conclusion is arbitrary. Last one to be processed with equal similarty degreee # is the one choosen. We might have to inform the user about this #compute changes without renames #nothing changes 2 is deleted 3 is created with 3 4 is created with 4 2 is considered to be renamed to 3 #compute changes without renames #nothing changes # import sys;sys.argv = ['', 'Test.testName'] | 2.724773 | 3 |
Discord Sentiment Analysis Bot/discord_bot_example.py | AymaneZizi/Tutorials | 559 | 6614741 | <gh_stars>100-1000
import discord
# create discord client
client = discord.Client()
# on message event-handler: echoes every received message back to its channel
@client.event
async def on_message(message):
    # ignore if the bot is the author (prevents the bot replying to itself)
    if message.author == client.user:
        return
    await message.channel.send(message.content)
# run our bot
# NOTE(review): '<your token>' must be replaced with a real bot token; keep
# real tokens out of source control.
client.run('<your token>')
| import discord
# create discord client
client = discord.Client()
# on message event-handler
@client.event
async def on_message(message):
# ignore if the bot is the author
if message.author == client.user:
return
await message.channel.send(message.content)
# run our bot
client.run('<your token>') | en | 0.390005 | # create discord client # on message event-handler # ignore if the bot is the author # run our bot | 2.846972 | 3 |
gallery/urls.py | mattmc318/coolwater-creations | 0 | 6614742 | from django.conf.urls import url
from . import views
from cwc.settings import STAGE
app_name = 'gallery'

urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^gallery$', views.gallery, name='gallery'),
    url(r'^archive$', views.archive, name='archive'),
    url(r'^new_product$', views.new_product, name='new_product'),
    url(r'^upload$', views.upload, name='upload'),
    url(r'^product$', views.product_page, name='product_page'),
    url(r'^edit_product$', views.edit_product, name='edit_product'),
    url(r'^delete_product$', views.delete_product, name='delete_product'),
    url(r'^cart$', views.cart, name='cart'),
    url(r'^add_cart$', views.add_cart, name='add_cart'),
    url(r'^remove_cart$', views.remove_cart, name='remove_cart'),
    url(r'^checkout$', views.checkout, name='checkout'),
    url(r'^on_approve$', views.on_approve, name='on_approve'),
    url(r'^order$', views.order, name='order'),
    url(r'^orders$', views.orders, name='orders'),
    url(r'^mark_shipped$', views.mark_shipped, name='mark_shipped'),
    # BUG FIX: this pattern was named 'mark_shipped' (copy-paste from the
    # line above), which made reverse('delete_sales') fail and shadowed the
    # real mark_shipped route in reverse() lookups.
    url(r'^delete_sales$', views.delete_sales, name='delete_sales'),
    url(r'^unsubscribe$', views.unsubscribe, name='unsubscribe'),
]

######################
# FOR DEBUG USE ONLY #
######################
# Maintenance endpoints that must never be exposed in production.
if STAGE != 'production':
    urlpatterns += [
        url(r'^create_gallery_pics$', views.create_gallery_pics, name='create_gallery_pics'),
        url(r'^clear_all_sessions$', views.clear_all_sessions, name='clear_all_sessions'),
        url(r'^clear_all_carts$', views.clear_all_carts, name='clear_all_carts'),
    ]
| from django.conf.urls import url
from . import views
from cwc.settings import STAGE
app_name = 'gallery'
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^gallery$', views.gallery, name='gallery'),
url(r'^archive$', views.archive, name='archive'),
url(r'^new_product$', views.new_product, name='new_product'),
url(r'^upload$', views.upload, name='upload'),
url(r'^product$', views.product_page, name='product_page'),
url(r'^edit_product$', views.edit_product, name='edit_product'),
url(r'^delete_product$', views.delete_product, name='delete_product'),
url(r'^cart$', views.cart, name='cart'),
url(r'^add_cart$', views.add_cart, name='add_cart'),
url(r'^remove_cart$', views.remove_cart, name='remove_cart'),
url(r'^checkout$', views.checkout, name='checkout'),
url(r'^on_approve$', views.on_approve, name='on_approve'),
url(r'^order$', views.order, name='order'),
url(r'^orders$', views.orders, name='orders'),
url(r'^mark_shipped$', views.mark_shipped, name='mark_shipped'),
url(r'^delete_sales$', views.delete_sales, name='mark_shipped'),
url(r'^unsubscribe$', views.unsubscribe, name='unsubscribe'),
]
######################
# FOR DEBUG USE ONLY #
######################
if STAGE != 'production':
urlpatterns += [
url(r'^create_gallery_pics$', views.create_gallery_pics, name='create_gallery_pics'),
url(r'^clear_all_sessions$', views.clear_all_sessions, name='clear_all_sessions'),
url(r'^clear_all_carts$', views.clear_all_carts, name='clear_all_carts'),
]
| de | 0.753896 | ###################### # FOR DEBUG USE ONLY # ###################### | 1.649208 | 2 |
docs/app.py | Archmonger/idom | 0 | 6614743 | import os
from logging import getLogger
from pathlib import Path
from sanic import Sanic, response
from idom.server.sanic import PerClientStateServer
from idom.widgets import multiview
from .examples import load_examples
# Directory containing this module; the built docs live under HERE / "build".
HERE = Path(__file__).parent
IDOM_MODEL_SERVER_URL_PREFIX = "/_idom"
logger = getLogger(__name__)
# NOTE(review): duplicate of the assignment two lines above -- redundant.
IDOM_MODEL_SERVER_URL_PREFIX = "/_idom"
def run():
    """Serve the documentation site together with its live IDOM examples.

    Host/port/worker/debug settings come from environment variables
    (PORT, WEB_CONCURRENCY, DEBUG) so the same entry point works locally
    and on a PaaS; sensible defaults are used when they are unset.
    """
    app = make_app()
    # Attach the IDOM model server to the Sanic app; example models are
    # served under IDOM_MODEL_SERVER_URL_PREFIX rather than at the root.
    PerClientStateServer(
        make_examples_component(),
        {
            "redirect_root_to_index": False,
            "url_prefix": IDOM_MODEL_SERVER_URL_PREFIX,
        },
        app,
    )
    app.run(
        host="0.0.0.0",
        port=int(os.environ.get("PORT", 5000)),
        workers=int(os.environ.get("WEB_CONCURRENCY", 1)),
        # DEBUG is expected to be "0" or "1".
        debug=bool(int(os.environ.get("DEBUG", "0"))),
    )
def make_app():
    """Create the Sanic app serving the built docs as static files under /docs."""
    app = Sanic(__name__)
    # Sphinx build output lives next to this file in ./build.
    app.static("/docs", str(HERE / "build"))
    @app.route("/")
    async def forward_to_index(request):
        # The site root simply redirects into the static documentation.
        return response.redirect("/docs/index.html")
    return app
def make_examples_component():
    """Mount every documentation example into one multiview component."""
    mount, component = multiview()
    for name, example in load_examples():
        mount.add(name, example)
    return component
| import os
from logging import getLogger
from pathlib import Path
from sanic import Sanic, response
from idom.server.sanic import PerClientStateServer
from idom.widgets import multiview
from .examples import load_examples
HERE = Path(__file__).parent
IDOM_MODEL_SERVER_URL_PREFIX = "/_idom"
logger = getLogger(__name__)
IDOM_MODEL_SERVER_URL_PREFIX = "/_idom"
def run():
app = make_app()
PerClientStateServer(
make_examples_component(),
{
"redirect_root_to_index": False,
"url_prefix": IDOM_MODEL_SERVER_URL_PREFIX,
},
app,
)
app.run(
host="0.0.0.0",
port=int(os.environ.get("PORT", 5000)),
workers=int(os.environ.get("WEB_CONCURRENCY", 1)),
debug=bool(int(os.environ.get("DEBUG", "0"))),
)
def make_app():
app = Sanic(__name__)
app.static("/docs", str(HERE / "build"))
@app.route("/")
async def forward_to_index(request):
return response.redirect("/docs/index.html")
return app
def make_examples_component():
mount, component = multiview()
for example_name, example_component in load_examples():
mount.add(example_name, example_component)
return component
| none | 1 | 2.070605 | 2 | |
Extra/compression.py | ekunnii/APPIAN | 1 | 6614744 | <gh_stars>1-10
import nipype.interfaces.io as nio
import nipype.interfaces.utility as niu
import nipype.algorithms.misc as misc
from nipype.interfaces.utility import Function
from nipype.interfaces.base import (TraitedSpec, File, traits, InputMultiPath,
BaseInterface, OutputMultiPath, BaseInterfaceInputSpec, isdefined)
from nipype.interfaces.base import CommandLine, CommandLineInputSpec
from nipype.interfaces.minc.minc import Resample, ResampleOutputSpec, ResampleInputSpec
from time import gmtime, strftime
import gzip
import shutil
import os
import re
class gzipOutput(TraitedSpec):
    """Output trait spec for ``gzipCommand``: path of the produced .gz file."""

    out_file = File(argstr="%s", desc="compressed file")
class gzipInput(CommandLineInputSpec):
    """Input trait spec for ``gzipCommand``."""

    # Optional: generated as in_file + '.gz' when left undefined.
    out_file = File( argstr="%s", position=-1, desc="compressed")
    in_file= File(exists=True, argstr="%s", position=-2, desc="input file")
class gzipCommand(BaseInterface):
    """Gzip-compress ``in_file`` to ``in_file + '.gz'`` and delete the original."""

    input_spec = gzipInput
    output_spec = gzipOutput

    def _run_interface(self, runtime):
        self.inputs.out_file = self._gen_output()
        try:
            with open(self.inputs.in_file, 'rb') as f_in, gzip.open(self.inputs.out_file, 'wb') as f_out:
                shutil.copyfileobj(f_in, f_out)
            # Only remove the source once the compressed copy exists.
            if os.path.exists(self.inputs.out_file):
                os.remove(self.inputs.in_file)
        # BUG FIX: file operations raise OSError, which the previous
        # ``except RuntimeError`` never caught.
        except (RuntimeError, OSError):
            print("Error: Could not gzip file ", self.inputs.in_file)
            exit(1)
        return runtime

    def _list_outputs(self):
        outputs = self.output_spec().get()
        if not isdefined(self.inputs.out_file):
            self.inputs.out_file = self._gen_output()
        outputs["out_file"] = self.inputs.out_file
        return outputs

    def _gen_output(self):
        # Compressed file sits next to the input, with a '.gz' suffix.
        return self.inputs.in_file + '.gz'

    def _parse_inputs(self, skip=None):
        if skip is None:
            skip = []
        if not isdefined(self.inputs.out_file):
            # BUG FIX: _gen_output() takes no argument; passing in_file
            # raised TypeError whenever out_file was left undefined.
            self.inputs.out_file = self._gen_output()
        return super(gzipCommand, self)._parse_inputs(skip=skip)
class gunzipOutput(TraitedSpec):
    """Output trait spec for ``gunzipCommand``: path of the decompressed file."""

    out_file = File(argstr="%s", desc="uncompressed file")
class gunzipInput(CommandLineInputSpec):
    """Input trait spec for ``gunzipCommand``."""

    # Optional: generated by stripping the '.gz' suffix when left undefined.
    out_file = File( argstr="%s", position=-1, desc="uncompressed")
    in_file= File(exists=True, argstr="%s", position=-2, desc="compressed input file")
class gunzipCommand(BaseInterface):
    """Decompress a .gz ``in_file`` in place and delete the compressed original."""

    # BUG FIX: previously pointed at gzipInput/gzipOutput, leaving the
    # gunzip trait specs above as dead code. Field names are identical,
    # so callers are unaffected.
    input_spec = gunzipInput
    output_spec = gunzipOutput

    def _run_interface(self, runtime):
        self.inputs.out_file = self._gen_output()
        try:
            with gzip.open(self.inputs.in_file, 'rb') as f_in, open(self.inputs.out_file, 'wb') as f_out:
                shutil.copyfileobj(f_in, f_out)
            # Only remove the compressed source once the copy exists.
            if os.path.exists(self.inputs.out_file):
                os.remove(self.inputs.in_file)
        # BUG FIX: file operations raise OSError, which ``except RuntimeError``
        # never caught; also the message wrongly said "gzip".
        except (RuntimeError, OSError):
            print("Error: Could not gunzip file ", self.inputs.in_file)
            exit(1)
        return runtime

    def _list_outputs(self):
        outputs = self.output_spec().get()
        if not isdefined(self.inputs.out_file):
            self.inputs.out_file = self._gen_output()
        outputs["out_file"] = self.inputs.out_file
        return outputs

    def _gen_output(self):
        # BUG FIX: the pattern '.gz' had an unescaped dot and no anchor, so it
        # could strip from the middle of a name; match only a trailing '.gz'.
        return re.sub(r'\.gz$', '', self.inputs.in_file)

    def _parse_inputs(self, skip=None):
        if skip is None:
            skip = []
        if not isdefined(self.inputs.out_file):
            # BUG FIX: _gen_output() takes no argument.
            self.inputs.out_file = self._gen_output()
        # BUG FIX: super() was called with gzipCommand, which is not an
        # ancestor of this class and raised TypeError at runtime.
        return super(gunzipCommand, self)._parse_inputs(skip=skip)
class gzipResampleCommand(BaseInterface):
    """Resample ``in_file`` with MINC ``Resample`` and gzip the result."""

    input_spec = ResampleInputSpec
    output_spec = ResampleOutputSpec

    def _run_interface(self, runtime):
        # BUG FIX: the original built the temp name with ``np.random.randint``
        # although numpy was never imported (NameError); use uuid instead.
        import uuid
        temp_fn = "/tmp/tmp_mnc_" + strftime("%Y%m%d%H%M%S", gmtime()) + uuid.uuid4().hex + ".mnc"
        try:
            resample = Resample()
            # NOTE(review): this aliases self.inputs, so setting out_file below
            # also mutates self.inputs.out_file — confirm that is intended.
            resample.inputs = self.inputs
            resample.inputs.out_file = temp_fn
            # BUG FIX: the resample step was configured but never executed.
            resample.run()
        except (RuntimeError, OSError):
            print("Error: Could not resample file ", self.inputs.in_file)
            exit(1)
        try:
            # Renamed from ``gzip`` so the module-level gzip import is not shadowed.
            compressor = gzipCommand()
            compressor.inputs.in_file = resample.inputs.out_file
            compressor.run()
        except (RuntimeError, OSError):
            print("Error: After resampling, could not gzip file ", resample.inputs.out_file)
            exit(1)
        self.inputs.out_file = compressor.inputs.out_file
        return runtime

    def _list_outputs(self):
        outputs = self.output_spec().get()
        outputs["out_file"] = self.inputs.out_file
        return outputs

    def _gen_output(self):
        return self.inputs.in_file + '.gz'

    def _parse_inputs(self, skip=None):
        if skip is None:
            skip = []
        return super(gzipResampleCommand, self)._parse_inputs(skip=skip)
| import nipype.interfaces.io as nio
import nipype.interfaces.utility as niu
import nipype.algorithms.misc as misc
from nipype.interfaces.utility import Function
from nipype.interfaces.base import (TraitedSpec, File, traits, InputMultiPath,
BaseInterface, OutputMultiPath, BaseInterfaceInputSpec, isdefined)
from nipype.interfaces.base import CommandLine, CommandLineInputSpec
from nipype.interfaces.minc.minc import Resample, ResampleOutputSpec, ResampleInputSpec
from time import gmtime, strftime
import gzip
import shutil
import os
import re
class gzipOutput(TraitedSpec):
    """Output trait spec for ``gzipCommand``: path of the produced .gz file."""

    out_file = File(argstr="%s", desc="compressed file")
class gzipInput(CommandLineInputSpec):
    """Input trait spec for ``gzipCommand``."""

    # Optional: generated as in_file + '.gz' when left undefined.
    out_file = File( argstr="%s", position=-1, desc="compressed")
    in_file= File(exists=True, argstr="%s", position=-2, desc="input file")
class gzipCommand(BaseInterface):
    """Gzip-compress ``in_file`` to ``in_file + '.gz'`` and delete the original."""

    input_spec = gzipInput
    output_spec = gzipOutput

    def _run_interface(self, runtime):
        self.inputs.out_file = self._gen_output()
        try:
            with open(self.inputs.in_file, 'rb') as f_in, gzip.open(self.inputs.out_file, 'wb') as f_out:
                shutil.copyfileobj(f_in, f_out)
            # Only remove the source once the compressed copy exists.
            if os.path.exists(self.inputs.out_file):
                os.remove(self.inputs.in_file)
        # BUG FIX: file operations raise OSError, which the previous
        # ``except RuntimeError`` never caught.
        except (RuntimeError, OSError):
            print("Error: Could not gzip file ", self.inputs.in_file)
            exit(1)
        return runtime

    def _list_outputs(self):
        outputs = self.output_spec().get()
        if not isdefined(self.inputs.out_file):
            self.inputs.out_file = self._gen_output()
        outputs["out_file"] = self.inputs.out_file
        return outputs

    def _gen_output(self):
        # Compressed file sits next to the input, with a '.gz' suffix.
        return self.inputs.in_file + '.gz'

    def _parse_inputs(self, skip=None):
        if skip is None:
            skip = []
        if not isdefined(self.inputs.out_file):
            # BUG FIX: _gen_output() takes no argument; passing in_file
            # raised TypeError whenever out_file was left undefined.
            self.inputs.out_file = self._gen_output()
        return super(gzipCommand, self)._parse_inputs(skip=skip)
class gunzipOutput(TraitedSpec):
    """Output trait spec for ``gunzipCommand``: path of the decompressed file."""

    out_file = File(argstr="%s", desc="uncompressed file")
class gunzipInput(CommandLineInputSpec):
    """Input trait spec for ``gunzipCommand``."""

    # Optional: generated by stripping the '.gz' suffix when left undefined.
    out_file = File( argstr="%s", position=-1, desc="uncompressed")
    in_file= File(exists=True, argstr="%s", position=-2, desc="compressed input file")
class gunzipCommand(BaseInterface):
    """Decompress a .gz ``in_file`` in place and delete the compressed original."""

    # BUG FIX: previously pointed at gzipInput/gzipOutput, leaving the
    # gunzip trait specs above as dead code. Field names are identical,
    # so callers are unaffected.
    input_spec = gunzipInput
    output_spec = gunzipOutput

    def _run_interface(self, runtime):
        self.inputs.out_file = self._gen_output()
        try:
            with gzip.open(self.inputs.in_file, 'rb') as f_in, open(self.inputs.out_file, 'wb') as f_out:
                shutil.copyfileobj(f_in, f_out)
            # Only remove the compressed source once the copy exists.
            if os.path.exists(self.inputs.out_file):
                os.remove(self.inputs.in_file)
        # BUG FIX: file operations raise OSError, which ``except RuntimeError``
        # never caught; also the message wrongly said "gzip".
        except (RuntimeError, OSError):
            print("Error: Could not gunzip file ", self.inputs.in_file)
            exit(1)
        return runtime

    def _list_outputs(self):
        outputs = self.output_spec().get()
        if not isdefined(self.inputs.out_file):
            self.inputs.out_file = self._gen_output()
        outputs["out_file"] = self.inputs.out_file
        return outputs

    def _gen_output(self):
        # BUG FIX: the pattern '.gz' had an unescaped dot and no anchor, so it
        # could strip from the middle of a name; match only a trailing '.gz'.
        return re.sub(r'\.gz$', '', self.inputs.in_file)

    def _parse_inputs(self, skip=None):
        if skip is None:
            skip = []
        if not isdefined(self.inputs.out_file):
            # BUG FIX: _gen_output() takes no argument.
            self.inputs.out_file = self._gen_output()
        # BUG FIX: super() was called with gzipCommand, which is not an
        # ancestor of this class and raised TypeError at runtime.
        return super(gunzipCommand, self)._parse_inputs(skip=skip)
class gzipResampleCommand(BaseInterface):
    """Resample ``in_file`` with MINC ``Resample`` and gzip the result."""

    input_spec = ResampleInputSpec
    output_spec = ResampleOutputSpec

    def _run_interface(self, runtime):
        # BUG FIX: the original built the temp name with ``np.random.randint``
        # although numpy was never imported (NameError); use uuid instead.
        import uuid
        temp_fn = "/tmp/tmp_mnc_" + strftime("%Y%m%d%H%M%S", gmtime()) + uuid.uuid4().hex + ".mnc"
        try:
            resample = Resample()
            # NOTE(review): this aliases self.inputs, so setting out_file below
            # also mutates self.inputs.out_file — confirm that is intended.
            resample.inputs = self.inputs
            resample.inputs.out_file = temp_fn
            # BUG FIX: the resample step was configured but never executed.
            resample.run()
        except (RuntimeError, OSError):
            print("Error: Could not resample file ", self.inputs.in_file)
            exit(1)
        try:
            # Renamed from ``gzip`` so the module-level gzip import is not shadowed.
            compressor = gzipCommand()
            compressor.inputs.in_file = resample.inputs.out_file
            compressor.run()
        except (RuntimeError, OSError):
            print("Error: After resampling, could not gzip file ", resample.inputs.out_file)
            exit(1)
        self.inputs.out_file = compressor.inputs.out_file
        return runtime

    def _list_outputs(self):
        outputs = self.output_spec().get()
        outputs["out_file"] = self.inputs.out_file
        return outputs

    def _gen_output(self):
        return self.inputs.in_file + '.gz'

    def _parse_inputs(self, skip=None):
        if skip is None:
            skip = []
        return super(gzipResampleCommand, self)._parse_inputs(skip=skip)
Chat/model/Seq2Seq/model.py | DengBoCong/NLP-Examples | 1 | 6614745 | import tensorflow as tf
import config.getConfig as getConfig
import common.attention as attention
# Hyper-parameters are read from the seq2seq INI configuration file.
# BUG FIX: removed the dead ``config = {}`` that was immediately overwritten.
config = getConfig.get_config_ini('config/ini/seq2seq.ini')

vocab_inp_size = config['enc_vocab_size']  # encoder vocabulary size
vocab_tar_size = config['dec_vocab_size']  # decoder vocabulary size
embedding_dim = config['embedding_dim']
units = config['layer_size']               # GRU hidden units
BATCH_SIZE = config['batch_size']
class Encoder(tf.keras.Model):
    """GRU encoder: token ids -> (per-step outputs, final hidden state)."""

    def __init__(self, vocab_size, embedding_dim, enc_units, batch_sz):
        super(Encoder, self).__init__()
        self.batch_sz = batch_sz
        self.enc_units = enc_units
        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
        self.gru = tf.keras.layers.GRU(
            self.enc_units,
            return_sequences=True,
            return_state=True,
            recurrent_initializer='glorot_uniform')

    def call(self, x, hidden):
        embedded = self.embedding(x)
        outputs, final_state = self.gru(embedded, initial_state=hidden)
        return outputs, final_state

    def initialize_hidden_state(self):
        # Zero state of shape (batch, units) for the first encoder step.
        return tf.zeros((self.batch_sz, self.enc_units))
class Decoder(tf.keras.Model):
    """GRU decoder with Bahdanau attention over the encoder outputs."""

    def __init__(self, vocab_size, embedding_dim, dec_units, batch_sz):
        super(Decoder, self).__init__()
        self.batch_sz = batch_sz
        self.dec_units = dec_units
        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
        self.gru = tf.keras.layers.GRU(
            self.dec_units,
            return_sequences=True,
            return_state=True,
            recurrent_initializer='glorot_uniform')
        self.fc = tf.keras.layers.Dense(vocab_size)
        self.attention = attention.BahdanauAttention(self.dec_units)

    def call(self, x, hidden, enc_output):
        # Attend over encoder outputs using the previous decoder state.
        context_vector, attention_weights = self.attention(hidden, enc_output)
        embedded = self.embedding(x)
        # Prepend the context vector to the embedded input token.
        gru_input = tf.concat([tf.expand_dims(context_vector, 1), embedded], axis=-1)
        output, state = self.gru(gru_input)
        flat = tf.reshape(output, (-1, output.shape[2]))
        logits = self.fc(flat)
        return logits, state, attention_weights
# Module-level singletons shared by train_step below.
encoder = Encoder(vocab_inp_size, embedding_dim, units, BATCH_SIZE)
decoder = Decoder(vocab_tar_size, embedding_dim, units, BATCH_SIZE)
optimizer = tf.keras.optimizers.Adam()
# from_logits=True: the decoder's final Dense layer applies no softmax.
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
def loss_function(real, pred):
    """Masked sparse-categorical cross-entropy; padding tokens (id 0) are ignored."""
    per_token_loss = loss_object(real, pred)
    pad_mask = tf.cast(tf.math.logical_not(tf.math.equal(real, 0)),
                       dtype=per_token_loss.dtype)
    return tf.reduce_mean(per_token_loss * pad_mask)
# Bundles the optimizer and both networks so training can be saved/resumed.
checkpoint = tf.train.Checkpoint(optimizer=optimizer, encoder=encoder, decoder=decoder)

# @tf.function
def train_step(inp, targ, targ_lang, enc_hidden):
    """Run one teacher-forced training step; return the mean per-step batch loss.

    Args:
        inp: encoder input batch of token ids.
        targ: target batch of token ids (time axis is dimension 1).
        targ_lang: target-language tokenizer providing word_index['start'].
        enc_hidden: initial encoder hidden state.
    """
    loss = 0
    with tf.GradientTape() as tape:
        enc_output, enc_hidden = encoder(inp, enc_hidden)
        dec_hidden = enc_hidden
        # Decoding starts from the 'start' token for every sequence in the batch.
        dec_input = tf.expand_dims([targ_lang.word_index['start']] * BATCH_SIZE, 1)
        for t in range(1, targ.shape[1]):
            predictions, dec_hidden, _ = decoder(dec_input, dec_hidden, enc_output)
            loss += loss_function(targ[:, t], predictions)
            # Teacher forcing: feed the ground-truth token as the next input.
            dec_input = tf.expand_dims(targ[:, t], 1)
    batch_loss = (loss / int(targ.shape[1]))
    variables = encoder.trainable_variables + decoder.trainable_variables
    gradients = tape.gradient(loss, variables)
    optimizer.apply_gradients(zip(gradients, variables))
    return batch_loss
| import tensorflow as tf
import config.getConfig as getConfig
import common.attention as attention
# Hyper-parameters are read from the seq2seq INI configuration file.
# BUG FIX: removed the dead ``config = {}`` that was immediately overwritten.
config = getConfig.get_config_ini('config/ini/seq2seq.ini')

vocab_inp_size = config['enc_vocab_size']  # encoder vocabulary size
vocab_tar_size = config['dec_vocab_size']  # decoder vocabulary size
embedding_dim = config['embedding_dim']
units = config['layer_size']               # GRU hidden units
BATCH_SIZE = config['batch_size']
class Encoder(tf.keras.Model):
    """GRU encoder: token ids -> (per-step outputs, final hidden state)."""

    def __init__(self, vocab_size, embedding_dim, enc_units, batch_sz):
        super(Encoder, self).__init__()
        self.batch_sz = batch_sz
        self.enc_units = enc_units
        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
        self.gru = tf.keras.layers.GRU(
            self.enc_units,
            return_sequences=True,
            return_state=True,
            recurrent_initializer='glorot_uniform')

    def call(self, x, hidden):
        embedded = self.embedding(x)
        outputs, final_state = self.gru(embedded, initial_state=hidden)
        return outputs, final_state

    def initialize_hidden_state(self):
        # Zero state of shape (batch, units) for the first encoder step.
        return tf.zeros((self.batch_sz, self.enc_units))
class Decoder(tf.keras.Model):
    """GRU decoder with Bahdanau attention over the encoder outputs."""

    def __init__(self, vocab_size, embedding_dim, dec_units, batch_sz):
        super(Decoder, self).__init__()
        self.batch_sz = batch_sz
        self.dec_units = dec_units
        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
        self.gru = tf.keras.layers.GRU(
            self.dec_units,
            return_sequences=True,
            return_state=True,
            recurrent_initializer='glorot_uniform')
        self.fc = tf.keras.layers.Dense(vocab_size)
        self.attention = attention.BahdanauAttention(self.dec_units)

    def call(self, x, hidden, enc_output):
        # Attend over encoder outputs using the previous decoder state.
        context_vector, attention_weights = self.attention(hidden, enc_output)
        embedded = self.embedding(x)
        # Prepend the context vector to the embedded input token.
        gru_input = tf.concat([tf.expand_dims(context_vector, 1), embedded], axis=-1)
        output, state = self.gru(gru_input)
        flat = tf.reshape(output, (-1, output.shape[2]))
        logits = self.fc(flat)
        return logits, state, attention_weights
# Module-level singletons shared by train_step below.
encoder = Encoder(vocab_inp_size, embedding_dim, units, BATCH_SIZE)
decoder = Decoder(vocab_tar_size, embedding_dim, units, BATCH_SIZE)
optimizer = tf.keras.optimizers.Adam()
# from_logits=True: the decoder's final Dense layer applies no softmax.
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
def loss_function(real, pred):
    """Masked sparse-categorical cross-entropy; padding tokens (id 0) are ignored."""
    per_token_loss = loss_object(real, pred)
    pad_mask = tf.cast(tf.math.logical_not(tf.math.equal(real, 0)),
                       dtype=per_token_loss.dtype)
    return tf.reduce_mean(per_token_loss * pad_mask)
# Bundles the optimizer and both networks so training can be saved/resumed.
checkpoint = tf.train.Checkpoint(optimizer=optimizer, encoder=encoder, decoder=decoder)

# @tf.function
def train_step(inp, targ, targ_lang, enc_hidden):
    """Run one teacher-forced training step; return the mean per-step batch loss.

    Args:
        inp: encoder input batch of token ids.
        targ: target batch of token ids (time axis is dimension 1).
        targ_lang: target-language tokenizer providing word_index['start'].
        enc_hidden: initial encoder hidden state.
    """
    loss = 0
    with tf.GradientTape() as tape:
        enc_output, enc_hidden = encoder(inp, enc_hidden)
        dec_hidden = enc_hidden
        # Decoding starts from the 'start' token for every sequence in the batch.
        dec_input = tf.expand_dims([targ_lang.word_index['start']] * BATCH_SIZE, 1)
        for t in range(1, targ.shape[1]):
            predictions, dec_hidden, _ = decoder(dec_input, dec_hidden, enc_output)
            loss += loss_function(targ[:, t], predictions)
            # Teacher forcing: feed the ground-truth token as the next input.
            dec_input = tf.expand_dims(targ[:, t], 1)
    batch_loss = (loss / int(targ.shape[1]))
    variables = encoder.trainable_variables + decoder.trainable_variables
    gradients = tape.gradient(loss, variables)
    optimizer.apply_gradients(zip(gradients, variables))
    return batch_loss
| en | 0.215539 | # @tf.function | 2.316556 | 2 |
e2e-test/seleniume2e.py | elastest/elastest-bigdata-service | 0 | 6614746 | ######################
# Author: <NAME>
# 2/3/2019 fixed by <NAME>
######################
# End-to-end test: drives the ElasTest UI with Selenium to create a project,
# create a TJob that runs a Spark word-count via the EBS support service,
# execute it, and poll the result banner until the TJob finishes.
import time
import sys
import os
import selenium
from selenium import webdriver

# TODO: Substitute timers with webdriverwaits.

url = sys.argv[1]
projectname = 'deleteme'
tjobname = 'deletethisproject'
tjobimage = 'elastest/ebs-spark'
commands = """
git clone https://github.com/elastest/demo-projects.git
cd demo-projects/ebs-test
mvn -q package
rm -f big.txt
wget -q https://norvig.com/big.txt
hadoop fs -rmr /out.txt
hadoop fs -rm /big.txt
hadoop fs -copyFromLocal big.txt /big.txt
spark-submit --class org.sparkexample.WordCountTask --master spark://sparkmaster:7077 /demo-projects/ebs-test/target/hadoopWordCount-1.0-SNAPSHOT.jar /big.txt
hadoop fs -getmerge /out.txt ./out.txt
head -20 out.txt
"""

# setup Chrome WebDriver (headless; sandbox disabled for containerized runs)
options = webdriver.ChromeOptions()
options.add_argument('headless')
options.add_argument('--no-sandbox')
capabilities = options.to_capabilities()

eusUrl = os.environ['ET_EUS_API']
print("EUS URL is: " + str(eusUrl))
driver = webdriver.Remote(command_executor=eusUrl, desired_capabilities=capabilities)
driver.get(url)

# create new project
time.sleep(5)
element = driver.find_element_by_xpath("//button[contains(string(), 'New Project')]")
element.click()
time.sleep(5)
driver.find_element_by_name("project.name").send_keys(projectname)
driver.find_element_by_xpath("//button[contains(string(), 'SAVE')]").click()
time.sleep(5)

# create new tjob
driver.find_element_by_xpath("//button[contains(string(), 'New TJob')]").click()
time.sleep(5)
driver.find_element_by_name("tJobName").send_keys(tjobname)
# driver.find_element_by_xpath('//*[@id="mat-select-0"]/div/div[1]/span').click()
driver.find_element_by_class_name("mat-select-trigger").click()
driver.find_element_by_xpath("//mat-option/span[contains(string(), 'None')]").click()
driver.find_element_by_name("tJobImageName").send_keys(tjobimage)
driver.find_element_by_name("commands").send_keys(commands)
driver.find_element_by_xpath("//mat-checkbox[@id='serviceEBS']/label").click()
driver.find_element_by_xpath("//button[contains(string(), 'SAVE')]").click()
time.sleep(1)

# run tjob
driver.find_element_by_xpath("//button[@title='Run TJob']").click()
time.sleep(10)

# default max wait 5 minutes
TSS_MAX_WAIT = 300

# Poll the result banner until it leaves a "still running" state or the
# wait budget is exhausted.
while TSS_MAX_WAIT > 0:
    try:
        element = driver.find_element_by_id('resultMsgText')
        if (element.text == "Executing Test" or element.text == "Starting Test Support Service: EBS" or element.text == "Starting Dockbeat to get metrics..."):
            print("\t Waiting for tjob execution to complete")
            time.sleep(20)
            TSS_MAX_WAIT = TSS_MAX_WAIT - 20
            # BUG FIX: removed a redundant re-lookup of resultMsgText here;
            # its result was discarded because the loop re-fetches it at the
            # top of the next iteration anyway.
            continue
        else:
            print("\t TJob Execution Result: " + element.text)
            break
    except Exception:  # BUG FIX: was a bare except, which also swallowed KeyboardInterrupt
        print("\t Something is wrong")
        break

# NOTE(review): driver.close() only closes the window; driver.quit() would
# also end the remote EUS session — confirm which is intended.
driver.close()
| ######################
# Author: <NAME>
# 2/3/2019 fixed by <NAME>
######################
# End-to-end test: drives the ElasTest UI with Selenium to create a project,
# create a TJob that runs a Spark word-count via the EBS support service,
# execute it, and poll the result banner until the TJob finishes.
import time
import sys
import os
import selenium
from selenium import webdriver

# TODO: Substitute timers with webdriverwaits.

url = sys.argv[1]
projectname = 'deleteme'
tjobname = 'deletethisproject'
tjobimage = 'elastest/ebs-spark'
commands = """
git clone https://github.com/elastest/demo-projects.git
cd demo-projects/ebs-test
mvn -q package
rm -f big.txt
wget -q https://norvig.com/big.txt
hadoop fs -rmr /out.txt
hadoop fs -rm /big.txt
hadoop fs -copyFromLocal big.txt /big.txt
spark-submit --class org.sparkexample.WordCountTask --master spark://sparkmaster:7077 /demo-projects/ebs-test/target/hadoopWordCount-1.0-SNAPSHOT.jar /big.txt
hadoop fs -getmerge /out.txt ./out.txt
head -20 out.txt
"""

# setup Chrome WebDriver (headless; sandbox disabled for containerized runs)
options = webdriver.ChromeOptions()
options.add_argument('headless')
options.add_argument('--no-sandbox')
capabilities = options.to_capabilities()

eusUrl = os.environ['ET_EUS_API']
print("EUS URL is: " + str(eusUrl))
driver = webdriver.Remote(command_executor=eusUrl, desired_capabilities=capabilities)
driver.get(url)

# create new project
time.sleep(5)
element = driver.find_element_by_xpath("//button[contains(string(), 'New Project')]")
element.click()
time.sleep(5)
driver.find_element_by_name("project.name").send_keys(projectname)
driver.find_element_by_xpath("//button[contains(string(), 'SAVE')]").click()
time.sleep(5)

# create new tjob
driver.find_element_by_xpath("//button[contains(string(), 'New TJob')]").click()
time.sleep(5)
driver.find_element_by_name("tJobName").send_keys(tjobname)
# driver.find_element_by_xpath('//*[@id="mat-select-0"]/div/div[1]/span').click()
driver.find_element_by_class_name("mat-select-trigger").click()
driver.find_element_by_xpath("//mat-option/span[contains(string(), 'None')]").click()
driver.find_element_by_name("tJobImageName").send_keys(tjobimage)
driver.find_element_by_name("commands").send_keys(commands)
driver.find_element_by_xpath("//mat-checkbox[@id='serviceEBS']/label").click()
driver.find_element_by_xpath("//button[contains(string(), 'SAVE')]").click()
time.sleep(1)

# run tjob
driver.find_element_by_xpath("//button[@title='Run TJob']").click()
time.sleep(10)

# default max wait 5 minutes
TSS_MAX_WAIT = 300

# Poll the result banner until it leaves a "still running" state or the
# wait budget is exhausted.
while TSS_MAX_WAIT > 0:
    try:
        element = driver.find_element_by_id('resultMsgText')
        if (element.text == "Executing Test" or element.text == "Starting Test Support Service: EBS" or element.text == "Starting Dockbeat to get metrics..."):
            print("\t Waiting for tjob execution to complete")
            time.sleep(20)
            TSS_MAX_WAIT = TSS_MAX_WAIT - 20
            # BUG FIX: removed a redundant re-lookup of resultMsgText here;
            # its result was discarded because the loop re-fetches it at the
            # top of the next iteration anyway.
            continue
        else:
            print("\t TJob Execution Result: " + element.text)
            break
    except Exception:  # BUG FIX: was a bare except, which also swallowed KeyboardInterrupt
        print("\t Something is wrong")
        break

# NOTE(review): driver.close() only closes the window; driver.quit() would
# also end the remote EUS session — confirm which is intended.
driver.close()
| en | 0.489509 | ###################### # Author: <NAME> # 2/3/2019 fixed by <NAME> ###################### # TODO: Substitute timers with webdriverwaits. git clone https://github.com/elastest/demo-projects.git cd demo-projects/ebs-test mvn -q package rm -f big.txt wget -q https://norvig.com/big.txt hadoop fs -rmr /out.txt hadoop fs -rm /big.txt hadoop fs -copyFromLocal big.txt /big.txt spark-submit --class org.sparkexample.WordCountTask --master spark://sparkmaster:7077 /demo-projects/ebs-test/target/hadoopWordCount-1.0-SNAPSHOT.jar /big.txt hadoop fs -getmerge /out.txt ./out.txt head -20 out.txt #setup Chrome WebDriver # create new project # create new tjob # driver.find_element_by_xpath('//*[@id="mat-select-0"]/div/div[1]/span').click() # run tjob # default max wait 5 minutes # check for success. | 2.134439 | 2 |
src/apis/text/text/language-detections/toftrup-etal-2021/toftrup-etal-2021.py | jqueguiner/ai-api-marketplace- | 0 | 6614747 | from LanguageIdentifier import predict, rank
import json
def predict(text):
    """Return a JSON array of {'language', 'score'} objects ranked for *text*.

    NOTE(review): this definition shadows ``predict`` imported from
    LanguageIdentifier above — confirm that is intentional.
    """
    ranked = [{'language': language, 'score': score}
              for language, score in rank(text)]
    return json.dumps(ranked)
| from LanguageIdentifier import predict, rank
import json
def predict(text):
    """Return a JSON array of {'language', 'score'} objects ranked for *text*.

    NOTE(review): this definition shadows ``predict`` imported from
    LanguageIdentifier above — confirm that is intentional.
    """
    ranked = [{'language': language, 'score': score}
              for language, score in rank(text)]
    return json.dumps(ranked)
| none | 1 | 2.669446 | 3 | |
August 2021/Set Matrix Zeroes.py | parikshitgupta1/leetcode | 0 | 6614748 | class Solution:
def setZeroes(self, matrix):
if len(matrix) == 0: return
if len(matrix[0]) == 0: return
row = len(matrix)
col = len(matrix[0])
for i in range(row):
for j in range(col):
if matrix[i][j] == 0:
self.mark(matrix, i, j)
for i in range(row):
for j in range(col):
if matrix[i][j] == 'N':
matrix[i][j] = 0
    def mark(self, matrix, i, j):
        """Flag every non-zero cell of row *i* and column *j* with the string
        sentinel 'N' (in place).

        Zero cells are deliberately skipped so that genuine zeros elsewhere
        still trigger their own row/column marking.
        """
        for col in range(len(matrix[0])):
            if matrix[i][col] != 0:
                matrix[i][col] = 'N'
        for row in range(len(matrix)):
            if matrix[row][j] != 0:
                matrix[row][j] = 'N'
class Solution:
    def setZeroes(self, matrix):
        """Zero out, in place, every row and column of *matrix* containing a 0.

        Collects zero-bearing row/column indices in one pass and rewrites in
        a second — O(m*n) time, O(m+n) space — replacing the previous
        sentinel scheme that re-scanned a full row and column for every zero
        cell and temporarily stored 'N' strings in a numeric matrix.
        """
        if not matrix or not matrix[0]:
            return
        zero_rows = set()
        zero_cols = set()
        for i, row in enumerate(matrix):
            for j, value in enumerate(row):
                if value == 0:
                    zero_rows.add(i)
                    zero_cols.add(j)
        for i, row in enumerate(matrix):
            for j in range(len(row)):
                if i in zero_rows or j in zero_cols:
                    row[j] = 0

    def mark(self, matrix, i, j):
        """Flag every non-zero cell of row *i* and column *j* with the 'N'
        sentinel (kept for interface compatibility)."""
        for col in range(len(matrix[0])):
            if matrix[i][col] != 0:
                matrix[i][col] = 'N'
        for row in range(len(matrix)):
            if matrix[row][j] != 0:
                matrix[row][j] = 'N'
| none | 1 | 3.384021 | 3 | |
tests/rtc/test_coregistration.py | ASFHyP3/hyp3-gamma | 8 | 6614749 | import pytest
from hyp3_gamma.rtc import coregistration
def test_get_offset(tmp_path):
    """get_offset should return the Euclidean norm of the constant (order-0)
    range and azimuth offset polynomial terms."""
    diff_par = tmp_path / 'diff_par'
    with open(diff_par, 'w') as f:
        f.write('range_offset_polynomial: -3.00000 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00\n')
        f.write('azimuth_offset_polynomial: 4.00000 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00\n')
    # 3-4-5 triangle: sqrt((-3)^2 + 4^2) == 5
    assert coregistration.get_offset(diff_par) == 5.0
    with open(diff_par, 'w') as f:
        f.write('range_offset_polynomial: 1.00000 -2.0000e+00 3.0000e+00 -4.0000e+00 5.0000e+00 -6.0000e+00\n')
        f.write('azimuth_offset_polynomial: -7.00000 8.0000e+00 -9.0000e+00 10.0000e+00 -11.0000e+00 12.0000e+00\n')
    # sqrt(1^2 + (-7)^2): only the constant terms matter
    assert coregistration.get_offset(diff_par) == 7.0710678118654755
def test_get_stddev(tmp_path):
    """get_std_dev should return the Euclidean norm of the range and azimuth
    model-fit standard deviations parsed from the log."""
    log = tmp_path / 'log'
    with open(log, 'w') as f:
        f.write('final model fit std. dev. (samples) range: 3.0000 azimuth: 4.0000')
    # 3-4-5 triangle
    assert coregistration.get_std_dev(log) == 5.0
    with open(log, 'w') as f:
        f.write('final model fit std. dev. (samples) range: 50.9111 azimuth: 79.8217')
    # sqrt(50.9111**2 + 79.8217**2)
    assert coregistration.get_std_dev(log) == 94.67546616785154
def test_check_coregistration(tmp_path):
    """check_coregistration should raise CoregistrationError only when the
    model-fit std. dev. or the pixel-scaled offset exceeds its limit."""
    log = tmp_path / 'log'
    with open(log, 'w') as f:
        f.write('final model fit std. dev. (samples) range: 3.0000 azimuth: 4.0000')
    diff_par = tmp_path / 'diff_par'
    with open(diff_par, 'w') as f:
        f.write('range_offset_polynomial: -5.00000 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00\n')
        f.write('azimuth_offset_polynomial: 12.00000 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00\n')
    # std dev (5.0) and offset (13.0) exactly at the limits: passes
    coregistration.check_coregistration(log, diff_par, max_stddev=5.0, max_offset=13.0, pixel_size=1.0)
    # std dev just over its limit
    with pytest.raises(coregistration.CoregistrationError):
        coregistration.check_coregistration(log, diff_par, max_stddev=4.99, max_offset=13.0, pixel_size=1.0)
    # offset just over its limit
    with pytest.raises(coregistration.CoregistrationError):
        coregistration.check_coregistration(log, diff_par, max_stddev=5.0, max_offset=12.99, pixel_size=1.0)
    # with pixel_size=2.0 the 13-sample offset scales to 26
    coregistration.check_coregistration(log, diff_par, max_stddev=5.0, max_offset=26.0, pixel_size=2.0)
    with pytest.raises(coregistration.CoregistrationError):
        coregistration.check_coregistration(log, diff_par, max_stddev=5.0, max_offset=25.99, pixel_size=2.0)
| import pytest
from hyp3_gamma.rtc import coregistration
def test_get_offset(tmp_path):
    """get_offset should return the Euclidean norm of the constant (order-0)
    range and azimuth offset polynomial terms."""
    diff_par = tmp_path / 'diff_par'
    with open(diff_par, 'w') as f:
        f.write('range_offset_polynomial: -3.00000 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00\n')
        f.write('azimuth_offset_polynomial: 4.00000 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00\n')
    # 3-4-5 triangle: sqrt((-3)^2 + 4^2) == 5
    assert coregistration.get_offset(diff_par) == 5.0
    with open(diff_par, 'w') as f:
        f.write('range_offset_polynomial: 1.00000 -2.0000e+00 3.0000e+00 -4.0000e+00 5.0000e+00 -6.0000e+00\n')
        f.write('azimuth_offset_polynomial: -7.00000 8.0000e+00 -9.0000e+00 10.0000e+00 -11.0000e+00 12.0000e+00\n')
    # sqrt(1^2 + (-7)^2): only the constant terms matter
    assert coregistration.get_offset(diff_par) == 7.0710678118654755
def test_get_stddev(tmp_path):
    """get_std_dev should return the Euclidean norm of the range and azimuth
    model-fit standard deviations parsed from the log."""
    log = tmp_path / 'log'
    with open(log, 'w') as f:
        f.write('final model fit std. dev. (samples) range: 3.0000 azimuth: 4.0000')
    # 3-4-5 triangle
    assert coregistration.get_std_dev(log) == 5.0
    with open(log, 'w') as f:
        f.write('final model fit std. dev. (samples) range: 50.9111 azimuth: 79.8217')
    # sqrt(50.9111**2 + 79.8217**2)
    assert coregistration.get_std_dev(log) == 94.67546616785154
def test_check_coregistration(tmp_path):
    """check_coregistration should raise CoregistrationError only when the
    model-fit std. dev. or the pixel-scaled offset exceeds its limit."""
    log = tmp_path / 'log'
    with open(log, 'w') as f:
        f.write('final model fit std. dev. (samples) range: 3.0000 azimuth: 4.0000')
    diff_par = tmp_path / 'diff_par'
    with open(diff_par, 'w') as f:
        f.write('range_offset_polynomial: -5.00000 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00\n')
        f.write('azimuth_offset_polynomial: 12.00000 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00 0.0000e+00\n')
    # std dev (5.0) and offset (13.0) exactly at the limits: passes
    coregistration.check_coregistration(log, diff_par, max_stddev=5.0, max_offset=13.0, pixel_size=1.0)
    # std dev just over its limit
    with pytest.raises(coregistration.CoregistrationError):
        coregistration.check_coregistration(log, diff_par, max_stddev=4.99, max_offset=13.0, pixel_size=1.0)
    # offset just over its limit
    with pytest.raises(coregistration.CoregistrationError):
        coregistration.check_coregistration(log, diff_par, max_stddev=5.0, max_offset=12.99, pixel_size=1.0)
    # with pixel_size=2.0 the 13-sample offset scales to 26
    coregistration.check_coregistration(log, diff_par, max_stddev=5.0, max_offset=26.0, pixel_size=2.0)
    with pytest.raises(coregistration.CoregistrationError):
        coregistration.check_coregistration(log, diff_par, max_stddev=5.0, max_offset=25.99, pixel_size=2.0)
| none | 1 | 1.92511 | 2 | |
easy_rec/python/test/util_test.py | xia-huang-411303/EasyRec | 61 | 6614750 | # -*- encoding:utf-8 -*-
# Copyright (c) Alibaba, Inc. and its affiliates.
import tensorflow as tf
from easy_rec.python.utils import estimator_utils
# Run against the TF1-compatible API surface when TensorFlow 2.x is installed.
# NOTE(review): the version check is a lexicographic string comparison —
# confirm it behaves as intended for all installed versions.
if tf.__version__ >= '2.0':
    tf = tf.compat.v1
    gfile = tf.gfile
class UtilTest(tf.test.TestCase):
    """Tests for estimator_utils checkpoint helpers."""

    def test_get_ckpt_version(self):
        # Both the bare checkpoint prefix and its .meta sidecar parse to 6500.
        for ckpt_path in ('oss://easyrec/ckpts/model.ckpt-6500.meta',
                          'oss://easyrec/ckpts/model.ckpt-6500'):
            ver = estimator_utils.get_ckpt_version(ckpt_path)
            assert ver == 6500, 'invalid version: %s' % str(ver)
# Allow running this test file directly: python util_test.py
if __name__ == '__main__':
    tf.test.main()
| # -*- encoding:utf-8 -*-
# Copyright (c) Alibaba, Inc. and its affiliates.
import tensorflow as tf
from easy_rec.python.utils import estimator_utils
# Run against the TF1-compatible API surface when TensorFlow 2.x is installed.
# NOTE(review): the version check is a lexicographic string comparison —
# confirm it behaves as intended for all installed versions.
if tf.__version__ >= '2.0':
    tf = tf.compat.v1
    gfile = tf.gfile
class UtilTest(tf.test.TestCase):
    """Tests for estimator_utils checkpoint helpers."""

    def test_get_ckpt_version(self):
        # Both the bare checkpoint prefix and its .meta sidecar parse to 6500.
        for ckpt_path in ('oss://easyrec/ckpts/model.ckpt-6500.meta',
                          'oss://easyrec/ckpts/model.ckpt-6500'):
            ver = estimator_utils.get_ckpt_version(ckpt_path)
            assert ver == 6500, 'invalid version: %s' % str(ver)
# Allow running this test file directly: python util_test.py
if __name__ == '__main__':
    tf.test.main()
| en | 0.920354 | # -*- encoding:utf-8 -*- # Copyright (c) Alibaba, Inc. and its affiliates. | 2.171233 | 2 |