index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
992,000 | db8772b88f62f0b4efe6ea71e9fda7ee47508346 | #!/usr/bin/env python
# coding:utf-8
# date: 2016-03-29
# author: dudp@foxmail.com
import re
import urllib
import urllib2
import cookielib
import mechanize
from bs4 import BeautifulSoup
# Salesforce login endpoint and the instance home page (used as Referer).
login_url = 'https://login.salesforce.com'
home_page = 'https://na7.salesforce.com'
# The cookie jar is persisted here so later runs can reuse the session.
cookie_file = './.cookie'
# Placeholder credentials posted to the login form as 'un'/'pw'.
username = 'xxxxxxxxxx'
password = 'xxxxxxxx'
def get_post_data(content,email):
html_proc = BeautifulSoup(content,'lxml')
_confirmationtoken = html_proc.find("input",id='_CONFIRMATIONTOKEN').get('value')
cancelURL = html_proc.find("input",id='cancelURL').get('value')
retURL = html_proc.find("input",id='retURL').get('value')
save_new_url = html_proc.find("input",id='save_new_url').get('value')
vcsrf = html_proc.find("input",id='vcsrf').get('value')
vpol = html_proc.find("input",id='vpol').get('value')
vflid = html_proc.find("input",id='vflid').get('value')
vfgrp = html_proc.find("input",id='vfgrp').get('value')
code = raw_input("Please input your verification code: ")
save = 'Verify'
postdata = {
'_CONFIRMATIONTOKEN':_confirmationtoken,
'cancelURL':cancelURL,
'retURL':retURL,
'save_new_url':save_new_url,
'vcsrf':vcsrf,
'vpol':vpol,
'retURL':retURL,
'vflid':vflid,
'vfgrp':vfgrp,
'smc':code,
'save':save
}
if email:
postdata.pop('smc')
postdata['emc'] = code
print urllib.urlencode(postdata)
return urllib.urlencode(postdata)
def request(url,data=None):
    """Open *url* through the shared mechanize browser (POST when *data*
    is given) and persist the cookie jar so the session survives restarts."""
    response = browser.open(url,data)
    # Save even session-only cookies so a later run can reuse the login.
    browser._ua_handlers['_cookies'].cookiejar.save(cookie_file, ignore_discard=True, ignore_expires=True)
    return response
# Credentials posted to the Salesforce login form.
acc_pwd = { 'un': username, 'pw': password }
cookiejar = cookielib.LWPCookieJar()
try:
    # Reuse a previously saved session when the cookie file exists.
    cookiejar.load(cookie_file, ignore_discard=True, ignore_expires=True)
except IOError:
    pass
browser = mechanize.UserAgent()
browser.set_handle_robots(False)  # ignore robots.txt for the login flow
browser.set_cookiejar(cookiejar)
browser.addheaders = [('User-agent', 'Mozilla/5.0 (X11; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/38.0')]
browser.addheaders.append(('Referer', home_page))
try:
    data = urllib.urlencode(acc_pwd)
    response = request(login_url,data)
    content = response.read().decode('utf-8')
    status = response.code
    if status == 200:
        # Salesforce redirects to an identity-verification page when this
        # device is not yet trusted.
        patern = u'Verify Your Identity'
        if re.search(patern,content):
            new_url = response.geturl()
            # 'EmailVerification' in the URL means the code was emailed
            # rather than sent by SMS.
            email = re.search('EmailVerification',new_url)
            data2 = get_post_data(content,email)
            response = request(new_url,data2)
            print response.geturl()
except urllib2.HTTPError as e:
    print "Failed to open ", home_page, str(e)
|
992,001 | 0cc5ea60c9040b1c368b7ffa34fd2574d31369c2 | """
python 的深浅拷贝
"""
#1. python 默认的赋值是浅拷贝,如对列表的赋值
In [1]: s = [1,2,3]
In [4]: s2 = s
In [7]: s2[0]= 2
In [8]: s
Out[8]: [2, 2, 3]
# 修改s2的元素会影响s的元素值
#2. [:]运算对不可变元素是深拷贝
In [10]: s
Out[10]: [2, 2, 3]
In [9]: s2 = s[:]
In [11]: s2[0]= 1
In [12]: s
Out[12]: [2, 2, 3]
# 此时对s2的修改不影响s的值
#3. [:]运算对可变元素是浅拷贝
In [13]: s = [1,2,[3,4,5]]
In [14]: s2= s[:]
In [15]: s2[2][0] = 6
In [16]: s2
Out[16]: [1, 2, [6, 4, 5]]
In [18]: s
Out[18]: [1, 2, [6, 4, 5]]
# 对s2中的可变元素如列表修改时,同样会影响s的值
#4. 深拷贝使用deepcopy函数
In [18]: s
Out[18]: [1, 2, [6, 4, 5]]
In [19]: import copy
In [20]: s3 = copy.deepcopy(s)
In [21]: s
Out[21]: [1, 2, [6, 4, 5]]
In [22]: s3
Out[22]: [1, 2, [6, 4, 5]]
In [23]: s3[2][0] = 3
In [24]: s
Out[24]: [1, 2, [6, 4, 5]]
# 此时修改s3的值已经不会影响s了
|
992,002 | 062427f010eb903475ed0df6854b71f950754f8d | from django.shortcuts import render, HttpResponse, redirect
from django.contrib import messages
from time import localtime, strftime
from datetime import datetime
from django.utils.crypto import get_random_string
from models import *
# Create your views here.
# Catalog: product_id -> unit price (currency units per item).
products = {
    '1001': 19.99,
    '1002': 24.99,
    '1003': 4.99,
    '1004': 49.99
}
def index(request):
    """Render the product listing / landing page."""
    return render(request,'amadon_app/index.html')
def buy(request):
    """Handle a POST from a product "buy" button: accumulate the purchase
    into the session, then redirect to the checkout page.

    Session keys written:
      price        -- line total of the most recent purchase
      quantity     -- running count of items bought
      total_amount -- running order total
    """
    if request.method != 'POST':
        # The original fell through and returned None for non-POST
        # requests, which Django turns into a 500; redirect instead.
        return redirect('/')
    request.session.setdefault('quantity', 0)
    request.session.setdefault('total_amount', 0)
    qty = int(request.POST['quantity'])
    # Line total for this submission (price per buy button).
    request.session['price'] = products[request.POST['product_id']] * qty
    request.session['quantity'] += qty
    request.session['total_amount'] += float(request.session['price'])
    return redirect('/checkout')
def checkout(request):
    """Render the checkout summary (reads totals from the session)."""
    return render(request,'amadon_app/checkout.html')
def clear(request):
    """Empty the session (cart/totals) and return to the index page."""
    request.session.clear()
    return redirect('/')
|
992,003 | 45d8046fcc653864251029dcef81ad84b37bb10d | import sys
from collections.abc import Callable
from typing import Any, NamedTuple
from typing_extensions import TypeAlias
__all__ = ["scheduler"]
# Callback signature accepted by scheduler.enter/enterabs.
_ActionCallback: TypeAlias = Callable[..., Any]
if sys.version_info >= (3, 10):
    class Event(NamedTuple):
        """Record of a scheduled event; 3.10+ adds ``sequence`` as a
        tie-breaker for events with equal time and priority."""
        time: float
        priority: Any
        sequence: int
        action: _ActionCallback
        argument: tuple[Any, ...]
        kwargs: dict[str, Any]
else:
    class Event(NamedTuple):
        """Record of a scheduled event (pre-3.10 field layout)."""
        time: float
        priority: Any
        action: _ActionCallback
        argument: tuple[Any, ...]
        kwargs: dict[str, Any]
class scheduler:
    """Type stub for :class:`sched.scheduler`, a general-purpose event scheduler."""
    # Returns the current time (e.g. time.monotonic).
    timefunc: Callable[[], float]
    # Blocks for the given delay (e.g. time.sleep).
    delayfunc: Callable[[float], object]
    def __init__(self, timefunc: Callable[[], float] = ..., delayfunc: Callable[[float], object] = ...) -> None: ...
    def enterabs(
        self, time: float, priority: Any, action: _ActionCallback, argument: tuple[Any, ...] = ..., kwargs: dict[str, Any] = ...
    ) -> Event: ...
    def enter(
        self, delay: float, priority: Any, action: _ActionCallback, argument: tuple[Any, ...] = ..., kwargs: dict[str, Any] = ...
    ) -> Event: ...
    def run(self, blocking: bool = ...) -> float | None: ...
    def cancel(self, event: Event) -> None: ...
    def empty(self) -> bool: ...
    @property
    def queue(self) -> list[Event]: ...
|
992,004 | 2f6a18b378324c9e28d2e403b209a0d8e609f426 | version https://git-lfs.github.com/spec/v1
oid sha256:d68868d80e258d863e342461e21b8bda846eb4ad92f54f4b6a088c0c87fe9d5e
size 959
|
992,005 | 1e8c78ed0b57b707626b1a815eca292813c29d10 | # Generated by Django 3.1.5 on 2021-01-20 17:22
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated Django migration: widens HourlyRate.rate to a 10-char
# CharField and introduces the Panel model with optional one-to-one links
# to HourlyRate and the configured user model.
class Migration(migrations.Migration):
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('pages', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='hourlyrate',
            name='rate',
            field=models.CharField(max_length=10),
        ),
        migrations.CreateModel(
            name='Panel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('hourlyRate', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pages.hourlyrate')),
                ('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
992,006 | 9e3ab97ea6462a1a00006528bed00a7cae892805 | from pymongo import MongoClient
# Local MongoDB connection; all operations use the `db_cek` database.
client= MongoClient('localhost', 27017)
db=client.db_cek
def delete_all():
    """Remove every document from the `orders` collection."""
    # An empty filter matches all documents in the collection.
    db.orders.delete_many({})
def main():
    """Entry point: wipe the orders collection."""
    delete_all()
if __name__=='__main__':
    main()
992,007 | e631ab71328ad420210671066913bdacce5279bf | #-*-coding:utf-8-*-
import os
import sys
import numpy as np
import cv2
from PIL import Image
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
import tensorflow as tf
class NumberRecognizer(object):
    """CNN-based 3-class image classifier built on TensorFlow 1.x.

    Pipeline:
      1) preprocess_image(): load images from TRAIN_DIR/TEST_DIR (one
         sub-folder per class), grayscale + blur + resize to 40x40, and
         one-hot encode the folder names as labels.
      2) network_model(): train a 3-conv-layer CNN, report test accuracy
         and save a checkpoint.
    """

    def __init__(self):
        print(sys.version)
        # for reproducibility
        tf.set_random_seed(777)
        self.TRAIN_DIR = '/home/taemin/PoseData/trainingSet'
        self.TEST_DIR = '/home/taemin/PoseData/testSet'
        self.train_input = []
        self.train_label = []
        self.test_input = []
        self.test_label = []

    def _load_dataset(self, root_dir):
        """Load every image below *root_dir* (one sub-folder per class).

        Returns (inputs, labels): inputs is a float32 array of flattened
        40x40 grayscale images shaped (-1, 1600); labels is a float32
        one-hot array shaped (-1, 3). Both are shuffled with the same
        random permutation.
        """
        folder_list = np.array(os.listdir(root_dir))
        # One-hot encode the class sub-folder names.
        label_encoder = LabelEncoder()
        integer_encoded = label_encoder.fit_transform(folder_list)
        onehot_encoder = OneHotEncoder(sparse=False)
        integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
        onehot_encoded = onehot_encoder.fit_transform(integer_encoded)
        inputs = []
        labels = []
        for index in range(len(folder_list)):
            path = os.path.join(root_dir, folder_list[index]) + '/'
            for image in os.listdir(path):
                image_path = os.path.join(path, image)
                raw_image = cv2.imread(image_path)
                gray_image = cv2.cvtColor(raw_image, cv2.COLOR_BGR2GRAY)
                blur_image = cv2.GaussianBlur(gray_image, (5, 5), 0)
                # NOTE(review): threshed_image is computed but never used --
                # the resize below reads blur_image, exactly as the original
                # code did. Confirm whether threshed_image was intended.
                _, threshed_image = cv2.threshold(blur_image, 60, 255, cv2.THRESH_BINARY_INV)
                resized_image = cv2.resize(blur_image, (40, 40))
                inputs.append(np.array(resized_image))
                labels.append(np.array(onehot_encoded[index]))
        inputs = np.reshape(inputs, (-1, 1600)).astype(np.float32)
        labels = np.reshape(labels, (-1, 3)).astype(np.float32)
        # Shuffle inputs and labels with one shared permutation.
        perm = np.arange(inputs.shape[0])
        np.random.shuffle(perm)
        return inputs[perm], labels[perm]

    def preprocess_image(self):
        """Populate train/test input and label arrays.

        The original duplicated the entire ~40-line loading loop for the
        test set; both sets now share _load_dataset().
        """
        # Training data
        self.train_input, self.train_label = self._load_dataset(self.TRAIN_DIR)
        # Validation/test data
        self.test_input, self.test_label = self._load_dataset(self.TEST_DIR)
        print(self.train_input.shape)
        print(self.train_label.shape)
        print(self.test_input.shape)
        print(self.test_label.shape)

    def network_model(self):
        """Build, train, evaluate and checkpoint the 3-conv-layer CNN."""
        # hyper-parameters
        learning_rate = 0.001
        training_epochs = 60
        batch_size = 20
        # dropout (keep_prob) rate 0.7 on training, but should be 1 for testing
        keep_prob = tf.placeholder(tf.float32)
        # input place holders
        X = tf.placeholder(tf.float32, [None, 1600])
        X_img = tf.reshape(X, [-1, 40, 40, 1])  # img 40x40x1 (black/white)
        Y = tf.placeholder(tf.float32, [None, 3])
        # L1: conv 3x3x32 -> relu -> maxpool -> dropout; (?, 40, 40, 1) -> (?, 20, 20, 32)
        W1 = tf.Variable(tf.random_normal([3, 3, 1, 32], stddev=0.01))
        L1 = tf.nn.conv2d(X_img, W1, strides=[1, 1, 1, 1], padding='SAME')
        L1 = tf.nn.relu(L1)
        L1 = tf.nn.max_pool(L1, ksize=[1, 2, 2, 1],
                            strides=[1, 2, 2, 1], padding='SAME')
        L1 = tf.nn.dropout(L1, keep_prob=keep_prob)
        # L2: conv 3x3x64 -> relu -> maxpool -> dropout; (?, 20, 20, 32) -> (?, 10, 10, 64)
        W2 = tf.Variable(tf.random_normal([3, 3, 32, 64], stddev=0.01))
        L2 = tf.nn.conv2d(L1, W2, strides=[1, 1, 1, 1], padding='SAME')
        L2 = tf.nn.relu(L2)
        L2 = tf.nn.max_pool(L2, ksize=[1, 2, 2, 1],
                            strides=[1, 2, 2, 1], padding='SAME')
        L2 = tf.nn.dropout(L2, keep_prob=keep_prob)
        # L3: conv 3x3x128 -> relu -> maxpool -> dropout -> flatten;
        # (?, 10, 10, 64) -> (?, 5, 5, 128) -> (?, 3200)
        W3 = tf.Variable(tf.random_normal([3, 3, 64, 128], stddev=0.01))
        L3 = tf.nn.conv2d(L2, W3, strides=[1, 1, 1, 1], padding='SAME')
        L3 = tf.nn.relu(L3)
        L3 = tf.nn.max_pool(L3, ksize=[1, 2, 2, 1], strides=[
            1, 2, 2, 1], padding='SAME')
        L3 = tf.nn.dropout(L3, keep_prob=keep_prob)
        L3_flat = tf.reshape(L3, [-1, 128 * 5 * 5])
        # L4: fully connected 3200 -> 625
        W4 = tf.get_variable("W4", shape=[128 * 5 * 5, 625],
                             initializer=tf.contrib.layers.xavier_initializer())
        b4 = tf.Variable(tf.random_normal([625]))
        L4 = tf.nn.relu(tf.matmul(L3_flat, W4) + b4)
        L4 = tf.nn.dropout(L4, keep_prob=keep_prob)
        # L5: final fully connected 625 -> 3 class logits
        W5 = tf.get_variable("W5", shape=[625, 3],
                             initializer=tf.contrib.layers.xavier_initializer())
        b5 = tf.Variable(tf.random_normal([3]))
        logits = tf.matmul(L4, W5) + b5
        # softmax cross-entropy loss, optimized with Adam
        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
            logits=logits, labels=Y))
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
        # initialize
        sess = tf.Session()
        sess.run(tf.global_variables_initializer())
        # mini-batch training loop
        for epoch in range(training_epochs):
            avg_cost = 0
            total_batch = int(len(self.train_input) / batch_size)
            for i in range(total_batch):
                start = i * batch_size
                end = start + batch_size
                batch_xs = self.train_input[start:end]
                batch_ys = self.train_label[start:end]
                feed_dict = {X: batch_xs, Y: batch_ys, keep_prob: 0.7}
                c, _ = sess.run([cost, optimizer], feed_dict=feed_dict)
                avg_cost += c / total_batch
            print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))
        print('Learning Finished!')
        # Test model and check accuracy (dropout disabled: keep_prob = 1)
        correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(Y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        print('Accuracy:', sess.run(accuracy, feed_dict={
            X: self.test_input, Y: self.test_label, keep_prob: 1}))
        # Save the trained model
        save_path = './models/recognizer_pose_model'
        saver = tf.train.Saver()
        saver.save(sess, save_path)
        print("Model saved in file: %s" % save_path)
        sess.close()
if __name__ == "__main__":
    # Train the pose-number recognizer end to end: preprocess, then fit.
    number_recognizer = NumberRecognizer()
    number_recognizer.preprocess_image()
    number_recognizer.network_model()
|
992,008 | 4b9e6fdb28ed98241da22101cc68db5a16808fdf | #
# @lc app=leetcode.cn id=697 lang=python3
#
# [697] 数组的度
#
# https://leetcode-cn.com/problems/degree-of-an-array/description/
#
# algorithms
# Easy (60.62%)
# Likes: 349
# Dislikes: 0
# Total Accepted: 62.2K
# Total Submissions: 102.6K
# Testcase Example: '[1,2,2,3,1]'
#
# 给定一个非空且只包含非负数的整数数组 nums,数组的度的定义是指数组里任一元素出现频数的最大值。
#
# 你的任务是在 nums 中找到与 nums 拥有相同大小的度的最短连续子数组,返回其长度。
#
#
#
# 示例 1:
#
#
# 输入:[1, 2, 2, 3, 1]
# 输出:2
# 解释:
# 输入数组的度是2,因为元素1和2的出现频数最大,均为2.
# 连续子数组里面拥有相同度的有如下所示:
# [1, 2, 2, 3, 1], [1, 2, 2, 3], [2, 2, 3, 1], [1, 2, 2], [2, 2, 3], [2, 2]
# 最短连续子数组[2, 2]的长度为2,所以返回2.
#
#
# 示例 2:
#
#
# 输入:[1,2,2,3,1,4,2]
# 输出:6
#
#
#
#
# 提示:
#
#
# nums.length 在1到 50,000 区间范围内。
# nums[i] 是一个在 0 到 49,999 范围内的整数。
#
#
#
# @lc code=start
class Solution:
    def findShortestSubArray(self, nums: List[int]) -> int:
        """Return the length of the shortest contiguous subarray whose
        degree (maximum element frequency) equals the degree of *nums*.

        Single pass records, per value, its first index, last index and
        count; the answer is the tightest first..last window among the
        values that attain the maximum count.
        """
        first_seen = {}
        last_seen = {}
        counts = {}
        degree = 0
        for idx, value in enumerate(nums):
            first_seen.setdefault(value, idx)
            last_seen[value] = idx
            counts[value] = counts.get(value, 0) + 1
            if counts[value] > degree:
                degree = counts[value]
        best = float('inf')
        for value, cnt in counts.items():
            if cnt == degree:
                span = last_seen[value] - first_seen[value] + 1
                best = min(best, span)
        return best
# @lc code=end
|
992,009 | 37f2c02d4030627e89929cf041c4c235dbd30958 | import torch
import torch.nn as nn
from pykp.masked_softmax import MaskedSoftmax
class RNNEncoder(nn.Module):
def __init__(self, vocab_size, embed_size, hidden_size, num_layers, bidirectional, pad_token, dropout=0.0):
super(RNNEncoder, self).__init__()
self.vocab_size = vocab_size
self.embed_size = embed_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bidirectional = bidirectional
self.pad_token = pad_token
self.embedding = nn.Embedding(
self.vocab_size,
self.embed_size,
self.pad_token
)
self.rnn = nn.GRU(input_size=embed_size, hidden_size=hidden_size, num_layers=num_layers,
bidirectional=bidirectional, batch_first=True, dropout=dropout)
def forward(self, src, src_lens):
"""
:param src: [batch, src_seq_len]
:param src_lens: a list containing the length of src sequences for each batch, with len=batch
:return:
"""
src_embed = self.embedding(src) # [batch, src_len, embed_size]
packed_input_src = nn.utils.rnn.pack_padded_sequence(src_embed, src_lens, batch_first=True)
memory_bank, encoder_final_state = self.rnn(packed_input_src)
# ([batch, seq_len, num_directions*hidden_size], [num_layer * num_directions, batch, hidden_size])
memory_bank, _ = nn.utils.rnn.pad_packed_sequence(memory_bank, batch_first=True) # unpack (back to padded)
# only extract the final state in the last layer
if self.bidirectional:
encoder_last_layer_final_state = torch.cat((encoder_final_state[-1, :, :], encoder_final_state[-2, :, :]),
1)
# [batch, hidden_size*2]
else:
encoder_last_layer_final_state = encoder_final_state[-1, :, :] # [batch, hidden_size]
return memory_bank.contiguous(), encoder_last_layer_final_state
class Attention(nn.Module):
    """Attention over an encoder memory bank with optional coverage.

    attn_mode "concat" uses additive (Bahdanau) scoring; anything else is
    treated as bi-linear ("general"/Luong) scoring.
    """

    def __init__(self, decoder_size, memory_bank_size, coverage_attn, attn_mode):
        super(Attention, self).__init__()
        # attention
        if attn_mode == "concat":
            # v and the decoder projection are only used by additive scoring.
            self.v = nn.Linear(decoder_size, 1, bias=False)
            self.decode_project = nn.Linear(decoder_size, decoder_size)
        self.memory_project = nn.Linear(memory_bank_size, decoder_size, bias=False)
        self.coverage_attn = coverage_attn
        if coverage_attn:
            self.coverage_project = nn.Linear(1, decoder_size, bias=False)
        self.softmax = MaskedSoftmax(dim=1)
        # self.softmax = nn.Softmax(dim=1)
        self.tanh = nn.Tanh()
        self.attn_mode = attn_mode

    def score(self, memory_bank, decoder_state, coverage=None):
        """
        :param memory_bank: [batch_size, max_input_seq_len, self.num_directions * self.encoder_size]
        :param decoder_state: [batch_size, decoder_size]
        :param coverage: [batch_size, max_input_seq_len]
        :return: score: [batch_size, max_input_seq_len]
        """
        batch_size, max_input_seq_len, memory_bank_size = list(memory_bank.size())
        decoder_size = decoder_state.size(1)
        if self.attn_mode == "general":
            # project memory_bank
            memory_bank_ = memory_bank.view(-1,
                                            memory_bank_size)  # [batch_size*max_input_seq_len, memory_bank_size]
            """
            if self.coverage_attn:
                coverage_input = coverage.view(-1, 1)  # [batch_size*max_input_seq_len, 1]
                memory_bank_ += self.coverage_project(coverage_input)  # [batch_size*max_input_seq_len, decoder_size]
                memory_bank_ = self.tanh(memory_bank_)
                encoder_feature = self.memory_project(memory_bank_)  # [batch_size*max_input_seq_len, decoder size]
            """
            encoder_feature = self.memory_project(memory_bank_)  # [batch_size*max_input_seq_len, decoder size]
            if self.coverage_attn:
                coverage_input = coverage.view(-1, 1)  # [batch_size*max_input_seq_len, 1]
                encoder_feature += self.coverage_project(coverage_input)  # [batch_size*max_input_seq_len, decoder_size]
                # NOTE(review): original indentation was flattened; tanh is
                # reconstructed as coverage-conditional (plain "general"
                # attention is a pure bi-linear form) -- confirm upstream.
                encoder_feature = self.tanh(encoder_feature)
            # expand decoder state
            decoder_state_expanded = decoder_state.unsqueeze(1).expand(batch_size, max_input_seq_len,
                                                                       decoder_size).contiguous()
            decoder_state_expanded = decoder_state_expanded.view(-1,
                                                                 decoder_size)  # [batch_size*max_input_seq_len, decoder_size]
            # Perform bi-linear operation
            scores = torch.bmm(decoder_state_expanded.unsqueeze(1),
                               encoder_feature.unsqueeze(2))  # [batch_size*max_input_seq_len, 1, 1]
        else:  # Bahdanau style attention
            # project memory_bank
            memory_bank_ = memory_bank.view(-1, memory_bank_size)  # [batch_size*max_input_seq_len, memory_bank_size]
            encoder_feature = self.memory_project(memory_bank_)  # [batch_size*max_input_seq_len, decoder size]
            # project decoder state
            dec_feature = self.decode_project(decoder_state)  # [batch_size, decoder_size]
            dec_feature_expanded = dec_feature.unsqueeze(1).expand(batch_size, max_input_seq_len,
                                                                   decoder_size).contiguous()
            dec_feature_expanded = dec_feature_expanded.view(-1,
                                                             decoder_size)  # [batch_size*max_input_seq_len, decoder_size]
            # sum up attention features
            att_features = encoder_feature + dec_feature_expanded  # [batch_size*max_input_seq_len, decoder_size]
            # Apply coverage
            if self.coverage_attn:
                coverage_input = coverage.view(-1, 1)  # [batch_size*max_input_seq_len, 1]
                coverage_feature = self.coverage_project(coverage_input)  # [batch_size*max_input_seq_len, decoder_size]
                # print(coverage.size())
                # print(coverage_feature.size())
                # print(att_features.size())
                att_features = att_features + coverage_feature
            # compute attention score and normalize them
            e = self.tanh(att_features)  # [batch_size*max_input_seq_len, decoder_size]
            scores = self.v(e)  # [batch_size*max_input_seq_len, 1]
        scores = scores.view(-1, max_input_seq_len)  # [batch_size, max_input_seq_len]
        return scores

    def forward(self, decoder_state, memory_bank, src_mask=None, coverage=None):
        """
        :param decoder_state: [batch_size, decoder_size]
        :param memory_bank: [batch_size, max_input_seq_len, self.num_directions * self.encoder_size]
        :param src_mask: [batch_size, max_input_seq_len]
        :param coverage: [batch_size, max_input_seq_len]
        :return: context: [batch_size, self.num_directions * self.encoder_size], attn_dist: [batch_size, max_input_seq_len], coverage: [batch_size, max_input_seq_len]
        """
        # init dimension info
        batch_size, max_input_seq_len, memory_bank_size = list(memory_bank.size())
        # decoder_size = decoder_state.size(1)
        if src_mask is None:  # if it does not supply a source mask, create a dummy mask with all ones
            src_mask = memory_bank.new_ones(batch_size, max_input_seq_len)
        """
        # project memory_bank
        memory_bank = memory_bank.view(-1, memory_bank_size)  # [batch_size*max_input_seq_len, memory_bank_size]
        encoder_feature = self.memory_project(memory_bank)  # [batch_size*max_input_seq_len, decoder size]
        # project decoder state
        dec_feature = self.decode_project(decoder_state)  # [batch_size, decoder_size]
        dec_feature_expanded = dec_feature.unsqueeze(1).expand(batch_size, max_input_seq_len, decoder_size).contiguous()
        dec_feature_expanded = dec_feature_expanded.view(-1, decoder_size)  # [batch_size*max_input_seq_len, decoder_size]
        # sum up attention features
        att_features = encoder_feature + dec_feature_expanded  # [batch_size*max_input_seq_len, decoder_size]
        # Apply coverage
        if self.coverage_attn:
            coverage_input = coverage.view(-1, 1)  # [batch_size*max_input_seq_len, 1]
            coverage_feature = self.coverage_project(coverage_input)  # [batch_size*max_input_seq_len, decoder_size]
            #print(coverage.size())
            #print(coverage_feature.size())
            #print(att_features.size())
            att_features = att_features + coverage_feature
        # compute attention score and normalize them
        e = self.tanh(att_features)  # [batch_size*max_input_seq_len, decoder_size]
        scores = self.v(e)  # [batch_size*max_input_seq_len, 1]
        scores = scores.view(-1, max_input_seq_len)  # [batch_size, max_input_seq_len]
        """
        scores = self.score(memory_bank, decoder_state, coverage)
        attn_dist = self.softmax(scores, mask=src_mask)
        # Compute weighted sum of memory bank features
        attn_dist = attn_dist.unsqueeze(1)  # [batch_size, 1, max_input_seq_len]
        memory_bank = memory_bank.view(-1, max_input_seq_len,
                                       memory_bank_size)  # batch_size, max_input_seq_len, memory_bank_size]
        context = torch.bmm(attn_dist, memory_bank)  # [batch_size, 1, memory_bank_size]
        context = context.squeeze(1)  # [batch_size, memory_bank_size]
        attn_dist = attn_dist.squeeze(1)  # [batch_size, max_input_seq_len]
        # Update coverage: accumulate attention mass over decoding steps
        if self.coverage_attn:
            coverage = coverage.view(-1, max_input_seq_len)
            coverage = coverage + attn_dist
            assert coverage.size() == torch.Size([batch_size, max_input_seq_len])
        assert attn_dist.size() == torch.Size([batch_size, max_input_seq_len])
        assert context.size() == torch.Size([batch_size, memory_bank_size])
        return context, attn_dist, coverage
class TopicAttention(nn.Module):
    """Attention over an encoder memory bank that additionally conditions the
    score on a topic distribution of size *topic_num*; optional coverage.

    attn_mode "concat" uses additive (Bahdanau) scoring; anything else is
    treated as bi-linear ("general") scoring.
    """

    def __init__(self, decoder_size, memory_bank_size, coverage_attn, attn_mode, topic_num):
        super(TopicAttention, self).__init__()
        # attention
        if attn_mode == "concat":
            # v and the decoder projection are only used by additive scoring.
            self.v = nn.Linear(decoder_size, 1, bias=False)
            self.decode_project = nn.Linear(decoder_size, decoder_size)
        self.topic_num = topic_num
        self.memory_project = nn.Linear(memory_bank_size, decoder_size, bias=False)
        self.topic_project = nn.Linear(topic_num, decoder_size, bias=False)
        self.coverage_attn = coverage_attn
        if coverage_attn:
            self.coverage_project = nn.Linear(1, decoder_size, bias=False)
        self.softmax = MaskedSoftmax(dim=1)
        # self.softmax = nn.Softmax(dim=1)
        self.tanh = nn.Tanh()
        self.attn_mode = attn_mode

    def score(self, memory_bank, decoder_state, topic_represent, coverage=None):
        """
        :param memory_bank: [batch_size, max_input_seq_len, self.num_directions * self.encoder_size]
        :param decoder_state: [batch_size, decoder_size]
        :param topic_represent: [batch_size, topic_num]
        :param coverage: [batch_size, max_input_seq_len]
        :return: score: [batch_size, max_input_seq_len]
        """
        batch_size, max_input_seq_len, memory_bank_size = list(memory_bank.size())
        decoder_size = decoder_state.size(1)
        if self.attn_mode == "general":
            # project memory_bank
            memory_bank_ = memory_bank.view(-1,
                                            memory_bank_size)  # [batch_size*max_input_seq_len, memory_bank_size]
            encoder_feature = self.memory_project(memory_bank_)  # [batch_size*max_input_seq_len, decoder size]
            if self.coverage_attn:
                coverage_input = coverage.view(-1, 1)  # [batch_size*max_input_seq_len, 1]
                encoder_feature += self.coverage_project(coverage_input)  # [batch_size*max_input_seq_len, decoder_size]
                # NOTE(review): original indentation was flattened; tanh is
                # reconstructed as coverage-conditional, mirroring the
                # Attention class above -- confirm upstream.
                encoder_feature = self.tanh(encoder_feature)
            # expand decoder state
            decoder_state_expanded = decoder_state.unsqueeze(1).expand(batch_size, max_input_seq_len,
                                                                       decoder_size).contiguous()
            decoder_state_expanded = decoder_state_expanded.view(-1,
                                                                 decoder_size)  # [batch_size*max_input_seq_len, decoder_size]
            # Perform bi-linear operation
            scores = torch.bmm(decoder_state_expanded.unsqueeze(1),
                               encoder_feature.unsqueeze(2))  # [batch_size*max_input_seq_len, 1, 1]
        else:  # Bahdanau style attention
            # project memory_bank
            memory_bank_ = memory_bank.view(-1, memory_bank_size)  # [batch_size*max_input_seq_len, memory_bank_size]
            encoder_feature = self.memory_project(memory_bank_)  # [batch_size*max_input_seq_len, decoder size]
            # project the topic distribution into decoder space
            topic_feature = self.topic_project(topic_represent)  # [batch_size, decoder_size]
            topic_feature_expanded = topic_feature.unsqueeze(1).expand(batch_size, max_input_seq_len,
                                                                       decoder_size).contiguous()
            topic_feature_expanded = topic_feature_expanded.view(-1,
                                                                 decoder_size)  # [batch_size*max_input_seq_len, decoder_size]
            # project decoder state
            dec_feature = self.decode_project(decoder_state)  # [batch_size, decoder_size]
            dec_feature_expanded = dec_feature.unsqueeze(1).expand(batch_size, max_input_seq_len,
                                                                   decoder_size).contiguous()
            dec_feature_expanded = dec_feature_expanded.view(-1,
                                                             decoder_size)  # [batch_size*max_input_seq_len, decoder_size]
            # sum up attention features (encoder + decoder + topic)
            att_features = encoder_feature + dec_feature_expanded + topic_feature_expanded  # [batch_size*max_input_seq_len, decoder_size]
            # Apply coverage
            if self.coverage_attn:
                coverage_input = coverage.view(-1, 1)  # [batch_size*max_input_seq_len, 1]
                coverage_feature = self.coverage_project(coverage_input)  # [batch_size*max_input_seq_len, decoder_size]
                # print(coverage.size())
                # print(coverage_feature.size())
                # print(att_features.size())
                att_features = att_features + coverage_feature
            # compute attention score and normalize them
            e = self.tanh(att_features)  # [batch_size*max_input_seq_len, decoder_size]
            scores = self.v(e)  # [batch_size*max_input_seq_len, 1]
        scores = scores.view(-1, max_input_seq_len)  # [batch_size, max_input_seq_len]
        return scores

    def forward(self, decoder_state, memory_bank, topic_represent, src_mask=None, coverage=None):
        """
        :param decoder_state: [batch_size, decoder_size]
        :param memory_bank: [batch_size, max_input_seq_len, self.num_directions * self.encoder_size]
        :param topic_represent: [batch_size, topic_num]
        :param src_mask: [batch_size, max_input_seq_len]
        :param coverage: [batch_size, max_input_seq_len]
        :return: context: [batch_size, self.num_directions * self.encoder_size], attn_dist: [batch_size, max_input_seq_len], coverage: [batch_size, max_input_seq_len]
        """
        # init dimension info
        batch_size, max_input_seq_len, memory_bank_size = list(memory_bank.size())
        if src_mask is None:  # if it does not supply a source mask, create a dummy mask with all ones
            src_mask = memory_bank.new_ones(batch_size, max_input_seq_len)
        scores = self.score(memory_bank, decoder_state, topic_represent, coverage)
        attn_dist = self.softmax(scores, mask=src_mask)
        # Compute weighted sum of memory bank features
        attn_dist = attn_dist.unsqueeze(1)  # [batch_size, 1, max_input_seq_len]
        memory_bank = memory_bank.view(-1, max_input_seq_len,
                                       memory_bank_size)  # batch_size, max_input_seq_len, memory_bank_size]
        context = torch.bmm(attn_dist, memory_bank)  # [batch_size, 1, memory_bank_size]
        context = context.squeeze(1)  # [batch_size, memory_bank_size]
        attn_dist = attn_dist.squeeze(1)  # [batch_size, max_input_seq_len]
        # Update coverage: accumulate attention mass over decoding steps
        if self.coverage_attn:
            coverage = coverage.view(-1, max_input_seq_len)
            coverage = coverage + attn_dist
            assert coverage.size() == torch.Size([batch_size, max_input_seq_len])
        assert attn_dist.size() == torch.Size([batch_size, max_input_seq_len])
        assert context.size() == torch.Size([batch_size, memory_bank_size])
        return context, attn_dist, coverage
class RNNDecoder(nn.Module):
def __init__(self, vocab_size, embed_size, hidden_size, num_layers, memory_bank_size, coverage_attn, copy_attn,
review_attn, pad_idx, attn_mode, dropout=0.0, use_topic_represent=False, topic_attn=False,
topic_attn_in=False, topic_copy=False, topic_dec=False, topic_num=50):
super(RNNDecoder, self).__init__()
self.use_topic_represent = use_topic_represent
self.topic_attn = topic_attn
self.topic_attn_in = topic_attn_in
self.topic_copy = topic_copy
self.topic_dec = topic_dec
self.topic_num = topic_num
self.embed_size = embed_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.vocab_size = vocab_size
self.memory_bank_size = memory_bank_size
self.dropout = nn.Dropout(dropout)
self.coverage_attn = coverage_attn
self.copy_attn = copy_attn
self.review_attn = review_attn
self.pad_token = pad_idx
self.embedding = nn.Embedding(
self.vocab_size,
self.embed_size,
self.pad_token
)
self.input_size = embed_size
if use_topic_represent:
if topic_dec:
self.input_size = embed_size + topic_num
self.rnn = nn.GRU(input_size=self.input_size, hidden_size=hidden_size, num_layers=num_layers,
bidirectional=False, batch_first=False, dropout=dropout)
if topic_attn_in:
self.attention_layer = TopicAttention(
decoder_size=hidden_size,
memory_bank_size=memory_bank_size,
coverage_attn=coverage_attn,
attn_mode=attn_mode,
topic_num=topic_num
)
else:
self.attention_layer = Attention(
decoder_size=hidden_size,
memory_bank_size=memory_bank_size,
coverage_attn=coverage_attn,
attn_mode=attn_mode
)
if copy_attn:
if topic_copy:
self.p_gen_linear = nn.Linear(embed_size + hidden_size + memory_bank_size + topic_num, 1)
else:
self.p_gen_linear = nn.Linear(embed_size + hidden_size + memory_bank_size, 1)
self.sigmoid = nn.Sigmoid()
if topic_attn:
self.vocab_dist_linear_1 = nn.Linear(hidden_size + memory_bank_size + topic_num, hidden_size)
else:
self.vocab_dist_linear_1 = nn.Linear(hidden_size + memory_bank_size, hidden_size)
self.vocab_dist_linear_2 = nn.Linear(hidden_size, vocab_size)
self.softmax = MaskedSoftmax(dim=1)
def forward(self, y, topic_represent, h, memory_bank, src_mask, max_num_oovs, src_oov, coverage):
    """
    One decoding step: embed the previous token, advance the GRU, attend over the
    encoder memory bank, and produce the (possibly copy-extended) output distribution.

    :param y: [batch_size] previous target token ids
    :param topic_represent: [batch_size, topic_num]; only consulted when self.use_topic_represent
    :param h: [num_layers, batch_size, decoder_size]
    :param memory_bank: [batch_size, max_src_seq_len, memory_bank_size]
    :param src_mask: [batch_size, max_src_seq_len]
    :param max_num_oovs: int
    :param src_oov: [batch_size, max_src_seq_len]
    :param coverage: [batch_size, max_src_seq_len]
    :return: (final_dist, h_next, context, attn_dist, p_gen, coverage);
             p_gen is None when copy attention is disabled
    """
    batch_size, max_src_seq_len = list(src_oov.size())
    assert y.size() == torch.Size([batch_size])
    if self.use_topic_represent:
        assert topic_represent.size() == torch.Size([batch_size, self.topic_num])
    assert h.size() == torch.Size([self.num_layers, batch_size, self.hidden_size])
    # init input embedding
    y_emb = self.embedding(y).unsqueeze(0)  # [1, batch_size, embed_size]
    if self.use_topic_represent and self.topic_dec:
        # Condition the RNN input on the topic vector.
        rnn_input = torch.cat([y_emb, topic_represent.unsqueeze(0)], dim=2)
    else:
        rnn_input = y_emb
    _, h_next = self.rnn(rnn_input, h)
    assert h_next.size() == torch.Size([self.num_layers, batch_size, self.hidden_size])
    last_layer_h_next = h_next[-1, :, :]  # [batch, decoder_size]
    # apply attention, get input-aware context vector, attention distribution and update the coverage vector
    if self.topic_attn_in:
        context, attn_dist, coverage = self.attention_layer(last_layer_h_next, memory_bank, topic_represent,
                                                            src_mask, coverage)
    else:
        context, attn_dist, coverage = self.attention_layer(last_layer_h_next, memory_bank, src_mask, coverage)
    # context: [batch_size, memory_bank_size]
    # attn_dist: [batch_size, max_input_seq_len]
    # coverage: [batch_size, max_input_seq_len]
    assert context.size() == torch.Size([batch_size, self.memory_bank_size])
    assert attn_dist.size() == torch.Size([batch_size, max_src_seq_len])
    if self.coverage_attn:
        assert coverage.size() == torch.Size([batch_size, max_src_seq_len])
    if self.topic_attn:
        vocab_dist_input = torch.cat((context, last_layer_h_next, topic_represent), dim=1)
        # [B, memory_bank_size + decoder_size + topic_num]
    else:
        vocab_dist_input = torch.cat((context, last_layer_h_next), dim=1)
        # [B, memory_bank_size + decoder_size]
    # Project to vocabulary logits; MaskedSoftmax normalizes over dim=1.
    vocab_dist = self.softmax(self.vocab_dist_linear_2(self.dropout(self.vocab_dist_linear_1(vocab_dist_input))))
    p_gen = None
    if self.copy_attn:
        if self.topic_copy:
            p_gen_input = torch.cat((context, last_layer_h_next, y_emb.squeeze(0), topic_represent),
                                    dim=1)  # [B, memory_bank_size + decoder_size + embed_size + topic_num]
        else:
            p_gen_input = torch.cat((context, last_layer_h_next, y_emb.squeeze(0)),
                                    dim=1)  # [B, memory_bank_size + decoder_size + embed_size]
        # p_gen in (0, 1): probability of generating from the vocabulary vs. copying.
        p_gen = self.sigmoid(self.p_gen_linear(p_gen_input))
        vocab_dist_ = p_gen * vocab_dist
        attn_dist_ = (1 - p_gen) * attn_dist
        if max_num_oovs > 0:
            # Extend the vocab distribution with zero slots for source-only OOV words.
            extra_zeros = vocab_dist_.new_zeros((batch_size, max_num_oovs))
            vocab_dist_ = torch.cat((vocab_dist_, extra_zeros), dim=1)
        # Add copy probability mass onto the (extended) vocab ids appearing in the source.
        final_dist = vocab_dist_.scatter_add(1, src_oov, attn_dist_)
        assert final_dist.size() == torch.Size([batch_size, self.vocab_size + max_num_oovs])
    else:
        final_dist = vocab_dist
        assert final_dist.size() == torch.Size([batch_size, self.vocab_size])
    return final_dist, h_next, context, attn_dist, p_gen, coverage
|
992,010 | a2d0e9066784174706f6f3f67bdd7ad7fcafa7e1 | # Created by Justin Lowe
import pyodbc
import csv
# Export aggregated WIP data from the PEWarehouse to a flat CSV file.
connection = pyodbc.connect("Driver={SQL Server Native Client 10.0};"
                            "Server=PETEST,1433;"
                            "Database=PEWarehouse;"
                            "Trusted_Connection=yes;")
# Trusted connection to force AD authentication.
try:
    cursor = connection.cursor()
    try:
        cursor.execute("SELECT FullDate,ClientCode,REPLACE(ClientName,',','') ClientName,REPLACE([Service Category],',','') ServiceCategory,REPLACE([Service Title],',','') ServiceTitle ,REPLACE(RelationshipPartner,',','') RelationshipPartner ,REPLACE(JobStatus,',','') JobStatus,REPLACE(JobName,',','') JobName ,REPLACE(JobManager,',','') JobManager,REPLACE(StaffName,',','') StaffName,BusinessUnit,SUM(Hours) Hours,SUM(Cost) Cost,SUM(Billed) Billed FROM (SELECT FullDate,c.[Service Category],c.[Service Title],[Client Code] ClientCode, [Client Name] ClientName,[Client Group] ClientGroup, a.ClientKey ClientKey,[Job Code] JobCode ,[Job Name] JobName,[Job Status] JobStatus,Partner RelationshipPartner,[Job Partner] JobPartner,[Manager Name] JobManager,[Staff Name] StaffName,case when d.[Partner Office] = 'Flemington' THEN 'FLEM' ELSE 'WISS' END as BusinessUnit,Hours,case when TransTypeIndex IN (1,2) THEN Amount else 0 end Cost,case when TransTypeIndex IN (3,6,8) THEN Amount * -1 else 0 end Billed FROM FactWorkInProgress a INNER JOIN DimDate b ON a.DateKey = b.DateKey INNER JOIN DimService c ON a.ServiceKey = c.ServiceKey INNER JOIN DimClient d ON a.ClientKey = d.ClientKey INNER JOIN DimPeriod e ON a.PeriodKey = e.PeriodKey INNER JOIN DimTransType f ON a.TransTypeKey = f.TransTypeKey INNER JOIN DimJob j ON a.JobKey = j.JobKey INNER JOIN DimManager m ON a.ManagerKey = m.ManagerKey INNER JOIN DimStaff s ON a.StaffKey = s.StaffKey Where FullDate > '2017-01-01') X GROUP BY FullDate,ClientCode,ClientName,[Service Category],[Service Title] ,RelationshipPartner,JobStatus,JobName,JobManager,StaffName,BusinessUnit")
        data = cursor.fetchall()
    finally:
        # Close the cursor even if the query raises (old code leaked it on error).
        cursor.close()
finally:
    connection.close()
# newline='' prevents blank interleaved rows on Windows; writerows replaces the manual loop.
with open("./PEExport.csv", "w", newline='') as fp:
    csv.writer(fp, delimiter=',').writerows(data)
|
992,011 | 2919a119611035c1f410e5b6371055066194d4c4 | import unittest
from homework7852.process_covid import (load_covid_data,
cases_per_population_by_age,
hospital_vs_confirmed,
create_confirmed_plot,
count_high_rain_low_tests_days,
compute_running_average)
# global file path
file = r"C:/Users/Administrator/Desktop/7820/ER-Mi-EV_2020-03-16_2020-04-24.json"
class CovidTest(unittest.TestCase):
    """Tests for the process_covid helper functions."""

    def testLoadData(self):
        """ test function load_covid_data() return dict or not """
        data = load_covid_data(file)
        # assertIsInstance gives a readable failure message (was: assert on type(...).__name__).
        self.assertIsInstance(data, dict)

    def testhospital_vs_confirmed(self):
        """ the expected output even when some values are missing"""
        data = load_covid_data(file)
        aim_day = data['evolution']['2020-03-16']
        # Artificial cut one value , it supposed to be 4 number
        aim_day['epidemiology']['confirmed']['total']['age'] = [10, 11, 12]
        # Any exception propagates and fails the test with its real traceback.
        # (The old `except Exception as e: raise Exception` discarded the cause.)
        cases_per_population_by_age(data)

    def testgenerate_data_plot_confirm(self):
        """create_confirmed_plot should accept the loaded data without raising."""
        data = load_covid_data(file)
        create_confirmed_plot(data, sex=4)

    def testcompute_running_average(self):
        """Running averages over windows of 3 and 4, with None padding at the edges."""
        data = [0, 1, 5, 2, 2, 5]
        self.assertEqual(compute_running_average(data, 3),
                         [None, 2.0, 2.667, 3.0, 3.0, None])
        self.assertEqual(compute_running_average(data, 4),
                         [None, None, None, 2.0, 2.5, 3.5])
|
992,012 | 8630a2b99ef6180526b33211d39c170bde50477a | import numpy as np
import torch
import torch.nn as nn
import torch.utils.data
from torch_model_base import TorchModelBase
import utils
__author__ = "Christopher Potts"
__version__ = "CS224u, Stanford, Spring 2022"
class TorchShallowNeuralClassifier(TorchModelBase):
    # NOTE(review): the shared nn.Tanh() default instance is stateless, so
    # reusing it across constructions is harmless here.
    def __init__(self,
                 hidden_dim=50,
                 hidden_activation=nn.Tanh(),
                 **base_kwargs):
        """
        A model

        h = f(xW_xh + b_h)
        y = softmax(hW_hy + b_y)

        with a cross-entropy loss and f determined by `hidden_activation`.

        Parameters
        ----------
        hidden_dim : int
            Dimensionality of the hidden layer.

        hidden_activation : nn.Module
            The non-linear activation function used by the network for the
            hidden layer.

        **base_kwargs
            For details, see `torch_model_base.py`.

        Attributes
        ----------
        loss: nn.CrossEntropyLoss(reduction="mean")

        self.params: list
            Extends TorchModelBase.params with names for all of the
            arguments for this class to support tuning of these values
            using `sklearn.model_selection` tools.
        """
        self.hidden_dim = hidden_dim
        self.hidden_activation = hidden_activation
        super().__init__(**base_kwargs)
        self.loss = nn.CrossEntropyLoss(reduction="mean")
        # Expose constructor args to sklearn-style hyperparameter search.
        self.params += ['hidden_dim', 'hidden_activation']

    def build_graph(self):
        """
        Define the model's computation graph.

        Relies on `self.input_dim` and `self.n_classes_`, which are set by
        `build_dataset`, so that must run first.

        Returns
        -------
        nn.Module
        """
        return nn.Sequential(
            nn.Linear(self.input_dim, self.hidden_dim),
            self.hidden_activation,
            nn.Linear(self.hidden_dim, self.n_classes_))

    def build_dataset(self, X, y=None):
        """
        Define datasets for the model.

        Parameters
        ----------
        X : iterable of length `n_examples`
            Each element must have the same length.

        y: None or iterable of length `n_examples`

        Attributes
        ----------
        input_dim : int
            Set based on `X.shape[1]` after `X` has been converted to
            `np.array`.

        Returns
        -------
        torch.utils.data.TensorDataset. Where `y=None`, the dataset will
        yield single tensors `X`. Where `y` is specified, it will yield
        `(X, y)` pairs.
        """
        X = np.array(X)
        self.input_dim = X.shape[1]
        X = torch.FloatTensor(X)
        if y is None:
            dataset = torch.utils.data.TensorDataset(X)
        else:
            # Map raw labels to contiguous integer ids, remembering the
            # ordering so `predict` can convert back.
            self.classes_ = sorted(set(y))
            self.n_classes_ = len(self.classes_)
            class2index = dict(zip(self.classes_, range(self.n_classes_)))
            y = [class2index[label] for label in y]
            y = torch.tensor(y)
            dataset = torch.utils.data.TensorDataset(X, y)
        return dataset

    def score(self, X, y, device=None):
        """
        Uses macro-F1 as the score function. Note: this departs from
        `sklearn`, where classifiers use accuracy as their scoring
        function. Using macro-F1 is more consistent with our course.

        This function can be used to evaluate models, but its primary
        use is in cross-validation and hyperparameter tuning.

        Parameters
        ----------
        X: np.array, shape `(n_examples, n_features)`

        y: iterable, shape `len(n_examples)`
            These can be the raw labels. They will converted internally
            as needed. See `build_dataset`.

        device: str or None
            Allows the user to temporarily change the device used
            during prediction. This is useful if predictions require a
            lot of memory and so are better done on the CPU. After
            prediction is done, the model is returned to `self.device`.

        Returns
        -------
        float
        """
        preds = self.predict(X, device=device)
        return utils.safe_macro_f1(y, preds)

    def predict_proba(self, X, device=None):
        """
        Predicted probabilities for the examples in `X`.

        Parameters
        ----------
        X : np.array, shape `(n_examples, n_features)`

        device: str or None
            Allows the user to temporarily change the device used
            during prediction. This is useful if predictions require a
            lot of memory and so are better done on the CPU. After
            prediction is done, the model is returned to `self.device`.

        Returns
        -------
        np.array, shape `(len(X), self.n_classes_)`
            Each row of this matrix will sum to 1.0.
        """
        preds = self._predict(X, device=device)
        # Softmax over the logits; move to CPU before converting to numpy.
        probs = torch.softmax(preds, dim=1).cpu().numpy()
        return probs

    def predict(self, X, device=None):
        """
        Predicted labels for the examples in `X`. These are converted
        from the integers that PyTorch needs back to their original
        values in `self.classes_`.

        Parameters
        ----------
        X : np.array, shape `(n_examples, n_features)`

        device: str or None
            Allows the user to temporarily change the device used
            during prediction. This is useful if predictions require a
            lot of memory and so are better done on the CPU. After
            prediction is done, the model is returned to `self.device`.

        Returns
        -------
        list, length len(X)
        """
        probs = self.predict_proba(X, device=device)
        return [self.classes_[i] for i in probs.argmax(axis=1)]
def simple_example():
    """Assess on the digits dataset."""
    from sklearn.datasets import load_digits
    from sklearn.metrics import accuracy_score, classification_report
    from sklearn.model_selection import train_test_split

    utils.fix_random_seeds()

    digits = load_digits()
    X_train, X_test, y_train, y_test = train_test_split(
        digits.data, digits.target, test_size=0.33, random_state=42)

    model = TorchShallowNeuralClassifier()
    print(model)
    model.fit(X_train, y_train)

    predictions = model.predict(X_test)
    print("\nClassification report:")
    print(classification_report(y_test, predictions))
    return accuracy_score(y_test, predictions)
# Run the digits demo when executed as a script.
if __name__ == '__main__':
    simple_example()
|
992,013 | a946d4fe5c1d33732ba4378c16e8e595132fe63d | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from awscli.testutils import unittest, mock
from awscli.autocomplete import completer, parser
from awscli.autocomplete.local import basic
from awscli.autocomplete.completer import CompletionResult
from tests.unit.autocomplete import InMemoryIndex
class TestAutoCompleter(unittest.TestCase):
    """Verifies AutoCompleter's delegation and short-circuit semantics."""

    def setUp(self):
        self.parser = mock.Mock(spec=parser.CLIParser)
        self.parsed_result = parser.ParsedResult()
        self.parser.parse.return_value = self.parsed_result

    def _make_completer(self, result):
        """Return a BaseCompleter mock whose complete() yields `result`."""
        c = mock.Mock(spec=completer.BaseCompleter)
        c.complete.return_value = result
        return c

    def test_delegates_to_autocompleters(self):
        expected = [
            CompletionResult('ec2', -1),
            CompletionResult('ecs', -1),
        ]
        delegate = self._make_completer(expected)
        auto_complete = completer.AutoCompleter(
            self.parser, completers=[delegate])
        self.assertEqual(auto_complete.autocomplete('aws e'), expected)
        self.parser.parse.assert_called_with('aws e', None)
        delegate.complete.assert_called_with(self.parsed_result)

    def test_stops_processing_when_list_returned(self):
        expected = [
            CompletionResult('ec2', -1),
            CompletionResult('ecs', -1),
        ]
        empty_handed = self._make_completer(None)
        producer = self._make_completer(expected)
        auto_complete = completer.AutoCompleter(
            self.parser, completers=[empty_handed, producer])
        self.assertEqual(auto_complete.autocomplete('aws e'), expected)
        empty_handed.complete.assert_called_with(self.parsed_result)
        producer.complete.assert_called_with(self.parsed_result)

    def test_returns_empty_list_if_no_completers_have_results(self):
        first = self._make_completer(None)
        second = self._make_completer(None)
        auto_complete = completer.AutoCompleter(
            self.parser, completers=[first, second])
        self.assertEqual(auto_complete.autocomplete('aws e'), [])
        first.complete.assert_called_with(self.parsed_result)
        second.complete.assert_called_with(self.parsed_result)

    def test_first_result_wins(self):
        winner = self._make_completer([CompletionResult('ec2', -1)])
        loser = self._make_completer([CompletionResult('ecs', -1)])
        auto_complete = completer.AutoCompleter(
            self.parser, completers=[winner, loser])
        self.assertEqual(
            auto_complete.autocomplete('aws e'),
            [CompletionResult('ec2', -1)]
        )
        winner.complete.assert_called_with(self.parsed_result)
        self.assertFalse(loser.complete.called)
class TestModelIndexCompleter(unittest.TestCase):
    """Tests ModelIndexCompleter against a small in-memory command index."""

    def setUp(self):
        # Minimal index: two services under `aws`, one ec2 subcommand, and a
        # mix of global and command-specific arguments.
        self.index = InMemoryIndex({
            'command_names': {
                '': ['aws'],
                'aws': ['ec2', 'ecs', 's3'],
                'aws.ec2': ['describe-instances'],
            },
            'arg_names': {
                '': {
                    'aws': ['region', 'endpoint-url'],
                },
                'aws.ec2': {
                    'describe-instances': [
                        'instance-ids', 'reserve', 'positional'],
                }
            },
            'arg_data': {
                '': {
                    'aws': {
                        'endpoint-url': ('endpoint-url', 'string',
                                         'aws', '', None, False),
                        'region': ('region', 'string', 'aws', '', None, False),
                    }
                },
                'aws.ec2': {
                    'describe-instances': {
                        'instance-ids': (
                            'instance-ids', 'string',
                            'describe-instances', 'aws.ec2.', None, False),
                        'reserve': (
                            'reserve', 'string',
                            'describe-instances', 'aws.ec2.', None, False),
                        'positional': (
                            'positional', 'string',
                            'describe-instances', 'aws.ec2.', None, True),
                    }
                }
            }
        })
        self.parser = parser.CLIParser(self.index)
        self.completer = basic.ModelIndexCompleter(self.index)

    def test_does_not_complete_if_unparsed_items(self):
        parsed = self.parser.parse('aws foo ')
        self.assertIsNone(self.completer.complete(parsed))

    def test_does_complete_if_current_fragment_is_none(self):
        parsed = self.parser.parse('aws')
        self.assertIsNone(self.completer.complete(parsed))

    def test_can_prefix_match_services(self):
        # FIX: removed a hand-built ParsedResult that was immediately
        # overwritten by the parse() call below (dead code).
        parsed = self.parser.parse('aws e')
        expected = [
            # The -1 is because we need to replace the string starting
            # 1 character back (the last fragment is the string 'e').
            CompletionResult('ec2', starting_index=-1),
            CompletionResult('ecs', starting_index=-1),
        ]
        self.assertEqual(self.completer.complete(parsed), expected)

    def test_returns_all_results_when_current_fragment_empty(self):
        parsed = self.parser.parse('aws ')
        expected = [
            # starting_index=0: nothing typed yet, so nothing is replaced.
            CompletionResult('ec2', starting_index=0),
            CompletionResult('ecs', starting_index=0),
            CompletionResult('s3', starting_index=0),
        ]
        self.assertEqual(self.completer.complete(parsed), expected)

    def test_can_autocomplete_global_param(self):
        parsed = self.parser.parse('aws --re')
        self.assertEqual(
            self.completer.complete(parsed),
            [CompletionResult('--region', -4)]
        )

    def test_can_combine_global_and_command_params(self):
        parsed = self.parser.parse('aws ec2 describe-instances --r')
        self.assertEqual(
            self.completer.complete(parsed),
            [CompletionResult('--reserve', -3),
             CompletionResult('--region', -3)]
        )

    def test_no_autocompletions_if_nothing_matches(self):
        parsed = self.parser.parse('aws --foo')
        self.assertEqual(self.completer.complete(parsed), [])

    def test_no_complete_positional_arguments(self):
        parsed = self.parser.parse('aws ec2 describe-instances --pos')
        self.assertEqual(self.completer.complete(parsed), [])
|
992,014 | 2547d444750950223d15c839502367879d83c27c | #!/usr/bin/env python
from resource_management import *
# server configurations
config = Script.get_config()

# All FreeIPA settings live under one configuration section; look it up once
# instead of repeating the two-level dict access on every line.
_freeipa_config = config['configurations']['freeipa-config']

ds_password = _freeipa_config['freeipa.server.ds.password']
admin_password = _freeipa_config['freeipa.server.admin.password']
master_password = _freeipa_config['freeipa.server.master.password']
server_hostname = _freeipa_config['freeipa.server.hostname']
server_domain = _freeipa_config['freeipa.server.domain']
server_realm = _freeipa_config['freeipa.server.realm']
dns_setup = _freeipa_config['freeipa.server.dns.setup']
dns_forwarder = _freeipa_config['freeipa.server.dns.forwarder']
|
992,015 | 0f3b7aabdcc7b907764bfa2fd1a8136854d29a16 | import unittest
import numpy as np
from day8 import split_to_layers, corruption_check, decode_image
class MyTestCase(unittest.TestCase):
    """Tests for the day8 image-layer helpers."""

    def test_splitter(self):
        """split_to_layers should split 12 pixels into two 3x2 layers."""
        data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2]
        layers = split_to_layers(data, 3, 2)
        self.assertEqual(2, len(layers))
        self.assertEqual(6, len(layers[0]))

    def test_imagedecoder(self):
        """decode_image should resolve transparent pixels to the first opaque layer."""
        data = '0222112222120000'
        expected = np.array(list(' ## ')).reshape((2, 2))
        # assertTrue + np.array_equal replaces the opaque assertEqual(True, ...)
        # idiom and reports a sensible failure message.
        self.assertTrue(np.array_equal(expected, decode_image(data, 2, 2)[1]))
# Allow running this test module directly with `python <file>.py`.
if __name__ == '__main__':
    unittest.main()
|
992,016 | 0679311d0218ac077d0a57702c288dbc32c77b98 | import torch
import random
import cv2
import numpy as np
import matplotlib.pyplot as plt
from collections import namedtuple, deque
class ReplayMemory(object):
    """Fixed-size ring buffer of (state, action, reward, next_state, done) transitions."""

    def __init__(self, max_size, batch_size, seed, device):
        '''Initialise a Replay Buffer
        max_size: max size of buffer
        batch_size: size of each training batch
        seed: random seed
        device: GPU or CPU
        '''
        self.buffer = deque(maxlen=max_size)
        self.batch_size = batch_size
        self.transition = namedtuple('Transition', field_names=('st', 'act', 'r', 'n_s', 'd'))
        self.seed = random.seed(seed)
        self.device = device
        # One converter per transition field, in field order:
        # states/rewards/next_states -> float32, actions -> int64,
        # done flags -> uint8 -> float32. All tensors are moved to self.device.
        self.orderfunc = [lambda x: torch.from_numpy(np.array(x)).float().to(self.device),
                          lambda x: torch.from_numpy(np.array(x)).long().to(self.device),
                          lambda x: torch.from_numpy(np.array(x)).float().to(self.device),
                          lambda x: torch.from_numpy(np.array(x)).float().to(self.device),
                          lambda x: torch.from_numpy(np.array(x).astype(np.uint8)).float().to(self.device)]

    def add(self, state, action, reward, next_state, done):
        """Add a new experience to memory."""
        e = self.transition(state, action, reward, next_state, done)
        self.buffer.append(e)

    def load(self, state):
        '''Loads memory from prior training.

        `state` is a (buffer, batch_size, seed) triple as produced by a
        previous run -- the format is trusted, not validated.
        '''
        b, bat, seed = state
        self.buffer = b
        self.batch_size = bat
        self.seed = seed

    def sample(self):
        '''Randomly sample a batch of experiences from memory.

        Returns a tuple (states, actions, rewards, next_states, dones) of
        tensors on self.device. Returning a tuple (instead of the previous
        one-shot generator) keeps the result re-iterable; unpacking by
        callers works exactly as before.
        '''
        exp = random.sample(self.buffer, k=self.batch_size)
        # zip(*exp) transposes the list of transitions into per-field tuples;
        # each field is converted by its dedicated converter.
        return tuple(self.orderfunc[i](v) for i, v in enumerate(zip(*exp)))

    def __len__(self):
        '''Return the current size of memory'''
        return len(self.buffer)
def preprocess_frame(state, output):
    '''Convert an RGB frame to a normalised [0, 1] grayscale frame.

    The frame is resized to `output` (a (width, height) pair) and transposed
    before being returned.
    '''
    gray = cv2.cvtColor(state, cv2.COLOR_RGB2GRAY)
    normalised = np.ascontiguousarray(gray, dtype=np.float32) / 255
    return cv2.resize(normalised, output).T
def stack_frame(stacked_frames, frame, is_new):
    """Maintain a rolling stack of the four most recent frames.

    Params
    ======
        stacked_frames (array): four-channel stacked frame (ignored when is_new)
        frame: preprocessed frame to append
        is_new: True when starting a fresh episode
    """
    if is_new:
        # Seed the stack by repeating the first frame four times.
        return np.stack([frame, frame, frame, frame])
    # Shift everything one slot toward the front, then append the new frame.
    stacked_frames[:-1] = stacked_frames[1:]
    stacked_frames[-1] = frame
    return stacked_frames
def load_obj(path,device):
    '''Load a torch-serialized object from `path`, mapping tensors onto `device`.'''
    return torch.load(path, map_location=device)
def display_preprocessed(env,frame):
    """Plot the preprocessed first frame of `env`.

    Note: `frame` is unused; the parameter is kept for backward compatibility
    with existing callers.
    """
    state = env.reset()
    #Plot the figure
    plt.figure()
    # FIX: preprocess_frame takes (state, output); the old call passed
    # (state, (0, 0, 0, 0), 84), which raised TypeError. 84x84 matches the
    # common frame-stacking pipeline -- TODO confirm the intended size.
    plt.imshow(preprocess_frame(state, (84, 84)), cmap="gray")
    #Add title
    plt.title('Pre Processed image')
    #Show the plot
    plt.show()
def get_filename() -> str:
    """Prompt the user for a save filename, ensuring a .pth extension."""
    # The original prompt was an f-string with no placeholders; the plain
    # literal below produces the identical prompt text.
    filename = input('Please input the filename to save: ')
    # endswith is clearer than slicing and also handles names shorter
    # than four characters correctly.
    if not filename.endswith('.pth'):
        filename += '.pth'
    return filename
# Smoke test: fill a CUDA-backed buffer with dummy transitions and sample once.
if __name__ == '__main__':
    mem = ReplayMemory(1000, 4, 123, 'cuda')
    for _ in range(1000):
        mem.add(1,2,3,4,5)
mem.sample() |
992,017 | fda664c7e505d8362fbc9908f3e38a9fa1d2651a | # Generated by Django 2.1.2 on 2018-10-29 07:31
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: re-declares HouseDetails.contact_no with a
    RegexValidator (optional '+', optional '1', then 6-10 digits).

    NOTE(review): the validator's user-facing message says "Up to 10 digits"
    while the regex requires at least 6 -- confirm which is intended before
    editing either (the message string is runtime behavior).
    """

    dependencies = [
        ('accounts', '0003_auto_20181029_0729'),
    ]

    operations = [
        migrations.AlterField(
            model_name='housedetails',
            name='contact_no',
            field=models.CharField(max_length=10, validators=[django.core.validators.RegexValidator(message="Phone number must be entered in the format: '+999999999'. Up to 10 digits allowed.", regex='^\\+?1?\\d{6,10}$')]),
        ),
    ]
|
992,018 | cfd2ff89c8cfd0db3f419c8529b8091721f1acc6 | '''import urllib.request
from bs4 import BeautifulSoup
import re
import time
import random
class CommanClass:
def __init__(self):
self.header = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
}
self.testurl = "www.baidu.com"
def getresponse(self,url):
req= urllib.request.Request(url,headers=self.header)
resp = urllib.request.urlopen(req,timeout=5)
content = resp.read()
return content
def _is_alive(self,proxy):
try:
resp =0
for i in range(3):
proxy_support = urllib.request.ProxyHandler({"http":proxy})
opener = urllib.request.build_opener(proxy_support)
urllib.request.install_opener(opener)
req = urllib.request.Request(self.url,headers=self.header)
resp = urllib.request.urlopen(req,timeout=5)
if resp == 200:
return True
except:
return False
class ProxyPool:
def __init__(self,proxy_finder):
self.pool = []
self.proxy_finder = proxy_finder
self.cominstan =CommanClass()
def get_proxies(self):
self.pool=self.proxy_finder.find()
for p in self.pool:
if self.cominstan._is_alive(p):
continue
else:
return self.remove(p)
def get_one_proxy(self):
return random.choice(self.pool)
def writeToTxt(self,file_path):
try:
fp = open(file_path,"w+")
for item in self.pool:
fp.write(str(item)+"\n")
fp.close()
except IOError :
print("fail to open file")
#获取代理方法
#定义一个基类
class IProxyFinder:
def __init__(self):
self.pool = []
def find(self):
return
#西刺代理爬取
class XiciProxyFinder(IProxyFinder):
def __init__(self,url):
super(XiciProxyFinder, self).__init__()
self.url = url
self.cominstan = CommanClass()
def find(self):
for i in range(1,10):
content = self.cominstan.getresponse((self.url+str(i)))
soup = BeautifulSoup(content,'lxml')
ips = soup.findAll('tr')
for x in range(2,len(ips)):
ip = ips[x]
tds = ip.findAll("td")
if tds == []:
continue
ip_temp = tds[1].contents[0]+":"+tds[2].contents[0]
self.pool.append(ip_temp)
time.sleep(1)
return self.pool
if __name__ =='__main__':
finder = XiciProxyFinder("http://xicidaili.com/wn/")
ppool_instance = ProxyPool(finder)
ppool_instance.get_proxies()
ppool_instance.writeToTxt("proxy.txt")
'''
import requests
import chardet
import random
import time
from bs4 import BeautifulSoup
from telnetlib import Telnet
import progressbar
user_agent = [
"Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
"Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
"Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
"Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52"
]
def getHtmlWithHeader(url, _attempt=0):
    """Fetch `url` with a random User-Agent, retrying on failure.

    Returns the decoded response body, or None after too many failures.
    `_attempt` is internal retry state; the public call shape
    `getHtmlWithHeader(url)` is unchanged.
    """
    try:
        # Fetch the page with a randomly chosen User-Agent header.
        response = requests.get(
            url,
            headers={"User-Agent": random.choice(user_agent)}
        )
        # Detect the real encoding and apply it before reading .text.
        code = chardet.detect(response.content)["encoding"]
        response.encoding = code
        return response.text
    except Exception:
        # FIX: the old code did `global _times_count` on a counter that was
        # never initialised at module level, so the first failure raised
        # NameError instead of retrying. The retry count is now threaded
        # through the recursive call. The bare `except:` is also narrowed
        # to Exception so Ctrl-C still works.
        time.sleep(1)
        if _attempt >= 5:
            print("ip获取失败,请稍后重试")
            return
        print("第", _attempt + 1, "次尝试抓取")
        return getHtmlWithHeader(url, _attempt + 1)
def getIP(num):
    """Scrape `num` pages of www.xicidaili.com/nn for proxies.

    Returns a list of (ip, port, anonymity, type, survival_time) tuples.
    """
    # accumulate result rows across all pages
    datalist = []
    # one request per listing page (pages are 1-based)
    for num1 in range(num):
        url = 'http://www.xicidaili.com/nn/' + str(num1 + 1)
        # fetch the page body (may be None on repeated failure)
        html = getHtmlWithHeader(url)
        soup = BeautifulSoup(html, 'html.parser')
        parent = soup.find(id="ip_list")
        lis = parent.find_all('tr')
        # drop the table header row
        lis.pop(0)
        print("爬取ip地址及相关信息")
        for i in lis:
            # column layout of the site's table: 1=ip, 2=port,
            # 4=anonymity, 5=type, 8=survival time
            ip = i.find_all('td')[1].get_text()
            dk = i.find_all('td')[2].get_text()
            nm = i.find_all('td')[4].get_text()
            ty = i.find_all('td')[5].get_text()
            tm = i.find_all('td')[8].get_text()
            datalist.append((ip, dk, nm, ty, tm))
    print("共爬取到", len(datalist), "条数据\n")
    # return all scraped rows
    return datalist
def filtrateIP(datalist):
    """Drop short-lived proxies, then keep only those accepting TCP connects.

    Mutates and returns `datalist` (cleared and refilled), matching the
    original in-place behavior.
    """
    datalist1 = []
    # Drop entries whose survival time is measured in minutes ("分钟").
    print('过滤存活时间短的\n')
    for i in datalist:
        if "分钟" not in i[4]:
            datalist1.append(i)
    print("共过滤掉", len(datalist) - len(datalist1), "条生存时间短的数据")
    print("还剩", len(datalist1), "条数据\n")
    # Probe each survivor with a 1-second telnet connect; keep the reachable ones.
    print('测试不可用的ip并将其过滤')
    datalist.clear()
    v = 1
    p = progressbar.ProgressBar()
    for i in p(datalist1):
        v += 1
        try:
            Telnet(i[0], i[1], timeout=1)
        # FIX: only swallow network-level failures. The old bare `except:`
        # also caught KeyboardInterrupt, making this slow loop unabortable.
        except OSError:
            pass
        else:
            datalist.append(i)
    print('过滤不可用的ip')
    print("共过滤掉", len(datalist1) - len(datalist), "条不可用数据")
    print("还剩", len(datalist), "条数据")
    return datalist
def saveIP(datalist):
    """Split proxies into http/https URL lists, echo them, and write to disk.

    `datalist` holds (ip, port, anonymity, type, survival_time) tuples.
    """
    # Categorize by protocol.
    httplist = []
    httpslist = []
    for i in datalist:
        if i[3] == 'HTTP':
            httplist.append('http://' + i[0] + ':' + i[1])
        else:
            httpslist.append('https://' + i[0] + ':' + i[1])
    # Echo the results to the screen.
    print("HTTP共" + str(len(httplist)) + "条数据")
    print(httplist)
    print("")
    print("HTTPS共" + str(len(httpslist)) + "条数据")
    print(httpslist)
    print("")
    print("写入文件")
    # `with` guarantees the file handle is closed even if a write fails
    # (the old open/close pair leaked the handle on error).
    with open('ip地址2.txt', 'w', encoding="utf-8") as f:
        f.write("HTTP\n")
        f.write(str(httplist) + "\n\n")
        f.write("HTTPS\n")
        f.write(str(httpslist))
# num: number of listing pages to crawl
def main(num):
    """Crawl `num` pages of proxies and persist them to disk."""
    # FIX: the page count was hard-coded to 50, silently ignoring `num`
    # (so `main(1)` below actually crawled 50 pages).
    datalist = getIP(num)
    # IPlist = filtrateIP(datalist)  # optional liveness filtering, disabled by the author
    saveIP(datalist)


if __name__ == '__main__':
    main(1)
|
992,019 | 7e4a8902e3668ed1701112ccb587a168456058b9 | # -*- coding: utf-8 -*-
"""Implements stereo camera calibration and rectify/undistort with a pair of CalibratedCamera objects.
"""
import cv2
import numpy as np
from .base import *
from .calibratedcamera import PinholeCamera
from EasyVision.vision import PyroCapture
import threading as mt
class StereoCamera(namedtuple('StereoCamera', 'left right R T E F Q')):
    """Stereo camera model that contains two pinhole cameras and transformation matrices between them.

    Has these properties:
        left - left camera intrinsics
        right - right camera intrinsics
        R - rotation matrix
        T - translation vector
        E - essential matrix
        F - fundamental matrix
        Q - disparity matrix
    """

    def __new__(cls, left, right, R, T, E, F, Q):
        # Validate camera types and matching frame sizes before construction.
        if not isinstance(left, PinholeCamera):
            raise ValueError("Left camera must be PinholeCamera")
        if not isinstance(right, PinholeCamera):
            raise ValueError("Right camera must be PinholeCamera")
        if left.size != right.size:
            raise ValueError("Left and Right camera width/height must match")
        # Coerce plain sequences to float64 arrays; existing ndarrays and
        # None values pass through unchanged.
        R = np.float64(R) if not isinstance(R, np.ndarray) and R is not None else R
        T = np.float64(T) if not isinstance(T, np.ndarray) and T is not None else T
        E = np.float64(E) if not isinstance(E, np.ndarray) and E is not None else E
        F = np.float64(F) if not isinstance(F, np.ndarray) and F is not None else F
        Q = np.float64(Q) if not isinstance(Q, np.ndarray) and Q is not None else Q
        return super(StereoCamera, cls).__new__(cls, left, right, R, T, E, F, Q)

    @staticmethod
    def fromdict(as_dict):
        """Creates StereoCamera from a dict.

        NOTE(review): pops 'left'/'right' from `as_dict`, so the caller's
        dict is mutated; the remaining keys must match the R/T/E/F/Q fields.
        """
        left = PinholeCamera.fromdict(as_dict.pop('left'))
        right = PinholeCamera.fromdict(as_dict.pop('right'))
        return StereoCamera(left, right, **as_dict)

    def todict(self):
        """Converts StereoCamera into a dict (matrices become nested lists)."""
        d = {
            "left": self.left.todict(),
            "right": self.right.todict(),
            "R": self.R.tolist(),
            "T": self.T.tolist(),
            "E": self.E.tolist(),
            "F": self.F.tolist(),
            "Q": self.Q.tolist(),
        }
        return d

    @staticmethod
    def from_parameters(size, M1, d1, R1, P1, M2, d2, R2, P2, R, T, E, F, Q):
        """Creates StereoCamera from parameters

        :param size: Frame size tuple (width, height)
        :param M1: Left camera matrix
        :param d1: Left camera distortion coefficients
        :param R1: Left camera rectification matrix
        :param P1: Left camera projection matrix
        :param M2: Right camera matrix
        :param d2: Right camera distortion coefficients
        :param R2: Right camera rectification matrix
        :param P2: Right camera Projection matrix
        :param R: Right camera rotation matrix
        :param T: Right camera translation vector
        :param E: Essential matrix
        :param F: Fundamental matrix
        :param Q: Disparity matrix
        :return: StereoCamera
        """
        return StereoCamera(
            PinholeCamera(size, M1, d1, R1, P1),
            PinholeCamera(size, M2, d2, R2, P2),
            R, T, E, F, Q)
class CaptureThread(mt.Thread):
    """Worker thread that captures frames from a single vision source.

    Protocol: the owner calls ``capture_prepare`` on both threads (so both
    cameras grab near-simultaneously), then ``capture_finish`` on each to
    collect the frames. Two events coordinate the handshake:

        _capture - set by the owner to request a grab
        _ready   - set by the thread when ``self.frame`` holds the result

    TODO: Synchronization (frames are not hardware-synchronized).
    """
    def __init__(self, vision):
        super(CaptureThread, self).__init__()
        self._vision = vision
        self._running = False
        self._capture = mt.Event()
        self._ready = mt.Event()
        self.frame = None
        self._ready.clear()
        self._capture.clear()
    def capture_prepare(self):
        """Request the next frame (non-blocking)."""
        self._ready.clear()
        self._capture.set()
    def capture_finish(self):
        """Wait for the requested frame and return it (None if not running)."""
        if not self._running:
            return None
        self._ready.wait()
        return self.frame
    def __getattr__(self, name):
        # Delegate unknown attributes to the wrapped vision object.
        # Guard: if '_vision' itself is missing (e.g. during base-class
        # __init__ before it is assigned), raise instead of recursing forever.
        if name == '_vision':
            raise AttributeError(name)
        return getattr(self._vision, name)
    def run(self):
        self._running = True
        try:
            self._ready.set()
            while True:
                if self._capture.wait():
                    if not self._running:
                        break
                    self.frame = self._vision.capture()
                    self._capture.clear()
                    self._ready.set()
        finally:
            # Always leave waiters unblocked and the frame invalidated,
            # whether we exited cleanly or via an exception.
            # (The original wrapped this in a no-op ``except: raise``.)
            self._running = False
            self.frame = None
            self._ready.set()
class CameraPairProxy(VisionBase):
    """Capturing proxy that drives a left/right ``CaptureThread`` pair.

    ``capture`` triggers both threads first, then waits for both frames and
    returns the left frame with the right frame's images appended, so
    downstream processors see one frame containing both views.
    """
    def __init__(self, _self, left, right):
        # _self: the owning CalibratedStereoCamera (kept only as a backref).
        self._left = CaptureThread(left)
        self._right = CaptureThread(right)
        self._self = _self
        super(CameraPairProxy, self).__init__()
    def setup(self):
        # Set up both underlying visions, start the worker threads and wait
        # until each signals readiness.
        self._left._vision.setup()
        self._right._vision.setup()
        self._left.start()
        self._right.start()
        # NOTE(review): Event.wait() without a timeout never returns False,
        # so these TimeoutError branches are unreachable as written.
        if not self._left._ready.wait():
            raise TimeoutError()
        if not self._right._ready.wait():
            raise TimeoutError()
        super(CameraPairProxy, self).setup()
    def release(self):
        # Stop both threads (wake them via the capture event), join them,
        # then release the underlying visions.
        self._left._running = False
        self._right._running = False
        self._left._capture.set()
        self._right._capture.set()
        self._left.join()
        self._right.join()
        self._left._vision.release()
        self._right._vision.release()
        super(CameraPairProxy, self).release()
    def capture(self):
        super(CameraPairProxy, self).capture()
        # Trigger both grabs before waiting on either, so the two cameras
        # capture as close together in time as possible.
        self._left.capture_prepare()
        self._right.capture_prepare()
        left, right = self._left.capture_finish(), self._right.capture_finish()
        if left is None or right is None:
            return None
        # Merge: the left frame carries the image tuples of both cameras.
        return left._replace(images=left.images + right.images)
    def get_source(self, source):
        return self._left.get_source(source), self._right.get_source(source)
    def __getattr__(self, name):
        # Unknown attributes resolve to a (left, right) pair of values.
        return getattr(self._left, name), getattr(self._right, name)
    @property
    def is_open(self):
        return self._left.is_open and self._right.is_open
    @property
    def frame_size(self):
        # Left and right sizes are validated to match elsewhere.
        return self._left.frame_size
    @property
    def fps(self):
        return self._left.fps
    @property
    def name(self):
        return "({} : {})".format(self._left._vision.name, self._right._vision.name)
    @property
    def frame_count(self):
        return self._left.frame_count
    @property
    def path(self):
        return "{} : {}".format(self._left.path, self._right.path)
    @property
    def description(self):
        return "Stereo Pair Vision Proxy"
    @property
    def devices(self):
        return self._left.devices
    # --- camera-control properties: read as (left, right) tuples, ---
    # --- written to both cameras at once.                          ---
    @property
    def autoexposure(self):
        return self._left.autoexposure, self._right.autoexposure
    @property
    def autofocus(self):
        return self._left.autofocus, self._right.autofocus
    @property
    def autowhitebalance(self):
        return self._left.autowhitebalance, self._right.autowhitebalance
    @property
    def autogain(self):
        return self._left.autogain, self._right.autogain
    @property
    def exposure(self):
        return self._left.exposure, self._right.exposure
    @property
    def focus(self):
        return self._left.focus, self._right.focus
    @property
    def whitebalance(self):
        return self._left.whitebalance, self._right.whitebalance
    @property
    def gain(self):
        return self._left.gain, self._right.gain
    @autoexposure.setter
    def autoexposure(self, value):
        self._left.autoexposure = value
        self._right.autoexposure = value
    @autofocus.setter
    def autofocus(self, value):
        self._left.autofocus = value
        self._right.autofocus = value
    @autowhitebalance.setter
    def autowhitebalance(self, value):
        self._left.autowhitebalance = value
        self._right.autowhitebalance = value
    @autogain.setter
    def autogain(self, value):
        self._left.autogain = value
        self._right.autogain = value
    @exposure.setter
    def exposure(self, value):
        self._left.exposure = value
        self._right.exposure = value
    @focus.setter
    def focus(self, value):
        self._left.focus = value
        self._right.focus = value
    @whitebalance.setter
    def whitebalance(self, value):
        self._left.whitebalance = value
        self._right.whitebalance = value
    @gain.setter
    def gain(self, value):
        self._left.gain = value
        self._right.gain = value
class CalibratedStereoCamera(ProcessorBase):
    """Implements calibrated stereo camera calibration, rectification/undistort in conjuction with CalibratedCamera"""
    def __init__(self, left, right, camera=None, calculate_disparity=False, num_disparities=255, block_size=15,
                 grid_shape=(9, 6), square_size=20, max_samples=20, frame_delay=1, *args, **kwargs):
        """CalibratedStereoCamera instance initialization

        :param left: Left camera capturing source
        :param right: Right camera capturing source
        :param camera: StereoCamera object (None switches to calibration mode)
        :param calculate_disparity: flag indicating whether to calculate disparity map from stereo
        :param num_disparities: Disparity map calculation parameter
        :param block_size: Disparity map calculation parameter
        :param grid_shape: Calibration grid shape
        :param square_size: Calibration grid element size e.g. in mm.
        :param max_samples: number of samples to capture for calibration
        :param frame_delay: minimum seconds between accepted calibration samples
        """
        # Calibration mode is implied by the absence of a camera model.
        calibrate = camera is None
        if (not isinstance(left, ProcessorBase) and not isinstance(left, PyroCapture))or \
           (not isinstance(right, ProcessorBase) and not isinstance(right, PyroCapture)) or \
           left.get_source('CalibratedCamera') is None or right.get_source('CalibratedCamera') is None:
            raise TypeError("Left/Right must have CalibratedCamera")
        if not calibrate:
            if not isinstance(camera, StereoCamera) and not (isinstance(camera, tuple) and len(camera) == 6):
                raise TypeError("Camera must be either StereoCamera or tuple with (frame_size, camera_matrix, distortion)")
            self._camera = camera
            # Push the per-eye intrinsics down to each CalibratedCamera;
            # remote (Pyro) sources are configured via remote_set instead.
            if not isinstance(left, PyroCapture):
                left.get_source('CalibratedCamera').camera = camera.left
            else:
                left.remote_set('camera', camera.left)
            if not isinstance(right, PyroCapture):
                right.get_source('CalibratedCamera').camera = camera.right
            else:
                right.remote_set('camera', camera.right)
            if left._calibrate or right._calibrate:
                raise ValueError("Left and Right cameras must NOT be set to calibrate mode")
        else:
            if not left._calibrate or not right._calibrate:
                raise ValueError("Left and Right cameras must be set to calibrate mode")
            # Feature extraction would interfere with chessboard detection;
            # NOTE(review): only left is checked for a FeatureExtraction
            # source, but both are disabled inside the branch — confirm
            # whether right should be checked independently.
            if left.get_source('FeatureExtraction'):
                if not isinstance(left, PyroCapture):
                    left.get_source('FeatureExtraction').enabled = False
                else:
                    left.remote_set('enabled', False)
                if not isinstance(right, PyroCapture):
                    right.get_source('FeatureExtraction').enabled = False
                else:
                    right.remote_set('enabled', False)
            # Propagate the chessboard geometry to both capture chains.
            if not isinstance(left, PyroCapture):
                left._grid_shape = grid_shape
                left._square_size = square_size
            else:
                left.remote_set('_grid_shape', grid_shape)
                left.remote_set('_square_size', square_size)
            if not isinstance(right, PyroCapture):
                right._grid_shape = grid_shape
                right._square_size = square_size
            else:
                right.remote_set('_grid_shape', grid_shape)
                right.remote_set('_square_size', square_size)
        self._frame_delay = frame_delay
        self._grid_shape = grid_shape
        self._square_size = square_size
        # NOTE(review): this unconditionally resets _camera, clobbering the
        # camera assigned above when a camera model WAS supplied — the
        # ``camera`` property will return None until reassigned. Looks like
        # a bug; verify intent.
        self._camera = None
        self._stereocalib_criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS, 100, 1e-5)
        # Flags for cv2.stereoCalibrate: keep intrinsics mostly fixed and
        # use a simple 2-coefficient distortion model.
        self._flags = 0
        #self.flags |= cv2.CALIB_FIX_INTRINSIC
        self._flags |= cv2.CALIB_FIX_PRINCIPAL_POINT
        #self.flags |= cv2.CALIB_USE_INTRINSIC_GUESS
        self._flags |= cv2.CALIB_FIX_FOCAL_LENGTH
        self._flags |= cv2.CALIB_FIX_ASPECT_RATIO
        self._flags |= cv2.CALIB_ZERO_TANGENT_DIST
        # self.flags |= cv2.CALIB_RATIONAL_MODEL
        self._flags |= cv2.CALIB_SAME_FOCAL_LENGTH
        self._flags |= cv2.CALIB_FIX_K3
        self._flags |= cv2.CALIB_FIX_K4
        self._flags |= cv2.CALIB_FIX_K5
        self._max_samples = max_samples
        self._last_timestamp = None
        # Wrap the two sources in a synchronized capture proxy.
        vision = CameraPairProxy(self, left, right)
        self._calibrate = calibrate
        self._calculate_disparity = calculate_disparity
        self._num_disparities = num_disparities
        self._block_size = block_size
        super(CalibratedStereoCamera, self).__init__(vision, *args, **kwargs)
    def setup(self):
        if self._calibrate:
            # Object points: one 3D grid of chessboard corners (z = 0),
            # scaled by the physical square size; reused for every sample.
            self.objp = np.zeros((np.prod(self._grid_shape), 3), np.float32)
            self.objp[:, :2] = np.indices(self._grid_shape).T.reshape(-1, 2)
            self.objp *= self._square_size
            self.objpoints = []
            self.imgpoints_l = []
            self.imgpoints_r = []
            self.calibration_samples = 0
        if self._calculate_disparity:
            self._stereoBM = cv2.StereoBM_create(self._num_disparities, self._block_size)
        super(CalibratedStereoCamera, self).setup()
    @property
    def description(self):
        return "Stereo Camera rectify processor"
    @property
    def camera(self):
        return self._camera
    @camera.setter
    def camera(self, value):
        # Setting the stereo model also pushes per-eye intrinsics down.
        if not isinstance(value, StereoCamera):
            raise TypeError("Must be StereoCamera")
        self._camera = value
        self.source._left.camera = value.left
        self.source._right.camera = value.right
    def capture(self):
        """Capture a stereo frame; optionally append a disparity image."""
        frame = super(CalibratedStereoCamera, self).capture()
        if frame and self._calculate_disparity and not self._calibrate:
            # StereoBM needs grayscale; fall back to the raw images if they
            # are already single-channel (cvtColor raises cv2.error then).
            try:
                left = cv2.cvtColor(frame.images[0].image, cv2.COLOR_BGR2GRAY)
                right = cv2.cvtColor(frame.images[1].image, cv2.COLOR_BGR2GRAY)
            except cv2.error:
                left = frame.images[0].image
                right = frame.images[1].image
            disparity = self._stereoBM.compute(left, right)
            if self.display_results:
                disp = cv2.normalize(disparity, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
                cv2.imshow("Disparity", disp)
            img = Image(self, disparity)
            # processor_mask "110": downstream processors handle the two
            # camera images but skip the appended disparity image.
            frame = frame._replace(images=frame.images + (img,), processor_mask="110")
        return frame
    def process(self, image):
        if self._calibrate:
            # Draw and display the corners
            ret, corners = image.features
            if self.display_results:
                img = cv2.drawChessboardCorners(image.image, self._grid_shape, corners, ret)
                cv2.putText(img, "Samples added: {}/{}".format(self.calibration_samples, self._max_samples),
                            (20, 11), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 1, 8)
                cv2.imshow("Left" if image.source is self._vision._left._vision else "Right", img)
        else:
            # TODO: rectified images
            img = image.image
            if self.display_results:
                cv2.imshow("Left" if image.source is self._vision._left._vision else "Right", img)
                # NOTE(review): leftover debug print — consider removing.
                print (image.source, self._vision._left._vision)
        return image
    def calibrate(self):
        """Use this method for calibration instead of ``capture``. See ``CalibratedCamera`` as the usage is the same."""
        if not self._calibrate:
            raise ValueError("calibrate parameter must be set")
        if self.calibration_samples >= self._max_samples:
            return self._camera
        frame = self.capture()
        left = frame.images[0]
        right = frame.images[1]
        ret_l, corners_l = left.features
        ret_r, corners_r = right.features
        if self._last_timestamp is None:
            self._last_timestamp = frame.timestamp
        # Accept a sample only when BOTH chessboards were found and enough
        # time has passed since the previous accepted sample.
        if ret_l is True and ret_r is True and (frame.timestamp - self._last_timestamp).total_seconds() > self._frame_delay:
            self.objpoints.append(self.objp)
            self.imgpoints_l.append(corners_l)
            self.imgpoints_r.append(corners_r)
            self.calibration_samples += 1
            self._last_timestamp = frame.timestamp
        if self.calibration_samples >= self._max_samples:
            img_shape = left.image.shape[::-1]
            self._camera = self._finish_calibration(self.objpoints, self.imgpoints_l, self.imgpoints_r, img_shape)
        return self._camera
    def _finish_calibration(self, objpoints, imgpoints_l, imgpoints_r, shape):
        """Helper method that is factored out in the same spirit as in ``CalibratedCamera``"""
        # Calibrate each eye first, then refine the pair jointly.
        left_camera = self.source._left._finish_calibration(objpoints, imgpoints_l, shape)
        right_camera = self.source._right._finish_calibration(objpoints, imgpoints_r, shape)
        ret, M1, d1, M2, d2, R, T, E, F = cv2.stereoCalibrate(
            objpoints,
            imgpoints_l, imgpoints_r,
            left_camera.matrix, left_camera.distortion,
            right_camera.matrix, right_camera.distortion,
            shape,
            criteria=self._stereocalib_criteria, flags=self._flags)
        # Rectification: R1/R2 are rotations, P1/P2 projections, Q maps
        # disparity to depth.
        R1, R2, P1, P2, Q, vb1, vb2 = cv2.stereoRectify(
            M1,
            d1,
            M2,
            d2,
            shape,
            R,
            T,
            flags=cv2.CALIB_ZERO_DISPARITY)
        left_camera = PinholeCamera(shape, M1, d1, R1, P1)
        right_camera = PinholeCamera(shape, M2, d2, R2, P2)
        return StereoCamera(left_camera, right_camera, R, T, E, F, Q)
|
992,020 | 8fcdbdc374e9c11e080432f0fc801becd35693aa | import numpy as np
def logmae(y_pred, y_true):
    """Log of the maximum absolute error between predictions and targets.

    The error is floored at 1e-9 before the log so that a perfect
    prediction yields log(1e-9) rather than log(0) = -inf.

    Bug fix: the original called ``np.max(errors, 1e-9)``, which passes
    1e-9 as the *axis* argument and fails; the intended floor is the
    elementwise ``np.maximum``.
    NOTE(review): if "mae" was meant as *mean* absolute error, replace
    ``np.max`` below with ``np.mean``.
    """
    worst = np.max(np.abs(y_true - y_pred))
    return np.log(np.maximum(worst, 1e-9))
|
992,021 | 5997d4b6b0c5f92bf36221a686fe721e264d0953 | #!/usr/bin/env python
# encoding=utf8
from __future__ import print_function
from flexbe_core import EventState, Logger
import rospy
from wm_nlu.srv import HKGetRoom
from std_msgs.msg import String
class SaraNLUgetRoom(EventState):
    '''
    Use wm_nlu to parse a sentence and return a room

    ># sentence     string      sentence to parse

    #> ActionForms  string[]    list of ActionForms

    <= understood       Finished job.
    <= not_understood   Finished job but no commands detected.
    <= fail             service unavailable.
    '''

    def __init__(self):
        # See example_state.py for basic explanations.
        super(SaraNLUgetRoom, self).__init__(outcomes=['understood', 'not_understood', 'fail'], input_keys=['sentence'], output_keys=['answer'])
        serviceName = "/get_room"
        Logger.loginfo("waiting for service: " + serviceName)
        # Block until the NLU room-lookup service is advertised.
        rospy.wait_for_service(serviceName)
        self.service = rospy.ServiceProxy(serviceName, HKGetRoom)

    def execute(self, userdata):
        # Call the NLU service with the input sentence.
        response = self.service(String(userdata.sentence))
        answer = response.str.data
        # Always publish the (possibly empty) answer to userdata.
        userdata.answer = answer
        # Bug fix: the original used `is ""` (identity comparison), which is
        # not a reliable emptiness test in Python; use equality instead.
        if answer == "":
            return "fail"
        return "understood"
|
# Demonstrates dict.pop (with and without a default) and dict.clear.
users = {
    "12": "Alice",
    "11": "Sasha",
    1: "Masha",
}

key = "11"

# pop with an existing key removes the entry and returns its value
user = users.pop("12")
print(users)

# pop with a default never raises for a missing key
user = users.pop("1123374631", "Unknown")
print(user)

users.clear()
print(users)
|
992,023 | 1047fffa5ee7f537b9f7660526423ca79f8a4af0 | import imp
import argparse
import os
import gamesman
import random
def main():
    """Load the game module named on the command line and run play sessions.

    Note: uses Python 2 idioms (``raw_input`` in helpers) and the deprecated
    ``imp`` module for dynamic loading.
    """
    parser = argparse.ArgumentParser(description='Play games')
    parser.add_argument('game', help='The path to the game script to run.')
    arg = parser.parse_args()
    # Derive a module name from the script's file name, then load it.
    name = os.path.split(os.path.splitext(arg.game)[0])[-1]
    game = imp.load_source(name, arg.game)
    print("\n Welcome to GamesmanSpark " + name + "\n\n")
    # Keep playing sessions until the user declines to play again.
    while (True):
        play_against, ai_type, players_turn = game_setup()
        game_loop(game, play_against, ai_type, players_turn)
        if (end_game()):
            break;
def game_setup():
    """Interactively collect session options.

    Returns a tuple ``(play_against, ai_type, players_turn)``, each shifted
    to be zero-based: play_against 0=computer/1=player, ai_type 0=naive
    solver/1=random (-2 when playing against a human), players_turn
    0=computer first/1=player first.
    """
    play_against = -1
    ai_type = -1
    players_turn = -1
    # Re-prompt until a valid menu choice is entered.
    while (play_against not in [1, 2]):
        play_against = int(raw_input("=== Play against computer or another player? ===\n" +
            "1) computer\n" +
            "2) player\n"))
    if (play_against == 1):
        while (ai_type not in [1, 2]):
            ai_type = int(raw_input("=== Please choose an AI type ===\n" +
                "1) Naive solver (Slow)\n" +
                "2) Random do moves \n"))
        print("============ Who move first? =============")
        print("1) Computer first.")
        print("2) I first.")
        print("3) Random.")
        players_turn = int(raw_input())
        if (players_turn == 3):
            players_turn = random.choice([1, 2])
        if (players_turn == 1):
            print("\n === Wait for computer's move === \n")
    return (play_against-1, ai_type-1, players_turn-1)
def game_loop(game, play_against, ai_type, players_turn):
    """Run one full game until a primitive (terminal) position is reached.

    :param game: loaded game module (initialPosition, primitive, doMove, ...)
    :param play_against: 0 = vs computer, 1 = vs another player
    :param ai_type: 0 = naive solver, 1 = random mover
    :param players_turn: truthy when it is the human player's turn
    """
    pos = game.initialPosition
    while (True):
        game.printBoard(pos)
        # Terminal position: the player who must move from it has lost.
        if game.primitive(pos) is not gamesman.UNDECIDED:
            if (players_turn):
                print("==== You lose. ====")
            else:
                print("==== You win. ====")
            break
        if (players_turn):
            # Human move: re-prompt until a legal move is entered.
            player_move = (-1, -1)
            validMoves = game.generateMoves(pos)
            while (player_move not in validMoves):
                print("Possible valid moves:" + str([game.toIndex(l) for l in validMoves]))
                player_move = game.toLoc(int(raw_input("Please enter your move: ")))
            pos = game.doMove(pos, player_move)
            print("Your Move:" + str(player_move))
        else:
            print("\n === Wait for computer's move ===")
            computer_moves = game.generateMoves(pos)
            computer_move = -1
            if ai_type == 0:
                # Naive solver: prefer a move leading to a LOSE position for
                # the opponent, else a TIE, else fall back to random.
                for m in computer_moves:
                    s = solve(game, game.doMove(pos, m))
                    if s is gamesman.LOSE:
                        computer_move = m
                        break
                    elif s is gamesman.TIE:
                        computer_move = m
                if computer_move == -1:
                    computer_move = random.choice(computer_moves)
            elif ai_type == 1:
                computer_move = random.choice(computer_moves)
            pos = game.doMove(pos, computer_move)
            print("Computer's Move:" + str(computer_move) + "\n")
        # In human-vs-computer mode (play_against == 0) alternate turns.
        if not play_against:
            players_turn = ~players_turn & 1
def end_game():
    """Ask whether to play again; return 1 to quit, 0 to keep playing."""
    print("Play again? y/n")
    answer = raw_input()
    return 1 if answer == "n" else 0
def solve(game, pos):
    """Naively solve *pos*: classify it as WIN/LOSE/TIE for the mover.

    A position is a WIN if any move leads to a LOSE position for the
    opponent, a TIE if the best reachable outcome is a TIE, and a LOSE
    otherwise. Terminal positions return their primitive value directly.
    """
    primitive_value = game.primitive(pos)
    if primitive_value is not gamesman.UNDECIDED:
        return primitive_value
    child_values = [solve(game, game.doMove(pos, move))
                    for move in game.generateMoves(pos)]
    if gamesman.LOSE in child_values:
        return gamesman.WIN
    if gamesman.TIE in child_values:
        return gamesman.TIE
    return gamesman.LOSE
# Script entry point.
if __name__ == '__main__':
    main()
992,024 | 38da98c8462ae4bcd4f6e46f577115af73258783 | from enum import Enum
import pygame
from sprites import make_outline_splites
from group import Group as MyGroup
class Screen(Enum):
    """Identifiers for the application's top-level screens.

    ``BaseScreen.next_screen`` holds one of these values; QUIT is the
    default.
    """
    START = 0
    CHARACTER_SELECT = 1
    STAGE_SELECT = 2
    GAME = 3
    RESULT = 4
    OPTION = 5
    QUIT = 6
class BaseScreen:
    """Base class for a pygame screen: sprite layers, event pump, main loop.

    Subclasses populate the three sprite groups (background/middle/front,
    drawn in that order) and set ``next_screen`` before ending the loop.
    """
    def __init__(self):
        # NOTE(review): pygame.init() returns a (succeeded, failed) tuple,
        # which is always truthy, so the guarded call below never executes —
        # the call inside the condition is what actually initializes pygame.
        if not pygame.init():
            pygame.init()
        # Reuse an existing display surface if one is already set up.
        if not pygame.display.get_surface():
            pygame.display.set_mode((500, 500))
            pygame.display.set_caption("sample")
        self.display = pygame.display.get_surface()
        # self.front_sprites = pygame.sprite.Group()
        # self.middle_sprites = pygame.sprite.Group()
        # self.background_sprites = pygame.sprite.Group()
        self.front_sprites = MyGroup()
        self.middle_sprites = MyGroup()
        self.background_sprites = MyGroup()
        # self.groups = [pygame.sprite.Group() for i in range(9)]
        self.fps = 60
        self.delta_time = 1 / self.fps
        self.clock = pygame.time.Clock()
        self.run = True
        self.next_screen = Screen.QUIT
        # KEYDOWN events collected by get_events for subclasses to consume.
        self.key_downs = []
    # @property
    # def front_sprites(self) -> pygame.sprite.Group:
    #     return self.groups[7]
    # @property
    # def middle_sprites(self) -> pygame.sprite.Group:
    #     return self.groups[4]
    # @property
    # def background_sprites(self) -> pygame.sprite.Group:
    #     return self.groups[1]
    def hoverable(self, rich_sprite, outline_image, group=None, border_width: int=5):
        """Attach hover outlines to a sprite: shown on enter, hidden on exit."""
        if group is None:
            group = self.middle_sprites
        outlines = make_outline_splites(rich_sprite.rect, outline_image, border_width=border_width)
        rich_sprite.change_enter_fnc(group.add, (outlines,))
        rich_sprite.change_exit_fnc(group.remove, (outlines,))
    def get_events(self):
        """Pump the event queue: handle QUIT, collect KEYDOWN events."""
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                self.run = False
            elif event.type == pygame.KEYDOWN:
                self.key_downs.append(event)
    def empty_all_sprites(self):
        self.background_sprites.empty()
        self.middle_sprites.empty()
        self.front_sprites.empty()
    def update(self):
        # Update layers back-to-front.
        self.background_sprites.update()
        self.middle_sprites.update()
        self.front_sprites.update()
    def draw(self):
        # Clear to white, then draw layers back-to-front.
        self.display.fill((255, 255, 255))
        self.background_sprites.draw(self.display)
        self.middle_sprites.draw(self.display)
        self.front_sprites.draw(self.display)
    def main(self):
        """Run the screen's event/update/draw loop until ``run`` is False."""
        while self.run:
            self.get_events()
            self.update()
            self.draw()
            pygame.display.update()
            self.clock.tick(self.fps)
            # for group in self.groups:
            #     if len(group):
            #         group.draw(self.display)
992,025 | 181d8ea743ca36656e810d461739edf85ef00c75 | import os
import tempfile
import pytest
from server import create_app
from server.db import get_db, init_db
# Load the SQL fixture once at import time; the ``app`` fixture replays it
# into every temporary test database.
with open(os.path.join(os.path.dirname(__file__), 'data.sql'), 'rb') as f:
    _data_sql = f.read().decode('utf8')
@pytest.fixture
def app():
    """Create an app instance backed by a fresh temporary database.

    The database file is created, initialized and seeded with the test
    data, yielded to the test, then closed and deleted on teardown.
    """
    db_fd, dp_path = tempfile.mkstemp()
    app = create_app({'TESTING': True, 'DATABASE': dp_path})

    with app.app_context():
        init_db()
        get_db().executescript(_data_sql)

    yield app

    # Teardown: release the descriptor and remove the temp database file.
    os.close(db_fd)
    os.unlink(dp_path)
@pytest.fixture
def client(app):
    # used to make requests to the app without actually running the server
    return app.test_client()
@pytest.fixture
def runner(app):
    # used to call Click commands registered with the app
    return app.test_cli_runner()
class AuthActions(object):
    """Helper wrapping the test client's login/logout requests.

    Most endpoints require an authenticated user, so tests perform the
    login/logout steps through this helper instead of repeating them.
    """

    def __init__(self, client):
        self._client = client

    def login(self, username="test", password="test"):
        credentials = {'username': username, 'password': password}
        return self._client.post('/auth/login', data=credentials)

    def logout(self):
        return self._client.get('auth/logout')
@pytest.fixture
def auth(client):
    # Convenience fixture: an AuthActions bound to the test client.
    return AuthActions(client)
992,026 | fc82b64b6860ac93a1654f0aa6bfdefdc8227b6c | import FWCore.ParameterSet.Config as cms
# EDFilter configuration: selects events using generator-level B hadrons
# produced by the matchGenBHadron tool (flavour, origin and mother chains).
ttHFGenFilter = cms.EDFilter("ttHFGenFilter",
    genParticles = cms.InputTag("genParticles"),
    genBHadFlavour = cms.InputTag("matchGenBHadron", "genBHadFlavour"),
    genBHadFromTopWeakDecay = cms.InputTag("matchGenBHadron", "genBHadFromTopWeakDecay"),
    genBHadPlusMothers = cms.InputTag("matchGenBHadron", "genBHadPlusMothers"),
    genBHadPlusMothersIndices = cms.InputTag("matchGenBHadron", "genBHadPlusMothersIndices"),
    genBHadIndex = cms.InputTag("matchGenBHadron", "genBHadIndex"),
    OnlyHardProcessBHadrons = cms.bool(False),
    # taggingMode=False: the filter rejects events rather than only tagging them.
    taggingMode = cms.bool(False)
)
|
992,027 | b7fc125ccd82ad2fa1da55aa0a20e95aa477512a | from setuptools import setup
# Package metadata for the ``elemental`` distribution.
setup(
    name='elemental',
    version='0.1',
    author='Zach Kelling',
    author_email='zeekayy@gmail.com',
    packages=['elemental',],
    license='LICENSE',
    description='Simple DSL for generating html templates',
    long_description=open('README').read(),
)
|
class Node(object):
    """One element of a doubly linked list: a value plus two neighbour links."""

    def __init__(self, value):
        # prev/next stay None until the node is linked into a list.
        self.info, self.prev, self.next = value, None, None
class DoubleLinkedList(object):
    """Doubly linked list addressed through a single ``start`` (head) pointer."""

    def __init__(self):
        self.start = None

    def display_list(self):
        """Print the list contents on one line."""
        if self.start is None:
            print("List is none")
            return  # bug fix: original fell through and also printed an empty list
        print("List is : ")
        p = self.start
        while p is not None:
            print(p.info," ", end = "")
            p = p.next
        print()

    def insert_begining(self, data):
        """Insert ``data`` as the new head."""
        temp = Node(data)
        temp.next = self.start
        if self.start is not None:  # bug fix: original crashed on an empty list
            self.start.prev = temp
        self.start = temp

    def insert_in_empty(self, data):
        """Insert ``data`` as the only node (assumes the list is empty)."""
        temp = Node(data)
        self.start = temp

    def insert_at_end(self, data):
        """Append ``data`` after the current tail."""
        temp = Node(data)
        if self.start is None:  # bug fix: original crashed on an empty list
            self.start = temp
            return
        p = self.start
        while p.next is not None:
            p = p.next
        p.next = temp
        temp.prev = p

    def create_list(self):
        """Interactively build the list from user input."""
        n = int(input("Enter the number of nodes: "))
        if n == 0:
            return
        data = int(input("Enter the first element to be inserted"))
        self.insert_in_empty(data)
        for i in range(n-1):
            data = int(input("Enter the next element to be inserted : "))
            self.insert_at_end(data)

    def insert_after(self, data, x):
        """Insert ``data`` immediately after the first node holding ``x``."""
        temp = Node(data)
        p = self.start
        while p is not None:
            if p.info == x:
                break
            p = p.next
        if p is None:
            print(x, " not present in the list ")
        else:
            temp.prev = p
            temp.next = p.next
            if p.next is not None:
                p.next.prev = temp
            p.next = temp

    def insert_before(self, data, x):
        """Insert ``data`` immediately before the first node holding ``x``."""
        if self.start is None:
            print("List is empty")
            return
        if self.start.info == x:
            # Inserting before the head is just a head insertion.
            temp = Node(data)
            temp.next = self.start
            self.start.prev = temp
            self.start = temp
            return
        p = self.start
        while p is not None:
            if p.info == x:
                break
            p = p.next
        if p is None:
            print(x, "not present in the list")
        else:
            temp = Node(data)
            temp.prev = p.prev
            temp.next = p
            p.prev.next = temp
            p.prev = temp

    def delete_fiest_node(self):
        """Delete the head node. (Name kept as-is for backward compatibility.)"""
        if self.start is None:
            return
        self.start = self.start.next
        if self.start is not None:
            self.start.prev = None

    def delete_last_node(self):
        """Delete the tail node."""
        if self.start is None:
            return
        if self.start.next is None:
            self.start = None
            return
        p = self.start
        while p.next is not None:
            p = p.next
        p.prev.next = None

    def delete_node(self, x):
        """Delete the first node holding ``x`` (no-op if absent)."""
        if self.start is None:
            return
        if self.start.info == x:
            self.delete_fiest_node()
            return
        p = self.start.next
        while p is not None:
            if p.info == x:
                p.prev.next = p.next
                if p.next is not None:
                    p.next.prev = p.prev
                return
            p = p.next

    def reverse_list(self):
        """Reverse the list in place by swapping prev/next on every node."""
        p = self.start
        last = None
        while p is not None:
            p.prev, p.next = p.next, p.prev
            last = p
            p = p.prev  # after the swap, prev holds the original next node
        if last is not None:
            self.start = last
992,029 | fd68ba09eb627583d4b786520b0ae90af4612de0 | from pyjarowinkler import distance
dict_of_orcids = {}
line_count = 1
print("Starting...")
with open("17.doi_with_merged_orcid.txt", "r") as inp:
for line in inp:
print("Loading: " + str(line_count))
doi = line.split("\t")[0].strip()
orcid = line.split("\t")[1].strip()
dict_of_orcids[doi] = orcid
line_count += 1
line_count = 1
with open("12.authors_with_references_sorted.txt", "r") as inp:
with open("18.authors_with_orcid.txt", "w") as outp:
for line in inp:
print("Searching: " + str(line_count))
name = line.split("\t")[3].strip()
dois = line.split("\t")[10].strip().split(";")
orcids = set()
for doi in dois:
try:
found_orcids = dict_of_orcids[doi].split(";")
for orcid in found_orcids:
if distance.get_jaro_distance(str.lower(name), str.lower(orcid.split(",")[0]), winkler=True, scaling=0.1)>0.9:
orcids.update([orcid.split(",")[1].strip()])
except KeyError:
pass
outp.write(line.strip("\n") + "\t" + ",".join(orcids).strip() + "\n")
line_count += 1
print("Finished.")
|
992,030 | 4ede490e88951390a88f1b9790dfb1d06b0fb059 | from selenium import webdriver
import time
# Minimal Selenium demo: search Google for "Selenium", wait, then quit.
# NOTE(review): the hard-coded driver path mixes a macOS directory with a
# Windows ".exe" binary — confirm the correct chromedriver location.
driver = webdriver.Chrome('/Users/Karol202/PycharmProjects/chromedriver.exe')
driver.get('https://google.com')
# Google's search input field is named "q".
search_box = driver.find_element_by_name('q')
search_box.send_keys('Selenium')
search_box.submit()
time.sleep(5)  # crude wait so the results are visible before closing
driver.quit()
|
992,031 | 2e80c948b8cd415fd2cea0248cd787fa2313ce40 | # -*- coding: utf-8 -*-
# Tom van Steijn, Royal HaskoningDHV
from xsboringen.borehole import Borehole, Segment
import numpy as np
class TestSegment(object):
    """Unit tests for xsboringen's Segment: construction, merging (+=) and
    depth conversion."""
    def test_segment_lithology(self):
        s = Segment(top=0., base=10., lithology='Z')
        assert s.lithology == 'Z'
    def test_segment_thickness(self):
        s = Segment(top=0., base=10., lithology='Z')
        assert np.isclose(s.base, 10.)
    def test_segment_add_top(self):
        # Merging keeps the upper segment's top ...
        s1 = Segment(top=0., base=10., lithology='Z')
        s2 = Segment(top=10., base=12., lithology='K')
        s1 += s2
        assert np.isclose(s1.top, 0.)
    def test_segment_add_base(self):
        # ... extends to the lower segment's base ...
        s1 = Segment(top=0., base=10., lithology='Z')
        s2 = Segment(top=10., base=12., lithology='K')
        s1 += s2
        assert np.isclose(s1.base, 12.)
    def test_segment_add_lithology(self):
        # ... and keeps the left-hand segment's lithology.
        s1 = Segment(top=0., base=10., lithology='Z')
        s2 = Segment(top=10., base=12., lithology='K')
        s1 += s2
        assert s1.lithology == 'Z'
    def test_segment_relative_to(self):
        # Depths below surface convert to elevations relative to datum z.
        z = 13.
        s = Segment(top=5., base=7., lithology='Z')
        ref_top, ref_base = s.relative_to(z)
        assert np.isclose(ref_top, 8.)
        assert np.isclose(ref_base, 6.)
class TestBorehole(object):
    """Unit tests for xsboringen's Borehole: lazy segments and simplify()."""
    def test_borehole_depth(self):
        b = Borehole(code='b', depth=1.2)
        assert np.isclose(b.depth, 1.2)
    def test_borehole_iter(self):
        # Segments may be supplied as a generator; materialize() realizes them.
        s_iter = (s for s in [
            Segment(top=0., base=0.5, lithology='Z'),
            Segment(top=0.5, base=3.0, lithology='K'),
            Segment(top=3.0, base=20., lithology='Z'),
            ])
        b = Borehole(code='b', depth=20., segments=s_iter)
        b.materialize()
        assert len(b) == 3
    def test_simplify(self):
        # Adjacent segments with equal lithology are merged.
        segments = [
            Segment(top=0., base=0.5, lithology='Z'),
            Segment(top=0.5, base=3.0, lithology='Z'),
            Segment(top=3.0, base=10.0, lithology='K'),
            Segment(top=10.0, base=20., lithology='Z'),
            ]
        b = Borehole(code='b', depth=20., segments=segments)
        b.simplify()
        assert len(b) == 3
        assert b.segments[0].lithology == 'Z'
        assert np.isclose(b.segments[0].thickness, 3.0)
    def test_simplify_min_thickness(self):
        # Segments thinner than min_thickness are absorbed by neighbours.
        segments = [
            Segment(top=0., base=0.5, lithology='Z'),
            Segment(top=0.5, base=3.0, lithology='Z'),
            Segment(top=3.0, base=3.1, lithology='K'),
            Segment(top=10.0, base=20., lithology='Z'),
            ]
        b = Borehole(code='b', depth=20., segments=segments)
        b.simplify(min_thickness=0.5)
        assert len(b) == 1
        assert b.segments[0].lithology == 'Z'
        assert np.isclose(b.segments[0].thickness, 20.)
|
992,032 | f978c1e47dfa7acbffa62797ad33bda58b4e78d6 | from django.urls import path,re_path
from .views import (SearchProductView)
urlpatterns = [
    # Product search endpoint (reverse()d by the name 'query').
    path('search/',SearchProductView.as_view(),name='query'),
]
992,033 | fca8a71570cd99f2934cd2eef5f0e5aef46ae6fc | import numpy as np
from agents.robot import Robot
from agents.obstacle import Obstacle
def test_is_colliding():
    """Robot.is_colliding: true for head-on trajectories, false for diverging ones."""
    robot = Robot(
        position=np.array([0, 0]),
        velocity=np.array([1, 1]),
        goal=np.array([10, 10]),
        radius=1,
        velocity_lower_bound=-1,
        velocity_upper_bound=1,
    )
    # Obstacle moving straight at the robot -> collision predicted.
    obstacle = Obstacle(
        position=np.array([10, 10]),
        velocity=np.array([-1, -1]),
        goal=np.array([0, 0]),
        radius=1,
    )
    assert robot.is_colliding(obstacle)

    # Non colliding case: obstacle moving away from the robot.
    obstacle.velocity = np.array([1, 1])
    assert not robot.is_colliding(obstacle)
|
992,034 | b44ba31282906aa3d94943774252281d9d6366d5 | from keras.models import Sequential ,Model
from keras.layers import Dense, Dropout, Activation, Flatten, Input, merge
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras import backend as K
from keras.callbacks import *
from keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
class LossGrapher(Callback):
    """Keras callback that records loss/accuracy per epoch and saves plots
    every 10 epochs.

    Bug fix: the original had unconditional ``return`` statements at the top
    of ``on_epoch_end`` and ``plot_history``, making all of the bookkeeping
    and plotting below them unreachable dead code; they are removed here.
    NOTE(review): assumes the ./lossPlot/ directory exists before training —
    confirm, or create it up front.
    """
    def __init__(self):
        # Keep base-class initialization so Callback internals are set up.
        super(LossGrapher, self).__init__()

    def on_train_begin(self, logs={}):
        # Per-epoch history, reset at the start of every training run.
        self.acc = []
        self.val_acc = []
        self.loss = []
        self.val_loss = []

    def on_epoch_end(self, epoch, logs={}):
        self.loss.append(logs.get('loss'))
        self.val_loss.append(logs.get('val_loss'))
        self.acc.append(logs.get('acc'))
        self.val_acc.append(logs.get('val_acc'))
        # Refresh the plots every 10 epochs (and at epoch 0).
        if epoch % 10 == 0:
            self.plot_history(epoch)

    def plot_history(self, epoch):
        # summarize history for accuracy
        fig = plt.figure()
        plt.plot(self.acc)
        plt.plot(self.val_acc)
        plt.title('model accuracy')
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train', 'test'], loc='upper left')
        plt.savefig('./lossPlot/model_accuracy.png',dpi=fig.dpi)
        # summarize history for loss
        fig = plt.figure()
        plt.plot(self.loss)
        plt.plot(self.val_loss)
        plt.title('model loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'test'], loc='upper left')
        plt.savefig('./lossPlot/model_loss_epoch.png',dpi=fig.dpi)
        # Close all figures so long trainings do not leak matplotlib state.
        plt.close('all')
|
992,035 | c4e126614b667186da671418ac8cccd5258851fc | from cs50 import get_int
# Prompt until the input is a decimal number between 1 and 8 (inclusive).
# str.isdecimal rejects signs and dots, so int() below cannot fail.
while True:
    inp = input("Height: ")
    if str.isdecimal(inp) and int(inp) in range(1, 9):
        break
height = int(inp)
# Print the double half-pyramid: right-aligned left half, gap, left-aligned
# right half, one extra '#' per row.
for i in range(1, height + 1):
    print(f"{' ' * (height - i)}{'#' * i}  {'#' * i}")
|
992,036 | befa14851af77c0d5a1d01b14fa46256047372f0 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This file exists only to run NREI requests from the command line in a way that makes it easy to use code profiling
functions, independent of the user interface.
"""
import cProfile, pstats # packages for testing the execution time of model commands
import pkg_resources
from DriftModelRT.DriftForager import DriftForager
from DriftModelRT.PreyType import PreyType
# Load the demo prey types bundled with the package and build a forager
# with fixed demo parameters.
driftDensityFile = pkg_resources.resource_filename(__name__, 'DriftModelRT/resources/DemoPreyTypes.csv')
preyTypes = PreyType.loadPreyTypes(driftDensityFile, None)
# NOTE(review): positional arguments — check against DriftForager's
# signature for their meanings.
forager = DriftForager(None,preyTypes,46,18,13,0,10,1,0,0,0)
def test(nRuns):
    """Run the NREI computation nRuns times so cProfile gets enough samples."""
    for i in range(nRuns): forager.netRateOfEnergyIntake(50.0, 30.0, 10, True)
# Profile 1000 runs, dump the stats to 'runstats', then print the report
# sorted by cumulative time.
cProfile.run('test(1000)','runstats')
p = pstats.Stats('runstats')
p.strip_dirs().sort_stats('cumulative').print_stats()
##p.strip_dirs().sort_stats('cumulative').print_callees()
# was about 2.5 s |
992,037 | 59c556ade8c8abcdcb4d67d585de1c1a4cd92add | from django.http import HttpResponse
from django.shortcuts import render
from .models import Post, Like
def index(request):
    """Render the post index page with all posts."""
    posts = Post.objects.all()
    return render(request, 'post/index.html', {'posts': posts})
def like(request):
    """Record a Like for the post given by the ?post_id= query parameter.

    Returns a plain-text HttpResponse describing the outcome; the response
    strings are unchanged from the original so existing clients still work.

    NOTE(review): a state-changing action over GET is unsafe (CSRF, link
    prefetchers) -- consider POST. The Like row also stores no user, so
    duplicate likes by the same user cannot be detected; confirm intended.
    """
    if request.method != 'GET':
        return HttpResponse('unsuccess')
    if not request.user.is_authenticated:
        return HttpResponse('not authenticated user')
    # .get() avoids a KeyError (500) when post_id is missing entirely.
    post_id = request.GET.get('post_id')
    try:
        likedpost = Post.objects.get(id=post_id)
    except Post.DoesNotExist:
        # Unknown/missing post id: report failure instead of raising a 500.
        return HttpResponse('unsuccess')
    Like(post=likedpost).save()
    return HttpResponse('success like button')
|
992,038 | 0e2ce6400c536a5faba51d4d7a999a0df99cbef2 |
"""
https://leetcode-cn.com/problems/number-of-ways-to-wear-different-hats-to-each-other/solution/python-3xie-gei-zi-ji-de-chao-xiang-xi-zhuang-ya-d/
https://leetcode-cn.com/problems/number-of-ways-to-wear-different-hats-to-each-other/solution/python-3xie-gei-zi-ji-de-chao-xiang-xi-zhuang-ya-d/
https://leetcode-cn.com/problems/number-of-ways-to-wear-different-hats-to-each-other/solution/python-01bei-bao-by-dangerusswilson/
state: 000101011000
i-th bit represents whether the i-th hat has been taken
dp[state] : the number of ways for this state
物品是人
for p in range(n):
for state in range(111111111(bit)):
for hat in HatsForThisPerson[p]:
if hat has been taken in state:
continue
else:
newDP[state+hat] += dp[state]
res = sim(dp[state]) for those states contain n bit 1
如果换成不枚举人(40), 而是枚举帽子(10)
state: 000101011000
i-th bit represents whether the i-th hat has been taken
for hat in range(40):
for state in range(1111111111(bit)):
for person in PersonsForThisHat[h]:
if person has taken in this state:
continue
else:
newDP[state+person] += dp[state]
res = dp[111111111]
"""
"""
1434.Number-of-Ways-to-Wear-Different-Hats-to-Each-Other
这道题是一道典型的背包+状态压缩的DP问题。
最初的想法是将帽子作为状态。也就是用一串01向量来表示第i顶帽子是否被人taken了。大致的算法是:
for (int p=0; person < n; person++)
for (int state = 000...000; state <= 111..111; state++)
{
for (int hat : HatsForThisPerson[p])
{
if (hat has been taken in state)
continue;
dp_new[state + hat] += dp[state]
}
}
最终的答案是在所有的state里面找出那些恰好有10个1的那些,取这些dp[state]的和。
但是这个方法有两大问题。首先state太大,有2^40种,第二层循环太复杂。其次,最终的答案要取C(40,10)种可能,也不实际。
比较巧妙的思路是看到人的数量最多只有10,用它来做状态非常合适。我们改定义为:用一串01向量来表示第i个人是否已经take苗子了。大致的算法是:
for (int h = 0; h < 40; h ++)
for (int state = 000...000; state <= 111..111; state++)
{
for (int person : PersonsForThisHat[h])
{
if (person has taken hat in state)
continue;
dp_new[state + person] += dp[state]
}
}
最终的答案就是dp[111...111]
"""
import collections
import functools
class SolutionTony:
    """Memoized DFS over hats: each hat is either skipped or given to one
    of the people who like it and are still bare-headed."""

    def numberWays(self, hats) -> int:
        MOD = 10 ** 9 + 7
        people = len(hats)
        done = (1 << people) - 1

        # Invert the input: hat number -> people who would wear it.
        wearers = collections.defaultdict(list)
        for person, liked in enumerate(hats):
            for hat in liked:
                wearers[hat].append(person)

        @functools.lru_cache(None)
        def count(hat, assigned):
            if assigned == done:
                return 1          # everyone has a hat
            if hat >= 41:
                return 0          # hats are numbered 1..40; none left
            # Option 1: nobody wears this hat.
            ways = count(hat + 1, assigned)
            # Option 2: give it to any still-bare-headed fan.
            for person in wearers[hat]:
                if not assigned & (1 << person):
                    ways += count(hat + 1, assigned | (1 << person))
            return ways

        return count(0, 0) % MOD
class Solution:
    """Bottom-up bitmask DP: process hats 1..40 in order; dp[mask] counts
    assignments of the hats seen so far where `mask` marks hatted people."""

    def numberWays(self, hats) -> int:
        MOD = 10 ** 9 + 7
        people = len(hats)
        size = 1 << people

        # hat number -> people who like that hat
        fans = collections.defaultdict(list)
        for person, liked in enumerate(hats):
            for h in liked:
                fans[h].append(person)

        dp = [0] * size
        dp[0] = 1
        for hat in range(1, 41):
            nxt = dp[:]
            for mask in range(size):
                if not dp[mask]:
                    continue  # unreachable state: nothing to propagate
                for person in fans[hat]:
                    if (mask >> person) & 1:
                        continue  # already wearing some hat
                    tgt = mask | (1 << person)
                    nxt[tgt] = (nxt[tgt] + dp[mask]) % MOD
            dp = nxt
        return dp[size - 1]
class SolutionTD:
    """Top-down memoized search, one hat number per recursion level."""

    def numberWays(self, hats) -> int:
        n = len(hats)
        everyone = (1 << n) - 1
        MOD = 10 ** 9 + 7

        @functools.lru_cache(None)
        def solve(hat_no, worn):
            # `worn` is a bitmask of people who already have a hat.
            if worn == everyone:
                return 1
            # Hats are numbered 1..40; past 40 nothing can be assigned.
            if hat_no > 40:
                return 0
            # Nobody wears this hat...
            ways = solve(hat_no + 1, worn)
            # ...or some bare-headed person who likes it does.
            for person in range(n):
                if hat_no in hats[person] and worn & (1 << person) == 0:
                    ways += solve(hat_no + 1, worn + (1 << person))
            return ways % MOD

        return solve(0, 0)
class SolutionDFS:
    """Top-down DFS with an explicit memo dict keyed by (hat, people-mask)."""

    def numberWays(self, hats) -> int:
        return self.dfs(hats, 0, 0, {})

    def dfs(self, hats, pos, state, memo):
        """Count assignments of hats pos..40 given `state` people hatted."""
        if (pos, state) in memo:
            return memo[(pos, state)]
        n = len(hats)
        if state == (1 << n) - 1:
            return 1
        if pos > 40:
            return 0
        # Skip hat `pos`, then try giving it to each willing, bare person.
        total = self.dfs(hats, pos + 1, state, memo)
        for person in range(n):
            if pos in hats[person] and not state & (1 << person):
                total += self.dfs(hats, pos + 1, state + (1 << person), memo)
        memo[(pos, state)] = total
        return total % (10 ** 9 + 7)
class SolutionDFS2:
    """DFS over hats with a precomputed hat -> interested-people table."""

    def numberWays(self, hats) -> int:
        # Sets make the membership test below O(1).
        hats = [set(hat) for hat in hats]
        # table[h] lists the people who would wear hat h (hats are 1..40).
        table = collections.defaultdict(list)
        for hat_no in range(1, 41):
            for person, liked in enumerate(hats):
                if hat_no in liked:
                    table[hat_no].append(person)
        return self.dfs(hats, 0, 0, table, {})

    def dfs(self, hats, state, pos, table, memo):
        """Count ways to finish hats pos..40 with `state` people hatted."""
        everyone = (1 << len(hats)) - 1
        if state == everyone:
            return 1
        if pos > 40:
            return 0
        if (state, pos) in memo:
            return memo[(state, pos)]
        # Either give hat `pos` to one interested bare-headed person...
        ways = sum(
            self.dfs(hats, state | (1 << person), pos + 1, table, memo)
            for person in table[pos]
            if not state & (1 << person)
        )
        # ...or leave hat `pos` unworn.
        ways += self.dfs(hats, state, pos + 1, table, memo)
        memo[(state, pos)] = ways % (10 ** 9 + 7)
        return memo[(state, pos)]
|
992,039 | 941fdb1b18f71b9bb6183bc3ecb892a509a66936 | import logging, pprint, hashlib, urllib, urllib2
from webapp2_extras import json
from lxml import etree
#import pylast
import Handlers, Config
# Routines for dealing with the last.fm API.
# cf http://www.last.fm/api/
# Note, chunks of this code are based on pyLast, which
# seems like a good library, but apart from the authentication
# bit I'm doing all the last.fm interaction at the client level.
# http://code.google.com/p/pylast/
# A few constants
USER_AGENT = 'how-you-been/1.0'
LAST_FM_ROOT = 'http://ws.audioscrobbler.com/2.0/'
class LastFmMixin(Handlers.WebAuth, Config.ConfigAware):
    """Request-handler mixin for the last.fm web-auth flow: builds the
    auth redirect URL and exchanges an auth token for a session key.

    Python 2 module (uses `unicode`, urllib2 at module level).
    """
    # Default setting for config lookups
    DEFAULT_SETTING_GROUP = 'last.fm'
    # Name of the session cookie we store data in
    SESSIONKEY_COOKIE = 'lastfm.sessionKey'
    USERNAME_COOKIE = 'lastfm.username'

    def getAuthRedirectUrl(self):
        """Construct the URL we send users to for last.fm authorization;
        last.fm calls back to /lastfm-callback on this host."""
        host = self.request.environ['HTTP_HOST']
        url = ('http://www.last.fm/api/auth/' +
            '?api_key=' + self.cfg('api_key') +
            '&cb=http://' + host + '/lastfm-callback')
        return url

    # Placeholder -- intentionally does nothing.
    def _lastFmApiUrl(self, url): pass

    def getLastFmSessionKey(self, token):
        """Given an access code, make a call to last.fm. Find the user's name and a session key,
        and save both values into cookies."""
        apiRequest = LastFmApiRequest('auth.getSession', {'token': unicode(token).encode('utf-8')})
        logging.debug('sessionKey URL: ' + apiRequest.url())
        response = apiRequest.execute()
        root = response.getroot()
        # The auth.getSession XML carries <key> and <name> elements.
        sessionKey = root.xpath('//key/text()')[0]
        username = root.xpath('//name/text()')[0]
        logging.debug('user:' + username + ' session:' + sessionKey)
        # I'm not crazy about the way this couples the mixin and webapp2.RequestHandler
        self.setCookie(self.SESSIONKEY_COOKIE, sessionKey)
        self.setCookie(self.USERNAME_COOKIE, username)
class LastFmApiRequest(Config.ConfigAware):
    """Stripped-down pylast._Request class usable for authentication.

    Builds a signed last.fm API request (method + params + api_sig) and
    fetches it, returning the parsed XML response.
    """
    DEFAULT_SETTING_GROUP = 'last.fm'

    def __init__(self, method, params=None):
        """Prepare a signed request for `method` with optional `params`.

        Bug fix: the original signature was `params={}` -- a shared
        mutable default that this constructor then mutated (api_key,
        method, api_sig were injected into the caller's dict and leaked
        between instances). We now copy the dict instead.
        """
        Config.ConfigAware.__init__(self)
        self.params = dict(params) if params is not None else {}
        self.params['api_key'] = self.cfg('api_key')
        self.params['method'] = method
        # Generate signature (over api_key + method + user params)
        self.params['api_sig'] = self.signature(self.params)

    def signature(self, params):
        """Returns a 32-character hexadecimal md5 hash of the signature string."""
        # Per the last.fm API: concatenate key+value pairs sorted by key,
        # append the shared secret, and md5 the whole thing.
        string = ''.join(key + params[key] for key in sorted(params.keys()))
        return md5(string + self.cfg('secret'))

    def url(self):
        """Get the URL for this method"""
        queries = ['='.join([key, urllib.quote_plus(self.params[key])]) for key in self.params]
        s = LAST_FM_ROOT + '?' + '&'.join(queries)
        return s

    def execute(self):
        """Fetch the method from last.fm; return the parsed XML tree."""
        headers = {
            'Content-type': 'application/x-www-form-urlencoded',
            'Accept-Charset': 'utf-8',
            'User-Agent': USER_AGENT
        }
        request = urllib2.Request(self.url(), headers=headers)
        response = urllib2.urlopen(request)
        return etree.parse(response)

    def __repr__(self): return repr(self.params)
def md5(text):
    """Return the 32-char hexadecimal MD5 digest of `text` (UTF-8 encoded).

    NOTE: relies on the Python 2 `unicode` builtin, consistent with the
    rest of this module.
    """
    h = hashlib.md5()
    h.update(unicode(text).encode("utf-8"))
    return h.hexdigest()
|
992,040 | 9607bcb756166651d3b52f784575f48d372d9f1b | print ("PYTHON TEST PROGRAM")
# Print a 3x3 grid of sums: row i contains i+1, i+2, i+3 run together.
for i in range(3):
    for j in range(1, 4):
        print(i + j, end="")
    # Bug fix: a bare `print` is a no-op expression in Python 3 (it just
    # evaluates the function object). It must be CALLED to emit the
    # row-ending newline.
    print()
|
992,041 | dfc20d4eed3709a6c8159ec7eb96c46ca4d3f018 | #!/usr/bin/env python
PKG = 'frg_rover'
import roslib; roslib.load_manifest(PKG)
#Need this for the Msgs to work
roslib.load_manifest('frg_rover_msgs')
import rospy
import numpy
import math
from numpy import *
from std_msgs.msg import UInt16
from sensor_msgs.msg import LaserScan
from geometry_msgs.msg import Twist
class Moving_Average_v0:
    """Moving average over the most recent `buf_len` samples."""

    def __init__(self, buf_len=10):
        self.buf = []           # retained samples, newest last
        self.buf_len = buf_len  # window capacity
        self.average = 0        # last computed mean

    def update_average(self, val):
        """Push one sample and return the mean of the retained window."""
        if len(self.buf) < self.buf_len:
            self.buf.append(val)
        else:
            # Window full: drop the oldest sample and append the new one.
            self.buf = self.buf[1:] + [val]
        self.average = mean(self.buf)
        return self.average
# Latest laser scan received from the /scan_laser topic, plus a flag the
# main loop could poll to see whether a fresh message arrived.
scan_data = LaserScan()
new_scan_flag = 0
# ----------------------------------------------
def update_scan(data):
    """ROS subscriber callback: stash the newest LaserScan message."""
    global scan_data
    global new_scan_flag
    new_scan_flag = 1
    scan_data = data
#-----------------------------------------
#-----------------------------------------
def main_collision_avoidance():
    """Emergency-stop node: watches the laser scan and, while any obstacle's
    forward (x) distance is within ~1 m (smoothed over 5 scans), repeatedly
    publishes a zero Twist on /move_base/cmd_vel to halt the rover.

    Python 2 / rospy code; runs until ROS shutdown at 10 Hz.
    """
    global scan_data
    global new_scan_flag
    rospy.Subscriber('/scan_laser',LaserScan,update_scan, queue_size = 10)
    pub_vel = rospy.Publisher('/move_base/cmd_vel',Twist, queue_size = 10)
    rospy.init_node('frg_collision_avoidance', anonymous=True)
    r = rospy.Rate(10)
    e_stop = 0
    cmd = Twist()  # default-constructed Twist = all zeros, i.e. "stop"
    dmin_MA = Moving_Average_v0(5)
    dmin_average = 1000  # effectively "no obstacle seen yet"
    while not rospy.is_shutdown():
        #print scan_data.ranges[0]
        # Reconstruct the beam angle for every range sample.
        a_min = scan_data.angle_min
        a_inc = scan_data.angle_increment
        a_list = [a_min]
        a_max = a_min
        for i in range(len(scan_data.ranges)-1):
            a_max += a_inc
            a_list.append(a_max)
        # print a_list
        # for i in range(len(a_list)):
        #     a_list[i] = a_list[i] - math.pi/2
        if not len(scan_data.ranges) > 0:
            print 'No scan data...'
        dx_min = 1000000
        if len(scan_data.ranges) > 1:
            # Convert polar samples to cartesian and track the smallest
            # forward distance dx; the smoothed dx drives the e-stop.
            dx_list = []
            dy_list = []
            for i in range(len(scan_data.ranges)):
                d1 = scan_data.ranges[i]
                a1 = a_list[i]
                dx = d1 * numpy.cos(a1)
                dy = d1 * numpy.sin(a1)
                dx_list.append(dx)
                dy_list.append(dy)
                if dx_min > dx:
                    dx_min = dx
            dmin_average = dmin_MA.update_average(dx_min)
            print dx_min, dmin_average
        dist_limit = 1.0
        # 0.1 m hysteresis so the e-stop does not chatter at the threshold.
        if (e_stop==1) and dmin_average > dist_limit+0.1:
            # get out of estop
            e_stop = 0
        if dmin_average < dist_limit:
            # engage estop
            e_stop = 1
        if e_stop:
            # Keep publishing the zero command while stopped.
            pub_vel.publish(cmd)
            print rospy.Time.now(), "e-stop engaged"
        r.sleep()

if __name__ == "__main__":
    main_collision_avoidance()
|
992,042 | 2c4248d1a0edf9a668f92b97fb533394c461d8bc | from django.core.files.storage import default_storage
from django.db.models import Q
from rest_framework import viewsets, status, pagination
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated, IsAuthenticatedOrReadOnly
from rest_framework.response import Response
from .models import Post
from .serializers import PostSerializer, UploadSerializer
class Pagination(pagination.PageNumberPagination):
    """Page-number pagination that also reports total/current page numbers."""
    page_size = 10

    def get_paginated_response(self, data):
        """Wrap a result page with the metadata the frontend expects."""
        paginator = self.page.paginator
        payload = {
            'totalPageNumber': paginator.num_pages,
            'currentPageNumber': self.page.number,
            'results': data,
            'next': self.get_next_link(),
            'previous': self.get_previous_link(),
        }
        return Response(payload)
class PostViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Post, addressed by slug, with keyword search."""
    queryset = Post.objects.all()
    serializer_class = PostSerializer
    permission_classes = [IsAuthenticatedOrReadOnly]
    pagination_class = Pagination
    lookup_field = 'slug'

    def get_queryset(self):
        """Filter by the optional ?keyword= parameter.

        Space-separated keywords are ANDed: every term must match at
        least one of title/description/text/slug (case-insensitive).
        """
        qs = super().get_queryset()
        keyword = self.request.query_params.get('keyword', None)
        if not keyword:
            return qs
        for term in keyword.split():
            condition = (
                Q(title__icontains=term)
                | Q(description__icontains=term)
                | Q(text__icontains=term)
                | Q(slug__icontains=term)
            )
            qs = qs.filter(condition)
        return qs
@api_view(['POST'])
@permission_classes([IsAuthenticated])
def upload(request):
    """Persist an uploaded image via default storage and return its URL."""
    serializer = UploadSerializer(data=request.data)
    if not serializer.is_valid():
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    image = serializer.validated_data['image']
    saved_name = default_storage.save(image.name, image)
    return Response({'url': default_storage.url(saved_name)},
                    status=status.HTTP_201_CREATED)
|
992,043 | 939cb7f5cb13829957eabf7cd6251a39e7a74d95 | # This software is copyright (c) 2010 UTI Limited Partnership.
# The original authors are Robert A. Brown, M. Louis Lauzon
# and Richard Frayne. This software is licensed in the terms
# set forth in the "FST License Notice.txt" file, which is
# included in the LICENSE directory of this distribution.
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy
# Cython extension: the pyGFT.pyx wrapper plus the C implementation,
# linked against FFTW3 (headers expected under /usr/local/include).
sourcefiles = ['pyGFT.pyx','gft.c']
ext_modules = [
    Extension("gft", sourcefiles,
        libraries=['fftw3'],
        include_dirs = ['/usr/local/include']
    )
]
setup(
    name = 'gft',
    version = "0.1",
    description="A Python module to calculate the General Fourier Family Transform",
    author_email="robb@robbtech.com",
    # Cython's build_ext compiles the .pyx source to C before building.
    cmdclass = {'build_ext': build_ext},
    # NumPy headers are needed to compile against its C API.
    include_dirs = [numpy.get_include()],
    ext_modules = ext_modules
)
|
992,044 | 447d048d4cdf90186b9422e6642a3259290068f4 | import webbrowser
import time
# Open the video five times, ten seconds apart.
total_breaks = 5
# Fixed typo in the user-visible message: "stared" -> "started".
print("This program started on " + time.ctime())
for _ in range(total_breaks):
    time.sleep(10)
    webbrowser.open("http://www.youtube.com/watch?v=dQw4w9WgXcQ")
|
992,045 | ffc6c76fe59dd7dc6ac9847648a9798f7217b0f3 | import t2
# Exercise t2.palindrome with the arguments (2, 40) and print the result.
# The semantics depend entirely on the local t2 module (not visible here).
print(t2.palindrome(2,40))
992,046 | 86e319b87e42e188decae67054ddbea70512fb3e | """
# Name: meas_models/models.py
# Description:
# Created by: Phuc Le-Sanh
# Date Created: Nov 16 2016
# Last Modified: Nov 23 2016
# Modified by: Phuc Le-Sanh
"""
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from django.db import models
from ckeditor.fields import RichTextField
from django.contrib.auth.models import User
from common import *
"""
Validations
"""
def validate_difficulty_range(value):
    """Model-field validator: the difficulty level must lie in 1..10."""
    if not 1 <= value <= 10:
        raise ValidationError(
            _('%(value)s is out of correct range of difficulty level.'),
            params={'value': value},
        )
class EducationLevel(models.Model):
    """
    An education level. Examples:
        A'level
        O'level
        Elementary
    """
    def __str__(self):
        return self.name
    # Unique human-readable name of the level.
    name = models.CharField(max_length=200, unique=True)
    description = models.TextField(max_length=1000)
class Subject(models.Model):
    """
    A subject within a specific education level. Examples:
        Additional Mathematics
        Elementary Mathematics
        H2 Mathematics
        PSLE Mathematics
    """
    def __str__(self):
        return self.name
    name = models.CharField(max_length=200, unique=True)
    description = models.TextField(max_length=1000)
    # Deleting a level cascades to its subjects.
    education_level = models.ForeignKey(
        EducationLevel, on_delete=models.CASCADE)
class Topic(models.Model):
    """
    A topic within a subject. Examples:
        Quadratic Equations & inequalities
        Indices, Surds, Exponential, Logarithms
        ...
    """
    def __str__(self):
        return self.name
    name = models.CharField(max_length=200, unique=True)
    description = models.TextField(max_length=1000)
    # Optional display position of the topic within its subject.
    order = models.PositiveIntegerField(null=True, blank=True)
    subject = models.ForeignKey(Subject, on_delete=models.CASCADE)
    def as_json(self):
        """Serialize this topic to a plain dict (JSON-friendly)."""
        return dict(
            topic_id=self.id,
            name=self.name,
            description=self.description,
            order=self.order,
            subject=self.subject.name)
class Concept(models.Model):
    """
    A concept within a topic. Examples:
        Quadratic Equations & inequalities has:
            Symmetric properties of the roots of a quadratic equation
        ...
    """
    def __str__(self):
        return self.name
    name = models.CharField(max_length=200, unique=True)
    description = models.TextField(max_length=1000)
    # Display position within the topic; defaults to first.
    order = models.PositiveIntegerField(null=True, blank=True, default=1)
    topic = models.ForeignKey(Topic, on_delete=models.CASCADE)
class Test(models.Model):
    """
    A test (practice or otherwise, per TEST_TYPES).
    """
    name = models.CharField(max_length=200)
    test_type = models.CharField(
        max_length=200,
        choices=TEST_TYPES,
        default=PRACTICE_TEST)
    # NOTE(review): appears to hold a serialized list of question ids --
    # confirm the encoding with whatever code writes this field.
    questions_list = models.TextField()
    number_of_questions = models.IntegerField()
class Paper(models.Model):
    """
    An exam paper, identified by year, month and paper number.
    """
    def __str__(self):
        return str(self.year) + " " + str(self.get_month_display()) + " " + \
            str(self.number)
    year = models.IntegerField()
    # Stored as a choice key; get_month_display() yields the label.
    month = models.CharField(max_length=20, choices=MONTHS, default="1")
    number = models.IntegerField()
    no_of_question = models.IntegerField(null=True, blank=True)
    subject = models.ForeignKey(Subject, on_delete=models.CASCADE,
        default=1)
class KeyPoint(models.Model):
    """
    A key point associated with a specific concept.
    """
    def __str__(self):
        return self.name
    name = models.CharField(max_length=200)
    content = models.TextField()
    concept = models.ForeignKey(Concept, on_delete=models.CASCADE)
class Question(models.Model):
    """
    A question, linked to a concept and optionally to a key point / paper.
    """
    question_type = models.CharField(
        max_length=2,
        choices=QUESTION_TYPES,
        default="EX")
    used_for = models.CharField(
        max_length=2,
        choices=USED_FOR,
        default="ON")
    mark = models.IntegerField(default=1)
    difficulty_level = models.CharField(
        max_length=1,
        choices=DIFFICULTIES,
        default="1")
    # NOTE(review): "respone_type" is misspelled ("response"), but renaming
    # the field would require a schema migration -- left as-is.
    respone_type = models.CharField(
        max_length=10,
        choices=RESPONSE_TYPES,
        default=TEXT)
    content = RichTextField()
    solution = RichTextField()
    answer = RichTextField(default="Test")
    concept = models.ForeignKey(Concept, on_delete=models.CASCADE)
    keypoint = models.ForeignKey(KeyPoint, on_delete=models.CASCADE,
        null=True, blank=True)
    paper = models.ForeignKey(
        Paper, on_delete=models.CASCADE, null=True, blank=True)
    def get_difficulty_level(self):
        """Return range(0, difficulty_level) -- presumably used to repeat
        a marker per difficulty point in templates; confirm with callers."""
        return range(0, int(self.difficulty_level))
class Formula(models.Model):
    """
    A formula, optionally attached to a question.
    """
    def __str__(self):
        return self.content
    content = models.TextField()
    status = models.BooleanField(default=False)
    # Pre-computed search terms -- presumably produced by an indexing
    # pipeline elsewhere; confirm their format before relying on them.
    inorder_term = models.CharField(max_length=1024, null=True, blank=True)
    sorted_term = models.CharField(max_length=1024, null=True, blank=True)
    structure_term = models.CharField(max_length=1024, null=True, blank=True)
    constant_term = models.CharField(max_length=1024, null=True, blank=True)
    variable_term = models.CharField(max_length=1024, null=True, blank=True)
    question = models.ForeignKey(Question, on_delete=models.CASCADE,
        null=True, blank=True)
class FormulaIndex(models.Model):
    """
    A formula-index row keyed by an index term.
    """
    indexkey = models.CharField('index key', primary_key=True, max_length=255)
    # NOTE(review): looks like a serialized list of document ids -- confirm
    # the encoding with the code that populates it.
    docsids = models.TextField(null=True, blank=True)
    df = models.PositiveIntegerField('frequency', default=1, blank=True)
class AnswerPart(models.Model):
    """
    One part of a question's answer, with up to four numbered sub-parts.

    NOTE(review): the sub-parts are denormalized into numbered columns; a
    related model would remove the fixed limit of four. Field names keep
    the existing "respone" spelling to avoid a schema migration.
    """
    part_name = models.CharField(max_length=1)
    part_content = RichTextField()
    part_respone_type = models.CharField(
        max_length=10,
        choices=RESPONSE_TYPES,
        default=TEXT)
    subpart_name_1 = models.CharField(max_length=10, null=True, blank=True)
    subpart_content_1 = RichTextField(null=True, blank=True)
    respone_type_1 = models.CharField(
        max_length=10,
        choices=RESPONSE_TYPES,
        default=TEXT, null=True, blank=True)
    subpart_name_2 = models.CharField(max_length=10, null=True, blank=True)
    subpart_content_2 = RichTextField(null=True, blank=True)
    respone_type_2 = models.CharField(
        max_length=10,
        choices=RESPONSE_TYPES,
        default=TEXT, null=True, blank=True)
    subpart_name_3 = models.CharField(max_length=10, null=True, blank=True)
    subpart_content_3 = RichTextField(null=True, blank=True)
    respone_type_3 = models.CharField(
        max_length=10,
        choices=RESPONSE_TYPES,
        default=TEXT, null=True, blank=True)
    subpart_name_4 = models.CharField(max_length=10, null=True, blank=True)
    subpart_content_4 = RichTextField(null=True, blank=True)
    respone_type_4 = models.CharField(
        max_length=10,
        choices=RESPONSE_TYPES,
        default=TEXT, null=True, blank=True)
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
|
992,047 | 64822b6768eb2a653602ff0653e8dfd7f4d7554a | from django.urls import path
from .views import crearFase, listar_auditoria,listar_proyectos, proyectoCancelado
from . import views
#from .views import estadoProyecto,detallesProyecto,UsersProyecto,desvinculacionProyecto,ModificarRol,VerRoles,agregarUsuarios, VerUsersEnEspera, ActualizarUser, CrearRol, crearItem,agg_listar_tipo_item,aggAtributos,relacionarItem,detallesFase,listar_relaciones,listar_atributos,itemCancelado,comite,AggComite,desvinculacionComite,DeleteComite,auditoriaProyecto
from .views import *
from django.conf import settings
from django.conf.urls.static import static
# URL routes for the project. Bug fix below: the DescargarArchivo pattern
# contained a stray ')' inside the route string.
urlpatterns = [
    path('', views.index, name='index'),
    path('menu/logout/', views.logout,name='logout'),
    path('menu/perfil/',views.perfil,name='perfil'),
    path('menu/', views.menu, name='menu'),
    path('creacionProyecto/', views.creacionProyecto, name='CrearProyecto'),
    path('Contactos/', views.Contactos,name='Contactos'),
    #path('enEspera/',views.verSolicitudesenEspera),
    path('crear_fase/<int:nroFase>', views.crearFase, name='crearFase'),
    path('auditoria/', listar_auditoria, name='auditoria'),
    path('auditoriaProyecto/<int:pk>', auditoriaProyecto, name='auditoriaProyecto'),
    # path('AggUser/', listar_usuarios_registrar),
    path('AggUser/<int:pk>', views.AggUser, name='AggUser'),
    path('listUser/<int:pk>', views.UsersProyecto, name='UsersProyecto'),
    path('desvinculacionProyecto/<int:pk>/<int:pk_user>', views.desvinculacionProyecto, name='desvinculacionProyecto'),
    # NOTE(review): two adjacent <int:> converters with no separator make the
    # split of digits ambiguous -- confirm how callers build this URL.
    path('agregarUsuarios/<int:pk><int:nroFase>', views.agregarUsuarios, name='agregarUsuarios'),
    path('proyectos/', listar_proyectos, name='listar_proyectos'),
    path('cancelado/', proyectoCancelado, name='Proyectocancelado'),
    path('itemCancelado/<int:pk>', itemCancelado, name='itemCancelado'),
    # Fixed: the pattern used to end with a spurious ')' after the converter.
    path('DescargarArchivo/<int:id_atributo>', DescargarArchivo, name='DescargarArchivo'),
    path('crear/TipoItem/<int:id_fase>',views.tipo_item_views_create,name='tipo_item_views_create'),
    path('crear/atributo/<str:nombre_ti>/<str:cantidad_atributos>/<str:fase_id>',views.add_atribute,name='add_atribute'),
    path('listar/usuarios/aprobados',views.ver_usuarios_aprobados,name='ver_usuarios_aprobados'),
    path('getUser/<int:pk>',views.get_user,name='get_user'),
    path('estadoProyecto/<int:pk>', views.estadoProyecto, name='estado_Proyecto'),
    path('detallesProyecto/<int:pk>', views.detallesProyecto, name='detalles_Proyecto'),
    path('verProyecto/<int:pk>', views.ver_proyecto, name='ver_proyecto'),
    path('ver/fase/<int:id_fase>/proyecto',views.get_fase_proyecto,name='get_fase_proyecto'),
    path('importar/tipo/item/fase/<int:id_fase>', views.importar_tipo_item, name='importar_tipo_item'),
    # NOTE(review): duplicate of the estadoProyecto route above (same path,
    # view and name); this second registration is never reached.
    path('estadoProyecto/<int:pk>', views.estadoProyecto, name='estado_Proyecto'),
    path('enEspera/', VerUsersEnEspera.as_view(), name="listaDeEspera"),
    path('userEnEspera/<int:pk>', ActualizarUser.as_view(), name='userEsperando'),
    path('crearRol/<str:proyecto>', CrearRol.as_view(), name='crearRol'),
    path('modRol/<int:pk>', ModificarRol.as_view(), name='modificarRol'),
    path('lista/tipo/item/<int:id_proyecto>',views.listar_tipo_item, name='listar_tipo_item'),
    path('aggTI/<int:id_fase>', views.agg_listar_tipo_item, name='agg_listar_tipo_item'),
    path('misRoles/<int:proyecto>', VerRoles.as_view(), name="misRoles"),
    path('crearItem/<int:Faseid>', views.crearItem, name="crearItem"),
    path('aggAtributos/<int:idTI>', views.aggAtributos, name="aggAtributos"),
    path('relacionarItem/<int:id_proyecto>/<int:id_item>', views.relacionarItem, name="relacionarItem"),
    path('detallesFase/<int:idFase>', views.detallesFase, name="detallesFase"),
    path('listar_relaciones/<int:idItem>', views.listar_relaciones, name="listar_relaciones"),
    path('listar_atributos/<int:idAtributoTI>/<int:id_item>', views.listar_atributos, name="listar_atributos"),
    path('listar_atributos/<int:idAtributoTI>/<int:id_item>/<int:ver>', views.listar_atributos, name="listar_atributos_ver"),
    path('comite/<int:pk>', views.comite, name='comite'),
    path('AggComite/<int:pk>', views.AggComite, name='AggComite'),
    path('desvinculacionComite/<int:pk>/<int:pk_user>', views.desvinculacionComite, name='desvinculacionComite'),
    path('DeleteComite/<int:pk>', views.DeleteComite, name='DeleteComite'),
    path('editar/tipo/item/<int:id_ti>',views.editar_ti,name='editar_ti'),
    path('editar/tipo/item/<int:id_ti>/agregar/atributo',views.agregar_atributo_ti,name='agregar_atributo_ti'),
    path('eliminar/atributo/tipo/item/<int:id_ti>',views.eliminar_atributo_ti,name='eliminar_atributo_ti'),
    path('eliminar/tipo/item/<int:id_ti>',views.eliminar_tipo_item,name='eliminar_tipo_item'),
    path('asignar/rol/usuario/proyecto/<int:id_Fase>/<int:id_usuario>',views.Asignar_Rol_usuario_proyecto,name='Asignar_Rol_usuario_proyecto'),
    path('asignar/rol/<str:nombre>/proyecto',views.asignar_rol_proyecto,name='asignar_rol_proyecto'),
    path('modificar/rol/<str:nombre>/proyecto',views.modificar_rol_proyecto,name='modificar_rol_proyecto'),
    path('seleccionar/usuario/para/asignar/rol/proyecto/<int:id_fase>',views.seleccionar_usuario_rol,name='seleccionar_usuario_rol'),
    path('crearLB/<int:pk>/', views.CrearLB, name = 'crearLB'),
    path('ver_lb/<int:pk>/', views.ver_lb, name = 'ver_lb'),
    path('solicitud/<int:pk>/', solicitud, name = 'solicitud_cambio'),
    path('notificaciones/<int:pk>/', bandeja_mensajes_solicitudes, name = 'notificaciones'),
    path('bandeja_mensajes/<int:pk>/', bandeja_mensajes, name = 'bandeja_mensajes'),
    path('cambiarEstadoItem/<int:pk>/', views.modificarEstadoItem, name = 'cambiarEstadoItem'),
    path('verVersiones/Items/<int:id_item>/',views.ver_versiones_item,name="ver_versiones_item"),
    path('reversionar/item/<int:id_item_reversionar>/<int:id_item_actual>/',views.reversionar_item,name='reversionar_item'),
    path('relaciones/item/<int:pk>/', views.Editar_relaciones, name = 'editar_relaciones'),
    path('relaciones/item/versiones/<int:pk>/<str:id>', views.Editar_relaciones, name = 'editar_relaciones_versiones'),
    path('versiones/item/<int:pk>/', views.versiones_item, name = 'versiones_item'),
]
# Serve uploaded media directly only in development.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
992,048 | 6f6c6fa6c2657adbff69c52bacf654a8aa266756 | # Project name : HackerRank: Day 27: Testing
# Link : https://www.hackerrank.com/challenges/30-testing/problem
# Try it on :
# Author : Wojciech Raszka
# E-mail : contact@gitpistachio.com
# Date created : 2020-07-15
# Description :
# Status : Accepted (169038695)
# Tags : python
# Comment :
def minimum_index(seq):
    """Return the index of the first occurrence of the minimum of `seq`.

    Raises:
        ValueError: if `seq` is empty.
    """
    if not seq:
        raise ValueError("Cannot get the minimum value index from an empty sequence")
    # min over indices keyed by value; ties resolve to the first index,
    # matching the original manual scan.
    return min(range(len(seq)), key=seq.__getitem__)
class TestDataEmptyArray(object):
    """Fixture: the empty-sequence case (expected to raise ValueError)."""
    @staticmethod
    def get_array():
        return []
class TestDataUniqueValues(object):
    """Fixture: all-distinct values; minimum (1) sits at index 1."""
    @staticmethod
    def get_array():
        return [55,1,2,3,4,76]
    @staticmethod
    def get_expected_result():
        return 1
class TestDataExactlyTwoDifferentMinimums(object):
    """Fixture: the minimum (2) occurs twice; the FIRST index (3) wins."""
    @staticmethod
    def get_array():
        return [4,3,12,2,15,14,2]
    @staticmethod
    def get_expected_result():
        return 3
def TestWithEmptyArray():
    """minimum_index must raise ValueError for an empty sequence."""
    seq = TestDataEmptyArray.get_array()
    try:
        # Return value intentionally discarded -- only the raise matters.
        minimum_index(seq)
    except ValueError:
        pass
    else:
        assert False, "expected ValueError for an empty sequence"
def TestWithUniqueValues():
    """minimum_index must locate the single smallest element."""
    seq = TestDataUniqueValues.get_array()
    assert len(seq) >= 2
    assert len(set(seq)) == len(seq)  # fixture sanity: all values distinct
    assert minimum_index(seq) == TestDataUniqueValues.get_expected_result()
def TestiWithExactyTwoDifferentMinimums():
    """minimum_index must return the FIRST index when the minimum repeats."""
    seq = TestDataExactlyTwoDifferentMinimums.get_array()
    assert len(seq) >= 2
    ordered = sorted(seq)
    # Fixture sanity: exactly two copies of the minimum, nothing else as small.
    assert ordered[0] == ordered[1] and (len(ordered) == 2 or ordered[1] < ordered[2])
    assert minimum_index(seq) == TestDataExactlyTwoDifferentMinimums.get_expected_result()
# Ad-hoc runner: each check raises AssertionError on failure, so reaching
# the final print means every test passed.
TestWithEmptyArray()
TestWithUniqueValues()
TestiWithExactyTwoDifferentMinimums()
print("OK")
|
992,049 | edfed4ff5872da11b4ce81d749489d89a6501545 | import io, os, sys, requests
from PIL import Image
from picamera import PiCamera
from time import sleep
# Raspberry Pi camera capture: rotate, preview briefly, snap one photo,
# then read the JPEG bytes back for a later HTTP request.
_maxNumRetries = 10
camera = PiCamera()
camera.rotation = 270
camera.start_preview()
# Give the sensor time to settle exposure/white balance before capturing.
sleep(2)
camera.capture('/home/pi/Desktop/image.jpg')
camera.stop_preview()
# NOTE(review): `json` here is a plain variable, not the stdlib module
# (json is never imported in this file).
json = None
# NOTE(review): file handle is never closed -- consider a `with` block.
data = open('/home/pi/Desktop/image.jpg', 'rb').read()
992,050 | c2f3fb111351badd811afdfc1202934eca481c34 | from collections import deque
def get_window_max(arr, win_size):
    """Return the maximum of every length-`win_size` window of `arr`.

    Monotonic-deque technique: the deque holds window values in decreasing
    order, so its head is always the current window's maximum. O(n) overall
    because each element is appended and popped at most once.

    Cleanup: removed a dead `j += 1` (meaningless inside a `for` loop) and
    the commented-out two-pointer scaffolding.
    """
    queue = deque()
    res = []
    for j in range(len(arr)):
        # Values smaller than arr[j] can never again be a window max.
        while queue and queue[-1] < arr[j]:
            queue.pop()
        queue.append(arr[j])
        # If the head is exactly the element sliding out of the window,
        # retire it (duplicates are safe: both copies were kept above).
        if j >= win_size and queue and queue[0] == arr[j - win_size]:
            queue.popleft()
        if j >= win_size - 1:
            res.append(queue[0])
    return res
def get_len_of_longest_no_repeat(txt):
    """Return (length, substring) of the longest run of distinct characters.

    Sliding window with a char -> next-position map: when txt[right] was
    already seen INSIDE the current window, jump `left` just past that
    earlier occurrence. On ties the earliest window wins. O(n) time.

    Bug fix: an empty input used to crash (`range_result` stayed None and
    was then subscripted); it now returns (0, "").
    """
    if not txt:
        return 0, ""
    win = {}  # char -> index just past its most recent occurrence
    left = 0
    len_result, range_result = 0, None
    for right, ch in enumerate(txt):
        if ch in win:
            # Only a repeat inside the window may move the boundary; a
            # stale occurrence left of `left` must be ignored.
            left = max(win[ch], left)
        win[ch] = right + 1
        if right - left + 1 > len_result:
            len_result = right - left + 1
            range_result = (left, right)
    return len_result, txt[range_result[0]:range_result[1] + 1]
def get_len_of_longest_no_repeat2(txt):
    """Return the length of the longest run of distinct characters.

    Same sliding window as get_len_of_longest_no_repeat, but the
    char -> next-position map is a flat 256-slot table indexed by code
    point, so the input must consist of single-byte characters.

    Cleanup: renamed the local `map` (it shadowed the builtin) and removed
    a commented-out guard made redundant by max() with the -1 sentinel.
    """
    last_seen = [-1] * 256  # code point -> index just past last occurrence
    left = 0
    best = 0
    for right, ch in enumerate(txt):
        code = ord(ch)
        # The -1 sentinel is harmless: max() simply keeps `left`.
        left = max(left, last_seen[code])
        last_seen[code] = right + 1
        best = max(best, right - left + 1)
    return best
def find_anagrams(str_main, str_pattern):
    """Return start indices of every substring of `str_main` that is an
    anagram of `str_pattern` (lowercase a-z only).

    Fixed-size sliding window over per-letter counts.

    Bug fix: the pattern profile used to record mere PRESENCE (0/1 per
    letter), so any pattern with a repeated letter (e.g. "aab") could
    never match a window's true counts. It now records counts.
    """
    a_code = ord('a')
    m = len(str_pattern)
    window_counts = [0] * 26
    pattern_counts = [0] * 26
    for ch in str_pattern:
        pattern_counts[ord(ch) - a_code] += 1
    res = []
    for right, ch in enumerate(str_main):
        window_counts[ord(ch) - a_code] += 1
        if right >= m:
            # Window exceeded the pattern length: evict the leftmost char.
            window_counts[ord(str_main[right - m]) - a_code] -= 1
        if right >= m - 1 and window_counts == pattern_counts:
            res.append(right - m + 1)
    return res
# Ad-hoc smoke tests: exercised only when run as a script.
if __name__ == '__main__':
    print(get_len_of_longest_no_repeat("ababcabdefkt1fasf2345"))
    print(get_len_of_longest_no_repeat2("ababcabdefkt1fasf2345"))
    print(get_window_max([1, 2, 1, 6, 5, 3, 8, 1, 9, 1, 1, 1, 1], 4))
    print(find_anagrams("atdatasasatd", "adt"))
992,051 | 7342abc75bc49d4aeedf10f3f861a5ba0ea23362 | #!/usr/bin/env python
import os
from setuptools import setup
# Utility function to read the README file.
# https://pythonhosted.org/an_example_pypi_project/setuptools.html#setting-up-setup-py
def read(fname):
    """Return the contents of `fname`, resolved relative to this file.

    Bug fix: the original left the file handle open (no close); a `with`
    block now guarantees it is released.
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path) as fh:
        return fh.read()
# Package metadata for netflix-spectator-py; the long description is the
# README so PyPI renders it as the project page.
setup(
    name='netflix-spectator-py',
    version='0.1.16',
    description='Python library for reporting metrics to Atlas.',
    long_description=read('README.md'),
    long_description_content_type='text/markdown',
    author='Brian Harrington',
    author_email='netflix-atlas@googlegroups.com',
    license='Apache 2.0',
    url='https://github.com/netflix/spectator-py/',
    packages=['spectator', 'spectator.histogram'],
    install_requires=['future'],
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 3 - Alpha',

        # Indicate who your project is intended for
        'Intended Audience :: Developers',
        'Topic :: Software Development',

        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: Apache Software License',

        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ]
)
|
992,052 | b1404fdc1ea51f18a141ce5ea71c431484082d63 | import paramiko
# SSH/SFTP demo: run a command over an SSH channel, stream its output,
# then fetch a file over SFTP. (Variable names are Polish: sesja=session,
# dane=data.)
t = paramiko.Transport(("54.72.144.20", 7515))
# Password is read from a local file; the handle is left unclosed --
# NOTE(review): consider a `with` block.
t.connect(username='kgawda', password=open('/home/kjg/alx_ssh.txt').read().strip())
sesja = t.open_session()
sesja.exec_command('ls -al')
# Drain the channel until recv() returns an empty (falsy) chunk.
dane = sesja.recv(1024)
while dane:
    print(dane.decode('utf-8'))
    dane = sesja.recv(1024)
# while dane := sesja.recv(1024):
#     print(dane.decode('utf-8'))
sftp = paramiko.SFTPClient.from_transport(t)
print(sftp.open('test.txt').read())
sftp.get('test.txt', 'test_from_remote.txt')
# sftp.put()
t.close()
|
992,053 | 538726cb2c7c9de4da8ad5694f47abbff1d7f0ca | #!/usr/bin/env python
#Data cleasing and blacklisting is based on the logic used in
#trendingtopics.org
import sys
import os
import re
import urllib
#doing date manipulation
try:
    # See if we are running on Hadoop cluster: Hadoop streaming exposes the
    # current input split's path via the map_input_file environment variable.
    filepath = os.environ["map_input_file"]
    filename = os.path.split(filepath)[-1]
except KeyError:
    # sample file for use in testing locally (no Hadoop environment)
    filename = 'pagecounts-20090419-020000.txt'
#printing the filename
#print filename
# Filenames look like pagecounts-YYYYMMDD-HHMMSS.txt; field 1 is the date.
date = filename.split('-')[1]
#print date
# Excludes pages outside of namespace 0 (ns0): any title of the form
# "<Namespace>:<rest>" for a known MediaWiki namespace is filtered out.
namespace_titles_regex = re.compile(
    r'(Media|Special'
    r'|Talk|User|User_talk|Project|Project_talk|File'
    r'|File_talk|MediaWiki|MediaWiki_talk|Template'
    r'|Template_talk|Help|Help_talk|Category'
    r'|Category_talk|Portal|Wikipedia|Wikipedia_talk)\:(.*)')
# More exclusions: article titles starting with a lowercase letter.
first_letter_is_lower_regex = re.compile('([a-z])(.*)')
# BUG FIX: the original pattern '(.*).(jpg|...)' used an unescaped dot, so
# ANY character before the extension matched (e.g. "Foojpg" was wrongly
# classified as an image file). Escaping the dot restricts the match to
# titles with a real "<name>.<ext>" file extension.
image_file_regex = re.compile(r'(.*)\.(jpg|gif|png|JPG|GIF|PNG|txt|ico)')
# Exclude Mediawiki boilerplate
blacklist = [
    '404_error/',
    'Main_Page',
    'Hypertext_Transfer_Protocol',
    'Favicon.ico',
    'Search'
]
def clean_anchors(page):
    """Strip a trailing '#anchor' fragment from a page title.

    Pages like "Facebook#Website" really refer to "Facebook", so everything
    from the first '#' onward is dropped. Titles without '#' pass through
    unchanged.
    """
    head, _sep, _fragment = page.partition('#')
    return head
def is_valid_title(title):
    """Return True when *title* is a namespace-0 article title worth counting.

    Rejects titles in a non-article namespace, titles starting with a
    lowercase letter, image/file titles, titles containing spaces, and
    entries on the boilerplate blacklist.
    """
    if namespace_titles_regex.match(title) is not None:
        return False
    if first_letter_is_lower_regex.match(title) is not None:
        return False
    if image_file_regex.match(title):
        return False
    if ' ' in title:
        return False
    return title not in blacklist
# input comes from STDIN (standard input)
# Python 2 script: Hadoop streaming mapper emitting "date+title<TAB>count"
# for English-wiki titles viewed more than 100 times.
for line in sys.stdin:
    # remove leading and trailing whitespace
    line = line.strip()
    # split the line into words: [project, title, count, bytes]
    # NOTE(review): lines with fewer than 3 fields would raise IndexError
    # below -- assumes well-formed pagecount records. TODO confirm.
    words = line.split()
    # increase counters
    # for word in words:
    # write the results to STDOUT (standard output);
    # what we output here will be the input for the
    # Reduce step, i.e. the input for reducer.py
    #
    # tab-delimited; the trivial word count is 1
    # print '%s\t%s' % (word, 1)
    if is_valid_title(words[1]):
        # URL-decode the title, then strip any '#anchor' suffix.
        title = clean_anchors(urllib.unquote_plus(words[1]))
        if len(title) > 0 and title[0] != '#':
            # NOTE(review): '&' is a bitwise AND of two bools here -- works,
            # but 'and' would be the conventional spelling.
            if(( words[0] == 'en') & (int(words[2]) > 100)):
                print '%s\t%s' % ((date+title),words[2])
|
992,054 | b4bde82cef86f8fa3c3fbd08aad3f13ed41f1a64 | from keras import initializers, regularizers, constraints
from keras.layers import *
import keras.backend as K
from keras import regularizers
from keras import Model
import numpy as np
from keras.engine.topology import Layer
def zero_loss(y_true, y_pred):
    """Pass-through Keras loss: ignores y_true and halves the column-sum of
    y_pred (presumably the per-sample center-loss values emitted by
    CenterLossLayer -- TODO confirm against the model definition)."""
    summed = K.sum(y_pred, axis=0)
    return 0.5 * summed
class CenterLossLayer(Layer):
    """Center-loss layer: maintains per-class embedding centers and returns
    each sample's squared distance to its class center.

    Inputs to call(): x[0] = embeddings, x[1] = one-hot labels. The centers
    matrix is non-trainable and is moved toward the batch mean by a fraction
    `alpha` each step via add_update().
    """

    def __init__(self, alpha=0.5, **kwargs):
        super().__init__(**kwargs)
        # Center update rate: centers move alpha * mean-delta per batch.
        self.alpha = alpha

    def build(self, input_shape):
        # NOTE(review): 19 classes x 128-dim embeddings are hard-coded here,
        # while the comments below still mention 10x2 -- confirm these match
        # the actual model before reuse.
        self.centers = self.add_weight(name='centers',
                                       shape=(19, 128),
                                       initializer='uniform',
                                       trainable=False)
        # self.counter = self.add_weight(name='counter',
        #                                shape=(1,),
        #                                initializer='zeros',
        #                                trainable=False)  # just for debugging
        super().build(input_shape)

    def call(self, x, mask=None):
        # x[0] is Nx2, x[1] is Nx10 onehot, self.centers is 10x2
        # Sum of (embedding - its class center), accumulated per class:
        delta_centers = K.dot(K.transpose(x[1]), (K.dot(x[1], self.centers) - x[0]))  # 10x2
        # Per-class sample counts (+1 avoids division by zero for absent classes).
        center_counts = K.sum(K.transpose(x[1]), axis=1, keepdims=True) + 1  # 10x1
        delta_centers /= center_counts
        new_centers = self.centers - self.alpha * delta_centers
        self.add_update((self.centers, new_centers), x)
        # self.add_update((self.counter, self.counter + 1), x)
        # Squared distance of each embedding to its (pre-update) class center.
        self.result = x[0] - K.dot(x[1], self.centers)
        self.result = K.sum(self.result ** 2, axis=1, keepdims=True)  # / K.dot(x[1], center_counts)
        return self.result  # Nx1

    def compute_output_shape(self, input_shape):
        return K.int_shape(self.result)
class Attention(Layer):
    """Soft-attention pooling over the timestep axis.

    Collapses (batch, step_dim, features) -> (batch, features) by learning a
    scalar weight per timestep: e_t = tanh(x_t . W [+ b_t]), a = softmax(e),
    output = sum_t a_t * x_t. An input mask zeroes the weights of masked
    timesteps before renormalisation.
    """

    def __init__(self, step_dim,
                 W_regularizer=None, b_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True, **kwargs):
        # This layer consumes masks itself (see compute_mask).
        self.supports_masking = True
        self.init = initializers.get('glorot_uniform')
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.bias = bias
        self.step_dim = step_dim
        # Filled in build() once the input feature size is known.
        self.features_dim = 0
        super(Attention, self).__init__(**kwargs)

    def build(self, input_shape):
        assert len(input_shape) == 3  # (batch, steps, features)
        self.W = self.add_weight(shape=(input_shape[-1],),
                                 initializer=self.init,
                                 name='{}_W'.format(self.name),
                                 regularizer=self.W_regularizer,
                                 constraint=self.W_constraint)
        self.features_dim = input_shape[-1]
        if self.bias:
            # One bias per timestep (not per feature).
            self.b = self.add_weight(shape=(input_shape[1],),
                                     initializer='zero',
                                     name='{}_b'.format(self.name),
                                     regularizer=self.b_regularizer,
                                     constraint=self.b_constraint)
        else:
            self.b = None
        self.built = True

    def compute_mask(self, input, input_mask=None):
        # The timestep axis is reduced away, so no mask propagates downstream.
        return None

    def call(self, x, mask=None):
        features_dim = self.features_dim
        step_dim = self.step_dim
        # Scalar score per timestep:
        # (batch*steps, feat) . (feat, 1) reshaped to (batch, steps).
        eij = K.reshape(K.dot(K.reshape(x, (-1, features_dim)),
                        K.reshape(self.W, (features_dim, 1))), (-1, step_dim))
        if self.bias:
            eij += self.b
        eij = K.tanh(eij)
        a = K.exp(eij)
        if mask is not None:
            # Zero the weights of masked timesteps before normalising.
            a *= K.cast(mask, K.floatx())
        # Manual softmax; epsilon guards against an all-masked row.
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
        a = K.expand_dims(a)
        weighted_input = x * a
        return K.sum(weighted_input, axis=1)

    def compute_output_shape(self, input_shape):
        return input_shape[0], self.features_dim
|
992,055 | a2c756886a39424fe56ef694d385f121c370c320 | #Twitter API credentials
# SECURITY(review): real-looking Twitter API credentials are hard-coded and
# committed to source control. They should be considered compromised: revoke
# them and load replacements from environment variables or a secrets manager.
consumer_key = "zCqqTurprj78ehJTBUtoo1YTs"
consumer_secret = "voYhiXqCKSITmJZturkp2rTkzRv8tDiVeVGJmdVlcbCd8oXGDT"
access_key = "16597102-GZG4gFoqf0Kvww0z236nSzTSS92AHJy2ITidGr7m8"
access_secret = "qQfa4A4hOu3q8qfPrm9DcVCM3RCBUEniNl8BBMBrkxc8q" |
992,056 | 33519c469658d5dd4d593dde9fcab25750fe9991 | '''
Script to get menus from yelp, and save ratings
Name: Chris Hume
Date : 12/6/13
'''
import foursquare, json, requests
import logging
from bs4 import BeautifulSoup
from urllib2 import urlopen
logging.basicConfig()
##############################
#
# get the individual ratings of each item
#
##############################
def get_ratings(url):
    """Scrape a Yelp menu-item page and tally its review stars.

    Counts the star icons for each rating level 0-5 and returns
    [likes, dislikes], where likes = 3/4/5-star reviews and
    dislikes = 0/1/2-star reviews.
    """
    response = requests.get(url)
    soup = BeautifulSoup(response.text)
    # Count the "star-img stars_N" icons for every rating level at once.
    star_counts = {}
    for stars in range(6):
        icons = soup.find_all("i", class_="star-img stars_%d" % stars)
        star_counts[stars] = len(icons)
    likes = star_counts[5] + star_counts[4] + star_counts[3]
    dislikes = star_counts[2] + star_counts[1] + star_counts[0]
    return [likes, dislikes]
#r = requests.get("http://www.yelp.com/menu/mistral-restaurant-boston/item/grilled-portobello-mushroom-carpaccio")
#r = requests.get("http://www.yelp.com/menu/mistral-restaurant-boston/item/seared-foie-gras")
##############################
#
# Need to call get_menu.py -- (add as a funciton)
# then request that url if get (url) and skip (if == False)
#
##############################
#r = requests.get("http://www.yelp.com/menu/mistral-restaurant-boston/")
r = requests.get("http://www.yelp.com/menu/coda-boston")
count = 0
data = r.text
soup = BeautifulSoup(data)
'''
for link in soup.find_all('a'):
#print (link.get('href'))
#if(link.get('href').find('grilled')):
# print link.get('href')
bob = str(link.get('href'))
sub = 'grilled'
if(sub in bob):
addon = link.get('href')
#print addon
full_url = "http://www.yelp.com"+addon
print full_url
r = requests.get(full_url)
the class we need is = "menu-item-details"
then need the link in this class
need to save the menu item
'''
def is_item(tag, attr="class"):
    """Return True when *tag* carries the attribute *attr*.

    BUG FIX: the original called tag.has_attr() with no argument, which
    always raised TypeError (BeautifulSoup's Tag.has_attr requires the
    attribute name). The attribute name now defaults to "class" so existing
    single-argument call sites keep working.
    """
    return tag.has_attr(attr)
#print soup.find_all("div", "menu-item-details")
###################################
#
# Need to find links inside class = "menu-item-details"
#
# Dictionary of html links and item names
#
###################################
# Build {menu item name (lowercased): full Yelp URL} from every
# "menu-item-details" cell on the page. Python 2 script.
name_and_url = {}
list_of_items = []
line = soup.find_all("div", class_="menu-item-details")
count = 0
for x in line:
    #print x
    #print 'yes\n'
    x = x.find("a")
    try:
        #print x.get('href') #link to the menu item
        count += 1
        #list_of_items.append(x.getText()) #name of the menu item
        url = x.get('href')
        full_url = "http://www.yelp.com"+url
        name = x.getText()
        name = name.lower()
        name_and_url[name] = full_url # add to dict={name, url}
        full_url = ''
    except:
        # Cell without a usable <a> tag -- skipped (bare except hides all errors).
        print '',
#print count
# Copy the mapping into yelp_menu while counting entries.
yelp_menu = {}
yelp_count = 0
for key, value in name_and_url.items():
    print key
    print value, '\n'
    yelp_menu[key] = value
    yelp_count += 1
    #print get_ratings(value)
#print get_ratings(value)
# Fetch the same venue's menu from the Foursquare v2 API and collect
# {item name: entryId} into fs_menu by walking the nested JSON until an
# index runs off the end (the bare excepts double as loop terminators).
# SECURITY(review): client_id/client_secret are hard-coded in the URL --
# they should be revoked and injected from configuration.
fsid = "49fb1332f964a520166e1fe3"
r = requests.get("https://api.foursquare.com/v2/venues/"+fsid+"/menu?client_secret=2GA3BI5S4Z10ONRUJRWA40OTYDED3LAGCUAXJDBBEUNR4JJN&client_id=YZVWMVDV1AFEHQ5N5DX4KFLCSVPXEC1L0KUQI45NQTF3IPXT")
data = r.text
soup = BeautifulSoup(data)
js = json.loads(data)
#print data
#print js['response']['menu']['menus']['items'][0]['menuId']
first_true = True
save_this = False
first_count = 0
item_name = ""
item_id = 0
fs_menu = {}
fs_count = 0
while(first_true):
    sec_count = 0
    sec_true = True
    try:
        # Probe whether a section at index first_count exists; IndexError/
        # KeyError ends the outer loop.
        js['response']['menu']['menus']['items'][0]['entries']['items'][first_count]['name']
    except:
        first_true = False
        break
    while(sec_true):
        #save the id and anme
        try:
            item_id = js['response']['menu']['menus']['items'][0]['entries']['items'][first_count]['entries']['items'][sec_count]['entryId']
            item_name = js['response']['menu']['menus']['items'][0]['entries']['items'][first_count]['entries']['items'][sec_count]['name']
            sec_count += 1
            fs_menu[item_name] = item_id
            fs_count += 1
            #save_this = True
            #print item_id
        except:
            # Ran past the last entry of this section.
            sec_true = False
            #print 'exception'
    try:
        #print item_id, " ; ", item_name
        pass
    except:
        pass
    #print save_this
    #print name, " ", fsid
    #will save the restaurant if can get the menu
    try:
        pass
        #outfile.write(fsid + "," + name + "\n")
    except:
        pass
    first_count += 1
# Report how many Foursquare menu items also appear in the Yelp menu
# (matched by stripped, lowercased name). Python 2 script.
print 'Done'
print 'fs_items : ', fs_count
print 'yelp_items : ', yelp_count
match_count = miss_count = 0
for name, value in fs_menu.items():
    #print name
    name = name.strip()
    name = name.lower()
    try:
        # Dict-lookup doubles as the membership test (KeyError == miss).
        en = yelp_menu[name]
        #print 'Yes: ', en
        match_count += 1
    except:
        #print 'No: ', name
        miss_count += 1
print 'match_count: ', match_count
print 'miss_count: ', miss_count
###################################
#
# Go through each word and add to dictionary
# match with recomenuAPI data - id
#
###################################
'''
openfile = open("results_db.txt", "r")
name_and_ids = {}
for line in openfile:
line = line.strip()
line = line.split('|')
#print line[2], " : ", line[3]
name = line[2]
id = line[3]
name = name.lower()
name_and_ids[name] = id
#for key, value in name_and_ids.items():
# print key, value
openfile.close()
###################################
#
# Go through dictionary - go to urls, get ratings
# save into a dictionary
#
###################################
testingdict = {}
testingdict["help"] = [1, 2, 3]
for key, value in testingdict.items():
print key, value
id_and_ratings = {}
for name, url in name_and_url.items():
#print name, " : ", url
print get_ratings(url)
id = name_and_ids.get(name)
print id, " : ", name
#########
#
# Maybe just need to save {item_name, ratings}
#
#########
'''
|
992,057 | cc98fbfdd9553cf74f919b2b04c18bab65ae61bf | import bpy
import types
from traceback import print_exc
from inspect import isfunction, ismethod
from .addon import TEMP_PREFS_ID, prefs, temp_prefs
from .constants import (
ICON_UNKNOWN, ICON_FOLDER, ICON_FUNC, ICON_PROP, ICON_NONE,
TOOLTIP_LEN, PROOT, PDOT,
get_icon)
from .utils.collection_utils import sort_collection
def islist(data):
    """True for list-like containers, including Blender's bpy function and
    collection property types."""
    list_like_types = (
        list,
        tuple,
        bpy.types.bpy_func,
        bpy.types.bpy_prop_collection,
    )
    return isinstance(data, list_like_types)
def isfunc(obj):
    """True when *obj* is a plain function, a (bound) method, or a C-level
    built-in function/method."""
    builtin_callable_types = (types.BuiltinFunctionType, types.BuiltinMethodType)
    if isinstance(obj, builtin_callable_types):
        return True
    return isfunction(obj) or ismethod(obj)
class ContextData:
    """Inspector state for browsing bpy data paths.

    Holds the current eval path (`self.path`), the object it resolves to
    (`self.data`), and the RNA property/function names discovered on it.
    Fills the temp-prefs `obj_list` (navigable children) and `info_list`
    (properties/functions) collections for the UI.
    """

    def __init__(self):
        # Current eval path, starting at the package root constant.
        self.path = PROOT
        # Object currently being inspected (result of eval'ing self.path).
        self.data = None
        # Names of RNA properties / callables found on self.data.
        self.properties = []
        self.functions = []

    def data_label(self, name):
        """Return a display label for an indexed entry like "[3]", appending
        the item's .name or .type when it has one."""
        if name.startswith("[") and name.endswith("]"):
            k = name[1:-1]
            if k.isdigit():
                obj = self.data[int(k)]
                if hasattr(obj, "name"):
                    return "%s %s" % (name, obj.name)
                elif hasattr(obj, "type"):
                    return "%s %s" % (name, obj.type)
        return name

    def eval_path(self, path, globals=None, locals=None):
        """Evaluate *path* against the cached globals/locals namespaces.

        New namespaces passed in are cached for subsequent calls (update_lists
        primes them with C=bpy.context, D=bpy.data).
        NOTE(review): eval() on a path string -- acceptable only because the
        paths originate from this add-on's own UI, never external input.
        """
        if globals:
            self.eval_globals = globals
        if locals:
            self.eval_locals = locals
        return eval(path, self.eval_globals, self.eval_locals)
        return None  # unreachable; kept from the original source

    def add_col_item(self, col, item_type, name=None, data=None):
        """Append a ListItem of *item_type* to collection *col*."""
        item = col.add()
        item.type = item_type
        if name is not None:
            item.name = name
        if data is not None:
            item.data = data

    def add_info_item(self, item_type, name=None, data=None):
        """Append a row to the info (properties/functions) list."""
        self.add_col_item(self.tpr.info_list, item_type, name, data)

    def add_obj_item(self, item_type, name=None, data=None):
        """Append a row to the object (navigation) list."""
        self.add_col_item(self.tpr.obj_list, item_type, name, data)

    def add_header_item(self, label, icon='NONE'):
        """Start a collapsible info-list section named *label*, registering a
        matching BoolProperty on TempPreferences for its open/closed state."""
        # Insert a spacer between an expanded previous section and this one.
        # NOTE(review): getattr(self, ...) looks up the toggle on this object
        # rather than on TempPreferences -- confirm this is intentional.
        if self.header and getattr(self, self.header, None) and \
                len(self.tpr.info_list):
            self.add_info_item('SPACER')
        self.header = label
        self.add_info_item('GROUP', label, icon)
        if not hasattr(TempPreferences, label):
            setattr(
                TempPreferences, label,
                bpy.props.BoolProperty(
                    default=True, update=TempPreferences.update_header))

    def add_properties(self):
        """Emit the "Properties" section rows for the current object."""
        tpr = temp_prefs()
        self.add_header_item("Properties", ICON_PROP)
        if not getattr(tpr, "Properties"):  # section collapsed
            return
        for name in self.properties:
            if not hasattr(self.data, name):
                continue
            self.add_info_item('PROP', name)

    def add_functions(self):
        """Emit the "Functions" section rows, parsing each callable's
        signature out of its docstring when available."""
        tpr = temp_prefs()
        self.add_header_item("Functions", ICON_FUNC)
        if not getattr(tpr, "Functions"):  # section collapsed
            return
        for name in self.functions:
            func = getattr(self.data, name, None)
            l = name        # display name (left part)
            r = "(...)"     # display args (right part), default when unknown
            doc = getattr(func, "__doc__", None)
            if doc:
                # bpy docstrings embed the signature on the first line,
                # either after ":: " or after the owning type's name + ".".
                sig, _, _ = doc.partition("\n")
                i = sig.find(":: ")
                if i != -1:
                    sig = sig[i + 3:]
                else:
                    _, _, sig = sig.partition(".")
                l, _, r = sig.partition("(")
                r = "(" + r
            self.add_info_item('FUNC', l, r)

    def parse_list(self):
        """Populate obj_list from a sequence-like data object (indexed only)."""
        groups = dict(FOLDER=[], OTHER=[])
        if hasattr(self.data, "rna_type"):
            self.parse_obj()
        for i, v in enumerate(self.data):
            stri = "[%d]" % i
            label = stri
            item_path = self.path + stri
            # Label carries the value's class name after a '|' separator.
            label += "|" + v.__class__.__name__
            groups['FOLDER'].append((label, item_path, 'GROUP'))
        for key in ('FOLDER', 'OTHER'):
            group = groups[key]
            for name, path, item_type in group:
                self.add_obj_item(item_type, name, path)

    def parse_alist(self):
        """Populate obj_list from an associative collection (has .items()),
        preferring string-key paths over index paths when keys are strings."""
        groups = dict(FOLDER=[], OTHER=[])
        if hasattr(self.data, "rna_type"):
            self.parse_obj()
        for i, (k, v) in enumerate(self.data.items()):
            stri = "[%d]" % i
            strk = "['%s']" % k
            k_is_int = isinstance(k, int)
            if k_is_int:
                label = stri
                item_path = self.path + stri
            else:
                label = "%s %s" % (stri, k)
                item_path = self.path + strk
            label += "|" + v.__class__.__name__
            groups['FOLDER'].append((label, item_path, 'GROUP'))
        for key in ('FOLDER', 'OTHER'):
            group = groups[key]
            for name, path, item_type in group:
                self.add_obj_item(item_type, name, path)

    def parse_obj(self):
        """Populate properties/functions/obj_list from an RNA-typed object,
        honouring the user's per-type property visibility preferences."""
        pr = prefs()
        tpr = temp_prefs()
        rna_type = getattr(self.data, "rna_type", None)
        if rna_type is None:
            return
        # Property types that navigate deeper rather than display a value.
        folder_types = {'POINTER', 'COLLECTION'}
        skip_prop_types = set()
        if not pr.show_bool_props:
            skip_prop_types.add('BOOLEAN')
        if not pr.show_int_props:
            skip_prop_types.add('INT')
        if not pr.show_float_props:
            skip_prop_types.add('FLOAT')
        if not pr.show_str_props:
            skip_prop_types.add('STRING')
        if not pr.show_enum_props:
            skip_prop_types.add('ENUM')
        items = dir(self.data)
        for item in items:
            if item.startswith("_"):
                continue
            if hasattr(rna_type, "functions") and item in rna_type.functions:
                self.functions.append(item)
                continue
            if hasattr(rna_type, "properties") and item in rna_type.properties:
                prop = rna_type.properties[item]
                if prop.type not in folder_types:
                    # Value property: list it unless its type (or vector-ness)
                    # is filtered out by preferences.
                    if prop.type not in skip_prop_types:
                        if not pr.show_vector_props and getattr(
                                prop, "is_array", False):
                            continue
                        self.properties.append(item)
                    continue
            # Pointer/collection property or plain attribute: resolve it to
            # decide whether it is navigable.
            item_path = self.path + PDOT + item
            obj = self.eval_path(item_path)
            if isfunc(obj):
                self.functions.append(item)
                continue
            if obj is None:
                item_name = item + "|None"
                item_type = 'ITEM'
            else:
                item_name = item + "|" + type(obj).__name__
                item_type = 'GROUP'
            self.add_obj_item(item_type, item_name, item_path)
        if pr.group_none:
            # Push the non-navigable (None) rows to the end of the list.
            sort_collection(tpr.obj_list, lambda item: item.type == 'ITEM')

    def update_lists(self, path, update_breadcrumbs=True):
        """Re-resolve *path* (falling back toward the root on eval failure)
        and rebuild both UI lists for the resulting object."""
        self.tpr = tpr = temp_prefs()
        tpr.obj_list.clear()
        tpr.info_list.clear()
        self.functions.clear()
        self.properties.clear()
        # C and D seed the eval namespaces captured by eval_path below.
        C = bpy.context
        D = bpy.data
        while True:
            try:
                self.data = self.eval_path(path, globals(), locals())
                break
            except:
                # Trim the last path segment and retry; bottom out at the root.
                if PDOT in path:
                    path = PDOT.join(path.split(PDOT)[:-1])
                else:
                    path = PROOT
        self.path = path
        if PDOT in self.path:
            # Add the "[..]" parent entry; an indexed tail like "foo[2]"
            # parents to the collection "foo" itself.
            par_path, _, part = path.rpartition(PDOT)
            i = part.find("[")
            if i != -1:
                par_path = "%s.%s" % (par_path, part[:i])
            self.add_obj_item('GROUP', "[..]", par_path)
        if update_breadcrumbs:
            tpr.last_path = path
            tpr.update_breadcrumbs(self.path)
        if self.data is None:
            return
        if islist(self.data):
            if hasattr(self.data, "items"):
                self.parse_alist()
            else:
                self.parse_list()
        else:
            self.parse_obj()
        self.update_info_list()

    def update_info_list(self):
        """Rebuild only the info (properties/functions) list."""
        tpr = temp_prefs()
        tpr.info_list.clear()
        self.header = None
        self.add_properties()
        self.add_functions()
class ListItem(bpy.types.PropertyGroup):
    """One row of the inspector's object/info lists.

    `data` carries the eval path (or auxiliary text) attached to the row;
    `type` selects how the row is drawn: plain value, navigable group,
    visual spacer, property entry, or function entry.
    """
    data: bpy.props.StringProperty()
    type: bpy.props.EnumProperty(
        items=(
            ('ITEM', "", ""),
            ('GROUP', "", ""),
            ('SPACER', "", ""),
            ('PROP', "", ""),
            ('FUNC', "", ""),
        ))
class TempPreferences(bpy.types.PropertyGroup):
    """Session (non-persistent) UI state, attached to the WindowManager.

    NOTE: `breadcrumb_items` and `cd` are class attributes, so they are
    shared by every instance of this PropertyGroup.
    """
    # Backing item list for the `path` enum (breadcrumb segments).
    breadcrumb_items = []
    # Shared inspector engine.
    cd = ContextData()

    def get_path_items(self, context):
        """Enum items callback for `path`."""
        return self.breadcrumb_items

    def update_path(self, context):
        """Rebuild lists when a breadcrumb is clicked (no breadcrumb rebuild)."""
        self.cd.update_lists(self.path, False)

    def update_header(self, context):
        """Refresh the info list when a section toggle changes."""
        self.cd.update_info_list()

    def obj_list_idx_update(self, context):
        """Navigate into the clicked obj_list row, then reset the selection.

        Writing via self["..."] bypasses this update callback, avoiding
        recursion.
        """
        item = self.obj_list[self.obj_list_idx]
        self["obj_list_idx"] = -1
        self.cd.update_lists(item.data)

    obj_list: bpy.props.CollectionProperty(type=ListItem)
    obj_list_idx: bpy.props.IntProperty(update=obj_list_idx_update)
    info_list: bpy.props.CollectionProperty(type=ListItem)
    # Selection is intentionally pinned to -1 (rows are not selectable).
    info_list_idx: bpy.props.IntProperty(get=lambda s: -1)
    last_path: bpy.props.StringProperty(default=PROOT)
    path: bpy.props.EnumProperty(
        name="Path", description="Path",
        items=get_path_items,
        update=update_path)

    def clear_lists(self):
        """Empty both UI lists."""
        self.obj_list.clear()
        self.info_list.clear()

    def update_breadcrumbs(self, path):
        """Rebuild the breadcrumb enum from *path*, splitting indexed
        segments like "foo[2]" into two clickable parts, and select the
        last segment (index written raw to skip the update callback)."""
        self.breadcrumb_items.clear()
        items = path.split(PDOT)
        path = ""
        item_idx = -1
        for item in items:
            idx = item.find("[")
            if idx >= 0:
                # "name[index]" becomes two breadcrumbs: "name" and "[index]".
                a = item[:idx]
                path += a
                self.breadcrumb_items.append((path, a, path))
                a = item[idx:]
                path += a
                self.breadcrumb_items.append((path, a, path))
                path += PDOT
                item_idx += 2
            else:
                item_idx += 1
                path += item
                self.breadcrumb_items.append((path, item, path))
                path += PDOT
        self["path"] = item_idx
def register():
    """Attach the temp-prefs pointer to WindowManager (idempotent)."""
    if hasattr(bpy.types.WindowManager, TEMP_PREFS_ID):
        return
    pointer = bpy.props.PointerProperty(type=TempPreferences)
    setattr(bpy.types.WindowManager, TEMP_PREFS_ID, pointer)
def unregister():
    """Detach the temp-prefs pointer from WindowManager if present."""
    wm_type = bpy.types.WindowManager
    if hasattr(wm_type, TEMP_PREFS_ID):
        delattr(wm_type, TEMP_PREFS_ID)
|
992,058 | 72c5adcb94c889c05b014e7a940a598d525cb74e | import os
import dotenv
import jishaku
import discord
from discord.ext import commands
from command.database.loader import db_load, db_loaded, client_load, client_loaded
# loading env file so that we can use it
dotenv.load_dotenv()
def main():
    """Boot the bot: initialise the database and client, load every cog
    under command/<folder>/, load jishaku, then run with the env TOKEN."""
    db_load()  # loads database
    db = db_loaded()
    client_load()
    client = client_loaded()
    # Load all cogs found in command/<folder>/*.py.
    # BUG FIX: the original tested os.path.exists(), which is trivially true
    # for every entry os.listdir("command") returns -- including plain files
    # (e.g. __init__.py), on which the nested os.listdir() call would then
    # crash. isdir() keeps only real sub-packages.
    for folder in os.listdir("command"):
        if os.path.isdir(os.path.join("command", folder)):
            for filename in os.listdir(f"./command/{folder}"):
                if filename.endswith(".py"):
                    client.load_extension(f"command.{folder}.{filename[:-3]}")
    # loading "jishaku" (debugging/eval extension)
    client.load_extension("jishaku")
    client.run(os.getenv("TOKEN"))
if __name__ == "__main__":
    main()
|
992,059 | 66bbee5e60d798ca843c08be7af5c707d3d471b4 | from django.test import TestCase
from django.core.urlresolvers import reverse
from .views import ShiftListView, ShiftListCalendarView
from .forms import ShiftForm
class TestShiftListViews(TestCase):
    """Checks ShiftListView's static configuration attributes."""
    def setUp(self):
        self.view = ShiftListView()
    def test_attrs(self):
        # self.assertEqual(self.view.form_class, OrganizationSetupForm)
        self.assertEqual(self.view.get_success_url(), reverse('shifts:shift_list'))
        # Date-range filters default to unset.
        self.assertEqual(self.view.from_, None)
        self.assertEqual(self.view.to, None)
        self.assertEqual(self.view.template_name, 'shifts/shift_list.html')
        self.assertEqual(self.view.form_class, ShiftForm)
class TestShiftCalendarView(TestCase):
    """Checks ShiftListCalendarView's static configuration attributes."""
    def setUp(self):
        self.view = ShiftListCalendarView()
    def test_attrs(self):
        self.assertEqual(self.view.template_name, 'shifts/shift_calendar.html')
    def test_context_data(self):
        # TODO: exercise get_context_data() -- currently a placeholder.
        pass
992,060 | 5c70c35be76b6686a6db5b984cef8fb487581ae2 | from arm_dashboard_client import * |
992,061 | b03a42e9d1948c69d354a54962c2b097d150a44c | #!/usr/bin/env python
# coding: utf-8
# XXX License
# Copyright (C)
# In[1]:
import requests
import json
from urllib.parse import urlparse, parse_qs
from bs4 import BeautifulSoup as bs
# In[2]:
# Convert ISIN to Bloomberg ticker from Google
def isin2bbg(isin_ticker):
    """Resolve an ISIN to a Bloomberg ticker ("XXXXXXX:YY") via Google search.

    Returns -1 when the request fails or no usable result is found.
    NOTE(review): relies on Google's result markup and fixed character
    offsets around "Bloomberg Code," -- fragile by design.
    """
    url = 'https://www.google.com.tw/search'
    keys = '+fundinfo+bloomberg'
    key_url = 'product'
    # Search on Google
    r = requests.get( url, params = {'q': isin_ticker + keys } )
    #print(r.url)
    if r.status_code == requests.codes.ok:
        soup = bs(r.text, 'html.parser')
        items = soup.select('div.g')
        if len(items) < 1:
            return -1
        for item in items:
            s = item.find('a').get('href')
            # Google wraps result links; the real URL sits in the 'q' query param.
            link = parse_qs(urlparse(s)[4])['q'][0]
            parsed_link = urlparse(link)
            if key_url in parsed_link[2]:
                s = item.find('span', class_='st').text
                i = s.find("Bloomberg Code,")
                # Fixed-offset slices: 7-char code, colon, 2-char exchange.
                return s[i+16:i+23] + ':' + s[i+24:i+26]
    return -1
# In[3]:
# Convert Bloomberg to ISIN from Google
def bbg2isin(bbg_ticker):
    """Resolve a Bloomberg ticker back to its 12-character ISIN via Google.

    Returns -1 when the request fails or no usable result is found.
    """
    url = 'https://www.google.com.tw/search'
    keys = '"+fundinfo+isin'
    key_url = 'product'
    # Search on Google (ticker quoted for an exact-phrase match)
    r = requests.get( url, params = {'q': '"' + bbg_ticker + keys } )
    #print(r.url)
    if r.status_code == requests.codes.ok:
        soup = bs(r.text, 'html.parser')
        items = soup.select('div.g')
        if len(items) < 1:
            return -1
        for item in items:
            s = item.find('a').get('href')
            # Unwrap Google's redirect link to the real URL.
            link = parse_qs(urlparse(s)[4])['q'][0]
            parsed_link = urlparse(link)
            if key_url in parsed_link[2]:
                s = item.find('span', class_='st').text
                i = s.find("ISIN,")
                # Fixed-offset slice: the 12-character ISIN after "ISIN,".
                return s[i+6:i+18]
    return -1
# In[4]:
# Convert ISIN to Morningstar ticker
def isin2morningstar(isin_ticker):
    """Resolve an ISIN to a Morningstar fund id via Google's first result.

    Returns the 'id' query parameter of the first result when it points at
    a morningstar domain, otherwise -1.
    """
    url = 'https://www.google.com.tw/search'
    keys = '+morningstar'
    # Search on Google
    r = requests.get( url, params = {'q': isin_ticker + keys } )
    #print(r.url)
    if r.status_code == requests.codes.ok:
        soup = bs(r.text, 'html.parser')
        items = soup.select('div.g > h3.r > a')
        if len(items) > 0:
            #print(items[0])
            s = items[0].get('href')
            # Unwrap Google's redirect link to the real URL.
            ms_link = parse_qs(urlparse(s)[4])['q'][0]
            parsed_link = urlparse(ms_link)
            if 'morningstar' not in parsed_link[1]:
                return -1
            else:
                return parse_qs(urlparse(ms_link)[4])['id'][0]
    return -1
# In[5]:
# Convert Bloomberg to morningstar ticker
def bbg2morningstar(bbg_ticker):
    """Bloomberg ticker -> ISIN -> Morningstar id (chains the two lookups)."""
    isin = bbg2isin(bbg_ticker)
    return isin2morningstar(isin)
# In[6]:
# Convert ISIN to Financial Times symbol
def isin2ft(isin_ticker):
    """Resolve an ISIN to the Financial Times internal 'xid' symbol.

    Searches markets.ft.com, follows the single matching result to its
    tearsheet page, and extracts the xid from the add-to-watchlist widget's
    JSON config. Returns None (implicitly) on any failure or when the
    search yields anything other than exactly one row.
    """
    url = 'http://markets.ft.com'
    # Search on financial times
    r = requests.get( url + '/data/search', params = {'query':isin_ticker})
    if r.status_code == requests.codes.ok:
        soup = bs(r.text, "html.parser")
        tb = soup.find('table', class_='mod-ui-table mod-ui-table--freeze-pane')
        tr = tb.findAll('tr')
        # Require an unambiguous result: exactly one table row.
        if len(tr)!= 1:
            return
        td = tr[0].find('td')
        href = td.find('a').get('href')
        name = td.text
        #print(href)
        #print(name)
        td = td.find_next_sibling()
        symbol = td.text
        #print(symbol)
        # Follow the result to its tearsheet page.
        r = requests.get(url + href)
        #print(r.url)
        if r.status_code == requests.codes.ok:
            soup = bs(r.text, "html.parser")
            k = soup.find('section', class_='mod-tearsheet-add-to-watchlist')
            # The widget's data-mod-config attribute is a JSON blob with the xid.
            js = json.loads(str(k.get('data-mod-config')))
            xid = js['xid']
            #print(xid)
            return xid
    return
# In[7]:
# Testing
if __name__ == "__main__":
    # Smoke-test every converter with a known fund (network access required).
    print(isin2bbg('LU0270844359'))
    print(bbg2isin('FAPPAUI:LX'))
    print(bbg2morningstar('FAPPAUI:LX'))
    print(isin2ft('LU0270844359'))
|
992,062 | f204f445f5664be208b8e7dc3e4ce348bcda3073 | import math
n = int(input())
s = math.ceil(math.sqrt(n))
h = 10**5
for i in range(1,s+1):
if n%i==0:
if h > max(len(str(i)),len(str(int(n/i)))):
h = max(len(str(i)),len(str(int(n/i))))
print(h) |
992,063 | 963d2fd520f0f6b4ced4f8b48b77da5d3e8285d5 | import ctypes
import pathlib
import os
import sys
# Directory containing this script.
# BUG FIX: the original built the path as f'{os.getcwd()}\{__file__}' with a
# literal backslash -- wrong separator on POSIX and redundant when __file__
# is already a usable path. pathlib resolves it portably.
proj_folder = pathlib.Path(__file__).resolve().parent
if __name__ == "__main__":
    # Load the shared library into ctypes
    if sys.platform.startswith("win"):
        c_lib = ctypes.CDLL("./cmult.dll")
    else:
        c_lib = ctypes.CDLL("./libcmult.so")
    # BUG FIX: ctypes assumes functions return int unless told otherwise, so
    # the original printed garbage for cmult's float result. Declaring the
    # restype makes ctypes convert the C float correctly.
    c_lib.cmult.restype = ctypes.c_float
    x, y = 6, 2.3
    answer = c_lib.cmult(x, ctypes.c_float(y))
    print(answer)
992,064 | 9d67631eda8b9054d4c44ef904975bb2dd5a46f2 | drinks = ["espresso", "chai", "decaf", "drip"]
caffeine = [64, 40, 0, 120]
zipped_drinks = zip(drinks, caffeine)
drinks_to_caffeine = {key:value for key, value in zipped_drinks} |
992,065 | 50ce16552a65605b1f7912cb582f467b6e238826 | # -*- coding: utf-8 -*-
# 2020-04-13
# author Liu,Yuxin
'''
【问题背景】国际乒联现在主席沙拉拉自从上任以来就立志于推行一系列改革,以推动乒乓球运动在全球的普及。
其中11分制改革引起了很大的争议,有一部分球员因为无法适应新规则只能选择退役。
华华就是其中一位,他退役之后走上了乒乓球研究工作,意图弄明白11分制和21分制对选手的不同影响。
在开展他的研究之前,他首先需要对他多年比赛的统计数据进行一些分析,所以需要你的帮忙。
【问题描述】华华通过以下方式进行分析,首先将比赛每个球的胜负列成一张表,
然后分别计算在11分制和21分制下,双方的比赛结果(截至记录末尾)。
比如现在有这么一份记录,(其中W表示华华获得一分,L表示华华对手获得一分):
WWWWW WWWWWW
WWWWW WWWWWW
LW
在11分制下,此时比赛的结果是华华第一局11比0获胜,第二局11比0获胜,正在进行第三局,当前比分1比1。
而在21分制下,此时比赛结果是华华第一局21比0获胜,正在进行第二局,比分2比1。如果一局比赛刚开始,则此时比分为0比0。
你的程序就是要对于一系列比赛信息的输入(WL形式),输出正确的结果。
【输入格式】每个输入文件包含若干行字符串(每行至多20个字母),字符串有大写的W、L和E组成。
其中E表示比赛信息结束,#程序应该忽略E之后的所有内容#
【输出格式】输出由两部分组成,每部分有若干行,每一行对应一局比赛的比分(按比赛信息输入顺序)。
其中第一部分是11分制下的结果,第二部分是21分制下的结果,两部分之间由一个空行分隔。
【输入样例】
WWWWWWWWWWWWWWWWWWWW
WWLWE
【输出样例】
11:0
11:0
1:1
21:0
2:1
'''
# input: read lines and concatenate them until one contains the
# terminator 'E'; everything after 'E' is ignored per the problem spec.
s = ""
while True:
    si = input()
    s += si
    if 'E' in si:
        break
gamlst = list(s)
# Keep only the rally record up to (excluding) the terminator.
gamlst = gamlst[:gamlst.index('E')]
# get game result
def game11(gamlst):
    """Split a rally-by-rally record into 11-point games.

    gamlst is a sequence of 'W' (Hua wins the point) / 'L' (opponent wins).
    Returns a list of (hua_score, opponent_score) tuples: one per finished
    game plus the (possibly 0:0) game in progress.

    BUG FIX: the original appended (11, loser_score) for every finished
    game, so games the OPPONENT won came out reversed (a 0:11 loss printed
    as "11:0"). Scores are now always recorded as (W, L).
    """
    score = {'W': 0, 'L': 0}  # avoids shadowing the builtin `dict`
    games = []
    for point in gamlst:
        score[point] += 1
        if score[point] == 11:  # game ends when either side reaches 11
            games.append((score['W'], score['L']))
            score['W'] = 0
            score['L'] = 0
    games.append((score['W'], score['L']))  # current (unfinished) game
    return games
def game21(gamlst):
    """Split a rally-by-rally record into 21-point games (see game11).

    BUG FIXES vs the original: (1) a copy-paste error subtracted 11 instead
    of 21 when deriving the loser's score, so e.g. a 21-1 game was recorded
    as (21, 11); (2) finished games were recorded as (winner, loser) rather
    than (hua, opponent). Both are fixed by appending the raw (W, L) counts.
    """
    score = {'W': 0, 'L': 0}
    games = []
    for point in gamlst:
        score[point] += 1
        if score[point] == 21:  # game ends when either side reaches 21
            games.append((score['W'], score['L']))
            score['W'] = 0
            score['L'] = 0
    games.append((score['W'], score['L']))  # current (unfinished) game
    return games
result11 = game11(gamlst)
result21 = game21(gamlst)
# print out
def out(lst):
    """Print every (hua, opponent) score pair as 'hua:opponent', one per line."""
    for hua_score, opp_score in lst:
        print('%s:%s' % (hua_score, opp_score))
out(result11)
out(result21)
|
992,066 | da627cd44212a67501d4a449fba87f0856af49d5 | from bs4 import BeautifulSoup
import requests
import csv
"""
Web Scrapping Example: List of Top rated bollwood film on IMDB and created CSV file of film(name, rate, and starring)
"""
url= "https://www.imdb.com/india/top-rated-indian-movies/"
response= requests.get(url)
data = response.text
soup = BeautifulSoup(data, 'html.parser')
# Title cells: the anchor text is the film title, its title-attribute the cast.
names = soup.find_all('td', {'class':'titleColumn'})
film_name=[]
film_actor=[]
for name in names:
    film_name.append(name.a.text)
    film_actor.append(name.a['title'])
# Rating cells, in the same row order as the title cells.
names = soup.find_all('td', {'class':'imdbRating'})
film_ratng=[]
for name in names:
    film_ratng.append(name.strong.text)
data= zip(film_name, film_ratng, film_actor)
# for x in data:
#     print(x)
filename = 'web_scrapping/top250bollywood.csv'
# BUG FIX: csv.writer requires the file opened with newline='' (otherwise
# blank rows appear on Windows); utf-8 is declared explicitly since titles
# and actor names are frequently non-ASCII.
with open(filename, 'w', newline='', encoding='utf-8') as f:
    csv_out=csv.writer(f)
    csv_out.writerow(['name','rating', 'actor'])
    for row in data:
        csv_out.writerow(row)
print("done")
992,067 | 79f51c4c802ba03ed55fa1861a7d4bfd1f19825b | """
Domemaster3D Camera Setup Script V2.4
2018-08-21
Created by Andrew Hazelden andrew@andrewhazelden.com
This script makes it easy to start creating fulldome stereoscopic content in Autodesk Maya.
-------------------------------------------------------------------------------------------------------
Version History
Version 2.3 - 2017-05-17
-------------------------
Added support for preserving existing pre/post render MEL script entries when Domemaster3D adds a new camera rig to the scene, or you click the "Add" / "Rem" shelf buttons. This revision is based upon a submission from Campbell Strong.
Version 2.2 - 2016-12-23
-------------------------
Added a unlockAncestor() function
Version 2.2 - 2016-09-17
-------------------------
mental ray 3.14 for Maya support
Version 2.1.2 - 2016-09-17
-------------------------------
Improved the domeCamera.mel script's Maya 2017 compatibility by fixing the MEL "Redeclaration of variable" warnings.
Added Maya 2017+ support for creating Maya file node based screen space texture maps with the help of a mib_texture_vector node and a place2Dtexture node. This replaces the previous mentalrayTexture node based approach that has been depreciated in Maya 2017.
Edited the Dome Grid creation script so the catch command is used to handle the event that mental ray might not be installed and a doPaintEffectsToPoly function based Maya code dependency is going to try and change the .miFinalGatherCast attribute.
Code Reformatting
Version 1.9.1
-------------
2015-10-15
Added the ability to use a "DOMEMASTER3D_MAYA_REALTIME_FOV" environment variable through your operating system, the Maya.env file, or a Maya module file to set the realtime OpenGL "persp" viewport field of view FOV value for domeAFL_FOV, domeAFL_FOV_Stereo, latlong_lens, and LatLong_Stereo camera rigs. Typical values would be 4 (mm) for a wide angle 160 degree FOV in the OpenGL persp viewport, or 18 (mm) for a regular 90 degree view.
In a Maya.env file you would change this environment variable by adding a line like this. (4 in this example means a 4mm lens):
DOMEMASTER3D_MAYA_REALTIME_FOV=4
Version 1.9
------------
2015-09-23
Added a LatLong Stereo Aim Camera function for making a camera rig that has an aim constraint applied for easier camera animation. Reminder: Maya has issues with using cameras that have aim constraints when you apply them to animation layers.
Added a LatLong Stereo Zenith Camera function for making a camera rig that has the "Zenith Mode" checkboxes enabled by default and a vertical orientation.
Version 1.8.3
------------
2015-08-21
Added the `addPrePostRenderScript()` and `removePrePostRenderScript()` functions to the domeCamera.py script to make it easier to set up the Domemaster3D pre render and post render mel scripts in the Maya render settings window.
Version 1.7.4
------------
2015-06-12
Updated the Maya Wiki page link to use the new GitHub Wiki table of contents.
Version 1.6
---------------
February 27, 2015
Increased the dome grid line thickness so it has improved legibility in the realtime viewports.
Version 1.6
---------------
Sept 17, 2014
Added LatLong_Stereo support
Version 1.5
---------------
July 12, 2014
Added a new "FulldomeIBL" tool to the Maya shelf. This allows you to use a file texture with a circular domemaster 180 degree FOV image as the image based lighting environment map in the scene, and as the source of final gather IBL, and Maya 2015 "emit light" based lighting. The fulldome texture is applied using a mentalrayTexture with an image sequence expression. The FulldomeIBL tool supports domemaster frame masking.
The new "HemirectIBL" tool (hemirect = half height equirectangular 360x90 degree image) tool creates a custom mentalrayTexture based shading network that lets you feed in an image with the top half of an equirectangular panorama into the mental ray IBL's spherical texture input. Note: This mode requires your batch rendering software to distribute the rendering job using 1 frame per packet/render slice so a new image is loaded for each from of the sequence. The HemirectIBL tool works with Maya 2015's newly improved "emit light" IBL lighting system.
A remapColor node is connected to the FulldomeIB and HemirectIBL shading networks to make it easier to adjust the color correction and exposure on the imagery before it is used with final gathering or light emission.
Updated the dome version "update" button URL to use GitHub
Changed the openGL viewport default focal length from 4 mm (160 degree FOV) to 18 mm (90 degree FOV)
Updated the code that makes sure mental ray is loaded and active before adding mental ray lens shaders to the scene
Added display smoothing to the AutoMagic fulldome sphere test shape
Updated the order of the DomeGrid Extra Attributes.
Version 1.4 B10
-------------------
Dec 18, 2013
Added the latlong_lens shader
Version 1.4 B9
-----------------
Dec 7, 2013
Updated Linux install path to:
/opt/Domemaster3D
Version 1.4 B8
-----------------
Nov 20, 2013
Added flexible changeRenderRes(1024) render resolution tool to match 0.5k, 1k, 2k, 4k, 8k shelf items
Version 1.4 B6
-----------------
Oct 27, 2013
Updated Grid defaults and worked on Maya 2010 support.
Version 1.4 B5
-----------------
Oct 24, 2013
Updated PreRenderMel and PostRenderMel code for the DomeAFL_FOV_Stereo shader.
Version 1.4 B4
---------------
Oct 21, 2013
Upgraded the DomeGrid with new radius controls, color controls, paintFX toon line thickness controls, and custom display modes
Version 1.4 B1
---------------
Oct 6, 2013
Renamed the fulldome rig creation script to domeCamera.py for clarity.
This script is compatible with the new StereoRigEditor based stereo fulldome rig
Version 1.3.5
---------------
Sept 27, 2013
Added features for previewing the dome radius (zero parallax zone), field of view, and safe viewing volumes
Reorganized the python scripts to put the domeAFL_FOV code with the domeAFL_FOV_Stereo camera code
Version 1.3.4
---------------
Released June 27, 2013
Updated the the Automagic tool's dome grid color to a brighter yellow value. This makes the grid more visible in a Physical Sun & Sky scene.
Added a new HELP icon to the Maya Shelf toolset. This shelf item loads the domemaster stereo shader wiki page.
Version 1.3.3
---------------
Released May 30, 2013
Updated the default locator scale.
Fixed the dome ramp shelf tool item so the default ramp texture preset is applied when the tool is run multiple times.
Updated source image paths for Maya 2010 compatibility
Version 1.3.2
---------------
Released April 16, 2013
Edited the default camera connections for the lens shaders to work with the modified versions of the maya createMentalRayIndirectLightingTab.mel & AEmia_physicalskyTemplate.mel scripts. This fixes the problem of the Physical Sky & Sun system overwriting the first .miLensShader input on cameras in the scene.
The location of the default domemaster control map textures is now in the Program Files\Domemaster3D\sourceimages folder on Windows or the /Applications/Domemaster3D/sourceimages folder on macOS. The Domemaster3D shelf tools have been updated to link to the new sourceimages folder.
Version 1.3
------------
Released Nov 4, 2012
Moved FOV and WxH functions into domeMaterial.py, changed the default lens shader connections to support the mental ray sky and sun system.
Version 1.1
------------
Released Aug 14, 2012
Improved python code and made it Maya 2010 compatible.
Version 1.0
------------
Released Aug 6, 2012
First release of the Domemaster3D auto-setup python scripts.
------------------------------------------------------------------------------
Domemaster3D AutoSetup
A python function to create a fulldome stereo rig and test grid in Maya.
Run using the command:
import domeCamera as domeCamera
reload(domeCamera)
domeCamera.autosetup()
------------------------------------------------------------------------------
Domemaster3D Fulldome Stereo Rig
A python function to create a fulldome stereo rig in Maya.
Run using the command:
import domeCamera as domeCamera
reload(domeCamera)
domeCamera.createFulldomeStereoRig()
------------------------------------------------------------------------------
Domemaster3D createLatLong_Camera
A python function to create a latitude longitude lens shader and attach it to a camera.
Run using the command:
import domeCamera as domeCamera
reload(domeCamera)
domeCamera.createLatLong_Camera()
------------------------------------------------------------------------------
Domemaster3D createLatLongStereoRig
A python function to create a stereoscopic latitude longitude lens shader and attach it to a camera.
Run using the command:
import domeCamera as domeCamera
reload(domeCamera)
domeCamera.createLatLongStereoRig()
------------------------------------------------------------------------------
Domemaster3D createLatLongStereoAimRig
--------------------------------
A python function to create a LatLong stereo rig in Maya with an aim constraint applied.
Reminder: Maya has issues with using cameras that have aim constraints when you apply them to animation layers.
Run using the command:
import domeCamera as domeCamera
reload(domeCamera)
domeCamera.createLatLongStereoAimRig()
------------------------------------------------------------------------------
Domemaster3D createLatLongStereoZenithRig
A python function to create a stereoscopic latitude longitude lens shader and attach it to a camera.
The lens shaders have the Zenith Mode checkboxes enabled by default.
Run using the command:
import domeCamera as domeCamera
reload(domeCamera)
domeCamera.createLatLongStereoZenithRig()
------------------------------------------------------------------------------
Domemaster3D createDomeAFL_WxH_Camera
A python function to create a domeAFL_WxH lens shader and attach it to a camera.
Run using the command:
import domeCamera as domeCamera
reload(domeCamera)
domeCamera.createDomeAFL_WxH_Camera()
------------------------------------------------------------------------------
Domemaster3D createDomeAFL_FOV_Camera
A python function to create a domeAFL_FOV lens shader and attach it to a camera.
Run using the command:
import domeCamera as domeCamera
reload(domeCamera)
domeCamera.createDomeAFL_FOV_Camera()
------------------------------------------------------------------------------
Domemaster3D DomeGrid test background
A python function to create a hemispherical yellow test grid in Maya.
Run using the command:
import domeCamera as domeCamera
reload(domeCamera)
domeCamera.createDomeGrid()
------------------------------------------------------------------------------
Domemaster3D LatLongGrid test background
A python function to create a spherical yellow test grid in Maya.
Run using the command:
import domeCamera as domeCamera
reload(domeCamera)
domeCamera.createLatLongGrid()
------------------------------------------------------------------------------
Domemaster3D createTestShapes
A python function to create a test sphere and cube in Maya.
Run using the command:
import domeCamera as domeCamera
reload(domeCamera)
domeCamera.createTestShapes()
------------------------------------------------------------------------------
Domemaster3D createRobLookup
A python function to create a mental ray screen space texture
and connect it to a robLookupBackground lens shader.
Run using the command:
import domeCamera as domeCamera
reload(domeCamera)
domeCamera.createRobLookup()
------------------------------------------------------------------------------
Domemaster3D createDomeRampTexture
A python function to create a mental ray screen space ramp texture
and connect it to a robLookupBackground lens shader.
Run using the command:
import domeCamera as domeCamera
reload(domeCamera)
domeCamera.createDomeRampTexture()
------------------------------------------------------------------------------
Domemaster3D setRenderRes
A python function to setup the basic mental ray 2K x 2K square render settings.
Run using the command:
import domeCamera as domeCamera
reload(domeCamera)
domeCamera.setRenderRes()
------------------------------------------------------------------------------
Domemaster3D Add Pre/Post Render Mel
A python function to add the Domemaster3D shader Pre/Post render mel scripts to the Maya Render Settings window.
Run using the command:
import domeCamera as domeCamera
reload(domeCamera)
domeCamera.addPrePostRenderScript()
------------------------------------------------------------------------------
Domemaster3D Remove Pre/Post Render Mel
A python function to remove the Domemaster3D shader Pre/Post render mel scripts from the Maya Render Settings window.
Run using the command:
import domeCamera as domeCamera
reload(domeCamera)
domeCamera.removePrePostRenderScript()
------------------------------------------------------------------------------
Domemaster3D setDomeSamplingQuality
A python function to setup the mental ray AA sampling quality.
Run using the command:
import domeCamera as domeCamera
reload(domeCamera)
domeCamera.setDomeSamplingQuality()
------------------------------------------------------------------------------
Domemaster3D changeRenderRes
A python function to change the basic mental ray resolution square render settings.
Run using the command:
import domeCamera
reload(domeCamera)
domeCamera.changeRenderRes(1024)
------------------------------------------------------------------------------
Domemaster3D Force Mental Ray to load
A python function to make sure mental ray is active and the MR shading nodes are ready to be used.
Run using the command:
import domeCamera
reload(domeCamera)
domeCamera.forceMentalRayLoad()
------------------------------------------------------------------------------
Domemaster3D createFulldomeIBL
A python function to create a mental ray texture and connect it to a mental ray mentalrayIbl node.
This function will offset the texture coordinates so a 180 degree fisheye image would sit in the center of the mental ray IBL system's 360 degree angular fisheye input.
Run using this command:
import domeCamera
reload(domeCamera)
domeCamera.createFulldomeIBL()
------------------------------------------------------------------------------
Domemaster3D getMayaVersionDome
A python function to check what Maya version is active.
import domeCamera
reload(domeCamera)
domeCamera.getMayaVersionDome()
------------------------------------------------------------------------------
Unlock Ancestor
A python function to lock/unlock an ancestor plug connection
import domeMaterial as domeMaterial
reload(domeMaterial)
domeMaterial.unlockAncestor('stereoCameraRight.rotate', True)
------------------------------------------------------------------------------
"""
"""
Show the Domemaster Wiki
--------------------------------
Loads the wiki page in your default web browser
Run using the command:
print("Open the Domemaster Wiki Page")
import domeCamera as domeCamera
domeCamera.openDomemasterWiki()
print("Open the Domemaster NING Group")
import domeCamera as domeCamera
domeCamera.openDomemasterNing()
print("Open the Domemaster Downloads Page")
import domeCamera as domeCamera
domeCamera.openDomemasterDownloads()
print("Open the Domemaster Bug Reporter")
import domeCamera as domeCamera
domeCamera.openDomemasterBugReport()
"""
def openDomemasterWiki():
    """Open the Domemaster Stereo Shader wiki page in the default browser."""
    import webbrowser
    wikiPageURL = 'https://github.com/zicher3d-org/domemaster-stereo-shader/wiki'
    # Launch the URL in a new browser window, raising the window if possible.
    webbrowser.open_new(wikiPageURL)
def openDomemasterNing():
    """Open the Domemaster NING group forum thread in the default browser."""
    import webbrowser
    ningGroupURL = 'http://fulldome.ning.com/forum/topics/stereoscopic-domemaster-images'
    # Launch the URL in a new browser window, raising the window if possible.
    webbrowser.open_new(ningGroupURL)
def openDomemasterDownloads():
    """Open the Domemaster Stereo Shader releases/download page in the default browser."""
    import webbrowser
    downloadsPageURL = 'https://github.com/zicher3d-org/domemaster-stereo-shader/releases'
    # Launch the URL in a new browser window, raising the window if possible.
    webbrowser.open_new(downloadsPageURL)
def openDomemasterBugReport():
    """Open the Domemaster Stereo Shader issue tracker in the default browser."""
    import webbrowser
    bugReportURL = 'https://github.com/zicher3d-org/domemaster-stereo-shader/issues'
    # Launch the URL in a new browser window, raising the window if possible.
    webbrowser.open_new(bugReportURL)
"""
Find out the path to the sourceimages folder
----------------------
A python function to check the operating system platform and the source images folder.
"""
def getSourceImagesPath(imageFileName):
    """Return the full path to a Domemaster3D control map image.

    Reads the DOMEMASTER3D_SOURCEIMAGES_DIR environment variable (typically
    set in the Maya.env file) and falls back to the platform-specific default
    install folder when the variable is not set.

    imageFileName: the texture file name, e.g. "domeGrid.png".
    Returns the combined folder path + file name as a single string.
    """
    import os
    import platform

    # ---------------------------------------------------------------------
    # Set up the base folder path for the Domemaster3D control maps
    # ---------------------------------------------------------------------
    # Bug fix: the original code did os.environ.get(...) + "/" which raises a
    # TypeError when the env var is unset (get() returns None), and its later
    # `== None` fallback check could never trigger.
    baseImagesFolder = os.environ.get('DOMEMASTER3D_SOURCEIMAGES_DIR')
    if baseImagesFolder is not None:
        # Typical result: C:/Program Files/Domemaster3D/sourceimages/
        baseImagesFolder = baseImagesFolder + "/"
    else:
        # Use a fixed per-platform value when the env var is empty.
        # NOTE(review): 'win32' and 'Linux2' are sys.platform values, not
        # platform.system() values, so those branches are effectively dead;
        # they are preserved from the original script for compatibility.
        if platform.system() == 'Windows':
            baseImagesFolder = "C:/Program Files/Domemaster3D/sourceimages/"
        elif platform.system() == 'win32':
            baseImagesFolder = "C:/Program Files (x86)/Domemaster3D/sourceimages/"
        elif platform.system() == 'Darwin':
            baseImagesFolder = "/Applications/Domemaster3D/sourceimages/"
        elif platform.system() == 'Linux':
            baseImagesFolder = "/opt/Domemaster3D/sourceimages/"
        elif platform.system() == 'Linux2':
            baseImagesFolder = "/opt/Domemaster3D/sourceimages/"
        else:
            # Empty fallback: the image is then resolved relative to the
            # current working directory.
            baseImagesFolder = ""

    combinedFileAndImagePath = baseImagesFolder + imageFileName
    print("[Domemaster3D is running on a " + platform.system() + " System]")
    print("[Requesting the image file]: " + combinedFileAndImagePath)
    return combinedFileAndImagePath
"""
Domemaster3D AutoSetup
----------------------
A python function to create a fulldome stereo rig and test grid in Maya.
"""
def autosetup():
    """Run the full Domemaster3D fulldome stereo scene setup.

    Applies the default render resolution and mental ray sampling quality,
    then builds the fulldome stereo rig, the dome test grid, and the test
    shapes, in that order.
    """
    setupSteps = (
        setRenderRes,
        setDomeSamplingQuality,
        createFulldomeStereoRig,
        createDomeGrid,
        createTestShapes,
    )
    for step in setupSteps:
        step()
"""
Domemaster3D setDomeSamplingQuality
----------------------
A python function to setup the mental ray AA sampling quality.
"""
def setDomeSamplingQuality():
    """Set up the default mental ray anti-aliasing sampling quality."""
    import maya.cmds as cmds
    import maya.mel as mel
    # Add the mental ray miDefaultOptions settings to the scene before
    # accessing MR indirect lighting features.
    mel.eval("miCreateDefaultNodes();")
    # Base quality values, applied in order:
    #   gaussian AA filtering (filter=2) with a 3x3 filter size,
    #   the min/max sample range, and production-quality ray depth limits.
    baseQualitySettings = (
        ('filter', 2),
        ('filterWidth', 1),
        ('filterHeight', 1),
        ('maxSamples', 2),
        ('minSamples', 0),
        ('maxReflectionRays', 10),
        ('maxRefractionRays', 10),
        ('maxRayDepth', 20),
        ('maxShadowRayDepth', 2),
    )
    for attrName, attrValue in baseQualitySettings:
        cmds.setAttr('miDefaultOptions.' + attrName, attrValue)
    # Maya 2014/2015+ expose the mental ray Unified Sampling controls.
    if getMayaVersionDome() >= 2014:
        # The forced unified sampling mode is remmed out for Maya 2015 testing
        #cmds.setAttr('miDefaultOptions.miRenderUsing', 0)
        # Set the Unified Quality to 0.6
        cmds.setAttr('miDefaultOptions.miSamplesQualityR', 0.6)
        cmds.setAttr('miDefaultOptions.miSamplesMin', 1)
        cmds.setAttr('miDefaultOptions.miSamplesMax', 100)
# MEL snippets appended to defaultRenderGlobals.preMel/.postMel by
# addPrePostRenderScript() and stripped out again by removePrePostRenderScript().
preMelDomemaster = 'source "domeRender.mel"; domemaster3DPreRenderMEL();'
postMelDomemaster = 'source "domeRender.mel"; domemaster3DPostRenderMEL();'
"""
Domemaster3D Add Pre/Post Render Mel
----------------------
A python function to add the Domemaster3D shader Pre/Post render mel scripts to the Maya Render Settings window.
"""
def addPrePostRenderScript():
    """Add the Domemaster3D Pre/Post render MEL scripts to the Render Settings.

    Appends the domemaster3D PreRender/PostRender MEL snippets to the
    defaultRenderGlobals.preMel / .postMel attributes when they are not
    already present, then runs the post-render MEL once so the realtime 3D
    viewport preview is enabled immediately.
    """
    import maya.cmds as cmds
    import maya.mel as mel
    print("Adding the Pre/Post Render Mel\n")
    # PreRender MEL:
    preMelCurrent = cmds.getAttr('defaultRenderGlobals.preMel') or ''
    if preMelDomemaster not in preMelCurrent:
        # Bug fix: only join with ';' when a script already exists, so an
        # empty preMel attribute does not end up with a leading semicolon
        # (the ';;' cleanup below never caught that case).
        if preMelCurrent:
            preMelCurrent = preMelCurrent + ';' + preMelDomemaster
        else:
            preMelCurrent = preMelDomemaster
        preMelCurrent = preMelCurrent.replace(';;', ';')
        cmds.setAttr('defaultRenderGlobals.preMel', preMelCurrent, type='string')
    # PostRender MEL:
    postMelCurrent = cmds.getAttr('defaultRenderGlobals.postMel') or ''
    if postMelDomemaster not in postMelCurrent:
        if postMelCurrent:
            postMelCurrent = postMelCurrent + ';' + postMelDomemaster
        else:
            postMelCurrent = postMelDomemaster
        postMelCurrent = postMelCurrent.replace(';;', ';')
        cmds.setAttr('defaultRenderGlobals.postMel', postMelCurrent, type='string')
    # Enable realtime 3D right away by running the post-render MEL once.
    mel.eval('source "domeRender.mel"; domemaster3DPostRenderMEL();')
"""
Domemaster3D Remove Pre/Post Render Mel
----------------------
A python function to remove the Domemaster3D shader Pre/Post render mel scripts from the Maya Render Settings window.
"""
def removePrePostRenderScript():
    """Remove the Domemaster3D Pre/Post render MEL scripts from the Render Settings.

    Strips the domemaster3D MEL snippets out of the
    defaultRenderGlobals.preMel / .postMel attributes, then runs the
    post-render MEL once to disable the realtime 3D camera offsets.
    """
    import maya.cmds as cmds
    import maya.mel as mel
    print("Removing the Pre/Post Render Mel\n")
    # Strip the Domemaster snippet from the preMel then the postMel attribute.
    for attrName, melSnippet in (('preMel', preMelDomemaster),
                                 ('postMel', postMelDomemaster)):
        plugName = 'defaultRenderGlobals.' + attrName
        currentScript = cmds.getAttr(plugName) or ''
        currentScript = currentScript.replace(melSnippet, '').replace(';;', ';')
        cmds.setAttr(plugName, currentScript, type='string')
    # Disable the realtime 3D camera offsets.
    mel.eval('source "domeRender.mel"; domemaster3DPostRenderMEL();')
"""
Domemaster3D SetRenderRes
----------------------
A python function to setup the basic mental ray 2K x 2K square render settings.
"""
def setRenderRes():
    """Set up the base mental ray 2K x 2K square domemaster render settings."""
    import maya.cmds as cmds
    import maya.mel as mel
    # Make sure the mental ray plugin was loaded.
    forceMentalRayLoad()
    # A fulldome frame is square; default to a 2048x2048 output.
    squareRenderSizePx = 2048
    cmds.setAttr('defaultResolution.width', squareRenderSizePx)
    cmds.setAttr('defaultResolution.height', squareRenderSizePx)
    cmds.setAttr('defaultResolution.deviceAspectRatio', 1)
    cmds.setAttr('defaultResolution.pixelAspect', 1)
"""
Domemaster3D changeRenderRes
----------------------
A python function to change the basic mental ray resolution square render settings.
"""
def changeRenderRes(renderSizePx):
    """Change the mental ray render settings to a square renderSizePx image.

    renderSizePx: the width and height, in pixels, of the square output.
    """
    import maya.mel as mel
    import maya.cmds as cmds
    # Make sure the mental ray plugin was loaded.
    forceMentalRayLoad()
    # A domemaster frame is square, so width and height are the same value.
    squareSizePx = renderSizePx
    cmds.setAttr('defaultResolution.width', squareSizePx)
    cmds.setAttr('defaultResolution.height', squareSizePx)
    cmds.setAttr('defaultResolution.deviceAspectRatio', 1)
    cmds.setAttr('defaultResolution.pixelAspect', 1)
    print ("Changed the render settings to output a " + str(renderSizePx) + "x" + str(renderSizePx) + " image.")
"""
Domemaster3D changeRenderResWH
----------------------
A python function to change the basic mental ray resolution render settings.
"""
def changeRenderResWH(renderSizeW, renderSizeH):
    """Change the mental ray render settings to a renderSizeW x renderSizeH image.

    renderSizeW: output width in pixels.
    renderSizeH: output height in pixels.
    """
    import maya.mel as mel
    import maya.cmds as cmds
    # Make sure the mental ray plugin was loaded.
    forceMentalRayLoad()
    domeRenderWidth = renderSizeW
    domeRenderHeight = renderSizeH
    # Bug fix: use float division so portrait sizes don't truncate under
    # Python 2 integer division (e.g. 1024x2048 previously produced an
    # aspect ratio of 0 instead of 0.5).
    domeDeviceAspectRatio = float(domeRenderWidth) / float(domeRenderHeight)
    # ---------------------------------------------------------------------
    # Set up the render settings for the requested image output size
    # ---------------------------------------------------------------------
    cmds.setAttr('defaultResolution.width', domeRenderWidth)
    cmds.setAttr('defaultResolution.height', domeRenderHeight)
    cmds.setAttr('defaultResolution.deviceAspectRatio', domeDeviceAspectRatio)
    cmds.setAttr('defaultResolution.pixelAspect', 1)
    # Bug fix: report the actual height instead of repeating the width.
    print ("Changed the render settings to output a " + str(renderSizeW) + "x" + str(renderSizeH) + " image.")
"""
Domemaster3D Fulldome Stereo Rig
--------------------------------
A python function to create a fulldome stereo rig in Maya.
"""
def createFulldomeStereoRig():
    """Create a fulldome stereo camera rig in Maya.

    Builds a Maya stereo camera rig named 'DomeStereoCamera', makes the
    left/right eye cameras renderable, applies the default mental ray AA
    sampling quality, opens each eye's lens shader in the attribute editor
    (which adds the shader's extra attributes), and installs the Pre/Post
    render MEL scripts for the realtime viewport preview.

    Returns the rig list from createStereoCameraRig:
    [rigRoot, leftCamera, rightCamera].
    """
    import maya.cmds as cmds
    import maya.mel as mel
    # ---------------------------------------------------------------------
    # Setup the default Maya / Mental Ray Settings
    # ---------------------------------------------------------------------
    cmds.loadPlugin("stereoCamera", qt=True)
    # Make sure the mental ray plugin was loaded
    forceMentalRayLoad()
    # ---------------------------------------------------------------------
    # Create the stereo rig
    # ---------------------------------------------------------------------
    #import maya.app.stereo.stereoCameraMenus as stereoCameraMenus
    #stereoCameraMenus.buildCreateMenu()
    #import maya.app.stereo.stereoCameraRig
    #maya.app.stereo.stereoCameraRig.createStereoCameraRig()
    from maya.app.stereo import stereoCameraRig
    rig = stereoCameraRig.createStereoCameraRig('DomeStereoCamera')
    # Result: [u'DomeStereoCamera', u'DomeStereoCameraLeft', u'DomeStereoCameraRight']
    # Get the stereo camera rig shape nodes for the center/right/left cameras
    rig_center_shape_name = getObjectShapeNode(rig[0])
    # Result: [u'stereoCameraCenterCamShape', u'stereoCameraFrustum'] #
    rig_left_shape_name = getObjectShapeNode(rig[1])
    # Result: [u'stereoCameraLeftShape'] #
    rig_right_shape_name = getObjectShapeNode(rig[2])
    # Result: [u'stereoCameraRightShape'] #
    # Disabled rig rotation (kept for reference):
    """
    cmds.setAttr(rig[0]+'.rotateX', 90)
    cmds.setAttr(rig[0]+'.rotateY', 0)
    cmds.setAttr(rig[0]+'.rotateZ', 0)
    """
    # Changes the render settings to set the stereo camera to be a renderable camera
    cmds.setAttr(rig_left_shape_name[0]+'.renderable', 1) #stereoCameraLeftShape
    cmds.setAttr(rig_right_shape_name[0]+'.renderable', 1) #stereoCameraRightShape
    # Turn off rendering for the default scene cameras.
    cmds.setAttr('topShape.renderable', 0)
    cmds.setAttr('sideShape.renderable', 0)
    cmds.setAttr('frontShape.renderable', 0)
    cmds.setAttr('perspShape.renderable', 0)
    # Set up the default mental ray AA sampling quality
    setDomeSamplingQuality()
    #import maya.cmds as cmds
    #rig_center_shape_name = getObjectShapeNode(rig[0])
    #lensShaderName = cmds.listConnections( rig_center_shape_name[0]+'.miLensShader')
    # Debugging test line
    #lensShaderName = "center_domeAFL_FOV_Stereo";
    #print ("Lens shader name: " + str(lensShaderName))
    # Select the center camera's domeAFL_FOV_Stereo node
    #cmds.select(lensShaderName, replace=True)
    # Look up the lens shaders connected to each eye of the rig.
    leftLensShader = cmds.listConnections(rig_left_shape_name[0]+'.miLensShader')
    rightLensShader = cmds.listConnections(rig_right_shape_name[0]+'.miLensShader')
    centerLensShader = cmds.listConnections(rig_center_shape_name[0]+'.miLensShader')
    # Select the camera's domeAFL_FOV_Stereo nodes in the attribute editor to add the Extra Attrs
    #mel.eval('showEditorExact("' + centerLensShader[0] + '")')
    mel.eval('showEditorExact("' + leftLensShader[0] + '")')
    mel.eval('showEditorExact("' + rightLensShader[0] + '")')
    # Finish off by reselecting the center lens shader
    mel.eval('showEditorExact("' + centerLensShader[0] + '")')
    #---------------------------------------------------------------------------
    # Enable Real-time 3D in the OpenGL viewport
    # using a PreRender and PostRender MEL script
    #---------------------------------------------------------------------------
    #import maya.cmds as cmds
    addPrePostRenderScript()
    return rig
"""
Domemaster3D createDomeAFL_FOV_Camera
----------------------
A python function to create a domeAFL_FOV lens shader and attach it to a camera.
"""
def createDomeAFL_FOV_Camera():
    """Create a camera with a domeAFL_FOV lens shader attached.

    Builds a new camera named 'domeAFL_FOV_Camera', connects a domeAFL_FOV
    lens shader to its .miLensShader input, adds a 'Cam Locator Scale'
    control, aims the camera upward (rotateX = 90), makes it the renderable
    camera, applies the default mental ray AA sampling quality, and sets the
    viewport focal length (overridable via the
    DOMEMASTER3D_MAYA_REALTIME_FOV environment variable).
    """
    import maya.cmds as cmds
    import maya.mel as mel
    # Make sure the mental ray plugin was loaded
    forceMentalRayLoad()
    # Variables
    # ---------------------------------------------------------------------
    # Create the stereo rig
    # ---------------------------------------------------------------------
    # Create a camera and get the shape name.
    cameraName = cmds.camera(name='domeAFL_FOV_Camera')
    cameraShape = cameraName[1]
    # ---------------------------------------------------------------------
    # Create the domeAFL_FOV node
    # ---------------------------------------------------------------------
    domeAFL_lens_node = cmds.shadingNode('domeAFL_FOV', n='domeAFL_FOV', asUtility=True)
    # Primary lens shader connection:
    # Connect to the .miLensShaderList[0] input on the camera
    #cmds.connectAttr(domeAFL_lens_node+'.message', cameraShape+'.miLensShaderList[0]')
    # Alternate lens shader connection:
    # Connect directly to the first .miLensShader input on the camera
    # Note: This first lens shader connection is overwritten by the mental ray Sun & Sky system
    cmds.connectAttr(domeAFL_lens_node+'.message', cameraShape+'.miLensShader')
    # Scale the stereo camera rig locator larger
    #cmds.setAttr(cameraShape+'.locatorScale', 1) #Scale Camera icon
    # Link the new attribute 'Cam Locator Scale' to the dome camera's locator size control
    cmds.addAttr(cameraName[0], longName='Cam_Locator_Scale', niceName='Cam Locator Scale', attributeType='double', defaultValue=1.0, minValue=0.001)
    cmds.setAttr(cameraName[0]+'.Cam_Locator_Scale', keyable=False, channelBox=True)
    cmds.connectAttr(cameraName[0]+'.Cam_Locator_Scale', cameraShape+'.locatorScale', force=True)
    # Point the camera straight up at the dome zenith.
    cmds.setAttr(cameraName[0]+'.rotateX', 90)
    cmds.setAttr(cameraName[0]+'.rotateY', 0)
    cmds.setAttr(cameraName[0]+'.rotateZ', 0)
    # Changes the render settings to set the stereo camera to be a renderable camera
    cmds.setAttr(cameraShape+'.renderable', 1) #domeAFL_FOV_CameraShape
    cmds.setAttr('topShape.renderable', 0)
    cmds.setAttr('sideShape.renderable', 0)
    cmds.setAttr('frontShape.renderable', 0)
    cmds.setAttr('perspShape.renderable', 0)
    #Set up the default mental ray AA sampling quality
    setDomeSamplingQuality()
    # ---------------------------------------------------------------------
    # Setup the stereo rig camera attributes
    # ---------------------------------------------------------------------
    import os
    import sys
    # 18 mm focal length = 90 degree FOV
    defaultRealtimeFOV = 18
    # 4 mm focal length = 160 degree FOV
    #defaultRealtimeFOV = 4
    # The env var overrides the realtime viewport focal length when set.
    domeOverrideFOV = int(os.getenv('DOMEMASTER3D_MAYA_REALTIME_FOV', defaultRealtimeFOV))
    # Maya only accepts focal lengths between 3 mm and 3500 mm.
    if((domeOverrideFOV >= 3) and (domeOverrideFOV <= 3500)):
        print ("Using a Domemaster3D realtime viewport FOV value of " + str(domeOverrideFOV) + ".\n")
    else:
        print ("The \"DOMEMASTER3D_MAYA_REALTIME_FOV\" environment variable overridden FOV Value of " + str(domeOverrideFOV) + " is outside of the acceptable range of 3 mm to 3500mm that Maya accepts as a valid camera field of view value. The default value of " + str(defaultRealtimeFOV) + " will be used instead.\n")
        domeOverrideFOV = defaultRealtimeFOV
    # Use the default FOV value or pull the FOV value from the DOMEMASTER3D_MAYA_REALTIME_FOV env variable
    cmds.setAttr(cameraShape+'.focalLength', domeOverrideFOV)
    # 4 mm focal length = 160 degree FOV
    #cmds.setAttr(cameraShape+'.focalLength', 4)
    # 18 mm focal length = 90 degree FOV
    #cmds.setAttr(cameraShape+'.focalLength', 18)
    # Select the center camera domeAFL_FOV_Stereo node in the attribute editor
    # This will add the extra attributes to the camera
    mel.eval('showEditorExact("' + domeAFL_lens_node + '")')
    # ---------------------------------------------------------------------
    # Set the default camera separation based upon the scene size
    # ---------------------------------------------------------------------
    #defaultDomeRadius = 2
    #Set the dome radius
    #cmds.setAttr(domeAFL_lens_node+'.Dome_Radius', defaultDomeRadius)
    #print("Dome Radius: " + str(defaultDomeRadius))
"""
Domemaster3D createDomeAFL_WxH_Camera
----------------------
A python function to create a domeAFL_WxH lens shader and attach it to a camera.
"""
def createDomeAFL_WxH_Camera():
    """Create a camera with a domeAFL_WxH lens shader attached.

    Builds a new camera named 'domeAFL_WxH_Camera', connects a domeAFL_WxH
    lens shader to its .miLensShader input, adds a 'Cam Locator Scale'
    control, aims the camera upward (rotateX = 90), makes it the renderable
    camera, applies the default mental ray AA sampling quality, and sets the
    viewport focal length (overridable via the
    DOMEMASTER3D_MAYA_REALTIME_FOV environment variable).
    """
    import maya.cmds as cmds
    #import maya.mel as mel
    # Make sure the mental ray plugin was loaded
    forceMentalRayLoad()
    # Variables
    # ---------------------------------------------------------------------
    # Create the stereo rig
    # ---------------------------------------------------------------------
    # Create a camera and get the shape name.
    cameraName = cmds.camera(name='domeAFL_WxH_Camera')
    cameraShape = cameraName[1]
    # ---------------------------------------------------------------------
    # Create the domeAFL_WxH node
    # ---------------------------------------------------------------------
    domeAFL_WxH_lens_node = cmds.shadingNode('domeAFL_WxH', n='domeAFL_WxH', asUtility=True)
    # Primary lens shader connection:
    # Connect to the .miLensShaderList[0] input on the camera
    # cmds.connectAttr(domeAFL_WxH_lens_node+'.message', cameraShape+'.miLensShaderList[0]')
    # Alternate lens shader connection:
    # Connect directly to the first .miLensShader input on the camera
    # Note: This first lens shader connection is overwritten by the mental ray Sun & Sky system
    cmds.connectAttr(domeAFL_WxH_lens_node+'.message', cameraShape+'.miLensShader')
    # Scale the stereo camera rig locator larger
    #cmds.setAttr(cameraShape+'.locatorScale', 1) #Scale Camera icon
    # Link the new attribute 'Cam Locator Scale' to the dome camera's locator size control
    cmds.addAttr(cameraName[0], longName='Cam_Locator_Scale', niceName='Cam Locator Scale', attributeType='double', defaultValue=1.0, minValue=0.001)
    cmds.setAttr(cameraName[0]+'.Cam_Locator_Scale', keyable=False, channelBox=True)
    cmds.connectAttr(cameraName[0]+'.Cam_Locator_Scale', cameraShape+'.locatorScale', force=True)
    # Point the camera straight up at the dome zenith.
    cmds.setAttr(cameraName[0]+'.rotateX', 90)
    cmds.setAttr(cameraName[0]+'.rotateY', 0)
    cmds.setAttr(cameraName[0]+'.rotateZ', 0)
    # Changes the render settings to set the stereo camera to be a renderable camera
    cmds.setAttr(cameraShape+'.renderable', 1) #domeAFL_WxH_CameraShape
    cmds.setAttr('topShape.renderable', 0)
    cmds.setAttr('sideShape.renderable', 0)
    cmds.setAttr('frontShape.renderable', 0)
    cmds.setAttr('perspShape.renderable', 0)
    # Set up the default mental ray AA sampling quality
    setDomeSamplingQuality()
    # ---------------------------------------------------------------------
    # Setup the stereo rig camera attributes
    # ---------------------------------------------------------------------
    import os
    import sys
    # 18 mm focal length = 90 degree FOV
    defaultRealtimeFOV = 18
    # 4 mm focal length = 160 degree FOV
    #defaultRealtimeFOV = 4
    # The env var overrides the realtime viewport focal length when set.
    domeOverrideFOV = int(os.getenv('DOMEMASTER3D_MAYA_REALTIME_FOV', defaultRealtimeFOV))
    # Maya only accepts focal lengths between 3 mm and 3500 mm.
    if((domeOverrideFOV >= 3) and (domeOverrideFOV <= 3500)):
        print ("Using a Domemaster3D realtime viewport FOV value of " + str(domeOverrideFOV) + ".\n")
    else:
        print ("The \"DOMEMASTER3D_MAYA_REALTIME_FOV\" environment variable overridden FOV Value of " + str(domeOverrideFOV) + " is outside of the acceptable range of 3 mm to 3500mm that Maya accepts as a valid camera field of view value. The default value of " + str(defaultRealtimeFOV) + " will be used instead.\n")
        domeOverrideFOV = defaultRealtimeFOV
    # Use the default FOV value or pull the FOV value from the DOMEMASTER3D_MAYA_REALTIME_FOV env variable
    cmds.setAttr(cameraShape+'.focalLength', domeOverrideFOV)
    # 4 mm focal length = 160 degree FOV
    #cmds.setAttr(cameraShape+'.focalLength', 4)
    # 18 mm focal length = 90 degree FOV
    #cmds.setAttr(cameraShape+'.focalLength', 18)
"""
Domemaster3D createLatLong_Camera
----------------------
A python function to create a latitude longitude lens shader and attach it to a camera.
"""
def createLatLong_Camera():
    """Create a camera with a latitude/longitude (equirectangular) lens shader.

    Builds a new camera named 'latlong_Camera', connects a latlong_lens
    shader to its .miLensShader input, adds a 'Cam Locator Scale' control,
    keeps the camera level (rotateX = 0, unlike the dome cameras), makes it
    the renderable camera, applies the default mental ray AA sampling
    quality, and sets the viewport focal length (overridable via the
    DOMEMASTER3D_MAYA_REALTIME_FOV environment variable).
    """
    import maya.cmds as cmds
    #import maya.mel as mel
    # Make sure the mental ray plugin was loaded
    forceMentalRayLoad()
    # Variables
    # ---------------------------------------------------------------------
    # Create the stereo rig
    # ---------------------------------------------------------------------
    # Create a camera and get the shape name.
    cameraName = cmds.camera(name='latlong_Camera')
    cameraShape = cameraName[1]
    # ---------------------------------------------------------------------
    # Create the latlong_lens node
    # ---------------------------------------------------------------------
    latlong_lens_node = cmds.shadingNode('latlong_lens', n='latlong_lens', asUtility=True)
    # Primary lens shader connection:
    # Connect to the .miLensShaderList[0] input on the camera
    # cmds.connectAttr(latlong_lens_node+'.message', cameraShape+'.miLensShaderList[0]')
    # Alternate lens shader connection:
    # Connect directly to the first .miLensShader input on the camera
    # Note: This first lens shader connection is overwritten by the mental ray Sun & Sky system
    cmds.connectAttr(latlong_lens_node+'.message', cameraShape+'.miLensShader')
    # Scale the stereo camera rig locator larger
    #cmds.setAttr(cameraShape+'.locatorScale', 1) #Scale Camera icon
    # Link the new attribute 'Cam Locator Scale' to the dome camera's locator size control
    cmds.addAttr(cameraName[0], longName='Cam_Locator_Scale', niceName='Cam Locator Scale', attributeType='double', defaultValue=1.0, minValue=0.001)
    cmds.setAttr(cameraName[0]+'.Cam_Locator_Scale', keyable=False, channelBox=True)
    cmds.connectAttr(cameraName[0]+'.Cam_Locator_Scale', cameraShape+'.locatorScale', force=True)
    # A latlong camera stays level instead of pointing at the zenith.
    cmds.setAttr(cameraName[0]+'.rotateX', 0)
    cmds.setAttr(cameraName[0]+'.rotateY', 0)
    cmds.setAttr(cameraName[0]+'.rotateZ', 0)
    # Changes the render settings to set the stereo camera to be a renderable camera
    cmds.setAttr(cameraShape+'.renderable', 1) #latlong_CameraShape
    cmds.setAttr('topShape.renderable', 0)
    cmds.setAttr('sideShape.renderable', 0)
    cmds.setAttr('frontShape.renderable', 0)
    cmds.setAttr('perspShape.renderable', 0)
    # Set up the default mental ray AA sampling quality
    setDomeSamplingQuality()
    # ---------------------------------------------------------------------
    # Setup the stereo rig camera attributes
    # ---------------------------------------------------------------------
    import os
    import sys
    # 18 mm focal length = 90 degree FOV
    defaultRealtimeFOV = 18
    # 4 mm focal length = 160 degree FOV
    #defaultRealtimeFOV = 4
    # The env var overrides the realtime viewport focal length when set.
    domeOverrideFOV = int(os.getenv('DOMEMASTER3D_MAYA_REALTIME_FOV', defaultRealtimeFOV))
    # Maya only accepts focal lengths between 3 mm and 3500 mm.
    if((domeOverrideFOV >= 3) and (domeOverrideFOV <= 3500)):
        print ("Using a Domemaster3D realtime viewport FOV value of " + str(domeOverrideFOV) + ".\n")
    else:
        print ("The \"DOMEMASTER3D_MAYA_REALTIME_FOV\" environment variable overridden FOV Value of " + str(domeOverrideFOV) + " is outside of the acceptable range of 3 mm to 3500mm that Maya accepts as a valid camera field of view value. The default value of " + str(defaultRealtimeFOV) + " will be used instead.\n")
        domeOverrideFOV = defaultRealtimeFOV
    # Use the default FOV value or pull the FOV value from the DOMEMASTER3D_MAYA_REALTIME_FOV env variable
    cmds.setAttr(cameraShape+'.focalLength', domeOverrideFOV)
    # 4 mm focal length = 160 degree FOV
    #cmds.setAttr(cameraShape+'.focalLength', 4)
    # 18 mm focal length = 90 degree FOV
    #cmds.setAttr(cameraShape+'.focalLength', 18)
"""
Domemaster3D LatLong Stereo Rig
--------------------------------
A python function to create a LatLong stereo rig in Maya.
"""
def createLatLongStereoRig():
    """Create a LatLong stereo camera rig in Maya.

    Returns the list of rig transform names produced by Maya's stereo rig
    tool, e.g. [u'LatLongCamera', u'LatLongCameraLeft', u'LatLongCameraRight'].
    Requires a live Maya session; relies on the module-level helpers
    forceMentalRayLoad(), getObjectShapeNode(), setDomeSamplingQuality()
    and addPrePostRenderScript().
    """
    import maya.cmds as cmds
    import maya.mel as mel

    # Make sure Maya's stereo camera plugin and the mental ray plugin are loaded.
    cmds.loadPlugin("stereoCamera", qt=True)
    forceMentalRayLoad()

    # Build the stereo rig: returns the [center, left, right] transforms.
    from maya.app.stereo import stereoCameraRig
    rig = stereoCameraRig.createStereoCameraRig('LatLongStereoCamera')

    # Shape nodes for the center/left/right cameras
    # (center also carries the frustum shape as a second list entry).
    centerShapes = getObjectShapeNode(rig[0])
    leftShapes = getObjectShapeNode(rig[1])
    rightShapes = getObjectShapeNode(rig[2])

    # Make the stereo pair renderable and switch off the default cameras.
    cmds.setAttr(leftShapes[0] + '.renderable', 1)
    cmds.setAttr(rightShapes[0] + '.renderable', 1)
    for defaultCamShape in ['topShape', 'sideShape', 'frontShape', 'perspShape']:
        cmds.setAttr(defaultCamShape + '.renderable', 0)

    # Apply the default mental ray AA sampling quality.
    setDomeSamplingQuality()

    # Look up the lens shader attached to each camera shape.
    leftLensShader = cmds.listConnections(leftShapes[0] + '.miLensShader')
    rightLensShader = cmds.listConnections(rightShapes[0] + '.miLensShader')
    centerLensShader = cmds.listConnections(centerShapes[0] + '.miLensShader')

    # Showing each lens shader in the attribute editor makes Maya add the
    # Extra Attrs; finish on the center shader so it is the one left selected.
    def _showInAttributeEditor(nodeName):
        # Reveal nodeName in the attribute editor via MEL.
        mel.eval('showEditorExact("' + nodeName + '")')

    _showInAttributeEditor(leftLensShader[0])
    _showInAttributeEditor(rightLensShader[0])
    _showInAttributeEditor(centerLensShader[0])

    # Enable real-time 3D in the OpenGL viewport using
    # PreRender and PostRender MEL scripts.
    addPrePostRenderScript()
    return rig
"""
Domemaster3D LatLong Stereo Aim Rig
--------------------------------
A python function to create a LatLong stereo rig in Maya with an aim constraint applied.
"""
def createLatLongStereoAimRig():
    """Create a LatLong stereo camera rig with an aim constraint applied.

    Identical to createLatLongStereoRig() except that the finished rig is
    converted to an aim-camera rig. Returns the list of rig transform names,
    e.g. [u'LatLongCamera', u'LatLongCameraLeft', u'LatLongCameraRight'].
    """
    import maya.cmds as cmds
    import maya.mel as mel

    # Make sure Maya's stereo camera plugin and the mental ray plugin are loaded.
    cmds.loadPlugin("stereoCamera", qt=True)
    forceMentalRayLoad()

    # Build the stereo rig: returns the [center, left, right] transforms.
    from maya.app.stereo import stereoCameraRig
    rig = stereoCameraRig.createStereoCameraRig('LatLongStereoCamera')

    # Shape nodes for the center/left/right cameras.
    centerShapes = getObjectShapeNode(rig[0])
    leftShapes = getObjectShapeNode(rig[1])
    rightShapes = getObjectShapeNode(rig[2])

    # Make the stereo pair renderable and switch off the default cameras.
    cmds.setAttr(leftShapes[0] + '.renderable', 1)
    cmds.setAttr(rightShapes[0] + '.renderable', 1)
    for defaultCamShape in ['topShape', 'sideShape', 'frontShape', 'perspShape']:
        cmds.setAttr(defaultCamShape + '.renderable', 0)

    # Apply the default mental ray AA sampling quality.
    setDomeSamplingQuality()

    # Look up the lens shader attached to each camera shape.
    leftLensShader = cmds.listConnections(leftShapes[0] + '.miLensShader')
    rightLensShader = cmds.listConnections(rightShapes[0] + '.miLensShader')
    centerLensShader = cmds.listConnections(centerShapes[0] + '.miLensShader')

    # Showing each lens shader in the attribute editor makes Maya add the
    # Extra Attrs; finish on the center shader so it is the one left selected.
    def _showInAttributeEditor(nodeName):
        # Reveal nodeName in the attribute editor via MEL.
        mel.eval('showEditorExact("' + nodeName + '")')

    _showInAttributeEditor(leftLensShader[0])
    _showInAttributeEditor(rightLensShader[0])
    _showInAttributeEditor(centerLensShader[0])

    # Enable real-time 3D in the OpenGL viewport using
    # PreRender and PostRender MEL scripts.
    addPrePostRenderScript()

    # Convert the camera rig to an aim camera rig using Maya's MEL helper
    # cameraMakeNode (shipped in scripts/others/cameraMakeNode.mel).
    mel.eval("cameraMakeNode 2 " + rig[0] + ";")
    return rig
"""
Domemaster3D LatLong Stereo Zenith Rig
--------------------------------
A python function to create a LatLong stereo rig in Maya.
The lens shaders have the Zenith Mode checkboxes enabled by default.
"""
def createLatLongStereoZenithRig():
    """Create a LatLong stereo camera rig with Zenith Mode enabled.

    Like createLatLongStereoRig(), but the rig is oriented upright
    (rotateX = 90) and the center lens shader's Zenith_Mode checkbox is
    turned on. Returns the list of rig transform names, e.g.
    [u'LatLongCamera', u'LatLongCameraLeft', u'LatLongCameraRight'].
    """
    import maya.cmds as cmds
    import maya.mel as mel

    # Make sure Maya's stereo camera plugin and the mental ray plugin are loaded.
    cmds.loadPlugin("stereoCamera", qt=True)
    forceMentalRayLoad()

    # Build the stereo rig: returns the [center, left, right] transforms.
    from maya.app.stereo import stereoCameraRig
    rig = stereoCameraRig.createStereoCameraRig('LatLongStereoCamera')

    # Shape nodes for the center/left/right cameras.
    centerShapes = getObjectShapeNode(rig[0])
    leftShapes = getObjectShapeNode(rig[1])
    rightShapes = getObjectShapeNode(rig[2])

    # Orient the camera rig upright for zenith use.
    cmds.setAttr(rig[0] + '.rotateX', 90)
    cmds.setAttr(rig[0] + '.rotateY', 0)
    cmds.setAttr(rig[0] + '.rotateZ', 0)

    # Make the stereo pair renderable and switch off the default cameras.
    cmds.setAttr(leftShapes[0] + '.renderable', 1)
    cmds.setAttr(rightShapes[0] + '.renderable', 1)
    for defaultCamShape in ['topShape', 'sideShape', 'frontShape', 'perspShape']:
        cmds.setAttr(defaultCamShape + '.renderable', 0)

    # Apply the default mental ray AA sampling quality.
    setDomeSamplingQuality()

    # Look up the lens shader attached to each camera shape.
    leftLensShader = cmds.listConnections(leftShapes[0] + '.miLensShader')
    rightLensShader = cmds.listConnections(rightShapes[0] + '.miLensShader')
    centerLensShader = cmds.listConnections(centerShapes[0] + '.miLensShader')

    # Showing each lens shader in the attribute editor makes Maya add the
    # Extra Attrs; finish on the center shader so it is the one left selected.
    def _showInAttributeEditor(nodeName):
        # Reveal nodeName in the attribute editor via MEL.
        mel.eval('showEditorExact("' + nodeName + '")')

    _showInAttributeEditor(leftLensShader[0])
    _showInAttributeEditor(rightLensShader[0])
    _showInAttributeEditor(centerLensShader[0])

    # Enable the Zenith Mode checkbox on the center lens shader.
    # (Presumably the left/right shaders follow the center via attribute
    # connections — confirm against the rig's shader network.)
    cmds.setAttr(centerLensShader[0] + '.Zenith_Mode', 1)

    # Enable real-time 3D in the OpenGL viewport using
    # PreRender and PostRender MEL scripts.
    addPrePostRenderScript()
    return rig
"""
Domemaster3D LatLongGrid test background
--------------------------------------
A python function to create a spherical yellow test grid in Maya covering a full 360 degree field of view.
"""
def createLatLongGrid():
    """Create a spherical yellow test grid covering a full 360 degree FOV.

    Builds the standard dome grid via createDomeGrid(), then widens it from
    a hemisphere to a complete sphere. Requires a live Maya session.
    """
    import maya.cmds as cmds
    import maya.mel as mel

    # Start from the standard hemispherical yellow test grid.
    createDomeGrid()

    # Expand the grid to a full sphere: 360 degree FOV, and 24 spans
    # (instead of the fulldome default of 12) so the patches stay roughly
    # square over the doubled coverage.
    for attrName, attrValue in [('fieldOfView', 360), ('Dome_Spans', 24)]:
        cmds.setAttr('domeGrid.' + attrName, attrValue)
"""
Domemaster3D DomeGrid test background
--------------------------------------
A python function to create a hemispherical yellow test grid in Maya.
"""
# Suggested Maya Scene Grid Settings:
# Length and width: 360 units
# Grid lines every: 180 units
# Subdivisions: 2
def createDomeGrid():
import maya.cmds as cmds
import maya.mel as mel
#---------------------------------------------------------------------------
# Variables
#---------------------------------------------------------------------------
# Reference Grid Meshes
#domeGridSurface = 'domeGridSurface'
domeGridSurface = 'domeGridSurface'
domeGridlineSurface = 'domeGridlineSurface'
# Set the diameter of the dome shape
startingDomeDiameter = 360
#---------------------------------------------------------------------------
# Remove any existing domeGrid elements
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
# Remove old geometry and paint effects nodes
#---------------------------------------------------------------------------
if cmds.objExists('domeGrid'):
print('Removing existing Domemaster3D object: domeGrid')
cmds.select('domeGrid', replace=True)
cmds.delete()
if cmds.objExists('MeshGroup'):
print('Removing existing Domemaster3D object: MeshGroup')
cmds.select('MeshGroup', replace=True)
cmds.delete()
if cmds.objExists(domeGridSurface):
print('Removing existing Domemaster3D object: ' + domeGridSurface)
cmds.select(domeGridSurface, replace=True)
cmds.delete()
if cmds.objExists('domeGridToon'):
print('Removing existing Domemaster3D object: domeGridToon')
cmds.select('domeGridToon', replace=True)
cmds.delete()
if cmds.objExists('domeGrid_displayModeExpr'):
print('Removing existing Domemaster3D object: domeGrid_displayModeExpr')
cmds.select('domeGrid_displayModeExpr', replace=True)
cmds.delete()
#--------------------------------------------------------------------------
# Remove old dome Grid surface materials
#---------------------------------------------------------------------------
if cmds.objExists('domeGridLinesSurfaceShader'):
print('Removing existing Domemaster3D object: domeGridLinesSurfaceShader')
cmds.select('domeGridLinesSurfaceShader', replace=True)
cmds.delete()
if cmds.objExists('domeGridLinesSurfaceShaderSG'):
print('Removing existing Domemaster3D object: domeGridLinesSurfaceShaderSG')
cmds.select('domeGridLinesSurfaceShaderSG', replace=True)
cmds.delete()
if cmds.objExists('domeGridSurfaceShaderSG'):
print('Removing existing Domemaster3D object: domeGridSurfaceShaderSG')
cmds.select('domeGridSurfaceShaderSG', replace=True)
cmds.delete()
if cmds.objExists('domeGridSurfaceShader'):
print('Removing existing Domemaster3D object: domeGridSurfaceShader')
cmds.select('domeGridSurfaceShader', replace=True)
cmds.delete()
#--------------------------------------------------------------------------
# Protect any existing surface shaders from the paint effects node
#---------------------------------------------------------------------------
if cmds.objExists('surfaceShader1SG'):
print('Renaming existing object: surfaceShader1SG')
cmds.rename('surfaceShader1SG', 'aSurfaceShader1SG')
if cmds.objExists('surfaceShader1'):
print('Renaming existing object: surfaceShader1')
cmds.rename('surfaceShader1', 'aSurfaceShader1')
#--------------------------------------------------------------------------
# Make the dome mesh
#--------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Create a hybrid NURBS/Polygon Paint effects Toon Surface
#-----------------------------------------------------------------------------
startingCurveRadius = 1.0
#startingToonThickness = 0.1
# Create the base curve with a 90 degree arc
domeRadiusCurveName = cmds.circle(name='domeGridSurfaceCurve', c=(0, 0, 0), nr=(0, 0, 1), sw=90, r=startingCurveRadius, d=3, ut=0, tol=0.01, s=10, ch=1)
# Get the curve's shape node name
domeCurveShape = getObjectShapeNode(domeRadiusCurveName[0])
# Setup the NURBS to Poly conversion prefs
#nurbsToPolygonsPref -q -f;
cmds.nurbsToPolygonsPref(format=3, uType=3, uNumber=1, vType=3, vNumber=1)
"""
# MEL Code to debug NURBS to polygon conversion:
int $f = `nurbsToPolygonsPref -q -f`;
int $ut = `nurbsToPolygonsPref -q -ut`;
int $un = `nurbsToPolygonsPref -q -un`;
int $vt = `nurbsToPolygonsPref -q -vt`;
int $vn = `nurbsToPolygonsPref -q -vn`;
print ($f + " " + $ut + " " + $un+ " " + $vt+ " " + $vn);
"""
# Revolve the base 90 degree arc curve into a NURBS dome shape
domeRadiusSurfaceName = cmds.revolve(domeCurveShape, name='domeGridSurface', ch=1, po=0, rn=0, ssw=0, esw=360, ut=0, tol=0.01, degree=3, s=40, ulp=1, ax=(0, 1, 0), polygon=1)
domeSurfaceShape = getObjectShapeNode(domeRadiusSurfaceName[0])
print "\nDome Preview elements:"
print domeRadiusSurfaceName
print "Dome Preview shape node:"
print domeSurfaceShape
print "\n"
# Find out the preview curve's makeNurbCircle node name
makeCurveShapeName = domeCurveShape
makeCurveObject = cmds.listConnections(makeCurveShapeName[0]+'.create', type='makeNurbCircle')
makeCurveNodeName = makeCurveObject[0]
print("The NURBS circle creation node is: ")
print(makeCurveNodeName)
#-----------------------------------------------------------------------------
# Make the NURBS Curve able to be moved without effecting the revolves
#-----------------------------------------------------------------------------
# Find out the name of the "makeNurbCircle" node that is used to create the domeGridPreviewCurve shape
makeRevolveObjects= cmds.listConnections(makeCurveShapeName[0]+'.worldSpace', type='revolve')
makeRevolveNodeName = makeRevolveObjects[0];
print("The circle creation node is: ")
print(makeRevolveNodeName)
# Reconnect the curve to the revolve node using local space
# This replaces the curve's previous .worldSpace connection that inhibited the
# ability to move the curve without effecting the revolve
cmds.connectAttr(makeCurveShapeName[0]+".local", makeRevolveNodeName+".inputCurve", f=True)
# Put the domeSurface "PreviewShape" inside the domeGrid group
# Have the revolved shape aligned relative to the domeGrid
#cmds.parent(domeRadiusSurfaceName[0], domeRadiusTransform)
# Parent the NURBS revolve curve to the domeGrid
#cmds.parent(domeRadiusCurveName[0], domeRadiusTransform)
# Create the base sphere with a 1 unit scale
#domeGridName = cmds.polySphere( name=domeGridSurface, radius = 1, subdivisionsX=36, subdivisionsY=20, axis=(0, 1, 0), createUVs=2, constructionHistory=True)
# Chop the polysphere into a hemispherical dome
#domeGridTransform = domeGridName[0]
#domeGridShape = getObjectShapeNode( domeGridName[0])
#cmds.select(domeGridTransform+'.f[0:323]', domeGridTransform+'.f[648:683]', replace=True)
#cmds.delete()
domeGridTransform = domeRadiusSurfaceName[0];
# Make the curve an intermediate shape
cmds.setAttr(domeCurveShape[0]+'.intermediateObject', 1)
# Tell the domeGridSurface to move with the domeGrid group node
cmds.setAttr(domeGridTransform+'.inheritsTransform', 1)
#---------------------------------------------------------------------------
# Create the PaintFX Toon stroke outlines
# --------------------------------------------------------------------------
cmds.select(domeGridTransform, replace=True)
# Assign the paint effects toon outlines
mel.eval('assignNewPfxToon;')
# Rename the toon shader
domeToonShader = 'domeGridToon'
domeToonShaderShape = 'domeGridToonShape'
cmds.rename('pfxToon1', domeToonShader)
# Define the new toon shader controls
cmds.setAttr(domeToonShaderShape+'.profileLines', 0)
cmds.setAttr(domeToonShaderShape+'.borderLines', 0)
cmds.setAttr(domeToonShaderShape+'.creaseLineWidth', 15)
cmds.setAttr(domeToonShaderShape+'.creaseColor', 1, 1, 0, type='double3')
cmds.setAttr(domeToonShaderShape+'.hardCreasesOnly', 0)
cmds.setAttr(domeToonShaderShape+'.creaseBreakAngle', 0)
cmds.setAttr(domeToonShaderShape+'.creaseAngleMin', 0)
cmds.setAttr(domeToonShaderShape+'.creaseAngleMax', 0)
cmds.setAttr(domeToonShaderShape+'.meshVertexColorMode', 1)
cmds.setAttr(domeToonShaderShape+'.meshQuadOutput', 1)
cmds.setAttr(domeToonShaderShape+'.meshHardEdges', 1)
# Create a polygon paint effects stroke output
cmds.select(domeToonShader, replace=True)
# The catchQuiet command is used to handle the event that mental ray might not be installed and a doPaintEffectsToPoly function based Maya code dependency is going to try and change the .miFinalGatherCast attribute...
mel.eval('catch(doPaintEffectsToPoly(1,1,1,1,100000));')
#mel.eval('catchQuiet(doPaintEffectsToPoly(1,1,1,1,100000));')
# Make a local space mesh connection to fix the grouped node double translation issue
#connectAttr -f domeGridToonShape.outMainMesh MainShape.inMesh;
# Result: Connected domeGridToonShape.outMainMesh to MainShape.inMesh. //
cmds.connectAttr(domeToonShaderShape+'.outMainMesh', 'MainShape.inMesh', force=True)
if cmds.objExists('MeshGroup'):
print('Unlinking the Toon shader\'s inheritsTransform attribute')
cmds.setAttr('MeshGroup.inheritsTransform', 0)
# --------------------------------------------------------------------------
# Adjust the grid lines shader
#---------------------------------------------------------------------------
#domeGridlineShadingGroup = cmds.sets( renderable=True, noSurfaceShader=True, empty=True, name='domeGridLinesSurfaceShaderSG')
domeGridlineMaterial = 'domeGridLinesSurfaceShader'
domeGridlineShadingGroup = 'domeGridLinesSurfaceShaderSG'
# Rename the default gridlines shader
cmds.rename('surfaceShader1', domeGridlineMaterial)
cmds.rename('surfaceShader1SG', domeGridlineShadingGroup)
# Standard Yellow Color
#cmds.setAttr('surfaceShader1.outColor', 1, 1, 0, type='double3')
# Super Bright Yellow Color for Physical Sky Compatibility
cmds.setAttr(domeGridlineMaterial+'.outColor', 15, 15, 0, type='double3')
#---------------------------------------------------------------------------
# Adjust the grid surface shader
#---------------------------------------------------------------------------
# Create the dome Grid surface shader + shading group
domeGridShadingGroup = cmds.sets(renderable=True, noSurfaceShader=True, empty=True, name='domeSurfaceShaderSG')
domeGridMaterial = cmds.shadingNode('surfaceShader', name='domeGridSurfaceShader', asShader=True)
# Make the surface shader black
cmds.setAttr(domeGridMaterial+'.outColor', 0, 0, 0, type='double3')
# Set the polygon surface to be transparent
cmds.setAttr(domeGridMaterial+'.outTransparency', 1, 1, 1, type='double3')
# Connect the surface shader to the shading group and the polygon surface
cmds.connectAttr(domeGridMaterial+'.outColor', domeGridShadingGroup+'.surfaceShader')
cmds.select(domeGridSurface)
cmds.hyperShade(assign=domeGridShadingGroup)
#---------------------------------------------------------------------------
# Group the domeGrid surfaces under a node called "domeGrid"
#---------------------------------------------------------------------------
#cmds.group('domeGridSurface', 'domeGridToon', 'MeshGroup', name='domeGrid')
cmds.group(domeRadiusCurveName[0], domeRadiusSurfaceName[0], 'domeGridToon', 'MeshGroup', name='domeGrid')
#
#---------------------------------------------------------------------------
# Add Extra Attrs to the domeGrid shape
#---------------------------------------------------------------------------
baseNodeName = 'domeGrid'
#---------------------------------------------------------------------------
# Add a Field of View control to the domeGrid's transform node
#---------------------------------------------------------------------------
attrName = 'fieldOfView'
# Check if the attribute exists on the domeGrid node
#if(mel.attributeExists(attrName, baseNodeName) == 0):
cmds.addAttr(baseNodeName, longName=attrName, attributeType="double", min=0.1, max=360, defaultValue=180 , keyable=True)
print('Adding custom Attributes ' + baseNodeName + '.' + attrName)
#---------------------------------------------------------------------------
# Add a Field of View expression
#---------------------------------------------------------------------------
# Connect the domeGrid dome radius control to the sphere's makeNurbCircle radius attribute
expressionBuilderString = makeCurveNodeName + ".sweep = " + (baseNodeName+'.'+attrName) + "/2;"
gridFOVRadiusExpressionName = 'domeGrid_FOVExpr'
print "DomeGrid FOV Extra Attribute Expressions:"
print expressionBuilderString
cmds.expression(name=gridFOVRadiusExpressionName, string=expressionBuilderString, object=baseNodeName, alwaysEvaluate=True, unitConversion=all)
# Connect the domeGrid dome radius control to the sphere's makeNurbCircle radius attribute:
#cmds.connectAttr((baseNodeName+'.'+attrName), makeCurveObject[0]+'.sweep', force=True)
#---------------------------------------------------------------------------
# Add a dome Radius control to the domeGrid's transform node
#---------------------------------------------------------------------------
attrName = 'Dome_Radius'
# Check if the attribute exists on the domeGrid node
#if(mel.attributeExists(attrName, baseNodeName) == 0):
cmds.addAttr(baseNodeName, longName=attrName, attributeType="double", min=0.1, max=1000000, hasSoftMaxValue=True, softMaxValue=360, defaultValue=startingDomeDiameter , keyable=True)
print('Adding custom Attributes ' + baseNodeName + '.' + attrName)
# Connect the domeGrid dome radius control to the sphere's makeNurbCircle radius attribute:
cmds.connectAttr((baseNodeName+'.'+attrName), makeCurveObject[0]+'.radius', force=True)
#---------------------------------------------------------------------------
# Add a Dome Height Spans control to the domeGrid's transform node
#---------------------------------------------------------------------------
attrName = 'Dome_Spans'
# Check if the attribute exists on the domeGrid node
#if(mel.attributeExists(attrName, baseNodeName) == 0):
# 180 degree dome default value = 12
# 360 degree dome default value = 24
cmds.addAttr(baseNodeName, longName=attrName, attributeType="double", min=4, max=120, hasSoftMaxValue=True, softMaxValue=40, defaultValue=12 , keyable=True)
print('Adding custom Attributes ' + baseNodeName + '.' + attrName)
# Connect the domeGrid dome radius control to the sphere's makeNurbCircle sections attribute:
cmds.connectAttr((baseNodeName+'.'+attrName), makeCurveObject[0]+'.sections', force=True)
#---------------------------------------------------------------------------
# Add a Dome Width Sections control to the domeGrid's transform node
#---------------------------------------------------------------------------
attrName = 'Dome_Sections'
# Check if the attribute exists on the domeGrid node
#if(mel.attributeExists(attrName, baseNodeName) == 0):
cmds.addAttr(baseNodeName, longName=attrName, attributeType="double", min=4, max=240, hasSoftMaxValue=True, softMaxValue=120, defaultValue=42 , keyable=True)
print('Adding custom Attributes ' + baseNodeName + '.' + attrName)
# Connect the domeGrid dome radius control to the sphere's revolve sections attribute:
cmds.connectAttr((baseNodeName+'.'+attrName), makeRevolveNodeName+'.sections', force=True)
#---------------------------------------------------------------------------
# Add a Display Mode control to the domeGrid's transform node
#---------------------------------------------------------------------------
attrName = 'displayMode'
#if(mel.attributeExists(attrName, baseNodeName) == 0):
cmds.addAttr(baseNodeName, longName=attrName, attributeType="enum", en="Off:Wireframe:Shaded:Wireframe on Shaded", defaultValue=2, keyable=True)
print('Adding custom Attributes ' + baseNodeName + '.' + attrName)
#---------------------------------------------------------------------------
# Add a Double Sided Rendering control to the domeGrid's transform node
#---------------------------------------------------------------------------
attrName = 'doubleSidedShading'
#if(mel.attributeExists(attrName, baseNodeName) == 0):
cmds.addAttr(baseNodeName, longName=attrName, attributeType="enum", en="Double Sided:Show Frontfaces:Show Backfaces", defaultValue=2, min=0, keyable=True)
print('Adding custom Attributes ' + baseNodeName + '.' + attrName)
#---------------------------------------------------------------------------
# Add a Grid Line Thickness control to the domeGrid's transform node
#---------------------------------------------------------------------------
attrName = 'gridLineThickness'
# This is the default starting value for the grid line strokes
# Mental Ray optimized thin lines
#initialGridLineThickness = 0.05
# PlayblastVR compatible thicker lines
initialGridLineThickness = 0.200
# Check if the attribute exists on the domeGrid node
#if(mel.attributeExists(attrName, baseNodeName) == 0):
cmds.addAttr(baseNodeName, longName=attrName, attributeType="double", min=0.001, max=50, hasSoftMaxValue=True, softMaxValue=2, defaultValue=initialGridLineThickness, keyable=True)
print('Adding custom Attributes ' + baseNodeName + '.' + attrName)
# Connect the domeGrid Grid Line Thickness to the toon shader line width attribute:
cmds.connectAttr((baseNodeName+'.'+attrName), domeToonShaderShape+'.lineWidth', force=True)
#---------------------------------------------------------------------------
# Add a Grid Line Color control to the domeGrid's transform node - Default color 1,1,0 = Yellow
#---------------------------------------------------------------------------
attrName = 'gridLineColor'
attrRName = "gridLineColorR";
attrGName = "gridLineColorG";
attrBName = "gridLineColorB";
#if(mel.attributeExists(attrName, baseNodeName) == 0):
cmds.addAttr(baseNodeName, longName=attrName, attributeType="float3", usedAsColor=True, keyable=True)
cmds.addAttr(baseNodeName, parent=attrName, longName=attrRName, attributeType="float", keyable=True, defaultValue=15)
cmds.addAttr(baseNodeName, parent=attrName, longName=attrGName, attributeType="float", keyable=True, defaultValue=15)
cmds.addAttr(baseNodeName, parent=attrName, longName=attrBName, attributeType="float", keyable=True, defaultValue=0)
print('Adding custom Attributes ' + baseNodeName + '.' + attrName)
#Connect the Grid Line Color swatch to the surface shader
cmds.connectAttr((baseNodeName+'.'+attrName), domeGridlineMaterial+'.outColor', force=True)
#---------------------------------------------------------------------------
# Add a Grid Line Transparency control to the domeGrid's transform node - Default value 0.25
#---------------------------------------------------------------------------
attrName = 'gridLineTransparency'
cmds.addAttr(baseNodeName, longName=attrName, attributeType="double", keyable=True, defaultValue=0.0, min=0, max=1)
# Connect the Grid Line transparency swatch to the surface shader
cmds.connectAttr((baseNodeName+'.'+attrName), domeGridlineMaterial+'.outTransparencyR', force=True)
cmds.connectAttr((baseNodeName+'.'+attrName), domeGridlineMaterial+'.outTransparencyG', force=True)
cmds.connectAttr((baseNodeName+'.'+attrName), domeGridlineMaterial+'.outTransparencyB', force=True)
#---------------------------------------------------------------------------
# Add a Grid Surface Color control to the domeGrid's transform node - Default color 0,0,0 = Black
#---------------------------------------------------------------------------
attrName = 'gridSurfaceColor'
attrRName = "gridSurfaceColorR";
attrGName = "gridSurfaceColorG";
attrBName = "gridSurfaceColorB";
#if(mel.attributeExists(attrName, baseNodeName) == 0):
cmds.addAttr(baseNodeName, longName=attrName, attributeType="float3", usedAsColor=True, keyable=True)
cmds.addAttr(baseNodeName, parent=attrName, longName=attrRName, attributeType="float", keyable=True, defaultValue=0)
cmds.addAttr(baseNodeName, parent=attrName, longName=attrGName, attributeType="float", keyable=True, defaultValue=0)
cmds.addAttr(baseNodeName, parent=attrName, longName=attrBName, attributeType="float", keyable=True, defaultValue=0)
print('Adding custom Attributes ' + baseNodeName + '.' + attrName)
# Connect the Grid Surface Color swatch to the surface shader
cmds.connectAttr((baseNodeName+'.'+attrName), domeGridMaterial+'.outColor', force=True)
#---------------------------------------------------------------------------
# Add a Grid Surface Transparency control to the domeGrid's transform node - Default value 0.25
#---------------------------------------------------------------------------
attrName = 'gridSurfaceTransparency'
cmds.addAttr(baseNodeName, longName=attrName, attributeType="double", keyable=True, defaultValue=.5, min=0, max=1)
# Connect the Grid Surface transparency swatch to the surface shader
cmds.connectAttr((baseNodeName+'.'+attrName), domeGridMaterial+'.outTransparencyR', force=True)
cmds.connectAttr((baseNodeName+'.'+attrName), domeGridMaterial+'.outTransparencyG', force=True)
cmds.connectAttr((baseNodeName+'.'+attrName), domeGridMaterial+'.outTransparencyB', force=True)
#---------------------------------------------------------------------------
# Add a display mode expression to the domeGrid's transform node
#---------------------------------------------------------------------------
domeRadiusTransform = "domeGrid"
domeSurfaceShape = "domeGridSurface"
domeSurfaceShapeNode = getObjectShapeNode(domeSurfaceShape)
exprName = ""
previewAttrName = "displayMode"
#The expression name is domeGrid_displayModeExpr
exprName = domeRadiusTransform + "_" + previewAttrName + "Expr"
PreviewShapeExpr = ""
PreviewShapeExpr += "// Custom " + previewAttrName + " Preview Shape Expressions\n\n"
PreviewShapeExpr += "string $currentPanel;\n"
PreviewShapeExpr += "if ( " + domeRadiusTransform + "." + previewAttrName + " == 0){\n"
PreviewShapeExpr += " //Off Mode\n"
PreviewShapeExpr += " " + domeSurfaceShape + ".overrideDisplayType = 2;\n"
PreviewShapeExpr += " " + domeRadiusTransform + ".overrideEnabled = 1;\n"
PreviewShapeExpr += " " + domeRadiusTransform + ".overrideShading = 0;\n"
PreviewShapeExpr += " " + domeRadiusTransform + ".visibility = 0;\n"
PreviewShapeExpr += " MeshGroup.visibility = 0;\n"
PreviewShapeExpr += "} else if (" + domeRadiusTransform + "." + previewAttrName + " == 1){\n"
PreviewShapeExpr += " //Wireframe Mode\n"
PreviewShapeExpr += " " + domeRadiusTransform + ".overrideEnabled = 1;\n"
PreviewShapeExpr += " " + domeRadiusTransform + ".overrideShading = 0;\n"
PreviewShapeExpr += " " + domeRadiusTransform + ".visibility = 1;\n"
PreviewShapeExpr += " MeshGroup.visibility = 0;\n"
PreviewShapeExpr += "} else if (" + domeRadiusTransform + "." + previewAttrName + " == 2){\n"
PreviewShapeExpr += " //Shaded Mode\n"
PreviewShapeExpr += " $currentPanel = \"modelPanel4\";\n"
PreviewShapeExpr += " if ( `modelEditor -exists currentPanel`)\n"
PreviewShapeExpr += " modelEditor -edit -wireframeOnShaded 0 currentPanel;\n"
PreviewShapeExpr += " $currentPanel = \"StereoPanel\";\n"
PreviewShapeExpr += " if ( `modelEditor -exists currentPanel`)\n"
PreviewShapeExpr += " modelEditor -edit -wireframeOnShaded 0 currentPanel;\n"
PreviewShapeExpr += " " + domeSurfaceShape + ".overrideDisplayType = 2;\n"
PreviewShapeExpr += " " + domeRadiusTransform + ".overrideEnabled = 1;\n"
PreviewShapeExpr += " " + domeRadiusTransform + ".overrideShading = 1;\n"
PreviewShapeExpr += " " + domeRadiusTransform + ".visibility = 1;\n"
PreviewShapeExpr += " MeshGroup.visibility = 1;\n"
PreviewShapeExpr += "} else if (" + domeRadiusTransform + "." + previewAttrName + " == 3){\n"
PreviewShapeExpr += " //Wireframe on Shaded Mode\n"
PreviewShapeExpr += " $currentPanel = \"modelPanel4\";\n"
PreviewShapeExpr += " if ( `modelEditor -exists currentPanel`)\n"
PreviewShapeExpr += " modelEditor -edit -wireframeOnShaded 1 currentPanel;\n"
PreviewShapeExpr += " $currentPanel = \"StereoPanel\";\n"
PreviewShapeExpr += " if ( `modelEditor -exists currentPanel`)\n"
PreviewShapeExpr += " modelEditor -edit -wireframeOnShaded 1 currentPanel;\n"
PreviewShapeExpr += " " + domeSurfaceShape + ".overrideDisplayType = 2;\n"
PreviewShapeExpr += " " + domeRadiusTransform + ".overrideEnabled = 1;\n"
PreviewShapeExpr += " " + domeRadiusTransform + ".overrideShading = 1;\n"
PreviewShapeExpr += " " + domeRadiusTransform + ".visibility = 1;\n"
PreviewShapeExpr += " MeshGroup.visibility = 1;\n"
PreviewShapeExpr += "}\n"
PreviewShapeExpr += "\n"
PreviewShapeExpr += "\n"
#---------------------------------------------------------------------------
# Add a Double Sided Shading expression to the domeGrid's transform node
#---------------------------------------------------------------------------
previewAttrName = "doubleSidedShading";
PreviewShapeExpr += "// Custom Double Sided Shading Expressions\n\n"
PreviewShapeExpr += "if (" + previewAttrName + " == 0){\n"
PreviewShapeExpr += " print(\"Double Sided Shading Enabled\\n\");\n"
PreviewShapeExpr += " setAttr \"" + domeSurfaceShape + ".doubleSided\" 1; \n"
PreviewShapeExpr += " setAttr \"" + domeSurfaceShape + ".opposite\" 0; \n"
PreviewShapeExpr += "} else if (" + previewAttrName + " == 1){\n"
PreviewShapeExpr += " print(\"Backface Shading Enabled\\n\");\n"
PreviewShapeExpr += " setAttr \"" + domeSurfaceShape + ".doubleSided\" 0; \n"
PreviewShapeExpr += " setAttr \"" + domeSurfaceShape + ".opposite\" 0; \n"
PreviewShapeExpr += "} else if (" + previewAttrName + " == 2){\n"
PreviewShapeExpr += " print(\"Frontface Shading Enabled\\n\");\n"
PreviewShapeExpr += " setAttr \"" + domeSurfaceShape + ".doubleSided\" 0; \n"
PreviewShapeExpr += " setAttr \"" + domeSurfaceShape + ".opposite\" 1; \n"
PreviewShapeExpr += "}\n"
print "DomeGrid Extra Attribute Expressions:"
print PreviewShapeExpr
cmds.expression( name=exprName, string=PreviewShapeExpr, object='domeGrid', alwaysEvaluate=True, unitConversion=all)
# Force a first value into the double sided shading attribute
cmds.setAttr((domeRadiusTransform+".doubleSidedShading"), 0)
#---------------------------------------------------------------------------
# Select the domeGrid node in the Attribute Editor
#---------------------------------------------------------------------------
mel.eval('showEditorExact("' + domeRadiusTransform + '")')
"""
Domemaster3D createTestShapes
----------------------
A python function to create a test sphere and cube in Maya.
"""
def createTestShapes():
    import maya.cmds as cmds

    # Clear out any Domemaster3D test objects left over from a previous run.
    for staleObject in ['domeTestLight', 'polyTestSphere', 'polyTestCube']:
        if cmds.objExists(staleObject):
            print('Removing existing Domemaster3D object: ' + staleObject)
            cmds.select(staleObject, replace=True)
            cmds.delete()

    # Build the polygon test sphere and move it off to the side.
    sphereNodes = cmds.polySphere(name='polyTestSphere', radius=24, subdivisionsX=20, subdivisionsY=20, axis=(0, 1, 0), createUVs=2, constructionHistory=True)
    cmds.setAttr(sphereNodes[0]+'.translateX', 80)
    cmds.setAttr(sphereNodes[0]+'.translateY', 75)
    # Smooth preview for the render time polygon sphere shape.
    cmds.displaySmoothness(sphereNodes, divisionsU=3, divisionsV=3, pointsWire=16, pointsShaded=4, polygonObject=3)

    # Build the polygon test cube and place/orient it behind the origin.
    cubeNodes = cmds.polyCube(name='polyTestCube', width=40, height=40, depth=40, subdivisionsX=1, subdivisionsY=1, subdivisionsZ=1, axis=(0, 1, 0), createUVs=4, constructionHistory=True)
    for attrSuffix, attrValue in [('.translateX', 0), ('.translateY', 75), ('.translateZ', -80),
                                  ('.rotateX', 88), ('.rotateY', 0), ('.rotateZ', 0)]:
        cmds.setAttr(cubeNodes[0]+attrSuffix, attrValue)

    # Add a directional test light, rename its transform, and aim it.
    lightShape = cmds.directionalLight()
    lightTransform = getObjectParentNode(lightShape)
    lightTransform = cmds.rename(lightTransform, "domeTestLight")
    for attrSuffix, attrValue in [('.translateX', -32), ('.rotateX', 38),
                                  ('.rotateY', 47), ('.rotateZ', -62)]:
        cmds.setAttr(lightTransform+attrSuffix, attrValue)
"""
Domemaster3D createRobLookup
----------------------
A python function to create a mental ray screen space texture
and connect it to a robLookupBackground lens shader.
"""
def createRobLookup():
    import maya.cmds as cmds

    # The mental ray plugin has to be active before MR nodes can be created.
    forceMentalRayLoad()

    # Locate the Domemaster3D separation control map on disk.
    sepMapPath = getSourceImagesPath("separation_map.png")
    print("[Loading Separation Map]: " + sepMapPath)

    # Build a camera to host the lens shader and keep its shape name.
    camNodes = cmds.camera(name='robLookupCamera')
    camShape = camNodes[1]

    # Create the robLookupBackground lens shader and attach it to the camera.
    lensNode = cmds.shadingNode('rob_lookup_background', n='rob_lookup_background', asUtility=True)
    cmds.connectAttr(lensNode+'.message', camShape+'.miLensShader')

    mayaVersion = getMayaVersionDome()
    if mayaVersion <= 2015:
        # Maya 2010-2016.5: classic mentalrayTexture based shading network.
        texFilter = cmds.shadingNode('mib_texture_filter_lookup', n='rob_map_mib_texture_filter_lookup1', asTexture=True)
        texVector = cmds.shadingNode('mib_texture_vector', n='rob_mib_texture_vector1', asUtility=True)
        texRemap = cmds.shadingNode('mib_texture_remap', n='rob_mib_texture_remap1', asUtility=True)
        mrTexture = cmds.shadingNode('mentalrayTexture', n='rob_map_mentalrayTexture1', asTexture=True)
        # Mode 4 = screen space texture coordinates.
        cmds.setAttr(texVector+'.selspace', 4)
        # Wire the network: mr texture -> filter lookup -> lens shader,
        # with the remapped screen space vector feeding the lookup coords.
        cmds.connectAttr(texFilter+'.outValueR', lensNode+'.tex')
        cmds.connectAttr(mrTexture+'.message', texFilter+'.tex')
        cmds.connectAttr(texVector+'.outValue', texRemap+'.input')
        cmds.connectAttr(texRemap+'.outValue', texFilter+'.coord')
        cmds.setAttr(mrTexture+'.fileTextureName', sepMapPath, type="string")
    else:
        # Maya 2017+: Maya file node based screen space texture approach.
        placement = cmds.shadingNode('place2dTexture', n='rob_place2dTexture', asUtility=True)
        texVector = cmds.shadingNode('mib_texture_vector', n='rob_mib_texture_vector1', asUtility=True)
        fileTexture = cmds.shadingNode('file', n='rob_FileTexture', asTexture=True)
        # Mode 4 = screen space texture coordinates.
        cmds.setAttr(texVector+'.selspace', 4)
        # Hook up the standard same-named place2dTexture plugs on the file node.
        for plugName in ['coverage', 'translateFrame', 'rotateFrame', 'mirrorU',
                         'mirrorV', 'stagger', 'wrapU', 'wrapV', 'repeatUV',
                         'offset', 'rotateUV', 'noiseUV', 'vertexUvOne',
                         'vertexUvTwo', 'vertexUvThree', 'vertexCameraOne']:
            cmds.connectAttr(placement+'.'+plugName, fileTexture+'.'+plugName, f=True)
        cmds.connectAttr(placement+'.outUvFilterSize', fileTexture+'.uvFilterSize', f=True)
        # Drive the file node's UV inputs from the screen space texture vector.
        cmds.connectAttr(texVector+'.outValueX', fileTexture+'.uCoord', f=True)
        cmds.connectAttr(texVector+'.outValueY', fileTexture+'.vCoord', f=True)
        # Feed the red channel of the file texture into the lens shader.
        cmds.connectAttr(fileTexture+'.outColorR', lensNode+'.tex', f=True)
        # Assign the initial texture map to the file node.
        cmds.setAttr(fileTexture+'.fileTextureName', sepMapPath, type="string")
"""
Domemaster3D createDomeRampTexture
----------------------
A python function to create a mental ray screen space ramp texture
and connect it to a robLookupBackground lens shader.
"""
def createDomeRampTexture():
    import maya.cmds as cmds

    # The mental ray plugin has to be active before MR nodes can be created.
    forceMentalRayLoad()

    # Build a camera to host the lens shader and keep its shape name.
    camNodes = cmds.camera(name='robLookupCamera')
    camShape = camNodes[1]

    # Create the robLookupBackground lens shader and attach it to the camera.
    lensNode = cmds.shadingNode('rob_lookup_background', n='rob_lookup_background', asUtility=True)
    cmds.connectAttr(lensNode+'.message', camShape+'.miLensShader')

    # Ramp texture: white entry at position 0.5, black entry just below it.
    # NOTE(review): the entry index jumps from [0] to [2]; Maya multi-indices
    # are sparse so this works, but [1] may have been intended -- confirm.
    rampNode = cmds.shadingNode('ramp', n='domeRamp', asTexture=True)
    cmds.setAttr(rampNode+'.colorEntryList', s=2)
    cmds.setAttr(rampNode+'.colorEntryList[0].ep', 0.5)
    cmds.setAttr(rampNode+'.colorEntryList[0].ec', 1, 1, 1, type="float3")
    cmds.setAttr(rampNode+'.colorEntryList[2].ep', 0.44999998807907104)
    cmds.setAttr(rampNode+'.colorEntryList[2].ec', 0, 0, 0, type="float3")

    # Texture vector conversion node, mode 4 = screen space coordinates.
    texVector = cmds.shadingNode('mib_texture_vector', n='rob_mib_texture_vector1', asUtility=True)
    cmds.setAttr(texVector+'.selspace', 4)

    # Drive the ramp's UVs from the screen space XY values, and feed the
    # red channel of the ramp into the lens shader.
    cmds.connectAttr(rampNode+'.outColor.outColorR', lensNode+'.tex')
    cmds.connectAttr(texVector+'.outValue.outValueX', rampNode+'.uvCoord.uCoord')
    cmds.connectAttr(texVector+'.outValue.outValueY', rampNode+'.uvCoord.vCoord')
"""
A python function to create a mental ray texture and connect it to a mental ray mentalrayIbl node.
This function will offset the texture coordinates so a 180 degree fisheye image sits in the center of the mental ray IBL system's 360 degree angular fisheye input.
The input isMasked variable allows you to choose whether an alpha channel is applied to the fulldome image to crop off the outside frame labelling.
"""
def createFulldomeIBL():
    """Build a fulldome (180 degree fisheye) mental ray IBL environment.

    Creates a mentalrayIblShape via the stock miCreateIbl MEL helper, wires a
    mentalrayTexture based shading network into its color input, and remaps /
    mirrors the texture so a 180 degree domemaster frame fills the center of
    the IBL node's 360 degree angular fisheye space.  A confirm dialog lets
    the user optionally multiply a circular alpha mask over the image to hide
    labelling written in the border zone of the frame.
    """
    import maya.cmds as cmds
    import maya.mel as mel
    isMasked = 1
    # Ask the user whether the circular domemaster alpha mask should be
    # composited over the imagery.
    iblDialogString = 'Do you want to have a circular alpha mask applied to the imagery in your fulldome IBL shading network?\n'
    iblDialogString += '\n'
    iblDialogString += 'Note: This is useful for hiding comments written in the border zone of the domemaster frame and will stop them from showing up in the environment map background.'
    iblDialogButtonSelected = cmds.confirmDialog( title='FulldomeIBL Creation', message=iblDialogString, button=['Yes','No'], defaultButton='No', cancelButton='No', dismissString='No', icon='question')
    if(iblDialogButtonSelected == 'Yes'):
        print 'Creating a circular masked FulldomeIBL Shading Network.\n'
        # Masked Domemaster Frame Boundary input = 1
        isMasked = 1
    else:
        print 'Creating a regular FulldomeIBL Shading Network.\n'
        # UnMasked Domemaster Frame Boundary input = 0
        isMasked = 0
    # Check if we are running Maya 2015+ and then enable the emit light mode
    # NOTE(review): mayaVersion is only consumed by the disabled emit-light
    # block further down; it is currently unused at runtime.
    mayaVersion = getMayaVersionDome()
    # ---------------------------------------------------------------------
    # Setup the base folder path for the Domemaster3D control maps
    # ---------------------------------------------------------------------
    # Make sure the mental ray plugin was loaded
    forceMentalRayLoad()
    # Remove old mental ray IBL Shape nodes left over from a previous run
    if cmds.objExists('mentalrayIbl*'):
        cmds.delete('mentalrayIbl*')
        print("Removing the old mentalrayIbl shape.")
    # Add the mental ray miDefaultOptions settings to the scene before
    # accessing MR indirect lighting features
    mel.eval("miCreateDefaultNodes();")
    # Show the render settings window
    #mel.eval("unifiedRenderGlobalsWindow;")
    # Variables
    #iblMapFileTexture = ""
    #iblMapFileTexture = getSourceImagesPath("fulldome_2K.jpg")
    iblMapFileTexture = getSourceImagesPath("fulldomeAlignmentGrid_4k.png")
    print "[Loading IBL Map]: " + iblMapFileTexture
    # Load the alpha domemaster frame mask only when it was requested
    if(isMasked):
        domeMaskMapFileTexture = getSourceImagesPath("domemaster_mask.png")
        print "[Loading Domemaster Circular Mask Map]: " + domeMaskMapFileTexture
    # ---------------------------------------------------------------------
    # Create the mentalrayIblShape node
    # ---------------------------------------------------------------------
    # miCreateIbl is found in:
    # C:\Program Files\Autodesk\mentalrayForMaya2015\scripts\createMentalRayIndirectLightingTab.mel
    # C:/Users/<User Account>/Documents/maya/2015-x64/prefs/scripts/createMentalRayIndirectLightingTab.mel
    #import maya.mel as mel
    mel.eval("source \"createMentalRayIndirectLightingTab.mel\";")
    mel.eval("miCreateIbl();")
    # Find the name of the new IBL shape
    # Hardcoded IBL testing names
    #iblTransformName = 'mentalrayIbl1'
    #iblShapeName = 'mentalrayIblShape1'
    # Search the scene for Mental ray IBL shapes
    iblNodeList = cmds.ls( 'mentalrayIbl*')
    #print iblNodeList
    # Result: [u'mentalrayIbl1', u'mentalrayIblShape1']
    iblTransformName = iblNodeList[0]
    iblShapeName = iblNodeList[1]
    # Select the mentalrayIbl1 node in the attribute editor
    # This will add the extra attributes to the mentalrayIbl node
    mel.eval('showEditorExact("' + iblShapeName + '")')
    # ---------------------------------------------------------------------
    # Create the custom shading network connections
    # ---------------------------------------------------------------------
    # Create the nodes
    #dome_map_tex_filter = cmds.shadingNode('mib_texture_filter_lookup', n='dome_map_mib_texture_filter_lookup1', asTexture=True)
    dome_map_tex_filter = cmds.shadingNode('mib_texture_lookup', n='dome_map_mib_texture_lookup1', asTexture=True)
    dome_tex_vector = cmds.shadingNode('mib_texture_vector', n='dome_mib_texture_vector1', asUtility=True)
    dome_tex_remap = cmds.shadingNode('mib_texture_remap', n='dome_mib_texture_remap1', asUtility=True)
    dome_map_mr_tex = cmds.shadingNode('mentalrayTexture', n='dome_map_mentalrayTexture1', asTexture=True)
    dome_remap_color = cmds.shadingNode('remapColor', n='dome_remapColor1', asTexture=True)
    # Extra nodes used to composite the alpha domemaster frame mask
    if(isMasked):
        dome_mask_map_mr_tex = cmds.shadingNode('mentalrayTexture', n='dome_mask_mentalrayTexture1', asTexture=True)
        dome_mask_tex_filter = cmds.shadingNode('mib_texture_lookup', n='dome_mask_mib_texture_lookup1', asTexture=True)
        dome_mask_multiply = cmds.shadingNode('multiplyDivide', n='dome_mask_multiply', asUtility=True)
    # Connect the nodes
    # RGB Domemaster Texture Map
    cmds.setAttr(dome_map_mr_tex+'.fileTextureName', iblMapFileTexture , type="string")
    #cmds.setAttr(dome_map_mr_tex+'.fileTextureName', '' , type="string")
    # Assign the circular mask texture when masking was requested
    if(isMasked):
        # Circular Domemaster Mask Texture Map
        cmds.setAttr(dome_mask_map_mr_tex+'.fileTextureName', domeMaskMapFileTexture , type="string")
        #cmds.setAttr(dome_mask_tex_filter+'.fileTextureName', '' , type="string")
    # Set the IBL mapping to "angular"
    cmds.setAttr(iblShapeName+'.mapping', 1)
    # Set the IBL image type to "texture"
    cmds.setAttr(iblShapeName+'.type', 1)
    # Connect the rest of the MR texture shading network
    # RGB Domemaster Map
    cmds.connectAttr(dome_map_mr_tex+'.message', dome_map_tex_filter+'.tex')
    cmds.connectAttr(dome_tex_vector+'.outValue', dome_tex_remap+'.input')
    cmds.connectAttr(dome_tex_remap+'.outValue', dome_map_tex_filter+'.coord')
    # Choose the masked or unmasked path into the remapColor node
    if(isMasked):
        # Circular Domemaster Mask Map
        cmds.connectAttr(dome_mask_map_mr_tex+'.message', dome_mask_tex_filter+'.tex')
        cmds.connectAttr(dome_tex_remap+'.outValue', dome_mask_tex_filter+'.coord')
        # Create a multiply divide node to comp the alpha mask texture over the fulldome image.
        cmds.connectAttr(dome_mask_tex_filter+'.outValue', dome_mask_multiply+'.input1')
        cmds.connectAttr(dome_map_tex_filter+'.outValue', dome_mask_multiply+'.input2')
        # Apply a composited fulldome mask over the domemaster RGB image
        cmds.connectAttr(dome_mask_multiply+'.output', dome_remap_color+'.color')
    else:
        # Skip the domemaster mask and just apply the raw RGB image
        cmds.connectAttr(dome_map_tex_filter+'.outValue', dome_remap_color+'.color')
    # Connect the mr material to the ibl texture input
    # Connect the remapColor node between the mib_texture_lookup and mentalrayIblShape1 nodes
    cmds.connectAttr(dome_remap_color+'.outColor', iblShapeName+'.color')
    # or
    # Skip the remapColor node and connect mib_texture_lookup.OutValue > mentalrayIblShape1.color
    #cmds.connectAttr(dome_map_tex_filter+'.outValue', iblShapeName+'.color')
    # Scale the texture to fit a 180 degree angular fisheye in the IBL nodes'
    # 360 degree fisheye image space
    cmds.setAttr(dome_tex_remap+'.minX', -0.5)
    cmds.setAttr(dome_tex_remap+'.minY', -0.5)
    cmds.setAttr(dome_tex_remap+'.minZ', 0)
    cmds.setAttr(dome_tex_remap+'.maxX', 1.5)
    cmds.setAttr(dome_tex_remap+'.maxY', 1.5)
    cmds.setAttr(dome_tex_remap+'.maxZ', 1.0)
    # Set the matrix to use a -1 mirror effect on the transform matrix
    #cmds.setAttr(dome_tex_remap+'.transform',(-1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1),type='matrix')
    # Work around a Maya 2010 Tupple matrix setAttr issue with the above command
    melSetAttrMatrixString = 'setAttr "' + dome_tex_remap + '.transform" -type "matrix" -1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1;'
    mel.eval(melSetAttrMatrixString)
    #melSetAttrMatrixString = 'setAttr \\"' + dome_tex_remap + '.transform\\" -type \\"matrix\\" -1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1;'
    #melRunString = 'mel.eval("' + melSetAttrMatrixString + '")'
    #print("Mel string: " + melRunString)
    #cmds.evalDeferred(melRunString)
    # Disabled Maya 2015+ emit-light support block (kept for reference):
    """
    if (mayaVersion >= 2015):
        # Run the ENV Light creator attr from:
        #/Applications/Autodesk/mentalrayForMaya2015/scripts/AETemplates/AEmentalrayIblShapeTemplate.mel
        mel.eval('miSetEnvironmentLightingQuality()')
        # Set the IBL light emission quality to 0.5
        envLightQuality = 0.5
        cmds.floatSliderGrp('miEnvironmentLightingQualityCtrl', edit=True, value=envLightQuality)
        # Enable Light Emission
        cmds.setAttr(iblShapeName+'.enableEmitLight', 1)
    """
    # Position the IBL dome shape at the origin
    cmds.setAttr(iblTransformName+'.translateX', 0)
    cmds.setAttr(iblTransformName+'.translateY', 0)
    cmds.setAttr(iblTransformName+'.translateZ', 0)
    cmds.setAttr(iblTransformName+'.rotateZ', 0)
    # Right side up
    #cmds.setAttr(iblTransformName+'.rotateX', 90)
    #cmds.setAttr(iblTransformName+'.rotateY', 90)
    # Regular Dome Up Orientation
    cmds.setAttr(iblTransformName+'.rotateX', 90)
    cmds.setAttr(iblTransformName+'.rotateY', 0)
    # Flip the env upside down
    #cmds.setAttr(iblTransformName+'.rotateX', -90)
    #cmds.setAttr(iblTransformName+'.rotateY', 90)
    # Mirror the fulldome camera view with the FlipX command
    #cmds.setAttr(domeAFL_lens_node+'.Flip_Ray_X' ,1)
    # Disabled alternative 50 unit preview scale (kept for reference):
    """
    # Scale the IBL preview shape to 50 units in size
    cmds.setAttr(iblTransformName+'.scaleX', 50)
    cmds.setAttr(iblTransformName+'.scaleY', 50)
    cmds.setAttr(iblTransformName+'.scaleZ', 50)
    """
    # Scale the IBL preview shape to 100 units in size
    cmds.setAttr(iblTransformName+'.scaleX', 100)
    cmds.setAttr(iblTransformName+'.scaleY', 100)
    cmds.setAttr(iblTransformName+'.scaleZ', 100)
    # Create Mental Ray Texture Extra Attributes
    import domeMaterial as domeMaterial
    reload(domeMaterial)
    domeMaterial.createMentalrayTextureExtraAttrs(dome_map_mr_tex, iblMapFileTexture)
    # Select the mentalray texture node in the attribute editor once Maya is
    # idle (the deferred string imports mel itself so it runs standalone)
    melRunString = 'import maya.mel as mel \n'
    melRunString += 'mel.eval(\"showEditorExact(\\"' + dome_map_mr_tex + '\\")\")'
    #print("Deferred string: " + melRunString)
    cmds.evalDeferred(melRunString)
    # or
    #mel.eval('showEditorExact("'+ dome_map_mr_tex + '")')
"""
A python function to create a mental ray texture and connect it to a mental ray mentalrayIbl node.
This function will offset the texture coordinates so a hemirect/hemi-equirectangular (half height equirectangular) image sits at the top of the mental ray IBL system's 360x180 degree spherical input.
"""
def createHemirectIBL():
    import maya.cmds as cmds
    import maya.mel as mel

    # Maya version is looked up for the (currently disabled) Maya 2015+
    # emit-light support path.
    mayaVersion = getMayaVersionDome()

    # The mental ray plugin has to be active before MR nodes can be created.
    forceMentalRayLoad()

    # Remove any IBL shapes left over from a previous run.
    if cmds.objExists('mentalrayIbl*'):
        cmds.delete('mentalrayIbl*')
        print("Removing the old mentalrayIbl shape.")

    # Make sure the miDefaultOptions node exists before touching MR
    # indirect lighting features.
    mel.eval("miCreateDefaultNodes();")

    # Load the hemirect alignment grid control map.
    iblMapPath = getSourceImagesPath("hemirectAlignmentGrid_4x2k.png")
    print("[Loading IBL Map]: " + iblMapPath)

    # Build a new mentalrayIblShape node with the stock miCreateIbl helper.
    mel.eval("source \"createMentalRayIndirectLightingTab.mel\";")
    mel.eval("miCreateIbl();")

    # Find the transform and shape names of the IBL node that was just made.
    iblNodes = cmds.ls('mentalrayIbl*')
    iblTransform = iblNodes[0]
    iblShape = iblNodes[1]

    # Selecting the shape in the attribute editor adds its extra attributes.
    mel.eval('showEditorExact("' + iblShape + '")')

    # Build the custom shading network nodes.
    texLookup = cmds.shadingNode('mib_texture_lookup', n='hemi_map_mib_texture_lookup1', asTexture=True)
    texVector = cmds.shadingNode('mib_texture_vector', n='hemi_mib_texture_vector1', asUtility=True)
    texRemap = cmds.shadingNode('mib_texture_remap', n='hemi_mib_texture_remap1', asUtility=True)
    mrTexture = cmds.shadingNode('mentalrayTexture', n='hemi_map_mentalrayTexture1', asTexture=True)
    remapColor = cmds.shadingNode('remapColor', n='hemi_remapColor1', asTexture=True)

    # Assign the texture map and set IBL mapping 0 = "spherical",
    # image type 1 = "texture".
    cmds.setAttr(mrTexture+'.fileTextureName', iblMapPath, type="string")
    cmds.setAttr(iblShape+'.mapping', 0)
    cmds.setAttr(iblShape+'.type', 1)

    # Route the texture lookup through the remapColor node into the IBL
    # color input, and wire up the rest of the MR texture network.
    cmds.connectAttr(texLookup+'.outValue', remapColor+'.color')
    cmds.connectAttr(remapColor+'.outColor', iblShape+'.color')
    cmds.connectAttr(mrTexture+'.message', texLookup+'.tex')
    cmds.connectAttr(texVector+'.outValue', texRemap+'.input')
    cmds.connectAttr(texRemap+'.outValue', texLookup+'.coord')

    # Fit the half-height latlong image to the top of the IBL node's
    # 360x180 degree spherical image space.
    for remapAttr, remapValue in [('.minX', 0), ('.minY', 0), ('.minZ', 0),
                                  ('.maxX', 1), ('.maxY', 2), ('.maxZ', 1),
                                  ('.offsetX', 0), ('.offsetY', -1), ('.offsetZ', 0)]:
        cmds.setAttr(texRemap+remapAttr, remapValue)

    # Apply a -1 X mirror on the remap transform matrix. MEL is used here
    # to work around a Maya 2010 tuple matrix setAttr issue.
    mel.eval('setAttr "' + texRemap + '.transform" -type "matrix" -1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1;')

    # Place the IBL dome at the origin with the regular dome up orientation
    # and a 100 unit preview scale.
    for xformAttr, xformValue in [('.translateX', 0), ('.translateY', 0), ('.translateZ', 0),
                                  ('.rotateX', 0), ('.rotateY', 0), ('.rotateZ', 0),
                                  ('.scaleX', 100), ('.scaleY', 100), ('.scaleZ', 100)]:
        cmds.setAttr(iblTransform+xformAttr, xformValue)

    # Add the Domemaster3D extra attributes to the mentalray texture node.
    import domeMaterial as domeMaterial
    reload(domeMaterial)
    domeMaterial.createMentalrayTextureExtraAttrs(mrTexture, iblMapPath)

    # Select the mentalray texture node in the attribute editor once Maya is
    # idle; the deferred string imports mel itself so it runs standalone.
    deferredRunString = 'import maya.mel as mel \n'
    deferredRunString += 'mel.eval(\"showEditorExact(\\"' + mrTexture + '\\")\")'
    cmds.evalDeferred(deferredRunString)
"""
A python function to make sure mental ray is active
and the MR shading nodes are ready to be used.
"""
def forceMentalRayLoad():
    """Load the Mayatomr plugin on demand and switch the current renderer to mental ray."""
    import maya.cmds as cmds
    import maya.mel as mel

    # Load the mental ray plugin only if it is not already active.
    if not cmds.pluginInfo("Mayatomr", q=True, loaded=True):
        cmds.loadPlugin("Mayatomr")
        print("The Mental Ray plugin was loaded.")

    # Set the active renderer deferred (once Maya is idle) to avoid
    # Hypershade red node errors while the plugin is still initializing.
    deferredRunString = 'import maya.mel as mel \n'
    deferredRunString += 'mel.eval(\"setCurrentRenderer mentalRay\")'
    cmds.evalDeferred(deferredRunString)
# Check what version of Maya is active
def getMayaVersionDome():
    """Return the running Maya application version as a float (e.g. 2015.0)."""
    import maya.cmds as cmds
    import maya.mel as mel

    # getApplicationVersionAsFloat is available on Maya 2011 and higher.
    versionNumber = mel.eval("getApplicationVersionAsFloat;")
    # Report the detected version in the script editor.
    print("Maya " + str(versionNumber) + " detected.\n")
    return versionNumber
"""
A python function to get the current object's shape node
getObjectShapeNode("stereoCamera")
# Result: [u'stereoCameraCenterCamShape', u'stereoCameraFrustum'] #
"""
def getObjectShapeNode(object):
    """Return the shape node(s) directly under *object* (a transform name)."""
    import maya.cmds as cmds
    shapeNodes = cmds.listRelatives(object, children=True, shapes=True)
    print('Shape: ')
    print(shapeNodes)
    return shapeNodes
"""
A python function to get the current object's parent node
getObjectParentNode("nurbsSphereShape1")
# Result: [u'nurbsSphere1'] #
"""
def getObjectParentNode(object):
    """Return the parent node(s) of *object* (typically a shape node name)."""
    import maya.cmds as cmds
    parentNodes = cmds.listRelatives(object, parent=True)
    print('Parent: ')
    print(parentNodes)
    return parentNodes
"""
A python function to lock/unlock an ancestor plug connection
unlockAncestor('stereoCameraRight.rotate', True)
# Result: [u'stereoCameraRight.rotate'] #
"""
def unlockAncestor(connectionName, lockState):
    """Apply *lockState* to *connectionName* when it has a locked ancestor plug.

    NOTE(review): the attribute that gets (un)locked is connectionName itself,
    not the ancestor reported by connectionInfo -- confirm this is intended.
    """
    import maya.cmds as cmds
    if cmds.connectionInfo(connectionName, getLockedAncestor=True):
        cmds.setAttr(connectionName, lock=lockState)
        print('[Locked Ancestor State] ' + connectionName + ' ' + str(lockState))
    return connectionName
|
992,068 | ea01654d46b56aa7aa8bbf3ad3ea7c0414a068a4 | class Solution:
def merge(self, nums1, m, nums2, n):
"""
:type nums1: List[int]
:type m: int
:type nums2: List[int]
:type n: int
:rtype: void Do not return anything, modify nums1 in-place instead.
"""
if not nums2:
return
idx = m + n - 1 # start from bottom
i, j = m-1, n-1
while idx >= 0 and i >= 0 and j >= 0:
if nums1[i] > nums2[j]:
nums1[idx] = nums1[i]
i -= 1
idx -= 1
else:
nums1[idx] = nums2[j]
j -= 1
idx -= 1
if i < 0:
while j >= 0:
nums1[idx] = nums2[j]
j -= 1
idx -= 1
elif j < 0:
while i >= 0:
nums1[idx] = nums1[i]
i -= 1
idx -= 1
|
992,069 | 07cf935d7a6d71c48b22be9a86cf423d76c0b08f | import glob
import sys
import cdms2 as cdms
import numpy as np
import MV2 as MV
import difflib
import pickle
#import ENSO_years_piControl as en
# Flag: are we running on the "crunchy" host?
global crunchy  # no-op at module scope; kept from the original
import socket
crunchy = socket.gethostname().find("crunchy") >= 0
import peakfinder as pf
import cdtime,cdutil,genutil
from eofs.cdms import Eof
from eofs.multivariate.cdms import MultivariateEof
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.patches as patches
### Set classic Netcdf (ver 3)
cdms.setNetcdfShuffleFlag(0)
cdms.setNetcdfDeflateFlag(0)
cdms.setNetcdfDeflateLevelFlag(0)
import scipy.stats as stats
from scipy.interpolate import interp1d
from Plotting import *
import CMIP5_tools as cmip5
class CLOUDS():
    """Container for zonal-mean cloud-fraction and precipitation observations.

    Loads ISCCP/PATMOS-x cloud datasets (artifact-corrected and raw) plus
    GPCP/CMAP precipitation, and provides trend, correlation and
    annual-cycle diagnostics with matplotlib plotting helpers.
    """
    def __init__(self):
        """ Read in cloud and precipitation obs data"""
        self.datasets = ["ISCCP","ISCCP_raw","PATMOSX","PATMOSX_raw"]
        f = cdms.open("OBS/clt_ISCCP_corrected_198301-200912.nc")
        fp = cdms.open("OBS/clt_PATMOSX_corrected_198301-200912.nc")
        f_old = cdms.open("OBS/clt_ISCCP_198307-200806.nc")
        fp_old = cdms.open("OBS/clt_PATMOSX_198200-200912.nc")
        fgpcp = cdms.open("OBS/GPCP.precip.mon.mean.nc")
        fcmap = cdms.open("OBS/CMAP.std.precip.mon.mean.nc")
        # NaNs in the satellite records are converted to masked values.
        self.ISCCP = f("clt",time=('1984-1-1','2009-12-31'))
        self.ISCCP = MV.masked_where(np.isnan(self.ISCCP),self.ISCCP)
        cdutil.setTimeBoundsMonthly(self.ISCCP)
        self.PATMOSX = fp("clt",time=('1984-1-1','2009-12-31'))
        self.PATMOSX = MV.masked_where(np.isnan(self.PATMOSX),self.PATMOSX)
        cdutil.setTimeBoundsMonthly(self.PATMOSX)
        # NOTE(review): '2008-6-31' is not a real calendar date; presumably
        # cdms tolerates it as a month-end bound -- confirm.
        self.ISCCP_raw = f_old("clt",time=('1984-1-1','2008-6-31'))
        self.ISCCP_raw = MV.masked_where(np.isnan(self.ISCCP_raw),self.ISCCP_raw)
        cdutil.setTimeBoundsMonthly(self.ISCCP_raw)
        self.PATMOSX_raw = fp_old("clt",time=('1982-1-1','2009-12-31'))
        self.PATMOSX_raw = MV.masked_where(np.isnan(self.PATMOSX_raw),self.PATMOSX_raw)
        cdutil.setTimeBoundsMonthly(self.PATMOSX_raw)
        # Precipitation records are zonally averaged on read.
        self.GPCP = cdutil.averager(fgpcp("precip",time=('1979-1-1','2014-12-31'),latitude=(-90,90)),axis='x')
        cdutil.setTimeBoundsMonthly(self.GPCP)
        self.CMAP = cdutil.averager(fcmap("precip",time=('1979-1-1','2014-12-31'),latitude=(-90,90)),axis='x')
        # Put CMAP on GPCP's time axis so the two can be compared directly.
        self.CMAP.setAxis(0,self.GPCP.getTime())
        cdutil.setTimeBoundsMonthly(self.CMAP)
    def trough_trends(self,dataset,smooth = None):
        """Seasonal decadal trends of the NH/SH subtropical trough latitudes.

        Returns (nhtrends, shtrends): dicts keyed by season, each value being
        [trend per decade, trend error per decade, mean trough latitude].
        """
        start='1984-1-1'
        stop = '2008-6-31'
        seasons = ["DJF","MAM","JJA","SON"]
        nhtrends = {}
        shtrends = {}
        data = getattr(self,dataset)(time=(start,stop))
        # Convert the regression slope (per time-axis unit) to per-decade.
        tax_units = data.getTime().units.split()[0]
        if tax_units == "days":
            fac = 3650 #per day -> per decade
        elif tax_units == "months":
            fac = 120 #per month -> per decade
        elif tax_units == "hours":
            fac = 3650*24 #per hour -> per decade
        else:
            print "units not recognized"
            fac=1
        for season in seasons:
            seasondata = getattr(cdutil,season)(data,criteriaarg=(1,None))
            if smooth is not None:
                seasondata = pf.spatially_smooth(seasondata,sigma=smooth)
            NH = NH_trough(seasondata)
            SH = SH_trough(seasondata)
            slope,error = genutil.statistics.linearregression(NH,axis=0,nointercept=1,error=1)
            nhtrends[season]=[fac*float(slope),fac*float(error),np.ma.average(NH)]
            slope,error = genutil.statistics.linearregression(SH,axis=0,nointercept=1,error=1)
            shtrends[season]=[fac*float(slope),fac*float(error),np.ma.average(SH)]
        return nhtrends,shtrends
    def get_all_trough_trends(self,smooth=None):
        """Trough trends for every dataset; returns (NH, SH) dicts of dicts."""
        NH = {}
        SH = {}
        datasets = ["ISCCP","ISCCP_raw","PATMOSX","PATMOSX_raw","GPCP","CMAP"]
        for dataset in datasets:
            nhtrends,shtrends = self.trough_trends(dataset,smooth=smooth)
            NH[dataset] = nhtrends
            SH[dataset] = shtrends
        return NH,SH
    def test_smooth(self):
        """Cache trend dicts at several smoothing scales for comparison."""
        self.NH_nosmooth,self.SH_nosmooth = self.get_all_trough_trends()
        self.NH5,self.SH5 = self.get_all_trough_trends(smooth=5)
        self.NH2,self.SH2 = self.get_all_trough_trends(smooth=2)
    def trough_correlations(self,season,smooth = None):
        """Pairwise correlations of trough-latitude series across datasets.

        Plots upper-triangular correlation matrices for both hemispheres and
        returns (SH, NH) as masked arrays.
        """
        NH = np.zeros((6,6))
        SH = np.zeros((6,6))
        start='1984-1-1'
        stop = '2008-6-31'
        datasets = ["ISCCP","ISCCP_raw","PATMOSX","PATMOSX_raw","GPCP","CMAP"]
        for i in range(6):
            dataset1 = getattr(self,datasets[i])(time=(start,stop))
            if smooth is not None:
                dataset1 = pf.spatially_smooth(dataset1,sigma=smooth)
            j = 5
            # Only the upper triangle (j > i) is filled.
            while j>i:
                dataset2 = getattr(self,datasets[j])(time=(start,stop))
                if smooth is not None:
                    dataset2 = pf.spatially_smooth(dataset2,sigma=smooth)
                SH[i,j]=float(genutil.statistics.correlation(SH_trough(getattr(cdutil,season)(dataset1,criteriaarg=(1,None))),SH_trough(getattr(cdutil,season)(dataset2,criteriaarg=(1,None)))))
                NH[i,j]=float(genutil.statistics.correlation(NH_trough(getattr(cdutil,season)(dataset1,criteriaarg=(1,None))),NH_trough(getattr(cdutil,season)(dataset2,criteriaarg=(1,None)))))
                j -= 1
        # Zero entries were never computed; mask them out of the plot.
        NH = MV.masked_where(NH==0,NH)
        SH = MV.masked_where(SH==0,SH)
        plt.subplot(121)
        plt.pcolor(NH,vmin=-1,vmax=1)
        plt.xticks(np.arange(6)+.5,datasets,rotation=90)
        plt.yticks(np.arange(6)+.5,datasets)
        for i in range(6):
            for j in range(6):
                if not NH.mask[i,j]:
                    plt.text(j+.5,i+.5,str(np.round(NH[i,j],2)))
        plt.subplot(122)
        plt.pcolor(SH,vmin=-1,vmax=1)
        plt.xticks(np.arange(6)+.5,datasets,rotation=90)
        plt.yticks(np.arange(6)+.5,datasets)
        for i in range(6):
            for j in range(6):
                if not SH.mask[i,j]:
                    plt.text(j+.5,i+.5,str(np.round(SH[i,j],2)))
        return SH,NH
    def amplitudes(self,dataset):
        """ Calculate the amplitude of the annual cycle for each year """
        if ((dataset =="GPCP") or (dataset == "CMAP")):
            start = '1979-1-1'
            stop = '2014-12-31'
        else:
            start = '1984-1-1'
            if dataset == "ISCCP_raw":
                stop = '2007-12-31'
            else:
                stop = '2009-12-31'
        X = getattr(self,dataset)(time=(start,stop))
        R,P = sc.fast_annual_cycle(X)
        return MV.masked_where(np.isnan(R),R)
    def phases(self,dataset):
        """ Calculate phases of the annual cycle for each year """
        start = '1984-1-1'
        if dataset == "ISCCP_raw":
            stop = '2007-12-31'
        else:
            stop = '2009-12-31'
        X = getattr(self,dataset)(time=(start,stop))
        R,P = sc.fast_annual_cycle(X)
        return MV.masked_where(np.isnan(P),P)
    def get_colors(self,label):
        """Canonical plotting color for each dataset name."""
        d={}
        d["ISCCP"]=cm.Blues(.9)
        d["ISCCP_raw"]=cm.Blues(.5)
        d["PATMOSX"]=cm.Reds(.9)
        d["PATMOSX_raw"]=cm.Reds(.5)
        d["GPCP"] = cm.PuOr(.1)
        d["CMAP"]=cm.PuOr(.9)
        return d[label]
    def plot_seasonal_climatologies(self,season,**kwargs):
        """Plot the seasonal climatology vs latitude for a dataset pair.

        Keyword args: raw (use uncorrected cloud data), precip (use
        GPCP/CMAP instead of clouds).
        """
        raw=kwargs.pop("raw",False)
        precip = kwargs.pop("precip",False)
        print precip
        if not raw:
            if precip:
                datasets = ["GPCP","CMAP"]
            else:
                datasets = ["ISCCP","PATMOSX"]
        else:
            datasets = ["ISCCP_raw","PATMOSX_raw"]
        for dataset in datasets:
            # [0] drops the singleton time axis of the climatology.
            clim=getattr(cdutil,season).climatology(getattr(self,dataset))[0]
            plt.plot(clim.getLatitude()[:],clim.asma(),lw=3,color=self.get_colors(dataset),label=dataset)
    def plot_seasonal_trends(self,season,**kwargs):
        """Plot per-decade trends of seasonal departures vs latitude."""
        raw=kwargs.pop("raw",False)
        if not raw:
            datasets = ["ISCCP","PATMOSX"]
        else:
            datasets = ["ISCCP_raw","PATMOSX_raw"]
        for dataset in datasets:
            data = getattr(cdutil,season).departures(getattr(self,dataset))
            # *120: per-month slope -> per-decade trend.
            trends = genutil.statistics.linearregression(data,axis=0,nointercept=1)*120
            lat = data.getLatitude()[:]
            plt.plot(lat,trends.asma(),"-",color=self.get_colors(dataset))#,mec=self.get_colors(dataset))
        plt.axhline(0,ls=":",c="k")
    def get_amplitude_extrema(self,dataset,smooth=None,raw=False,precip=False,normalize_by_annual = False):
        """Feature-latitude time series of the annual-cycle amplitude.

        Returns {feature function name: masked time series} for one dataset.
        """
        functions = [SH_peak_stormtrack,SH_eqflank_stormtrack,SH_ITCZ_extent,thermal_equator,NH_ITCZ_extent,NH_eqflank_stormtrack,NH_peak_stormtrack]
        d = {}
        L = len(functions)
        for i in range(L):
            func = functions[i]
            R = getattr(self,"amplitudes")(dataset)
            if normalize_by_annual:
                R = cmip5.cdms_clone(R/cdutil.YEAR(getattr(self,dataset)),R)
            if smooth is not None:
                R = pf.spatially_smooth(R,sigma=smooth)
            test_i = func(R)
            d[func.__name__]=test_i
        return d
    def amplitude_extrema(self,anom=False,smooth=None,raw=False,precip=False,normalize_by_annual = False):
        """Plot feature-latitude time series for a dataset pair, one subplot
        per feature; returns {dataset: {feature name: series}}."""
        functions = [SH_peak_stormtrack,SH_eqflank_stormtrack,SH_ITCZ_extent,thermal_equator,NH_ITCZ_extent,NH_eqflank_stormtrack,NH_peak_stormtrack]
        if raw:
            isccp="ISCCP_raw"
            patmos = "PATMOSX_raw"
        elif precip:
            isccp = "GPCP"
            patmos = "CMAP"
        else:
            isccp = "ISCCP"
            patmos = "PATMOSX"
        D = {}
        D[isccp] = {}
        D[patmos] = {}
        L = len(functions)
        for i in range(L):
            plt.subplot(4,2,i+1)
            func = functions[i]
            R = getattr(self,"amplitudes")(isccp)
            if normalize_by_annual:
                R = cmip5.cdms_clone(R/cdutil.YEAR(getattr(self,isccp)),R)
            if smooth is not None:
                R = pf.spatially_smooth(R,sigma=smooth)
            test_i = func(R)
            D[isccp][func.__name__]=test_i
            if anom:
                test_i = test_i.anom()
            time_plot(test_i,color=self.get_colors(isccp),marker="o",mec=self.get_colors(isccp))
            R = getattr(self,"amplitudes")(patmos)
            if normalize_by_annual:
                R = cmip5.cdms_clone(R/cdutil.YEAR(getattr(self,patmos)),R)
            if smooth is not None:
                R = pf.spatially_smooth(R,sigma=smooth)
            test_p = func(R)
            D[patmos][func.__name__]=test_p
            if anom:
                test_p = test_p.anom()
            time_plot(test_p,color=self.get_colors(patmos),marker="o",mec=self.get_colors(patmos))
            plt.title(func.__name__)
            # try:
            #     print "CORRELATION FOR "+func.__name__+ "IS :"+str(genutil.statistics.correlation(test_p,test_i))
            #except:
            #    print "CORRELATION FOR "+func.__name__+ "doesn't exist because records are different lengths"
            if anom:
                plt.ylim(-2.5,2.5)
        return D
    def get_mma_for_fingerprint(self,z,piControl=False):
        """Apply every feature function to each model in a multi-model
        amplitude array; returns (model, time, feature) masked variable."""
        functions = [SH_peak_stormtrack,SH_eqflank_stormtrack,SH_ITCZ_extent,thermal_equator,NH_ITCZ_extent,NH_eqflank_stormtrack,NH_peak_stormtrack]
        if piControl:
            X = z.piControl_raw("amp")
        else:
            X = z.amp
        nmod,nt,nlat = X.shape
        nfunc = len(functions)
        # 1.e20 is the fill value; entries never written stay masked below.
        test = MV.zeros((nmod,nt,nfunc))+1.e20
        for i in range(nfunc):
            func = functions[i]
            # NOTE(review): hard-coded 20 models rather than nmod -- confirm.
            for modeli in range(20):
                try:
                    test[modeli,:,i] = func(X[modeli])
                except:
                    continue
        test = MV.masked_where(test>1.e10,test)
        test.setAxis(0,X.getAxis(0))
        test.setAxis(1,X.getTime())
        funcax = cdms.createAxis(np.arange(nfunc))
        funcax.points = str([x.__name__ for x in functions])
        test.setAxis(-1,funcax)
        return test
    def plot_trends_amplitude_extrema(self,raw=False,smooth=None,precip=False,normalize_by_annual=False):
        """Errorbar plot of per-decade trends in each feature latitude for a
        dataset pair (one x position per feature)."""
        labels = []
        functions = [SH_peak_stormtrack,SH_eqflank_stormtrack,SH_ITCZ_extent,thermal_equator,NH_ITCZ_extent,NH_eqflank_stormtrack,NH_peak_stormtrack]
        L = len(functions)
        for i in range(L):
            # plt.subplot(4,2,i+1)
            func = functions[i]
            if raw:
                isccp="ISCCP_raw"
                patmos = "PATMOSX_raw"
            elif precip == True:
                isccp = "GPCP"
                patmos = "CMAP"
            else:
                isccp = "ISCCP"
                patmos = "PATMOSX"
            R = getattr(self,"amplitudes")(isccp)
            if normalize_by_annual:
                R = cmip5.cdms_clone(R/cdutil.YEAR(getattr(self,isccp)),R)
            if smooth is not None:
                R = pf.spatially_smooth(R,sigma=smooth)
            test_i = func(R)
            trend_i,error_i,Pt1,Pt2,Pf1,Pf2 = genutil.statistics.linearregression(test_i,axis=0,nointercept=1,probability=1,error=1)
            plt.errorbar([i],trend_i*120,yerr=error_i*120,color=self.get_colors(isccp),marker="o",mec=self.get_colors(isccp),markersize=10,lw=3,label=isccp)
            #units are months so turn into per decade
            R = getattr(self,"amplitudes")(patmos)
            if normalize_by_annual:
                R = cmip5.cdms_clone(R/cdutil.YEAR(getattr(self,patmos)),R)
            if smooth is not None:
                R = pf.spatially_smooth(R,sigma=smooth)
            test_p = func(R)
            trend_p,error_p,Pt1,Pt2,Pf1,Pf2 = genutil.statistics.linearregression(test_p,axis=0,nointercept=1,probability=1,error=1)
            plt.errorbar([i+.4],trend_p*120,yerr=error_p*120,color=self.get_colors(patmos),marker="o",mec=self.get_colors(patmos),markersize=10,lw=3,label=patmos)
            #units are months so turn into per decade
            labels += [func.__name__]
        plt.xticks(np.arange(L)+.2,labels,rotation=90)
    def plot_amplitude_and_climatology(self,dataset):
        """Overlay a dataset's climatology and its mean annual-cycle
        amplitude vs latitude, marking amplitude peaks between 40S-40N."""
        start = '1984-1-1'
        if dataset == "ISCCP_raw":
            stop = '2007-12-31'
        else:
            stop = '2009-12-31'
        X = getattr(self,dataset)(time=(start,stop))
        lat_plot(MV.average(X,axis=0),color=self.get_colors(dataset),ls="--",lw=4,label = "Climatology ("+dataset+")")
        R = MV.average(self.amplitudes(dataset),axis=0)
        lat_plot(R,color=self.get_colors(dataset),ls="-",lw=4,label = "Annual cycle amplitude ("+dataset+")")
        x,y = pf.find_all_peaks(R(latitude=(-40,40)))
        plt.plot(x,y,"o",color=self.get_colors(dataset),mec=self.get_colors(dataset))
        for thing in x:
            plt.axvline(thing,color=self.get_colors(dataset),ls=":")
    def plot_all_amplitudes_and_climatologies(self):
        """Two-panel figure: ISCCP (top) and PATMOS-x (bottom), corrected
        and raw versions overlaid."""
        plt.subplot(211)
        self.plot_amplitude_and_climatology("ISCCP")
        self.plot_amplitude_and_climatology("ISCCP_raw")
        plt.xticks(np.arange(-90,90+15,15))
        latitude_label_ticks(plt.gca())
        plt.xlim(-90,90)
        plt.ylabel("Total Cloud Fraction")
        plt.legend(loc=0,ncol=2,numpoints=1,fontsize=10)
        plt.title("ISCCP")
        plt.subplot(212)
        self.plot_amplitude_and_climatology("PATMOSX")
        self.plot_amplitude_and_climatology("PATMOSX_raw")
        plt.xticks(np.arange(-90,90+15,15))
        latitude_label_ticks(plt.gca())
        plt.xlim(-90,90)
        plt.ylabel("Total Cloud Fraction")
        plt.legend(loc=0,ncol=2,numpoints=1,fontsize=10)
        plt.title("PATMOS-x")
    def plot_seasonal_variations(self,dataset,**kwargs):
        """Plot each month's climatology vs latitude with vertical lines at
        the mean feature latitudes; adds a month color-wheel inset."""
        if (dataset == "ISCCP") or (dataset == "PATMOSX"):
            variable="Total Cloud Fraction"
        else:
            variable = "Precip (mm/day)"
        linecolor = kwargs.pop("c","k")
        d = self.get_amplitude_extrema(dataset,**kwargs)
        keys = ['NH_peak_stormtrack','NH_ITCZ_extent','SH_ITCZ_extent','SH_peak_stormtrack']
        for k in keys:
            plt.axvline(np.ma.average(d[k]),c=linecolor)
        data = getattr(self,dataset)
        months = ["JAN","FEB","MAR","APR","MAY","JUN","JUL","AUG","SEP","OCT","NOV","DEC"]
        colors = [cm.hsv(i/12.) for i in range(12)]
        i=0
        for season in months:
            lat_plot(getattr(cdutil,season).climatology(data)[0],lw=3,color=colors[i])
            i+=1
        latitude_label_ticks(plt.gca())
        plt.ylabel(variable)
        plt.title(dataset)
        a = plt.axes([.65, .6, .2, .2])
        # NOTE: local `patches` shadows the matplotlib.patches module alias.
        patches,texts=a.pie(np.ones(12),startangle=90,colors=colors,labels= months)
import seasonal_cycle_utils as sc
def get_amplitude(dataset):
    """Annual-cycle amplitude of *dataset* over 1984-2009, NaNs masked."""
    window = ('1984-1-1', '2009-12-31')
    amp, _phase = sc.fast_annual_cycle(dataset(time=window))
    return MV.masked_where(np.isnan(amp), amp)
def plot_amplitude(dataset,cmap=cm.RdYlBu):
    """Overlay each year's amplitude profile, colored by year via *cmap*."""
    R = get_amplitude(dataset)
    nyears = R.shape[0]
    for yr in range(nyears):
        lat_plot(R[yr], c=cmap(yr / float(nyears)))
def SH_ITCZ_extent(R,value=False):
    """Track the southern-hemisphere ITCZ feature (20S-0) through time.

    R: 2-d (time, latitude) transient variable of annual-cycle amplitudes.
    For each time step, find the local maximum in the band closest to the
    climatological maximum.  Returns a masked series on R's time axis of
    the latitude of that maximum (default) or, if value=True, the amplitude
    there.  Steps with no local maximum are masked.

    BUG FIX: the original referenced the undefined name `ymaxes`, raising
    NameError whenever value=True and several maxima were found.
    """
    nt,nlat = R.shape
    lat_bounds = (-20,0)
    test = []
    climatology = MV.average(R,axis=0)
    Xclim = climatology(latitude=lat_bounds)
    clim_max,clim_min,clim_ymax,clim_ymins=pf.find_all_peaks(Xclim,return_maxmin=True)
    if len(clim_max)>1:
        # Several climatological maxima: keep the most pronounced one.
        idx = np.argmax(clim_ymax)
        clim_max = clim_max[idx]
    for i in range(nt):
        X = R[i](latitude=lat_bounds)
        maxes,mins,ymax,ymins=pf.find_all_peaks(X,return_maxmin=True)
        if len(maxes) == 1:
            if value:
                test += [ymax[0]]
            else:
                test += [maxes[0]]
        elif len(maxes) > 1:
            # Pick the maximum nearest the climatological one.
            k = np.argmin(np.abs(maxes-clim_max))
            if value:
                test += [ymax[k]]   # was `ymaxes[...]` (undefined)
            else:
                test += [maxes[k]]
        else:
            test += [1.e20]  # sentinel: no maximum found this time step
    test = MV.masked_where(np.array(test)>1.e10,test)
    test.setAxis(0,R.getTime())
    return test
def NH_ITCZ_extent(R,value=False):
    """Track the northern-hemisphere ITCZ feature (0-20N) through time.

    Same contract as SH_ITCZ_extent but for the 0-20N band: returns a
    masked series of the latitude (or, with value=True, the amplitude) of
    the local maximum nearest the climatological one.

    BUG FIX: the original referenced the undefined name `ymaxes`, raising
    NameError whenever value=True and several maxima were found.
    """
    nt,nlat = R.shape
    lat_bounds = (0,20)
    test = []
    climatology = MV.average(R,axis=0)
    Xclim = climatology(latitude=lat_bounds)
    clim_max,clim_min,clim_ymax,clim_ymins=pf.find_all_peaks(Xclim,return_maxmin=True)
    if len(clim_max)>1:
        # Several climatological maxima: keep the most pronounced one.
        idx = np.argmax(clim_ymax)
        clim_max = clim_max[idx]
    for i in range(nt):
        X = R[i](latitude=lat_bounds)
        maxes,mins,ymax,ymins=pf.find_all_peaks(X,return_maxmin=True)
        if len(maxes) == 1:
            if value:
                test += [ymax[0]]
            else:
                test += [maxes[0]]
        elif len(maxes) > 1:
            k = np.argmin(np.abs(maxes-clim_max))
            if value:
                test += [ymax[k]]   # was `ymaxes[...]` (undefined)
            else:
                test += [maxes[k]]
        else:
            test += [1.e20]  # sentinel: no maximum found this time step
    test = MV.masked_where(np.array(test)>1.e10,test)
    test.setAxis(0,R.getTime())
    return test
def NH_peak_stormtrack(R,value=False):
    """Track the NH storm-track amplitude maximum (30N-50N) through time.

    Returns a masked series of the latitude (or, with value=True, the
    amplitude) of the local maximum nearest the climatological one.

    BUG FIX: the original referenced the undefined name `ymaxes`, raising
    NameError whenever value=True and several maxima were found.
    """
    nt,nlat = R.shape
    lat_bounds = (30,50)
    test = []
    climatology = MV.average(R,axis=0)
    Xclim = climatology(latitude=lat_bounds)
    clim_max,clim_min,clim_ymax,clim_ymins=pf.find_all_peaks(Xclim,return_maxmin=True)
    if len(clim_max)>1:
        # Several climatological maxima: keep the most pronounced one.
        idx = np.argmax(clim_ymax)
        clim_max = clim_max[idx]
    for i in range(nt):
        X = R[i](latitude=lat_bounds)
        maxes,mins,ymax,ymins=pf.find_all_peaks(X,return_maxmin=True)
        if len(maxes) == 1:
            if value:
                test += [ymax[0]]
            else:
                test += [maxes[0]]
        elif len(maxes) > 1:
            k = np.argmin(np.abs(maxes-clim_max))
            if value:
                test += [ymax[k]]   # was `ymaxes[...]` (undefined)
            else:
                test += [maxes[k]]
        else:
            test += [1.e20]  # sentinel: no maximum found this time step
    test = MV.masked_where(np.array(test)>1.e10,test)
    test.setAxis(0,R.getTime())
    return test
def SH_peak_stormtrack(R,value=False):
    """Track the SH storm-track amplitude maximum (50S-30S) through time.

    Returns a masked series of the latitude (or, with value=True, the
    amplitude) of the local maximum nearest the climatological one.

    BUG FIX: the original referenced the undefined name `ymaxes`, raising
    NameError whenever value=True and several maxima were found.
    """
    nt,nlat = R.shape
    lat_bounds = (-50,-30)
    test = []
    climatology = MV.average(R,axis=0)
    Xclim = climatology(latitude=lat_bounds)
    clim_max,clim_min,clim_ymax,clim_ymins=pf.find_all_peaks(Xclim,return_maxmin=True)
    if len(clim_max)>1:
        # Several climatological maxima: keep the most pronounced one.
        idx = np.argmax(clim_ymax)
        clim_max = clim_max[idx]
    for i in range(nt):
        X = R[i](latitude=lat_bounds)
        maxes,mins,ymax,ymins=pf.find_all_peaks(X,return_maxmin=True)
        if len(maxes) == 1:
            if value:
                test += [ymax[0]]
            else:
                test += [maxes[0]]
        elif len(maxes) > 1:
            k = np.argmin(np.abs(maxes-clim_max))
            if value:
                test += [ymax[k]]   # was `ymaxes[...]` (undefined)
            else:
                test += [maxes[k]]
        else:
            test += [1.e20]  # sentinel: no maximum found this time step
    test = MV.masked_where(np.array(test)>1.e10,test)
    test.setAxis(0,R.getTime())
    return test
def thermal_equator(R,value=False):
    """Track the amplitude minimum nearest the equator (10S-10N).

    Returns a masked series on R's time axis of the latitude of the local
    minimum closest to the climatological minimum (or its amplitude when
    value=True); steps with no minimum are masked.
    """
    nsteps = R.shape[0]
    bounds = (-10, 10)
    clim = MV.average(R, axis=0)(latitude=bounds)
    _, cmins, _, cymins = pf.find_all_peaks(clim, return_maxmin=True)
    if len(cmins) > 1:
        # Several climatological minima: keep the deepest one.
        cmins = cmins[np.argmin(cymins)]
    vals = []
    for t in range(nsteps):
        _, mins, _, ymins = pf.find_all_peaks(R[t](latitude=bounds),
                                              return_maxmin=True)
        if len(mins) == 1:
            vals.append(ymins[0] if value else mins[0])
        elif len(mins) > 1:
            k = np.argmin(np.abs(mins - cmins))
            vals.append(ymins[k] if value else mins[k])
        else:
            vals.append(1.e20)  # sentinel: no minimum this step
    out = MV.masked_where(np.array(vals) > 1.e10, vals)
    out.setAxis(0, R.getTime())
    return out
def NH_eqflank_stormtrack(R,value=False):
    """Track the amplitude minimum on the NH storm track's equatorward
    flank (10N-40N).

    Returns a masked series of the latitude of the local minimum closest to
    the climatological minimum (or its amplitude when value=True).
    """
    nsteps = R.shape[0]
    bounds = (10, 40)
    clim = MV.average(R, axis=0)(latitude=bounds)
    _, cmins, _, cymins = pf.find_all_peaks(clim, return_maxmin=True)
    if len(cmins) > 1:
        # Several climatological minima: keep the deepest one.
        cmins = cmins[np.argmin(cymins)]
    vals = []
    for t in range(nsteps):
        _, mins, _, ymins = pf.find_all_peaks(R[t](latitude=bounds),
                                              return_maxmin=True)
        if len(mins) == 1:
            vals.append(ymins[0] if value else mins[0])
        elif len(mins) > 1:
            k = np.argmin(np.abs(mins - cmins))
            vals.append(ymins[k] if value else mins[k])
        else:
            vals.append(1.e20)  # sentinel: no minimum this step
    out = MV.masked_where(np.array(vals) > 1.e10, vals)
    out.setAxis(0, R.getTime())
    return out
def SH_eqflank_stormtrack(R,value=False):
    """Track the amplitude minimum on the SH storm track's equatorward
    flank (40S-10S).

    Returns a masked series of the latitude of the local minimum closest to
    the climatological minimum (or its amplitude when value=True).
    """
    nsteps = R.shape[0]
    bounds = (-40, -10)
    clim = MV.average(R, axis=0)(latitude=bounds)
    _, cmins, _, cymins = pf.find_all_peaks(clim, return_maxmin=True)
    if len(cmins) > 1:
        # Several climatological minima: keep the deepest one.
        cmins = cmins[np.argmin(cymins)]
    vals = []
    for t in range(nsteps):
        _, mins, _, ymins = pf.find_all_peaks(R[t](latitude=bounds),
                                              return_maxmin=True)
        if len(mins) == 1:
            vals.append(ymins[0] if value else mins[0])
        elif len(mins) > 1:
            k = np.argmin(np.abs(mins - cmins))
            vals.append(ymins[k] if value else mins[k])
        else:
            vals.append(1.e20)  # sentinel: no minimum this step
    out = MV.masked_where(np.array(vals) > 1.e10, vals)
    out.setAxis(0, R.getTime())
    return out
def get_locations(dataset):
    """Return (PEAKS, TROUGHS): per-year latitudes of the three amplitude
    peaks (SH ITCZ, NH ITCZ, NH storm track) and two troughs (SH/NH
    equatorward flanks), masked where a feature was not found."""
    R = get_amplitude(dataset)
    # NOTE: Rsmooth is computed but never used downstream (kept as-is).
    Rsmooth = pf.spatially_smooth(R, sigma=2)
    Rsmooth = MV.masked_where(np.abs(Rsmooth) > 1.e10, Rsmooth)
    nsteps = R.shape[0]
    peaks = MV.zeros((nsteps, 3)) + 1.e20
    troughs = MV.zeros((nsteps, 2)) + 1.e20
    for col, func in enumerate((SH_ITCZ_extent, NH_ITCZ_extent,
                                NH_peak_stormtrack)):
        peaks[:, col] = func(R)
    for col, func in enumerate((SH_eqflank_stormtrack,
                                NH_eqflank_stormtrack)):
        troughs[:, col] = func(R)
    peaks = MV.masked_where(peaks > 1.e10, peaks)
    troughs = MV.masked_where(troughs > 1.e10, troughs)
    peaks.setAxis(0, R.getTime())
    troughs.setAxis(0, R.getTime())
    return peaks, troughs
def SH_trough(R,value=False):
    """Track the SH subtropical trough (40S-10S) through time.

    Like SH_eqflank_stormtrack, but a fully-masked time step is also
    masked, and with several climatological minima the southernmost
    latitude (np.min) is used as the reference.
    """
    nsteps = R.shape[0]
    bounds = (-40, -10)
    clim = MV.average(R, axis=0)(latitude=bounds)
    _, cmins, _, _ = pf.find_all_peaks(clim, return_maxmin=True)
    if len(cmins) > 1:
        cmins = np.min(cmins)
    vals = []
    for t in range(nsteps):
        X = R[t](latitude=bounds)
        # Skip time steps where the whole band is masked out.
        if len(np.where(X.mask)[0]) == len(X):
            vals.append(1.e20)
            continue
        _, mins, _, ymins = pf.find_all_peaks(X, return_maxmin=True)
        if len(mins) == 1:
            vals.append(ymins[0] if value else mins[0])
        elif len(mins) > 1:
            k = np.argmin(np.abs(mins - cmins))
            vals.append(ymins[k] if value else mins[k])
        else:
            vals.append(1.e20)  # sentinel: no minimum this step
    out = MV.masked_where(np.array(vals) > 1.e10, vals)
    out.setAxis(0, R.getTime())
    return out
def central_max(R,value=False):
    """Track the tropical amplitude maximum (20S-20N) through time.

    Returns a masked series on R's time axis of the latitude of the local
    maximum nearest the climatological maximum (or its amplitude when
    value=True); fully-masked or featureless steps are masked.

    BUG FIXES vs. the original:
    * the multi-peak branch tested `len(mins)` instead of `len(maxes)`, so
      a step with several maxima but at most one minimum fell through to
      the masked-sentinel branch;
    * the climatological reduction shrank `clim_min` (unused here) instead
      of `clim_max`, leaving `clim_max` an array when the climatology has
      several maxima.
    """
    nt,nlat = R.shape
    lat_bounds = (-20,20)
    test = []
    climatology = MV.average(R,axis=0)
    Xclim = climatology(latitude=lat_bounds)
    clim_max,clim_min,clim_ymax,clim_ymins=pf.find_all_peaks(Xclim,return_maxmin=True)
    if len(clim_max)>1:
        # Keep the most pronounced climatological maximum as the reference.
        clim_max = clim_max[np.argmax(clim_ymax)]
    for i in range(nt):
        X = R[i](latitude=lat_bounds)
        # Skip time steps where the whole band is masked out.
        if len(np.where(X.mask)[0]) == len(X):
            test += [1.e20]
            continue
        maxes,mins,ymax,ymins=pf.find_all_peaks(X,return_maxmin=True)
        if len(maxes) == 1:
            if value:
                test += [ymax[0]]
            else:
                test += [maxes[0]]
        elif len(maxes) > 1:   # was: len(mins) > 1
            k = np.argmin(np.abs(maxes-clim_max))
            if value:
                test += [ymax[k]]
            else:
                test += [maxes[k]]
        else:
            test += [1.e20]  # sentinel: no maximum this step
    test = MV.masked_where(np.array(test)>1.e10,test)
    test.setAxis(0,R.getTime())
    return test
def NH_trough(R,value=False):
    """Track the NH subtropical trough (10N-40N) through time.

    Like NH_eqflank_stormtrack, but a fully-masked time step is also
    masked, and with several climatological minima the equatorward-most
    latitude (np.min) is used as the reference.
    """
    nsteps = R.shape[0]
    bounds = (10, 40)
    clim = MV.average(R, axis=0)(latitude=bounds)
    _, cmins, _, _ = pf.find_all_peaks(clim, return_maxmin=True)
    if len(cmins) > 1:
        cmins = np.min(cmins)
    vals = []
    for t in range(nsteps):
        X = R[t](latitude=bounds)
        # Skip time steps where the whole band is masked out.
        if len(np.where(X.mask)[0]) == len(X):
            vals.append(1.e20)
            continue
        _, mins, _, ymins = pf.find_all_peaks(X, return_maxmin=True)
        if len(mins) == 1:
            vals.append(ymins[0] if value else mins[0])
        elif len(mins) > 1:
            k = np.argmin(np.abs(mins - cmins))
            vals.append(ymins[k] if value else mins[k])
        else:
            vals.append(1.e20)  # sentinel: no minimum this step
    out = MV.masked_where(np.array(vals) > 1.e10, vals)
    out.setAxis(0, R.getTime())
    return out
def plot_trough_trends(NH,SH):
    """Arrow plot of trough trends: one arrow per (dataset, season, hemisphere).

    NH/SH: dicts as returned by CLOUDS.get_all_trough_trends --
    {dataset: {season: [trend, error, mean latitude]}}.  Each arrow starts
    at the mean trough latitude and points along 10x the decadal trend.
    """
    datasets = ["ISCCP","ISCCP_raw","PATMOSX","PATMOSX_raw","GPCP","CMAP"]
    seasons =["DJF","MAM","JJA","SON"]
    ax1 = plt.subplot(111)
    plt.xlim(-.5,4.5)
    plt.ylim(-60,60)
    seasoncolors = {}
    seasoncolors["JJA"]=cm.Reds(.8)
    seasoncolors["DJF"]=cm.Blues(.8)
    seasoncolors["MAM"]=cm.Greens(.8)
    seasoncolors["SON"]=cm.Oranges(.5)
    # Horizontal offsets separate the datasets within each season slot.
    diffs = np.linspace(0,.6,6)
    for dataset in datasets:
        spacing = diffs[datasets.index(dataset)]
        for season in seasons:
            i = seasons.index(season)
            x = spacing+i
            # Northern-hemisphere arrow.
            trend,err,mean = NH[dataset][season]
            y = mean
            dx = 0
            dy = 10*trend  # exaggerate the trend for visibility
            width=0.2
            ax1.add_patch(patches.Arrow(x,y,dx,dy,width,color=seasoncolors[season]))
            ax1.text(x,0,dataset,rotation="vertical",verticalalignment="center",fontsize=10)
            # Southern-hemisphere arrow at the same x position.
            trend,err,mean = SH[dataset][season]
            y = mean
            dx = 0
            dy = 10*trend
            width=0.2
            ax1.add_patch(patches.Arrow(x,y,dx,dy,width,color=seasoncolors[season]))
def latplot_check(c,dataset,smooth=None):
    """Plot every year's zonal profile for each season, one panel per season.

    c: a CLOUDS instance; dataset: attribute name on c; smooth: optional
    sigma for spatial smoothing.
    """
    for panel, season in enumerate(["DJF", "MAM", "JJA", "SON"], start=1):
        plt.subplot(2, 2, panel)
        data = getattr(cdutil, season)(getattr(c, dataset),
                                       criteriaarg=(1, None))
        if smooth is not None:
            data = pf.spatially_smooth(data, sigma=smooth)
        nyears = data.shape[0]
        for yr in range(nyears):
            lat_plot(data[yr], c=cm.RdYlBu(yr / float(nyears)), lw=1)
        plt.title(season)
sys.path.append("/Users/kmarvel/Google Drive/SEASONAL/")
from Final_code import ZonalData
import DA_tools as DA
def noise_histograms(control=None):
    """Standard deviation of control-run trends vs. trend length.

    If *control* is None, build concatenated piControl anomaly series from
    the multi-model fingerprint data first.  Returns N, a (91, 7) array:
    rows are trend lengths 10..100, columns the 7 feature functions, values
    the spread of trends of that length in the control run.
    """
    if control is None:
        z = ZonalData(restrict_poles=(-60,60))
        c = CLOUDS()
        test = c.get_mma_for_fingerprint(z)
        # Climatology over the observational period, per model and feature.
        clim = MV.average(test(time=('1979-1-1','2014-12-31')),axis=1)
        ocontrol = c.get_mma_for_fingerprint(z,piControl=True)
        control_anoms = cmip5.cdms_clone(ocontrol.asma()-clim.asma()[:,np.newaxis,:],ocontrol)
        control = DA.concatenate_this(control_anoms)
    # 91 rows = trend lengths 10..100 inclusive.
    N = MV.zeros((91,7))
    count = 0
    for i in np.arange(10,101,1):
        for j in range(7):
            N[count,j] = np.ma.std(DA.get_slopes(control[:,j],i))
        count +=1
    return N
|
992,070 | cef731783cdc91dc3af6e8a7d0c7da6d91b59534 | #!/usr/bin/env python
# coding: utf-8
# In[7]:
# Basic solution - 2^n O
def fib(n):
    """Naive recursive Fibonacci (fib(1) == fib(2) == 1).

    Exponential time; kept deliberately simple for comparison with the
    memoized and bottom-up versions below.
    """
    if n in (1, 2):
        return 1
    return fib(n - 1) + fib(n - 2)
# In[8]:
fib(5)
# In[9]:
fib(35)
# already taking a few seconds
# In[12]:
# A memoized solution - 2n+1 O
def fib_2(n, memo):
    """Memoized Fibonacci helper.

    memo is a list with len(memo) > n; memo[k] caches fib(k) (None means
    not yet computed).  Linear time, linear recursion depth.
    """
    if memo[n] is not None:
        return memo[n]
    if n == 1 or n == 2:
        value = 1
    else:
        value = fib_2(n - 1, memo) + fib_2(n - 2, memo)
    memo[n] = value
    return value


def fib_memo(n):
    """Compute fib(n) via fib_2 with a freshly allocated cache."""
    cache = [None] * (n + 1)
    return fib_2(n, cache)
# In[14]:
fib_memo(35)
# In[15]:
fib_memo(1000)
# In[18]:
# NOTE: still recursive, so depth is limited by sys.getrecursionlimit().
fib_memo(5000)
# start to give RecursionError at 5000
# A bottom-up solution - n O
def fib_bottom_up(n):
    """Iterative Fibonacci in O(n) time (fib(1) == fib(2) == 1).

    Improvement over the original: the O(n) scratch list is rolled into two
    scalars, giving O(1) extra space with identical results for all n >= 1.
    No recursion, so large n (e.g. 5000) poses no depth limit.
    """
    if n == 1 or n == 2:
        return 1
    prev, curr = 1, 1  # fib(k-1), fib(k) starting at k == 2
    for _ in range(3, n + 1):
        prev, curr = curr, prev + curr
    return curr
# In[21]:
# Iterative version: no recursion limit, so n=5000 is fine.
fib_bottom_up(5000)
# handled easily
|
992,071 | 9454b359f30e7fef6e21b4fa0e47f32e3f3a5f4d | # default feature sets
DEF_LIB_SETS = ['mfcc']
DEF_ECHO_SETS = []
# default dataset cache capacity
DEF_CACHE_CAP = 6
# top genres of songs in small dataset
TOP_GENRES = ['Experimental', 'Electronic', 'Rock', 'Instrumental', 'Pop', 'Folk', 'Hip-Hop', 'International']
|
992,072 | 8a42190eb8d73838261aa53d8985db42e12e4093 | import sys
import time
import RPi.GPIO as GPIO
import math
# Use physical BOARD pin numbering for all GPIO calls below.
GPIO.setmode(GPIO.BOARD)
DEFAULT_STEP_PINS = [11,13,15,19]

class MotorController():
    """Half-step driver for a 4-wire stepper motor on Raspberry Pi GPIO.

    Assumes GPIO.setmode(GPIO.BOARD) has already been called; 1024*4
    half-steps are taken as one output revolution -- TODO confirm against
    the motor's gearbox ratio.
    """

    def __init__(self, delaytime=0.001, StepPins=DEFAULT_STEP_PINS):
        """delaytime: pause between steps in seconds; StepPins: the four
        BOARD pin numbers driving the coils."""
        self.time_delay = delaytime
        self.step_pins = StepPins
        for pin in self.step_pins:
            print("Setting up pins..")
            GPIO.setup(pin, GPIO.OUT)
            GPIO.output(pin, False)
        # Half-step sequence: eight coil patterns per electrical cycle.
        self.sequences = [[1,0,0,1],
                          [1,0,0,0],
                          [1,1,0,0],
                          [0,1,0,0],
                          [0,1,1,0],
                          [0,0,1,0],
                          [0,0,1,1],
                          [0,0,0,1]]
        self.step_count = len(self.sequences)
        self.step_dir = 1     # +1 or -1
        self.step_amount = 1  # 1 = half-step, 2 = full-step
        self.counter = 0      # current index into self.sequences
        self.position = 0     # logical position (revolutions)

    def resetPosition(self):
        """Declare the current physical position to be zero."""
        self.position = 0

    def changeDirection(self, direction=None):
        """Toggle the step direction, or set it explicitly (+1/-1)."""
        if direction is None:  # `is None` instead of the original `== None`
            if self.step_dir == 1:
                self.step_dir = -1
            else:
                self.step_dir = 1
        else:
            self.step_dir = direction

    def getNextSequence(self):
        """Advance the phase counter and return the next coil pattern."""
        self.counter += self.step_dir * self.step_amount
        # Wrap the counter into [0, step_count).
        if self.counter >= self.step_count:
            self.counter = self.counter % self.step_count
        if self.counter < 0:
            self.counter += self.step_count
        return self.sequences[self.counter]

    def step(self, new_seq=None):
        """Energize the coils with new_seq (or the next pattern) and wait."""
        if new_seq is None:  # `is None`: the original `== None` happened to
            new_seq = self.getNextSequence()  # work but is fragile
        for pin in range(0, 4):
            xpin = self.step_pins[pin]
            GPIO.output(xpin, new_seq[pin] != 0)
        time.sleep(self.time_delay)

    def turnOff(self):
        """De-energize all coils so the motor can spin freely."""
        self.step([0, 0, 0, 0])

    def turnDegrees(self, deg):
        """Rotate by *deg* degrees; the sign of deg gives the direction.

        BUG FIX: the original computed numsteps from the signed angle, so a
        negative *deg* produced a negative step count, range() was empty,
        and negative turns silently did nothing.  The magnitude is used now.
        """
        self.step_dir = -1 if deg < 0 else 1
        fullrev = 1024 * 4 / self.step_amount  # steps per output revolution
        numsteps = math.ceil(fullrev * abs(deg) / 360)
        for i in range(numsteps):
            self.step()
        self.turnOff()

    def turnToPosition(self, pos):
        """Rotate to absolute position *pos* (revolutions) and record it."""
        diffpos = pos - self.position
        if diffpos < 0:
            self.step_dir = -1
            diffpos = -diffpos
        else:
            self.step_dir = 1
        fullrev = 1024 * 4 / self.step_amount
        numsteps = math.ceil(fullrev * diffpos)
        for i in range(numsteps):
            self.step()
        self.turnOff()
        self.position = pos
if __name__ == '__main__':
    # Hardware demo: sweep the motor through a few absolute positions
    # (in revolutions), pausing between moves.
    mc = MotorController(0.001)
    mc.turnOff()
    mc.step_amount = 1
    mc.turnToPosition(0.2)
    time.sleep(1)
    mc.turnToPosition(0.1)
    time.sleep(1)
    mc.turnToPosition(0.3)
    time.sleep(1)
    mc.turnToPosition(0)
992,073 | 5b86d992421c3fbad8b93959b77f6f65b4afed80 | __author__ = 'lanx'
import regression
import numpy as np
import matplotlib.pyplot as plt
def loadDataSet(fileName):
    """Parse a tab-separated numeric data file into features and labels.

    Each line holds float columns; all but the last are features and the
    last is the label.  Returns (dataMat, labelMat) where dataMat is a list
    of feature lists and labelMat a list of floats.

    Improvements over the original: the file was opened twice (once just to
    count columns) and neither handle was ever closed -- a resource leak.
    It is now opened once under a `with` block, and blank lines are skipped
    instead of crashing float().
    """
    dataMat = []
    labelMat = []
    with open(fileName) as fr:
        for line in fr:
            curLine = line.strip().split('\t')
            if curLine == ['']:
                continue  # tolerate blank lines
            values = [float(tok) for tok in curLine]
            dataMat.append(values[:-1])
            labelMat.append(values[-1])
    return dataMat, labelMat
# Fit the example data with the closed-form and gradient regressions and
# compare them (Python 2 print syntax, matching the rest of this script).
x, y = loadDataSet('ex0.txt')
ws = regression.standRegres(x, y)
ws2 = regression.gradRegres(x, y)
#ws3 = regression.gradRegressMatrix(x, y)
print ws
print ws2
#print ws3
xArr = np.asarray(x)
yArr = np.asarray(y)
yHat = xArr * ws
# Correlation between the fitted values and the observed labels.
print np.corrcoef(yHat.T, yArr)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(xArr[:,1], yArr)
# Sort the x values so the regression line is drawn left to right.
xCopy = xArr.copy()
xCopy.sort(0)
yHat = xCopy * ws
ax.plot(xCopy[:,1],yHat)
plt.show()
992,074 | 1419ace98160dee4aa8956e363e9a2472c991739 | # Copyright (C) 2017 Daniel Watkins <daniel@daniel-watkins.co.uk>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from configparser import ConfigParser
from unittest import mock
from jenkins_job_linter import _filter_config
from jenkins_job_linter.linters import Linter, LintResult
# Infinite supply of unique linter names: "linter-0", "linter-1", ...
NAMES = ('linter-{}'.format(num) for num in itertools.count())
def create_linter_mock(check_result=LintResult.PASS, check_msg=None,
                       default_config=None, **kwargs):
    """Build an autospecced Linter mock whose check() yields the given result.

    Returns (mock, leftover_kwargs) so callers can apply extra attributes.
    """
    mocked = mock.create_autospec(Linter)
    mocked.return_value.check.return_value = (check_result, check_msg)
    mocked.default_config = default_config if default_config else {}
    return mocked, kwargs
def create_mock_for_class(cls, **kwargs):
    """Autospec *cls*, routing special classes through dedicated factories.

    Any kwargs not consumed by a factory are set as attributes on the mock.
    """
    factories = {
        Linter: create_linter_mock,
    }
    if cls in factories:
        created_mock, kwargs = factories[cls](**kwargs)
    else:
        created_mock = mock.create_autospec(cls)
    for attr_name, attr_value in kwargs.items():
        setattr(created_mock, attr_name, attr_value)
    return created_mock
def get_config():
    """Return an empty ConfigParser run through the project's config filter."""
    return _filter_config(ConfigParser())
def mock_LINTERS(mocker, linter_mocks):
    """Patch the LINTERS registry (in both modules) with the given mocks.

    Names come from the module-level NAMES generator; returns the patched
    registry dict.
    """
    registry = dict(zip(NAMES, linter_mocks))
    for target in ('jenkins_job_linter.LINTERS',
                   'jenkins_job_linter.config.LINTERS'):
        mocker.patch(target, registry)
    return registry
|
992,075 | 51eae6e001aa5fac5437402b75eb6bd416055653 | from __future__ import print_function
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import CleanData
import StudentConfidence
import OfficeHourRequests
import ConfidenceAndExtraResources
import ConfidenceAndSmithVideos
import StudentStudyHoursOverTime
import OfficeHourRequests
import AnythingElse
# OAuth scopes required to read the survey spreadsheet via Google APIs.
scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']
# NOTE(review): requires a service-account key file in the working directory.
creds = ServiceAccountCredentials.from_json_keyfile_name('ClassSurveyCredentials.json', scope)
client = gspread.authorize(creds)
sheet = client.open('Class Survey 2020 (Responses)').sheet1
# Pull every row of the first worksheet into a DataFrame.
surveyResults = pd.DataFrame(sheet.get_all_records())
#Cleaning data to make it easier to read
CleanData.change_column_names(surveyResults)
################################################################################
#Getting charts for student confidence (uncomment the call below to generate)
classes = ["2A Integrated Math 3", "3-5AIntegrated Math 3", "6-8A Integrated Math 3", "9A Integrated Math 3", "10A Algebra 3/Trigonometry"]
#StudentConfidence.class_confidence_chart(surveyResults, classes)
################################################################################
#Aggregating data for students requesting office hours
#students_requesting_office_hours = surveyResults['set_meeting'] == 'Yes'
#print(surveyResults[students_requesting_office_hours]['name'] + " " + surveyResults[students_requesting_office_hours]['meeting_time'])
################################################################################
#Getting chart for comparing student confidence to watching extra videos and resources
#ConfidenceAndExtraResources.confidence_and_extra_resources_chart(surveyResults)
################################################################################
#Getting chart for comparing student confidence to watching videos by the teacher
#ConfidenceAndSmithVideos.confidence_and_smith_videos_chart(surveyResults)
################################################################################
#Getting chart for a specific student and their study hours over time
#StudentStudyHoursOverTime.student_study_hours_over_time(surveyResults, "Smith, Krista")
################################################################################
#Getting list of students requesting office hour meeting. The return is a list of tuples
#office_hour_requests = OfficeHourRequests.get_office_hour_requests(surveyResults)
################################################################################
#Getting short answers to anything else on the students' minds. Returns a list of tuples
#that contains the name of the student and their response.
#students_other_thoughts = AnythingElse.get_students_other_thoughts(surveyResults)
# Display whichever charts were generated above (no-op if none were).
plt.show()
992,076 | ee49494a0078a5de7b4f909970b2909416cd2b56 | import requests
from bs4 import BeautifulSoup
url = "https://www.ptt.cc/bbs/joke/index.html"
for i in range(10): # walk 10 index pages, starting from the newest
    r = requests.get(url)
    soup = BeautifulSoup(r.text,"html.parser")
    sel = soup.select("div.title a") # post-title links on this page (deleted posts have no <a> and are skipped)
    u = soup.select("div.btn-group.btn-group-paging a") # paging buttons: oldest / prev / next / newest
    print ("本頁的URL為"+url)
    url = "https://www.ptt.cc"+ u[1]["href"] # u[1] is the "previous page" button; hrefs are site-relative
    for s in sel: # print each post's href and title text
        print(s["href"],s.text)
|
992,077 | 7a9fa1d3cad09d207f33686994a09466f07ff0f1 | import unittest
from os.path import join, dirname, abspath
import sys
root_directory = abspath(join(dirname(__file__), '..', '..', '..', 'src'))
sys.path += [root_directory]
class TestSortingMethods(unittest.TestCase):
    """Compare the pipeline's top_cost_drug output against the reference answer.

    Paths are resolved relative to this test file's location on disk.
    """

    data_folder = abspath(join(dirname(__file__), '..', '..', '..', 'output'))
    file_to_open = join(data_folder, "top_cost_drug.txt")
    answer_folder = join(dirname(dirname(abspath(__file__))), "test_2", "output")
    answer_file_to_open = join(answer_folder, "top_cost_drug.txt")

    def test_first_line(self):
        """The first data line (after the header) must match the reference."""
        def second_line(path):
            # Skip the header row, return the first data row.
            with open(path) as handle:
                handle.readline()
                return handle.readline()

        self.assertEqual(second_line(self.file_to_open).strip(),
                         second_line(self.answer_file_to_open).strip())
if __name__ == '__main__':
unittest.main()
|
992,078 | b6ac333c0a131069ba104d58f16be4e66ddf58be | import os
import gensim, logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
class Corpus(object):
    """Iterable over a tokenised-sentence text file for Word2Vec training.

    Each iteration pass re-opens the file and yields one whitespace-split
    token list per line, skipping Wikipedia-extractor '<doc id=' header lines.
    """

    def __init__(self, filename):
        # Only the path is stored; the file is opened lazily in __iter__.
        self.filename = filename

    def __iter__(self):
        # BUG FIX: the original opened the file inline and never closed it,
        # leaking a handle per pass until garbage collection. A context
        # manager guarantees the handle is released.
        with open(self.filename, encoding="utf8") as handle:
            for line in handle:
                if not line.startswith("<doc id="):
                    yield line.split()
# Stream of pre-tokenised Wikipedia sentences (one sentence per line).
# NOTE(review): hard-coded local Windows path — adjust per machine.
wikiSentences = Corpus('C://Users/Easwaran/Desktop/wikipedia_sentences_tokenised.txt')
# Train a 500-dimensional word2vec model; hs=0 with negative=10 selects
# negative sampling, sample=1e-5 aggressively downsamples frequent words,
# and words seen fewer than 10 times are dropped.
model = gensim.models.Word2Vec(wikiSentences, size = 500, window = 10, negative = 10, hs = 0, sample = 1e-5, iter = 3, min_count = 10)
992,079 | f6f93b7015142f25bc3f8812c5f32bb00358312a |
class Parent(object):
    """Demo base class whose f() is wrapped by a tracing decorator and
    raises NotImplementedError, marking it as abstract."""

    def __init__(self):
        pass

    def deco(fun):
        """Wrap *fun* with entry/exit tracing prints."""
        def wrapper(self, value):
            print("Calling decorated function...")
            result = fun(self, value)
            print("Exit...")
            return result
        return wrapper

    @deco
    def f(self, x):
        # Abstract: the tracing banner prints, then this raises before "Exit...".
        raise NotImplementedError("Bla")
class Child(Parent):
    # Overriding f() replaces the *decorated* Parent.f entirely — decorators
    # are not inherited by overrides, so no tracing output happens here.
    def f(self, x):
        # Remember the last input on the instance; return its successor.
        self.x = x
        return x+1
if __name__ == "__main__":
    c = Child()
    p = Parent()
    print(c.f(1))  # undecorated override: prints 2, no tracing lines
    # Parent.f is decorated and abstract: this prints the tracing banner and
    # then raises NotImplementedError — the demo deliberately ends in a traceback.
    print(p.f(1))
992,080 | ccc7662af76961d613d4391babacc103d2cbc19b | #! /usr/bin/python3
print("content-type: text/html")
print()
import cgi
import subprocess as sp
# CGI form fields selecting the docker operation and its arguments.
# SECURITY NOTE(review): every field below is untrusted browser input and is
# later interpolated into shell commands executed via sudo — this is a
# command-injection vulnerability. Values should be validated or escaped
# (shlex.quote), or the commands built as argument lists without a shell.
db = cgi.FieldStorage()
choice = db.getvalue("choice")
image = db.getvalue("image")
tag = db.getvalue("tag")
container = db.getvalue("container")
port = db.getvalue("port")
state = db.getvalue("state")
output=""
# Dispatch on the requested menu choice; each branch shells out to docker
# and collects the command's combined stdout/stderr for the HTML response.
if choice =="1" :
    # Install docker if the CLI is not already present (exit status 0).
    grap = sp.getstatusoutput("sudo docker")
    if grap[0]==0:
        output="docker already installed"
    elif grap[0]==1:
        output1=sp.getoutput("sudo yum install docker-ce --nobest -y")
        output2=sp.getoutput("sudo systemctl start docker")
        output = "{} \n\n {}".format(output1,output2)
    else:
        output = "Something Went Wrong"
elif choice == "2":
    # start/stop/restart the docker service ("state" comes from the form)
    output = sp.getoutput("sudo systemctl {} docker".format(state))
    output = output + "\n\n Docker Service {}d".format(state)
elif choice == "3":
    output = sp.getoutput("sudo systemctl status docker")
elif choice == "4":
    output= sp.getoutput("sudo docker image ls")
elif choice == "5":
    output = sp.getoutput("sudo docker ps -a")
elif choice == "6":
    output = sp.getoutput("sudo docker ps")
elif choice == "7":
    output = sp.getoutput("sudo docker pull {}:{}".format(image,tag))
elif choice == "8":
    output = sp.getoutput("sudo docker run -dit --name {} {}:{}" .format(container,image,tag))
elif choice == "9":
    # start/stop/restart a single container
    output = sp.getoutput("sudo docker {} {}".format(state,container))
elif choice == "10":
    output = sp.getoutput("sudo docker rmi {}:{}".format(image,tag))
elif choice == "11":
    output = sp.getoutput("sudo docker rm {}".format(container))
elif choice == "12":
    # Same systemctl call as choice "2" but reports without appending output.
    output = sp.getoutput("sudo systemctl {} docker".format(state))
    output = "Docker Service {}".format(state)
else:
    output = "Something went Wrong..."
print("""<style>
body{
background-color:90EE90;
text-align:center;
justify-content:center;
}
pre{
font-size: 20px;
color:DC143C;
font-weight: bold;
padding -top:0px
}
h1{
color : DarkGreen;
padding-bottom:0px;
}
</style>""")
print("""
<body>
<pre>
<h1 style = "">****************************</h1>
{}
</pre>
</body>
""".format(output))
|
992,081 | 4ba70139154009d6e137fd5babf1e62f52bfdd41 | import os
import glob
#os.chdir('/home/jacob/Desktop/S.AureusCOL_seq/')
#alignment_files = glob.glob('*.aln')
#print alignment_files
#rate_out_files = '/home/jacob/Desktop/RESULT/MODULE2'
#if not os.path.exists(rate_out_files):
#os.makedirs(rate_out_files)
#with open(os.path.join(rate_out_files,alignment_files)) as rateout:
#for aln in alignment_files[:25]:
#print aln
#os.system('rate4site -s aln -o /home/jacob/Desktop/')
def rate4site(alignment_path='/home/jacob/Desktop/S.AureusCOL_seq/SACOL0098.aln',
              output_path='/home/jacob/Desktop/SACOL0098.txt'):
    """Run the rate4site conservation-scoring tool on one alignment file.

    The paths were hard-coded; they are now parameters whose defaults
    preserve the original behavior, so the commented-out batch loop above
    can call this once per .aln file.

    :param alignment_path: input multiple-sequence alignment (.aln)
    :param output_path: destination for the rate4site score table
    """
    os.system('rate4site -s {} -o {}'.format(alignment_path, output_path))

rate4site()
|
992,082 | 6d78607df1208fc3f47880657b0d0b007c2aacab | """
The file can generate a smooth trajectory (a list of waypoint) according to
given initial state and goal state.
The algorithm is mainly based on following publications
"Trajectory Generation For Car-Like Robots Using Cubic Curvature Polynomials",
Nagy and Kelly, 2001
"Adaptive Model Predictive Motion Planning for Navigation in Complex Environments",
Thomas M. Howard, 2009, (chapter 4)
"Motion Planning for Self-Driving-Cars", Coursera 4 of Self-Driving-Car Specialization
on Coursera
The implementation also references from Matthew O'Kelly's C++ implementation.
You can find the source code in Autoware Repository.
https://gitlab.com/autowarefoundation/autoware.ai/core_planning/-/blob/master/lattice_planner/lib/libtraj_gen.cpp
"""
import numpy as np
# for visualization and debugging
import matplotlib.pyplot as plt
import time
import pdb
class TrajectoryGenerator():
    """Cubic curvature-polynomial trajectory generator.

    States are [x, y, theta, kappa] (position, heading, curvature). The
    spiral parameters (a, b, s) are optimized with a Newton-style shooting
    method; c is pinned by the goal curvature. compute_spline() returns a
    list of sampled [x, y] waypoints, or [[-1, -1]] on failure.
    """

    def __init__(self):
        self.is_converge = False
        self.max_iter = 50
        # acceptance thresholds on the terminal-state error
        self.acceptable_dx = 0.01  # m
        self.acceptable_dy = 0.01  # m
        self.acceptable_dtheta = 1 * np.pi/180  # rad
        self.acceptable_dkappa = 1 * np.pi/180  # rad
        # perturbation step for the numerical Jacobian
        self.pertub = 0.0001
        # arc-length spacing between sampled waypoints
        self.sample_resolution = 1  # m

    def _initialize_spline(self, initial_state, final_state):
        """
        Initialize the spline parameters from a heuristic first guess.
        :param final_state: goal of the path [x, y, theta, kappa]
        :return: a, b, c, s: parameters of the spline
        """
        x_f = final_state[0]
        y_f = final_state[1]
        theta_f = final_state[2]  # rad
        kappa_f = final_state[3]  # rad
        kappa_0 = initial_state[3]  # rad
        # heuristic arc-length guess: straight-line distance inflated by heading change
        d = np.sqrt(x_f**2 + y_f**2)
        theta_delta = np.abs(theta_f)
        s = d*(((theta_delta**2) / 5) + 1) + 2*theta_delta/5
        # Initialization method from Nagy and Kelly, 2001:
        # a = 6*theta_f/(s**2) - 2*kappa_0/s + 4*kappa_f/s
        # c = 0
        # b = 3*(kappa_0 + kappa_f)/(s**2) + 6*theta_f/(s**3)
        # Initialization method from Thomas M. Howard, 2009:
        a = 0.0
        b = 0.0
        c = kappa_f
        return a, b, c, s

    def _compute_theta(self, theta, a_p, b_p, c_p, d_p, s):
        """Heading at arc length s: closed-form integral of the cubic curvature."""
        theta_final = theta + d_p*(s**4)/4 + \
                      c_p*(s**3)/3 + b_p*(s**2)/2 + \
                      a_p*s
        return theta_final

    def _compute_x(self, x_0, theta_0, a_p, b_p, c_p, d_p, s):
        """x at arc length s via composite Simpson's rule (8 subintervals)."""
        theta_s_8 = self._compute_theta(theta_0, a_p, b_p, c_p, d_p, s/8.0)
        theta_2s_8 = self._compute_theta(theta_0, a_p, b_p, c_p, d_p, 2*s/8.0)
        theta_3s_8 = self._compute_theta(theta_0, a_p, b_p, c_p, d_p, 3*s/8.0)
        theta_4s_8 = self._compute_theta(theta_0, a_p, b_p, c_p, d_p, 4*s/8.0)
        theta_5s_8 = self._compute_theta(theta_0, a_p, b_p, c_p, d_p, 5*s/8.0)
        theta_6s_8 = self._compute_theta(theta_0, a_p, b_p, c_p, d_p, 6*s/8.0)
        theta_7s_8 = self._compute_theta(theta_0, a_p, b_p, c_p, d_p, 7*s/8.0)
        theta_s = self._compute_theta(theta_0, a_p, b_p, c_p, d_p, s)
        x_final = x_0 + (np.cos(theta_0) + 4*np.cos(theta_s_8) + 2*np.cos(theta_2s_8) + \
                         4*np.cos(theta_3s_8) + 2*np.cos(theta_4s_8) + \
                         4*np.cos(theta_5s_8) + 2*np.cos(theta_6s_8) + \
                         4*np.cos(theta_7s_8) + np.cos(theta_s))* s/24.0
        return x_final

    def _compute_y(self, y_0, theta_0, a_p, b_p, c_p, d_p, s):
        """y at arc length s via composite Simpson's rule (8 subintervals)."""
        theta_s_8 = self._compute_theta(theta_0, a_p, b_p, c_p, d_p, s/8.0)
        theta_2s_8 = self._compute_theta(theta_0, a_p, b_p, c_p, d_p, 2*s/8.0)
        theta_3s_8 = self._compute_theta(theta_0, a_p, b_p, c_p, d_p, 3*s/8.0)
        theta_4s_8 = self._compute_theta(theta_0, a_p, b_p, c_p, d_p, 4*s/8.0)
        theta_5s_8 = self._compute_theta(theta_0, a_p, b_p, c_p, d_p, 5*s/8.0)
        theta_6s_8 = self._compute_theta(theta_0, a_p, b_p, c_p, d_p, 6*s/8.0)
        theta_7s_8 = self._compute_theta(theta_0, a_p, b_p, c_p, d_p, 7*s/8.0)
        theta_s = self._compute_theta(theta_0, a_p, b_p, c_p, d_p, s)
        y_final = y_0 + (np.sin(theta_0) + 4*np.sin(theta_s_8) + 2*np.sin(theta_2s_8) + \
                         4*np.sin(theta_3s_8) + 2*np.sin(theta_4s_8) + \
                         4*np.sin(theta_5s_8) + 2*np.sin(theta_6s_8) + \
                         4*np.sin(theta_7s_8) + np.sin(theta_s))* s/24.0
        return y_final

    def _motion_update_one_shot(self, initial_state, a, b, c, s):
        """
        Predict the final state from the initial state and spline parameters
        using the one-shot method.
        :param initial_state: initial state of the vehicle [x, y, theta, kappa]
        :param a: spline parameter
        :param b: spline parameter
        :param c: spline parameter
        :param s: spline parameter (arc length)
        :return final_state_pred: predicted final state
        """
        x_0 = initial_state[0]
        y_0 = initial_state[1]
        theta = initial_state[2]  # rad
        kappa = initial_state[3]  # rad
        theta_0 = np.copy(theta)
        kappa_0 = np.copy(kappa)
        # remap (kappa_0, a, b, c) knot curvatures to polynomial coefficients
        a_p = kappa_0
        b_p = -(11*kappa_0 - 18*a + 9*b - 2*c)/(2* s)
        c_p = 9*(2*kappa_0 - 5*a + 4*b - c)/(2* s**2)
        d_p = -9*(kappa_0 - 3*a + 3*b -c)/(2* s**3)
        kappa_final = a_p + b_p*s + c_p*(s**2) + d_p*(s**3)
        theta_final = self._compute_theta(theta_0, a_p, b_p, c_p, d_p, s)
        x_final = self._compute_x(x_0, theta_0, a_p, b_p, c_p, d_p, s)
        y_final = self._compute_y(y_0, theta_0, a_p, b_p, c_p, d_p, s)
        final_state_pred = [x_final, y_final, theta_final, kappa_final]
        return final_state_pred

    def _check_converge(self, final_state, final_state_pred):
        """
        Check whether the predicted terminal state is within all tolerances.
        """
        x_diff = float(abs(final_state[0] - final_state_pred[0]))
        y_diff = float(abs(final_state[1] - final_state_pred[1]))
        theta_diff = float(abs(final_state[2] - final_state_pred[2]))
        kappa_diff = float(abs(final_state[3] - final_state_pred[3]))
        converge = (x_diff <= self.acceptable_dx) and \
                   (y_diff <= self.acceptable_dy) and \
                   (theta_diff <= self.acceptable_dtheta) and \
                   (kappa_diff <= self.acceptable_dkappa)
        return converge

    def _compute_correction(self, initial_state, final_state, a, b, c, s):
        """
        Compute the Newton correction of the free spline parameters (a, b, s)
        from a forward-difference numerical Jacobian.
        """
        pertub = self.pertub
        pertub_s = pertub * 10
        pred_no_pertub = self._motion_update_one_shot(initial_state, a, b, c, s)
        pred_pertub_a = self._motion_update_one_shot(initial_state, a +pertub, b, c, s)
        pred_pertub_b = self._motion_update_one_shot(initial_state, a, b +pertub, c, s)
        # no need to correct c: it is constrained by the goal curvature
        pred_pertub_s = self._motion_update_one_shot(initial_state, a, b, c, s +pertub_s)
        d_state = np.zeros((3,1))
        d_pertub_state = np.zeros((3,3))
        Jacobian = np.zeros((3,3))
        for i in range(0, 3):
            d_pertub_state[i][0] = (final_state[i] - pred_pertub_a[i])  # a
            d_pertub_state[i][1] = (final_state[i] - pred_pertub_b[i])  # b
            d_pertub_state[i][2] = (final_state[i] - pred_pertub_s[i])  # s
            d_state[i] = final_state[i] - pred_no_pertub[i]
            Jacobian[i][0] = (d_pertub_state[i][0] - d_state[i])/pertub    # a
            Jacobian[i][1] = (d_pertub_state[i][1] - d_state[i])/pertub    # b
            Jacobian[i][2] = (d_pertub_state[i][2] - d_state[i])/pertub_s  # s
        # pinv is more robust than inv when the Jacobian is near-singular
        inv_Jacobian = np.linalg.pinv(Jacobian)
        correction = np.dot(inv_Jacobian, d_state)
        return correction

    def compute_spline(self, initial_state, final_state):
        """
        Main entry point: optimize a spline between initial_state and
        final_state and return the sampled waypoints.

        The full parameter set is (kappa_0, a, b, c, s), i.e. (p0, p1, p2,
        p3, s) in the papers; kappa_0 and c are fixed by the start and goal
        curvatures, so only (a, b, s) are estimated.

        :return: list of [x, y] waypoints, or [[-1, -1]] when the
                 optimization fails or yields an invalid (negative) length.
        """
        a, b, c, s = self._initialize_spline(initial_state, final_state)
        final_state_pred = self._motion_update_one_shot(initial_state, a, b, c, s)
        converge = self._check_converge(final_state, final_state_pred)
        total_iter = 0
        # FIX: the original tested `converge is not True` combined with
        # bitwise `&`, and subtracted 1-element ndarrays from the scalar
        # parameters, silently turning (a, b, s) into arrays and relying on
        # the deprecated float(ndarray) conversion. Extracting scalars with
        # .item() keeps the parameters as plain floats and the loop logic as
        # ordinary short-circuit booleans; the numeric results are unchanged.
        while total_iter < self.max_iter and not converge:
            correction = self._compute_correction(initial_state, final_state, a, b, c, s)
            a = a - correction[0].item()
            b = b - correction[1].item()
            # c is not updated: it is pinned by the goal curvature
            s = s - correction[2].item()
            final_state_pred = self._motion_update_one_shot(initial_state, a, b, c, s)
            converge = self._check_converge(final_state, final_state_pred)
            total_iter = total_iter + 1
        # The optimizer sometimes converges to a negative arc length, which
        # is invalid; such solutions are rejected.
        if converge and s > 0:
            final_state_pred, point_list = self._path_sampling_one_shot(initial_state, a, b, c, s)
        else:
            point_list = [[-1, -1]]
        return point_list

    def _path_sampling_one_shot(self, initial_state, a, b, c, s):
        """
        Walk along the spline and sample waypoints at a fixed interval.
        :param initial_state: initial state of the vehicle [x, y, theta, kappa]
        :param a: spline parameter
        :param b: spline parameter
        :param c: spline parameter
        :param s: spline parameter (arc length)
        :return: (final_state_pred, point_list)
        """
        x_0 = initial_state[0]
        y_0 = initial_state[1]
        theta_0 = initial_state[2]  # rad
        kappa_0 = initial_state[3]  # rad
        x = np.copy(x_0)
        y = np.copy(y_0)
        theta = np.copy(theta_0)
        kappa = np.copy(kappa_0)
        a_p = kappa_0
        b_p = -(11*kappa_0 - 18*a + 9*b - 2*c)/(2* s)
        c_p = 9*(2*kappa_0 - 5*a + 4*b - c)/(2* s**2)
        d_p = -9*(kappa_0 - 3*a + 3*b -c)/(2* s**3)
        resolution = self.sample_resolution
        # NOTE: int() truncation means the last sampled point can fall up to
        # one resolution short of the exact goal.
        total_sample = int(s / resolution)
        point_list = []
        point_list.append([x_0, y_0])
        for i in range(1, total_sample +1):
            current_s = float(i) * resolution
            kappa = a_p + b_p*current_s + c_p*(current_s**2) + d_p*(current_s**3)
            theta = self._compute_theta(theta_0, a_p, b_p, c_p, d_p, current_s)
            x = self._compute_x(x_0, theta_0, a_p, b_p, c_p, d_p, current_s)
            y = self._compute_y(y_0, theta_0, a_p, b_p, c_p, d_p, current_s)
            point_list.append([x, y])
        final_state_pred = [x, y, theta, kappa]
        return final_state_pred, point_list
def main():
    """
    Trajectory-generation demo: plot one path to a nominal goal, then time
    and plot paths to five laterally offset goals.
    """
    PathGenerator = TrajectoryGenerator()
    ## coordinate frame used throughout:
    #  Y
    #  ^    /
    #  |   /
    #  |  / <theta>
    #  o -- -- -- >X
    x_0 = 0.0  # initial x position
    y_0 = 0.0  # initial y position
    theta_0 = 0.0 *np.pi/180  # initial heading angle of the vehicle
    kappa_0 = 0.0 *np.pi/180  # initial steering angle
    initial_state = [x_0, y_0, theta_0, kappa_0]
    x_f = 13.0  # final x position
    y_f = 8.0  # final y position
    theta_f = 0.0 *np.pi/180  # final heading angle of the vehicle
    kappa_f = 0.0 *np.pi/180  # final steering angle
    final_state = [x_f, y_f, theta_f, kappa_f]
    traject = PathGenerator.compute_spline(initial_state, final_state)
    point_array = np.asarray(traject)
    plt.plot(point_array[:,0], point_array[:,1],'o')
    # Build 5 goals offset laterally from the nominal goal, 0.5 m apart.
    sample_resolution = 0.5
    temp_goal_list = []
    for i in range(-2, 3):
        temp_final_state = np.copy(final_state)
        temp_final_state[1] = temp_final_state[1] + float(i)*sample_resolution
        temp_goal_list.append(temp_final_state)
    # Time the planning of all five trajectories.
    start = time.time()
    point_list = []
    for i in range(0, 5):
        temp_goal = temp_goal_list[i]
        traject = PathGenerator.compute_spline(initial_state, temp_goal)
        point_list.append(traject)
    end = time.time()
    print('Executed time is %f'%(end - start))
    # pdb.set_trace()
    for i in range(0,5):
        point_array = np.asarray(point_list[i])
        plt.plot(point_array[:,0], point_array[:,1],'o')
    plt.axis('equal')
    plt.show()
if __name__ == "__main__":
    main()
992,083 | aa49b57be62ae2b9b5b4128b9b43584390b6c5ad | #!/usr/bin/python
#-*- coding: utf-8 -*-
from lda import Collection, Dictionary, Model, Info, Viewer, utils, Word2Vec, ImagePlotter
from lda.docLoader import loadCategories
from gensim.parsing.preprocessing import STOPWORDS
from nltk.corpus import names
from gensim.models import TfidfModel
import os.path
from lda import dataframeUtils as df
import csv
import pandas as pd
import pdb
def TopicModeling_ICAAD():
    """Run the full ICAAD LDA topic-modeling pipeline end to end:
    load/preprocess the document collection, build a dictionary and TF-IDF
    corpus, fit an LDA model, compute similarities and keyword statistics,
    then interactively select topics and evaluate SA/DV case predictions.

    Python 2 script: uses print statements and (trusted) interactive input().
    """
    info = Info()
    # Categories and Keywords
    info.categories = loadCategories('Documents/categories.txt')[0] #0 -human rights categories 1 - Scientific Paper categories
    keywordFile = 'Documents/ICAAD/CategoryLists.csv'
    keywords_df = pd.read_csv(keywordFile).astype(str)
    keywords = list(df.toListMultiColumns(keywords_df, keywords_df.columns))
    #### PARAMETERS ####
    word2vec = Word2Vec()
    info.data = 'ICAAD' # 'ICAAD' 'NIPS' 'scifibooks' 'HRC'
    # Preprocessing #
    info.preprocess = 0
    info.startDoc = 0
    info.numberDoc= None
    info.specialChars = set(u'''[,\.\'\`=\":\\\/_+]''')
    info.includeEntities = 0
    info.bigrams = 1
    numbers = [str(nr) for nr in range(0,500)]
    info.whiteList= word2vec.net.vocab.keys() + numbers + keywords
    # NOTE(review): the next line immediately overwrites this stoplist.
    info.stoplist = list(STOPWORDS) + utils.lowerList(names.words())
    info.stoplist = [x.strip() for x in open('stopwords/english.txt')]
    info.removeNames = 1
    # Dictionary #
    info.analyseDictionary = 0
    info.lowerFilter = 8 # in Documents
    info.upperFilter = 0.25 # in percent
    # LDA #
    info.modelType = 'LDA' # 'LDA' 'LSI'
    info.numberTopics = 20
    info.tfidf = 0
    info.passes = 20
    info.iterations = 70
    info.online = 0
    info.chunksize = 4100
    info.multicore = 0
    info.setup()
    #### EVALUATION ####
    # Ground-truth labels per filename (SA/DV manual annotations).
    evaluationFile = 'Documents/PACI.csv'
    dataFeatures = pd.read_csv(evaluationFile)
    filenames = dataFeatures['Filename'].tolist()
    filenames = [name.replace('.txt', '') for name in filenames]
    dataFeatures['Filename'] = filenames
    dataFeatures = dataFeatures.rename(columns = {'Unnamed: 0': 'id'})
    #### MODEL ####
    collection = Collection()
    html = Viewer(info)
    #pdb.set_trace()
    # Preprocess from scratch, or reload a previously saved collection.
    if not os.path.exists(info.collectionName) or info.preprocess:
        print 'Load and preprocess Document Collection'
        collection.load(info.path, info.fileType, info.startDoc, info.numberDoc)
        collection.setDocNumber()
        for doc in collection.documents:
            doc.title = doc.title.replace('.rtf.txt', '')
            features = dataFeatures[dataFeatures['Filename']==doc.title]
            doc.id = df.getValue(features, 'id')
            doc.SA = df.getValue(features, 'Sexual.Assault.Manual')
            doc.DV = df.getValue(features, 'Domestic.Violence.Manual')
            doc.extractYear()
            doc.extractCourt()
        collection.prepareDocumentCollection(lemmatize=True, includeEntities=info.includeEntities, stopwords=info.stoplist, removeShortTokens=True, threshold=2, specialChars=info.specialChars, whiteList=info.whiteList, bigrams=info.bigrams)
        collection.saveDocumentCollection(info.collectionName)
    else:
        print 'Load Processed Document Collection'
        collection.loadPreprocessedCollection(info.collectionName)
    #pdb.set_trace()
    collection.documents = collection.documents[20:1000]
    print 'Create Dictionary'
    dictionary = Dictionary(info.stoplist)
    dictionary.addCollection(collection.documents)
    if info.analyseDictionary:
        # NOTE(review): bare string literal — has no effect; probably meant
        # to be printed.
        'Analyse Word Frequency'
        collectionLength = collection.number
        dictionary.analyseWordFrequencies(info, html, collectionLength)
    print 'Filter extremes'
    dictionary.filter_extremes(info.lowerFilter, info.upperFilter, keywords)
    if info.analyseDictionary:
        dictionary.plotWordDistribution(info)
    print 'Create Corpus'
    corpus = collection.createCorpus(dictionary)
    print 'TF_IDF Model'
    tfidf = TfidfModel(corpus, normalize=True)
    # NOTE(review): the info.tfidf flag is not consulted here — this appears
    # to apply the TF-IDF transform unconditionally.
    if tfidf:
        corpus = tfidf[corpus]
    print 'Topic Modeling - LDA'
    lda = Model(info)
    lda.createModel(corpus, dictionary.ids, info)
    lda.createTopics(info)
    print 'Topic Coverage'
    topicCoverage = lda.model[corpus]
    print 'Get Documents related to Topics'
    lda.getTopicRelatedDocuments(topicCoverage, info)
    print 'Similarity Analysis'
    lda.computeSimilarityMatrix(corpus, numFeatures=info.numberTopics, num_best = 7)
    # Per-document statistics: topic coverage, similarity, relevant words,
    # and keyword-category frequencies.
    maxTopicCoverage = []
    for document in collection.documents:
        docTopicCoverage = topicCoverage[document.nr]
        document.setTopicCoverage(docTopicCoverage, lda.name)
        lda.computeSimilarity(document)
        collection.computeRelevantWords(tfidf, dictionary, document)
        maxTopicCoverage.append(document.LDACoverage[0][1])
        document.createTokenCounter()
        for category in keywords_df.columns.tolist():
            wordsInCategory = df.getColumn(keywords_df, category)
            keywordFrequency = document.countOccurance(wordsInCategory)
            document.entities.addEntities(category, utils.sortTupleList(keywordFrequency))
        document.mostFrequentEntities = document.entities.getMostFrequent(5)
    #pdb.set_trace()
    ImagePlotter.plotHistogram(maxTopicCoverage, 'Maximal Topic Coverage', 'html/' + info.data+'_'+info.identifier+'/Images/maxTopicCoverage.jpg', 'Maximal LDA Coverage', 'Number of Docs', log=1)
    print 'Create HTML Files'
    html.htmlDictionary(dictionary)
    html.printTopics(lda)
    # Interactive step: the operator assigns topics to categories by hand.
    info.SATopics = input('Sexual Assault Topics:')
    info.DVTopics = input('Domestic Violence Topics:')
    info.otherTopics = input('Other Topics: ')
    selectedTopics = info.SATopics + info.DVTopics + info.otherTopics
    info.SAthreshold = 0.2
    info.DVthreshold = 0.2
    for doc in collection.documents:
        doc.predictCases('SA', info, info.SAthreshold)
        doc.tagPrediction('SA')
        doc.predictCases('DV', info, info.DVthreshold)
        doc.tagPrediction('DV')
    # Evaluate predictions against the manual SA/DV labels.
    SAevaluation = collection.evaluate('SA')
    collection.getConfusionDocuments('SA')
    html.results(SAevaluation, collection, info)
    DVevaluation = collection.evaluate('DV')
    collection.getConfusionDocuments('DV')
    html.results(DVevaluation, collection, info)
    html.printDocuments(collection.documents, lda)
    html.printDocsRelatedTopics(lda, collection.documents, openHtml=False)
    html.documentOverview(collection.documents)
    print('Write Feature File')
    #collection.writeDocumentFeatureFile(info, selectedTopics, keywords)
    info.saveToFile()
if __name__ == "__main__":
    TopicModeling_ICAAD()
|
992,084 | 6c08d139595331a49bed9f3a5a1702898af3e6ff |
# coding: utf-8
# # yangs - Yet Another NonoGram Solver
#
#
#
# ## Aim
#
# Solve the first part of the GCHQ Christmas puzzle using a concise, easy-to-understand Python program.
#
# ## Method
#
# The [puzzle](http://www.gchq.gov.uk/press_and_media/news_and_features/Pages/Directors-Christmas-puzzle-2015.aspx) is a 25x25 [nonogram](https://en.wikipedia.org/wiki/Nonogram). I decided to write a program that uses the same method that I would use if solving the puzzle by hand:
#
# 1. For each row and column, **generate the set of patterns** that match the given clues
# 2. For each row, **eliminate the patterns** that don't match the cells we already know
# 3. For each row, **deduce the known cells** based on the reduced pattern set
# 4. For each column, **eliminate the patterns** that don't match the cells we already know
# 5. For each column, **deduce the known cells** based on the reduced pattern set
# 6. Go back to 2. and repeat, unless we've solved it
# ## Imports and settings
# In[1]:
from itertools import combinations_with_replacement
SHOW_TESTS = True
# ## Input data
#
# Put all the clues into a list of lists:
# In[2]:
ROW_CLUES = [
[7, 3, 1, 1, 7,],
[1, 1, 2, 2, 1, 1,],
[1, 3, 1, 3, 1, 1, 3, 1,],
[1, 3, 1, 1, 6, 1, 3, 1,],
[1, 3, 1, 5, 2, 1, 3, 1,],
[1, 1, 2, 1, 1,],
[7, 1, 1, 1, 1, 1, 7,],
[3, 3,],
[1, 2, 3, 1, 1, 3, 1, 1, 2,],
[1, 1, 3, 2, 1, 1,],
[4, 1, 4, 2, 1, 2,],
[1, 1, 1, 1, 1, 4, 1, 3,],
[2, 1, 1, 1, 2, 5,],
[3, 2, 2, 6, 3, 1,],
[1, 9, 1, 1, 2, 1,],
[2, 1, 2, 2, 3, 1,],
[3, 1, 1, 1, 1, 5, 1,],
[1, 2, 2, 5,],
[7, 1, 2, 1, 1, 1, 3,],
[1, 1, 2, 1, 2, 2, 1,],
[1, 3, 1, 4, 5, 1,],
[1, 3, 1, 3, 10, 2,],
[1, 3, 1, 1, 6, 6,],
[1, 1, 2, 1, 1, 2,],
[7, 2, 1, 2, 5,],
]
COL_CLUES = [
[7, 2, 1, 1, 7,],
[1, 1, 2, 2, 1, 1,],
[1, 3, 1, 3, 1, 3, 1, 3, 1,],
[1, 3, 1, 1, 5, 1, 3, 1,],
[1, 3, 1, 1, 4, 1, 3, 1,],
[1, 1, 1, 2, 1, 1,],
[7, 1, 1, 1, 1, 1, 7,],
[1, 1, 3,],
[2, 1, 2, 1, 8, 2, 1,],
[2, 2, 1, 2, 1, 1, 1, 2,],
[1, 7, 3, 2, 1,],
[1, 2, 3, 1, 1, 1, 1, 1,],
[4, 1, 1, 2, 6,],
[3, 3, 1, 1, 1, 3, 1,],
[1, 2, 5, 2, 2,],
[2, 2, 1, 1, 1, 1, 1, 2, 1,],
[1, 3, 3, 2, 1, 8, 1,],
[6, 2, 1,],
[7, 1, 4, 1, 1, 3,],
[1, 1, 1, 1, 4,],
[1, 3, 1, 3, 7, 1,],
[1, 3, 1, 1, 1, 2, 1, 1, 4,],
[1, 3, 1, 4, 3, 3,],
[1, 1, 2, 2, 2, 6, 1,],
[7, 1, 3, 2, 1, 1,],
]
ROW_LEN = len(COL_CLUES)
COL_LEN = len(ROW_CLUES)
# Set up a grid to contain the result, and fill in the cells we already know.
#
# Notes:
# - I'm using 1 to represent a known filled cell, 0 for a known blank cell, and None for an unknown cell.
# - The rows must be repeated using list comprehension. Using list repetition **(i.e. result = [[None] \* ROW_LEN] * COL_LEN)** would result in a list of references to the same row list.
# In[3]:
# Create blank results grid
result = [[None] * ROW_LEN for c in range(COL_LEN)]
# Fill in the cells we already know
known_coords = {3: (3, 4, 12, 13, 21),
8: (6, 7, 10, 14, 15, 18),
16: (6, 11, 16, 20),
21: (3, 4, 9, 10, 15, 20, 21)
}
for row, cols in known_coords.items():
for col in cols:
result[row][col] = 1
# ## Helper functions
#
# Define some functions to display the results:
# In[4]:
# Display glyphs: unknown cell, filled cell, blank cell.
SYMBOLS = {None: '?', 1:'#', 0:'.'}
def print_row(row, i=0):
    # Render one row as "idx [sym sym ...]" (Python 2 print statement).
    print '%s [%s]' % (str(i).rjust(2), ' '.join(SYMBOLS[cell] for cell in row))
def print_grid(rows):
    # Two header lines show each column index vertically (tens over units),
    # then every row is printed with its index.
    col_labels = [str(i).rjust(2) for i in range (len(rows[0]))]
    print ' ', ' '.join(cl[0] for cl in col_labels)
    print ' ', ' '.join(cl[1] for cl in col_labels)
    for i, row in enumerate(rows):
        print_row(row, i)
# Test
if SHOW_TESTS:
print_grid(result)
# And a function to count the number of unknown cells in the results. We'll use this to check if we've solved the puzzle, or if we're not making progress.
# In[5]:
def count_unknowns(rows):
    """Total number of still-undecided (None) cells across all rows."""
    return sum(1 for row in rows for cell in row if cell is None)
# Test
if SHOW_TESTS:
print 'Unknowns = %s' % count_unknowns(result)
# ## Generate sets of patterns
#
# Terminology:
# - Block = one or more consecutive filled cells
# - Space = an unfilled cell
# - Clue = the lengths of each block
# - Pattern = a series of cells that match the clue
#
# The **generate_patterns** function does step 1 of the general approach: give it the clue and the pattern length, and it generates all the possible patterns.
#
# For the clue [7, 3, 1, 1, 7] and length = 25:
#
# - There are 5 blocks, comprising 7 + 3 + 1 + 1 + 7 = 19 filled cells.
# - There are 6 **positions** where spaces can be inserted (numbered 0 through 5)
# - The middle 4 positions all must have at least one space - so there are 4 **fixed spaces**
# - That means there are 25 - 19 - 4 = 2 **movable spaces**, which can go in any permutation of the 6 positions
# - Therefore, there are 6CR2 = 21 different patterns that match this clue
#
#
# | position | 0 | 1 | 1 | 2 | 2 | 3 | 3 | 4 | 4 | 5 | 5 |
# |-
# | blocks and fixed spaces | | 1111111 | 0 | 111 | 0 | 1 | 0 | 1 | 0 | 1111111 | |
# | movable spaces (any 2 of 6 positions) | ? | | ? | | ? | | ? | | ? | | ? |
#
#
# In[6]:
def generate_patterns(clue, length):
    """Yield every cell pattern (list of 1s and 0s) of *length* matching *clue*.

    Each pattern is built from the clue's blocks plus the mandatory single
    blank between adjacent blocks; the remaining "spare" blanks are
    distributed over every possible gap position (multiset combinations).
    """
    blocks = [0] + clue
    # Interior gaps must hold at least one blank; the two ends may be empty.
    fixed = [0] + [1] * (len(clue) - 1) + [0]
    slots = range(len(blocks))
    spare = length - sum(clue) - len(clue) + 1
    for chosen in combinations_with_replacement(slots, spare):
        pattern = []
        for slot, block, gap in zip(slots, blocks, fixed):
            pattern += [1] * block + [0] * (gap + chosen.count(slot))
        yield pattern
# Test
if SHOW_TESTS:
test_pats = list(generate_patterns([7, 3, 1, 1, 7], 25))
print_grid(test_pats)
# ## Eliminate patterns
#
# The **eliminate_patterns** function does steps 2 and 4 of the general approach: give it a list of patterns, and it will eliminate the patterns that don't match the already-known cells.
#
# Notes:
# - The set of patterns is iterated in reverse order so that deleting an item won't change the indices of the items yet to be iterated.
# - The **any** function will short circuit as soon as it finds a cell in the pattern that conflicts with the known cells
# - I've used **del patterns[i]** instead of **patterns.pop(i)** because we don't need to do anything with the item after it's removed from the list
#
#
# In[7]:
def eliminate_patterns(patterns, knowns):
    """Drop, in place, every pattern that conflicts with the known cells."""
    # Iterate backwards so deletions never shift indices still to be visited.
    for idx in range(len(patterns) - 1, -1, -1):
        conflict = any(known is not None and known != cell
                       for cell, known in zip(patterns[idx], knowns))
        if conflict:
            del patterns[idx]
# Test
if SHOW_TESTS:
print 'Before:'
print_grid(test_pats)
print
print 'Apply Knowns:'
test_knowns = [1] + [None]*23 + [1]
print_row(test_knowns)
print
print 'After:'
eliminate_patterns(test_pats, test_knowns)
print_grid(test_pats)
# ## Deduce known cells
#
# The **generate_knowns** function does steps 3 and 5 of the general approach: give it a set of patterns, and it will deduce which cells are now known.
#
# For each cell, if all the patterns have the same value for each cell, then that is the only possible value for that cell.
#
# Notes:
# - **zip(*patterns)** transposes the pattern set so that we check one column at a time instead of one row a time
# - the **all** function will short circuit as soon as it finds a cell that doesn't match the cell in first pattern
# In[8]:
def generate_knowns(patterns):
    """Yield one value per column: the common cell value when every pattern
    agrees on that column, otherwise None (still unknown)."""
    for column in zip(*patterns):
        first = column[0]
        if all(cell == first for cell in column):
            yield first
        else:
            yield None
# Test
# Notebook cell: deduce the cells on which every surviving pattern agrees.
if SHOW_TESTS:
    print 'Patterns:'
    print_grid(test_pats)
    print
    print 'Knowns:'
    test_knowns = list(generate_knowns(test_pats))
    print_row(test_knowns)
# ## Solve it
#
# Do step 1: generate sets of patterns that match the clues given for each row and column.
# In[9]:
# Step 1: enumerate every pattern compatible with each row/column clue.
rows_valid_patterns = [list(generate_patterns(c, ROW_LEN)) for c in ROW_CLUES]
cols_valid_patterns = [list(generate_patterns(c, COL_LEN)) for c in COL_CLUES]
# Calculate how many unknowns there are before we start trying to solve:
# In[10]:
unknowns = [count_unknowns(result)]
# Repeat through steps 2-5 until either:
# - There are no more unknowns i.e. we've solved it
# - The number of unknowns hasn't decreased since the last pass i.e. we've solved as much as possible
#
# Notes:
# - **map** is used to apply **eliminate_patterns** and **generate_knowns** to multiple rows/columns
# - **zip(\*)** is used to transpose the **results** grid when working on columns, and transpose it back again before working on rows
# In[11]:
# NOTE(review): this cell relies on Python 2 semantics — `map` returning a
# list and `print` statements. Under Python 3 the `map`/`zip` results would be
# lazy iterators and the in-place elimination side effects would be lost.
while len(unknowns) < 2 or unknowns[-1] not in (0, unknowns[-2]):
    map(eliminate_patterns, rows_valid_patterns, result)
    result = map(generate_knowns, rows_valid_patterns)
    map(eliminate_patterns, cols_valid_patterns, zip(*result))
    result = zip(*map(generate_knowns, cols_valid_patterns))
    unknowns.append(count_unknowns(result))
# Display the results:
# In[12]:
for i, u in enumerate(unknowns):
    print 'pass %s: %s unknowns' % (i, u)
print_grid(result)
# In[ ]:
|
992,085 | f73b5f34d05792638ffda6d679b73664fd170dd2 | class Solution(object):
def plusOne(self, digits):
"""
:type digits: List[int]
:rtype: List[int]
"""
num=0
k=len(digits)
for i in range(k):
num+=digits[i]*10**(k-i-1)
digits=[]
if num==0:
for i in range(k):
digits.append(0)
digits[-1]=1
else:
num+=1
num=str(num)
for i in range(len(num)):
digits.append(int(num[i]))
return digits |
992,086 | 3496736d2d913232e982d399ae9954c802808ce8 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
##############################################################################
#
# comprueba.py 0.99: a simple program checker
#
# Copyright (C) 2008 Juan Miguel Vilar
# Universitat Jaume I
# Castelló (Spain)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Any questions regarding this software should be directed to:
#
# Juan Miguel Vilar
# Departament de Llenguatges i Sistemes Informàtics
# Universitat Jaume I
# E12071 Castellón (SPAIN)
#
# email: jvilar@lsi.uji.es
#
##############################################################################
#
# comprueba.py
#
from optparse import OptionParser
import os
from subprocess import Popen
import sys
from tempfile import TemporaryFile
################################################################################
#
# Colores:
#
# ANSI SGR parameter codes used by colorea(): 30-37 select the foreground
# colour, 41 is a red background, 1 is bold ("brillante").
AMARILLO = 33
AZUL = 34
MAGENTA = 35
NEGRO = 30
ROJO = 31
VERDE = 32
FONDOROJO = 41
BRILLANTE = 1
# Global switch; set from the --noColores command-line option in main().
noColores = False
def colorea(color, cadena, color_salida=0):
    """Wrap *cadena* in ANSI SGR escape sequences.

    :param color: an SGR code (int) or an iterable of codes to switch on.
    :param cadena: text to colour. A trailing newline is kept outside the
        coloured span so the reset code is emitted before the line break.
    :param color_salida: code (or iterable of codes) emitted afterwards;
        the default 0 resets all attributes.

    Honours the module-level ``noColores`` flag: when set, *cadena* is
    returned untouched.
    """
    if noColores:
        return cadena
    if isinstance(color, int):
        r = "\x1b[%dm" % color
    else:
        r = "".join("\x1b[%dm" % i for i in color)
    # Robustness fix: the original indexed cadena[-1] directly and raised
    # IndexError for an empty string; endswith handles "" safely.
    termina_nl = cadena.endswith("\n")
    r += cadena[:-1] if termina_nl else cadena
    if isinstance(color_salida, int):
        r += "\x1b[%dm" % color_salida
    else:
        r += "".join("\x1b[%dm" % i for i in color_salida)
    if termina_nl:
        r += "\n"
    return r
################################################################################
#
# Errores:
#
def error(m):
    """Write *m* to stderr in red and terminate with exit status 1."""
    mensaje = "Error: %s\n" % m
    sys.stderr.write(colorea(ROJO, mensaje))
    sys.exit(1)
def aviso(m):
    """Write a yellow warning for *m* to stderr (does not terminate)."""
    mensaje = "Aviso: %s\n" % m
    sys.stderr.write(colorea(AMARILLO, mensaje))
################################################################################
#
# Ficheros:
#
def abre(n):
    """Open file *n* for reading; abort via error() when it cannot be opened.

    BUG FIX: the original used a bare ``except:``, which also swallowed
    KeyboardInterrupt/SystemExit and programming errors; catch only the
    OS-level failures that open() can raise.
    """
    try:
        return open(n)
    except OSError:
        error("No he podido abrir %s para lectura" % n)
def abre_o_none(n):
    """Return an open file for *n*, or None when *n* itself is None."""
    return None if n is None else abre(n)
################################################################################
#
# Redirección:
#
class RedirigeSalida:
    """File-like sink that buffers everything written to it.

    Used to capture the tested program's stdout/stderr; the buffered text is
    later handed to a Procesador. Anything still pending at destruction time
    is flushed to the original stream.
    """
    def __init__(self, original=sys.stdout):
        self.original = original      # stream used for escribe_original/__del__
        self._acumulado = ""          # text captured so far

    def write(self, l):
        self._acumulado = self._acumulado + l

    def flush(self):
        # Nothing to do: content is only released explicitly.
        pass

    def acumulado(self):
        """Return the text captured since the last limpia()."""
        return self._acumulado

    def limpia(self):
        """Discard the captured text."""
        self._acumulado = ""

    def escribe_original(self, l):
        """Bypass the buffer and write *l* straight to the original stream."""
        self.original.write(l)

    def __del__(self):
        # Do not lose captured output if nobody collected it.
        if self._acumulado:
            self.original.write(self._acumulado)
            self._acumulado = ""
class RedirigeEntrada:
    """File-like stand-in for sys.stdin that drives a Procesador.

    Every time the program under test asks for input, the stdout/stderr text
    accumulated in the RedirigeSalida buffers since the previous read is
    handed to the Procesador, so input and output can be interleaved.
    """
    def __init__(self, fentrada, salida, error, procesador):
        self.fentrada = fentrada # input file with the test lines
        self.salida = salida     # RedirigeSalida capturing stdout
        self.error = error       # RedirigeSalida capturing stderr
        self.procesador = procesador
        self.eof = False
        self.entrada = ""        # current input line after trata_linea
        self.nlinea = 0          # number of lines served so far
        self.pos = 0             # read() position inside self.entrada
    def leelinea(self):
        # Load the next line. Before doing so (except for the very first
        # line), flush the output produced so far to the Procesador.
        if self.eof:
            return ""
        if self.nlinea:
            salida_encontrada = self.salida.acumulado()
            self.salida.limpia()
            error_encontrado = self.error.acumulado()
            self.error.limpia()
            self.procesador.presenta_salida(salida_encontrada, error_encontrado)
        l = self.fentrada.readline()
        if not l:
            self.eof = True
            self.procesador.trata_EOF()
            return
        self.nlinea = self.nlinea + 1
        # The Procesador may rewrite the line (e.g. strip expected output).
        self.entrada = self.procesador.trata_linea(l, self.nlinea)
    def read(self, n):
        # Emulate file.read(n) one character at a time.
        # NOTE(review): self.pos is never reset when leelinea() loads a new
        # line, so multi-line read() calls look broken — confirm whether
        # read() is ever exercised (input() normally goes through readline).
        if self.eof:
            return ""
        if n != 1:
            l = ""
            for i in range(n):
                l = l + self.read(1)
            return l
        if self.pos == len(self.entrada):
            self.leelinea()
        if self.eof:
            return ""
        c = self.entrada[self.pos]
        self.pos = self.pos + 1
        return c
    def readline(self):
        if self.eof:
            return ""
        self.leelinea()
        if self.eof:
            return ""
        return self.entrada
    def isatty(self):
        # Pretend not to be a terminal.
        return 0
    def close(self):
        pass
    def flush(self):
        pass
    def __del__(self):
        # If the program stopped reading before the input was exhausted,
        # flush the pending output and signal EOF to the Procesador.
        if not self.eof:
            salida_encontrada = self.salida.acumulado()
            self.salida.limpia()
            error_encontrado = self.error.acumulado()
            self.error.limpia()
            self.procesador.presenta_salida(salida_encontrada, error_encontrado)
            self.procesador.trata_EOF()
    def __iter__(self):
        while 1:
            line = self.readline()
            if not line:
                break
            yield line
################################################################################
#
# Procesadores (actuan sobre la entrada y la salida):
#
class Procesador:
    """Base interface for processors that act on the interleaved I/O.

    Subclasses (Comprobador, Generador) override these hooks; the defaults
    pass the input line through untouched and ignore the output.
    """

    def trata_linea(self, l, nlinea):
        """Receive the raw line read and its line number; return the text
        that should actually be fed to the program."""
        return l

    def presenta_salida(self, salida_encontrada, error_encontrado):
        """Receive the stdout/stderr produced by the last input line."""
        return None

    def trata_EOF(self):
        """Invoked once the whole input file has been processed."""
        return None
class Comprobador(Procesador):
    """Procesador that checks the program's output against the expected one.

    Each test line carries the real input plus the expected stdout (after
    ``separa_campos`` markers) and expected stderr (after ``separa_error``
    markers). A report is written to *salida*.
    """
    def __init__(self, salida, mostrar_todas, separa_campos, separa_error, nombre):
        self.salida = salida              # report destination (file-like)
        self.mostrarTodas = mostrar_todas # report correct lines as well
        self.separaCampos = separa_campos # marker before expected stdout
        self.separaError = separa_error   # marker before expected stderr
        self.salida.write(self.barra(" " + nombre + " "))
        self.vistaEntrada = False         # has any input line been seen yet?
        self.nerrores = 0
        self.nlinea = 0
    def trata_linea(self, l, nlinea):
        # Split the raw test line into the real input, the expected stdout
        # fields and the expected stderr fields; return only the input part.
        self.vistaEntrada = True
        hay_fin_l = l[-1] == "\n"
        if hay_fin_l:
            l = l[:-1]
        campos = l.split(self.separaCampos)
        campos_error = campos[-1].split(self.separaError)
        campos[-1] = campos_error[0]
        campos_error = campos_error[1:]
        self.entrada = campos[0].rstrip()
        if hay_fin_l:
            self.entrada = self.entrada + "\n"
        self.salidaEsperada = [s.strip() for s in campos[1:]]
        self.errorEsperado = [s.strip() for s in campos_error]
        self.nlinea = nlinea
        return self.entrada
    def error_en_salida(self, esperado, encontrado):
        # True when *encontrado* does not match the expected list of lines.
        if len(esperado) == 0 and encontrado == "":
            return False
        fin_ok = (encontrado == "" or encontrado[-1] == "\n")
        if len(esperado) != 0 and fin_ok:
            s = [c.strip() for c in encontrado.split("\n")]
            s = s[:-1]  # last element is empty because of the final \n
            if s == esperado:
                return False
        return True
    def muestra_salida(self, esperado, encontrado, titulo_esperado, titulo_encontrado):
        # Print expected vs. found, flagging an empty side in red.
        self.salida.write("---------------------------------------\n")
        self.salida.write(titulo_esperado + "\n")
        if len(esperado) == 0:
            self.salida.write(colorea(ROJO, "Ninguna") + "\n")
        else:
            for l in esperado:
                self.salida.write(" %s\n" % l)
        self.salida.write("--\n")
        self.salida.write(titulo_encontrado + "\n")
        if encontrado == "":
            self.salida.write(colorea(ROJO, "Ninguna") + "\n")
        else:
            s = encontrado.split("\n")
            if encontrado[-1] == "\n":
                s = s[:-1]
            for l in s:
                self.salida.write(" %s\n" % l)
    def presenta_salida(self, salida_encontrada, error_encontrado):
        if not self.vistaEntrada:
            esalida = self.error_en_salida("", salida_encontrada)
            eerror = self.error_en_salida("", error_encontrado)
        else:
            esalida = self.error_en_salida(self.salidaEsperada, salida_encontrada)
            eerror = self.error_en_salida(self.errorEsperado, error_encontrado)
        if esalida or eerror:
            self.nerrores += 1
        if not self.mostrarTodas and not esalida and not eerror:
            return
        if not self.vistaEntrada:
            self.salida.write(colorea(ROJO, "Antes de la primera entrada\n"))
            if esalida:
                # BUG FIX: a missing comma ('"" ""') left the expected-output
                # title empty; restore it, mirroring the stderr branch below.
                self.muestra_salida("", salida_encontrada,
                                    "Salida esperada:", "Salida encontrada:")
            if eerror:
                self.muestra_salida("", error_encontrado, "Salida de error esperada:", "Salida de error encontrada:")
        else:
            self.salida.write("Línea: %s\n" % colorea(VERDE, str(self.nlinea)))
            self.salida.write("Entrada: %s\n" % self.entrada.rstrip())
            if esalida or self.mostrarTodas:
                self.muestra_salida(self.salidaEsperada, salida_encontrada,
                                    "Salida esperada:", "Salida encontrada:")
            else:
                self.salida.write("Salida estándar: " + colorea(VERDE, "correcta\n"))
            if eerror or self.mostrarTodas:
                self.muestra_salida(self.errorEsperado, error_encontrado,
                                    "Salida de error esperada:", "Salida de error encontrada:")
            else:
                self.salida.write("---------------------------------------\n")
                self.salida.write("Salida de error: " + colorea(VERDE, "correcta\n"))
            self.salida.write(self.barra(""))
    def trata_EOF(self):
        # Final summary banner with the error count.
        if self.nerrores == 0:
            m = colorea(VERDE, "No ha habido errores")
        else:
            m = colorea(ROJO, "Ha habido %d errores" % self.nerrores)
        self.salida.write(m + "\n")
        self.salida.write(self.barra(" FIN "))
    def barra(self, m):
        # A "==<m>====..." banner padded to roughly 40 columns.
        return colorea(MAGENTA, "==%s%s\n" % (m, max(1, 40 - len(m) - 2) * "="))
class Generador(Procesador):
    """Generates output in the interleaved format consumed with -e.

    Each input line is echoed followed by the program's stdout lines joined
    with ``separa_campos`` markers and its stderr lines joined with
    ``separa_error`` markers.
    """
    def __init__(self, salida, separa_campos, separa_error):
        self.salida = salida              # destination file-like object
        self.separaCampos = separa_campos # marker preceding stdout fields
        self.separaError = separa_error   # marker preceding stderr fields
    def trata_linea(self, l, nlinea):
        # Remember the raw input line so presenta_salida can emit it next to
        # the output it produced.
        self.entrada = l
        return l
    def presenta_salida(self, salida_encontrada, error_encontrado):
        """Write one interleaved line: input, stdout fields, stderr fields."""
        fin = ""
        # BUG FIX: the original tested `len(self.entrada) != ""`, which is
        # always true (int compared to str) and made the [-1] index crash on
        # an empty line; compare the string itself instead.
        if self.entrada != "" and self.entrada[-1] == "\n":
            self.entrada = self.entrada[:-1]
            fin = "\n"
        self.salida.write(self.entrada)
        if salida_encontrada != "":
            self.salida.write(" " + self.separaCampos + " ")
            if salida_encontrada[-1] == "\n":
                salida_encontrada = salida_encontrada[:-1]
            self.salida.write((" " + self.separaCampos + " ").join(salida_encontrada.split("\n")))
        if error_encontrado != "":
            self.salida.write(" " + self.separaError + " ")
            if error_encontrado[-1] == "\n":
                error_encontrado = error_encontrado[:-1]
            self.salida.write((" " + self.separaError + " ").join(error_encontrado.split("\n")))
        self.salida.write(fin)
    def trata_EOF(self):
        pass
################################################################################
#
# Pruebas simples:
#
def prueba_simple(options, args):
    """Non-interleaved mode: run a program against input/output files.

    *args* is [programa] optionally followed by either explicit file names
    ([entrada] salida [error]) or one or more directories of test cases whose
    files are grouped by base name and matched by suffix (options.sufijo*).
    """
    if len(args) == 0:
        error("Necesito al menos el nombre del programa")
    programa = args[0]
    entrada = None
    salida_esperada = None
    error_esperado = None
    es_directorio = False
    if len(args) == 1:
        pass
    elif os.path.isdir(args[1]):
        es_directorio = True
    else:
        # Explicit file arguments: 2 => expected stdout only; 3 => input and
        # expected stdout; 4 => input, expected stdout and expected stderr.
        if len(args) == 2:
            salida_esperada = args[1]
        elif len(args) == 3:
            entrada = args[1]
            salida_esperada = args[2]
        elif len(args) == 4:
            entrada = args[1]
            salida_esperada = args[2]
            error_esperado = args[3]
        else:
            error("Demasiados argumentos")
    if es_directorio:
        bien = 0
        mal = 0
        for directorio in args[1:]:
            # d maps test base name -> (input, expected stdout, expected stderr).
            d = {}
            for nfichero in sorted(os.listdir(directorio)):
                (nombre, sufijo) = os.path.splitext(nfichero)
                if sufijo != "":
                    sufijo = sufijo[1:]
                fichero = os.path.join(directorio, nfichero)
                t = d.get(nombre, (None, None, None))
                if sufijo == options.sufijoEntrada:
                    d[nombre] = (fichero, t[1], t[2])
                elif sufijo == options.sufijoSalida:
                    d[nombre] = (t[0], fichero, t[2])
                elif sufijo == options.sufijoError:
                    d[nombre] = (t[0], t[1], fichero)
            for (entrada, salida_esperada, error_esperado) in sorted(d.values()):
                if len(options.argumentos) > 0:
                    linea = programa + " " + " ".join(options.argumentos)
                else:
                    linea = programa
                # With -a the input file is passed as the last argument
                # instead of being connected to stdin.
                if options.argumento and entrada is not None:
                    r = ejecuta_programa(linea + " " + entrada, None, salida_esperada, error_esperado)
                else:
                    r = ejecuta_programa(linea, entrada, salida_esperada, error_esperado)
                if r:
                    bien += 1
                else:
                    mal += 1
        print(colorea(MAGENTA, "===========================\n"))
        print(colorea(AZUL, "Probados %d ficheros" % (bien + mal)))
        if mal == 0:
            print(colorea(VERDE, "Todos correctos"))
        else:
            print(colorea(VERDE, "Correctos: %d" % bien))
            print(colorea(ROJO, "Erróneos: %d" % mal))
    else:
        if options.argumento and entrada is not None:
            ejecuta_programa(programa + " " + entrada, None, salida_esperada, error_esperado)
        else:
            ejecuta_programa(programa, entrada, salida_esperada, error_esperado)
def ejecuta_programa(nombre, entrada, salida_esperada, error_esperado):
    """Run *nombre* as a subprocess and compare its stdout/stderr with the
    expected files. Returns True when both streams match.

    Any of entrada/salida_esperada/error_esperado may be None (not checked).
    """
    print(colorea((AZUL, BRILLANTE), "Ejecutando %s" % nombre))
    if entrada is not None:
        print("- Fichero de entrada: %s" % entrada)
    if salida_esperada is not None:
        print("- Salida esperada: %s" % salida_esperada)
    if error_esperado is not None:
        print("- Error esperado: %s" % error_esperado)
    fentrada = abre_o_none(entrada)
    fsalida_esperada = abre_o_none(salida_esperada)
    ferror_esperado = abre_o_none(error_esperado)
    # Capture the program's real output in temporary files.
    fsalida = TemporaryFile(mode="w+")
    ferror = TemporaryFile(mode="w+")
    try:
        programa = Popen(nombre.split(), shell=False, stdin=fentrada, stdout=fsalida, stderr=ferror)
    except OSError as e:
        error("No he podido ejecutar %s, (%s)" % (nombre, e))
    codigo = programa.wait()
    # Rewind the captured streams before comparing them.
    fsalida.seek(0)
    ferror.seek(0)
    print(colorea((AZUL, BRILLANTE), "Resultado:"))
    va_bien = True
    if codigo != 0:
        # A nonzero exit status is only warned about, not counted as failure.
        print(colorea(AMARILLO, "Código de error %d" % codigo))
        # va_bien= False
    r = compara_ficheros("- Salida estándar:", fsalida_esperada, fsalida)
    va_bien = va_bien and r
    r = compara_ficheros("- Salida de error:", ferror_esperado, ferror)
    va_bien = va_bien and r
    return va_bien
################################################################################
#
# Pruebas entrelazadas:
#
def prueba_entrelazado(options, args):
    """Interleaved mode: run a Python program against .pr test files.

    With no extra argument the test lines come from stdin; otherwise each
    argument is a test file or a directory scanned for files with the
    options.sufijoEntrelazado suffix.
    """
    salida = sys.stdout
    if len(args) == 0:
        error("Necesito al menos un parámetro, el nombre del programa")
    elif len(args) == 1:
        ejecuta_entrelazado(args[0], sys.stdin, salida, options, "stdin")
    else:
        for e in args[1:]:
            if not os.path.isdir(e):
                entrada = abre(e)
                ejecuta_entrelazado(args[0], entrada, salida, options, e)
            else: # it is a directory: run every matching test file inside
                for nfichero in sorted(os.listdir(e)):
                    (nombre, sufijo) = os.path.splitext(nfichero)
                    if sufijo != "":
                        sufijo = sufijo[1:]
                    if sufijo == options.sufijoEntrelazado:
                        fichero = os.path.join(e, nfichero)
                        entrada = abre(fichero)
                        ejecuta_entrelazado(args[0], entrada, salida, options, fichero)
def ejecuta_entrelazado(programa, entrada, salida, options, nombre):
    """Execute *programa* (a Python script) in-process with redirected I/O.

    stdin/stdout/stderr are replaced so the script's reads and writes flow
    through a Procesador (Generador with -g, Comprobador otherwise), then
    everything (std streams, argv, imported modules) is restored.
    """
    # Keep the original standard streams.
    stdout = sys.stdout
    stdin = sys.stdin
    stderr = sys.stderr
    # Install the redirections.
    sys.stdout = RedirigeSalida(salida)
    sys.stderr = RedirigeSalida(salida)
    if options.genera:
        procesador = Generador(stdout, options.separaCampos, options.marcaError)
    else:
        procesador = Comprobador(stdout, options.todas, options.separaCampos, options.marcaError, nombre)
    sys.stdin = RedirigeEntrada(entrada, sys.stdout, sys.stderr, procesador)
    # Save sys.argv and build the one seen by the program.
    argv = sys.argv[:]
    sys.argv = [programa] + options.argumentos
    path = os.path.dirname(programa)
    if path not in sys.path:
        sys.path.append(path)
    # Record which modules existed before the run, to undo its imports later.
    modules = list(sys.modules.keys())
    # Prepare the execution environment. Setting __name__ forces scripts
    # guarded with `if __name__ == "__main__":` to actually run.
    globales = {"__name__": "__main__"}
    try:
        exec(compile(open(programa).read(), programa, 'exec'), globales)
    except SystemExit:
        pass
    except:
        # Show the tested program's traceback but keep the checker alive.
        import traceback
        sei = sys.exc_info()
        traceback.print_exception(sei[0], sei[1], sei[2])
    # Remove any modules the tested program imported.
    for i in list(sys.modules.keys()):
        if i not in modules:
            del sys.modules[i]
    sys.stdout = stdout
    sys.stdin = stdin
    sys.stderr = stderr
    sys.argv = argv
################################################################################
#
# Comparaciones
#
class Comparacion:
    """Holds the result of comparing two files line by line."""

    def __init__(self, _iguales, _diferencias):
        self._iguales = _iguales          # True when both files match
        self._diferencias = _diferencias  # iterable of Diferencia objects

    def iguales(self):
        """True when both files are identical."""
        return self._iguales

    def diferencias(self):
        """The differences between the files."""
        return self._diferencias

    def __str__(self):
        if not self._iguales:
            return "".join(str(d) for d in self._diferencias)
        return "Iguales"

    def pretty(self, lin_ref, lin_obtenido):
        """Readable report, resolving positions against the actual lines."""
        if not self._iguales:
            return "".join(d.pretty(lin_ref, lin_obtenido) for d in self._diferencias)
        return "Iguales"
def nlineas(n):
    """Spell out a line count in Spanish ("cero/una/dos líneas", else digits)."""
    especiales = {0: "cero líneas", 1: "una línea", 2: "dos líneas"}
    return especiales.get(n, "%d líneas" % n)
def plural(n):
    """Spanish plural suffix: empty for exactly one, "s" otherwise."""
    return "" if n == 1 else "s"
class Diferencia:
    """One difference: a span of reference lines vs. a span of output lines."""

    def __init__(self, pos_ref, talla_ref, pos_obtenido, talla_obtenido):
        """Store the 1-based positions and span sizes in both files."""
        self.posRef = pos_ref
        self.tallaRef = talla_ref
        self.posObtenido = pos_obtenido
        self.tallaObtenido = talla_obtenido

    def es_vacia(self):
        # Empty when neither side contributes any line.
        return self.tallaRef == 0 and self.tallaObtenido == 0

    def __add__(self, other):
        # Merge into one Diferencia only when *other* starts exactly where
        # *self* ends on both sides; otherwise return the pair as a tuple
        # (ListaDiferencias distinguishes the two cases by type).
        contiguas = (self.posRef + self.tallaRef == other.posRef
                     and self.posObtenido + self.tallaObtenido == other.posObtenido)
        if not contiguas:
            return self, other
        return Diferencia(self.posRef, self.tallaRef + other.tallaRef,
                          self.posObtenido, self.tallaObtenido + other.tallaObtenido)

    def pretty(self, lin_ref, lin_obtenido):
        """Render the difference using the actual lines of both files."""
        sr = plural(self.tallaRef)
        nr = nlineas(self.tallaRef)
        so = plural(self.tallaObtenido)
        no = nlineas(self.tallaObtenido)
        ini_ref = self.posRef - 1
        ini_obt = self.posObtenido - 1
        lineas_ref = "".join(" - " + l for l in lin_ref[ini_ref:ini_ref + self.tallaRef])
        lineas_obt = "".join(" + " + l for l in lin_obtenido[ini_obt:ini_obt + self.tallaObtenido])
        if self.tallaRef != 0 and self.tallaObtenido != 0:
            # Lines changed on both sides.
            r = "** %s cambiada%s; en la posición %d de la referencia pone:\n" % (nr, sr, self.posRef)
            r += lineas_ref
            r += "* y en la posición %d de la salida pone:\n" % self.posObtenido
            r += lineas_obt
        elif self.tallaRef != 0:
            # Reference lines missing from the output.
            r = ("** borrada%s %s en la posición %d de la referencia:\n" % (sr, nr, self.posRef))
            r += lineas_ref
        elif self.tallaObtenido != 0:
            # Extra lines present only in the output.
            r = ("** %s inesperada%s en la posición %d de la salida:\n" %
                 (no, so, self.posObtenido))
            r += lineas_obt
        else:
            r = "** diferencia vacía en %d y %d" % (self.posRef, self.posObtenido)
        return r

    def __str__(self):
        return "Diferencia"
class ListaDiferencias:
    """Persistent (immutable) linked list of Diferencia objects.

    Each append returns a new list, sharing structure with the old one, so
    the dynamic-programming table in compara_lineas can keep many partial
    histories cheaply.
    """

    def __init__(self, c=None, a=None):
        self.contenido = c  # last difference (None for the empty list)
        self.anterior = a   # list of the preceding differences

    def es_vacia(self):
        return self.contenido is None and self.anterior is None

    def append(self, d):
        """Return a new list with difference *d* appended at the end."""
        if d.es_vacia():
            return self
        if self.es_vacia():
            return ListaDiferencias(d, None)
        combinada = self.contenido + d
        # Diferencia.__add__ yields a single Diferencia when the spans are
        # contiguous (merge) and a tuple when they are not.
        if type(combinada) == type(d):
            return ListaDiferencias(combinada, self.anterior)
        return ListaDiferencias(d, self)

    def __iter__(self):
        # Oldest first: recurse into the tail, then yield the head.
        if self.anterior is not None:
            yield from self.anterior
        if self.contenido is not None:
            yield self.contenido
def muestra(l):
    """Debug aid: print each (score, differences) pair of *l*."""
    for puntos, difs in l:
        print(" ", puntos, list(map(str, difs)))
def compara_ficheros(titulo, f_ref, f_obtenido):
    """Compare a reference file against the obtained one and print a report.

    Either file may be None (treated as empty). Returns True when they match.
    """
    l_ref = f_ref.readlines() if f_ref is not None else []
    l_obtenidas = f_obtenido.readlines() if f_obtenido is not None else []
    comparacion = compara_lineas(l_ref, l_obtenidas)
    print(titulo, end=' ')
    if comparacion.iguales():
        print(colorea(VERDE, "correcta"))
    else:
        print(colorea(ROJO, "errores"))
        print(comparacion.pretty(l_ref, l_obtenidas))
    return comparacion.iguales()
def compara_lineas(l_referencia, l_obtenidas):
    """Line-level edit-distance comparison; returns a Comparacion.

    Classic Levenshtein dynamic programming over lines: each table cell holds
    (cost, ListaDiferencias) for matching a prefix of the reference against
    the obtained lines consumed so far. Persistent lists keep the difference
    histories shared and cheap.
    """
    # Row 0: delete every reference line seen so far.
    actual = [(0, ListaDiferencias())]
    for pos_ref in range(len(l_referencia)):
        pos_ref += 1
        ld = actual[-1][1].append(Diferencia(pos_ref, 1, 1, 0))
        actual.append((actual[-1][0] + 1, ld))
    pos_obtenida = 0
    for lObtenida in l_obtenidas:
        anterior = actual
        pos_obtenida += 1
        # Column 0 of the new row: all obtained lines so far are insertions.
        ld = ListaDiferencias()
        ld = ld.append(Diferencia(1, 0, 1, pos_obtenida))
        actual = [(anterior[0][0] + 1, ld)]
        pos_ref = 0
        for lRef in l_referencia:
            pos_ref += 1
            # Three candidate moves: insert the obtained line, substitute
            # (free when the lines are equal), or delete the reference line.
            ins = anterior[pos_ref][0] + 1
            if lRef == lObtenida:
                sust = anterior[pos_ref - 1][0]
            else:
                sust = anterior[pos_ref - 1][0] + 1
            borr = actual[-1][0] + 1
            puntos = min(ins, sust, borr)
            if puntos == sust:
                ld = anterior[pos_ref - 1][1]
                if lRef == lObtenida:
                    diferencia = Diferencia(pos_ref, 0, pos_obtenida, 0)
                else:
                    diferencia = Diferencia(pos_ref, 1, pos_obtenida, 1)
            elif puntos == borr:
                ld = actual[-1][1]
                diferencia = Diferencia(pos_ref, 1, pos_obtenida, 0)
            elif puntos == ins:
                ld = anterior[pos_ref][1]
                diferencia = Diferencia(pos_ref, 0, pos_obtenida, 1)
            actual.append((puntos, ld.append(diferencia)))
    # Bottom-right cell: zero cost means the files are identical.
    if actual[-1][0] == 0:
        return Comparacion(True, [])
    else:
        return Comparacion(False, actual[-1][1])
################################################################################
#
# Principal:
#
def analiza_sufijos(options):
    """Split options.sufijos ("in,out,err") into the three suffix options.

    Aborts via error() when the string does not have exactly three
    comma-separated components.
    """
    partes = options.sufijos.split(",")
    if len(partes) != 3:
        error("La cadena pasada a --sufijos tiene que tener tres componentes separados por comas")
    options.sufijoEntrada, options.sufijoSalida, options.sufijoError = partes
def main():
    """Parse the command line and dispatch to simple or interleaved mode."""
    parser = OptionParser(usage="%prog [<opciones>] <programa> [ [<entrada>] <salida> [<error>] ] | {<directorio>} ]")
    parser.add_option("-a", "--argumento", action="store_true", default=False,
                      help="el fichero de entrada se pasa como argumento al programa, sin efecto en el modo entrelazado. Si se usa con -A, el fichero de entrada es el último argumento.")
    parser.add_option("-A", "--argumentos", type="string", default=None,
                      help="lista de argumentos que se pasan al programa, separados por blancos. Por defecto no se le pasa ninguno")
    parser.add_option("-E", "--marcaError", type="string", default="@*",
                      help="separador de las líneas de error en modo entrelazado, por defecto: %default.")
    parser.add_option("-e", "--entrelazado", action="store_true", default=False,
                      help="utilizar el modo entrelazado.")
    parser.add_option("-g", "--genera", action="store_true", default=False,
                      help="generar la salida en el formato adecuado para entrelazado (implica -e).")
    parser.add_option("-n", "--noColores", action="store_true", default=False,
                      help="no utilizar colores en la salida.")
    parser.add_option("-S", "--separaCampos", type="string", default="@@",
                      help="separador de los campos en modo entrelazado, por defecto: %default.")
    parser.add_option("-s", "--sufijos", type="string", default="i,o,e",
                      help="sufijos de los ficheros de entrada, salida y error, por defecto: %default.")
    parser.add_option("-t", "--todas", action="store_true", default=False,
                      help="en modo entrelazado, muestra todas las líneas incluso si no hay diferencias respecto a lo esperado.")
    parser.add_option("-x", "--sufijoEntrelazado", type="string", default="pr",
                      help="sufijo de los ficheros con pruebas entrelazadas, por defecto: %default.")
    (options, args) = parser.parse_args()
    # -g only makes sense in interleaved mode, so it implies -e.
    if options.genera:
        options.entrelazado = True
    # Propagate the colour switch to colorea() through the module global.
    global noColores
    noColores = options.noColores
    if options.argumentos is None:
        options.argumentos = []
    else:
        options.argumentos = options.argumentos.split()
    analiza_sufijos(options)
    if options.entrelazado:
        prueba_entrelazado(options, args)
    else:
        prueba_simple(options, args)
if __name__ == "__main__":
    main()
|
992,087 | a2a19e506116c81bb1a71e8ed93c851e59925a51 | from basegraph import graph as grafo
def check_if_is_spouse(person1, person2):
    """True when person2 appears in person1's spouse list."""
    spouses = grafo[person1]["spouse"]
    return person2 in spouses
def check_if_has_relationship(person1, person2):
    """True when person2 appears in person1's relationships list."""
    links = grafo[person1]["relationships"]
    return person2 in links
def check_if_is_child(person, parent):
    """True when person is listed under parent's "parent" key.

    NOTE(review): in this graph the "parent" key apparently stores the
    node's children — confirm against basegraph.
    """
    children = grafo[parent]["parent"]
    return person in children
def check_if_is_bastard_kid(person1, person2):
    """A bastard kid of person2: their child, but not a child of any spouse."""
    clause_is_child_of_spouse = False
    for spouse in grafo[person2]["spouse"]:
        try:
            if check_if_is_child(person1, spouse):
                clause_is_child_of_spouse = True
                break
        except KeyError:
            # This spouse node has no children list; treat as "not a child".
            continue
    clause_is_child = check_if_is_child(person1, person2)
    verdict = clause_is_child and not clause_is_child_of_spouse
    print("Bastard: ", verdict)
    return verdict
def check_if_is_lover_of_henryVIII(person):
    """A lover: had a relationship with Henry VIII without being a spouse."""
    related = check_if_has_relationship("henryVIII", person)
    married = check_if_is_spouse("henryVIII", person)
    verdict = related and not married
    print("Lover: ", verdict)
    return verdict
def check_if_is_is_granchild_of_henryVII(person):
    """True when person is a child of one of Henry VII's children.

    BUG FIX: the negative path used to fall off the end of the function and
    return None; it now returns False explicitly (same truthiness, so
    existing callers are unaffected).
    """
    for child in grafo["henryVII"]["parent"]:
        if check_if_is_child(person, child):
            print("Grandchild: ", True)
            return True
    print("Grandchild: ", False)
    return False
def check_if_can_claim_throne(person):
    """Only a legitimate male child of Henry VIII can claim the throne."""
    if not check_if_is_child(person, "henryVIII"):
        print(False)
        return False
    legitimate = not check_if_is_bastard_kid(person, "henryVIII")
    male = grafo[person]["sex"] == 'male'
    verdict = legitimate and male
    print("Can claim throne: ", verdict)
    return verdict
# Dispatch table: query keyword -> predicate implementing it.
relations = {"bastard": check_if_is_bastard_kid,
            "lover": check_if_is_lover_of_henryVIII,
            "granchild": check_if_is_is_granchild_of_henryVII,
            "claim": check_if_can_claim_throne
            }
|
992,088 | 685072b991f6e89afaaaaf4655a77e72ecfa3e07 | #Example
# get bash version by subprocess
import subprocess

cmd = ["bash", "--version"]
# subprocess.run avoids the wait()-before-communicate() ordering of the
# original Popen version, which can deadlock once a pipe buffer fills up.
proc = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
if proc.returncode == 0:
    for each_line in proc.stdout.splitlines():
        if "version" in each_line and "release" in each_line:
            # e.g. "GNU bash, version 5.1.16(1)-release ..." -> "5.1.16"
            ver = each_line.split()[3].split('(')[0]
            # BUG FIX: the message wrongly announced a "Python Version".
            print(f"Bash Version is:- {ver}")
else:
    # BUG FIX: typo "was failed ad" corrected in the failure message.
    print("Command failed and error is ", proc.stderr)
|
992,089 | d654229deb54b9a21ce617fde714b4d445588299 | # coding=utf-8
# Copyright 2020 The Gin-Config Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gin import config
from gin.tf import external_configurables # pylint: disable=unused-import
import tensorflow as tf
# Necessary for AdagradaDAOptimizer test.
config.external_configurable(tf.compat.v1.train.get_global_step)
@config.configurable
def fake_train_model(learning_rate, optimizer):
  """Builds a (learning rate, optimizer) pair from gin-supplied constructors.

  Both arguments are callables bound through gin: learning_rate is called
  with the global step, optimizer with the resulting learning rate.
  """
  global_step = tf.compat.v1.train.get_or_create_global_step()
  lr = learning_rate(global_step=global_step)
  opt = optimizer(learning_rate=lr)
  return lr, opt
@config.configurable
def configurable(**kwargs):
  """Returns whatever keyword arguments gin bound, for inspection in tests."""
  return kwargs
class TFConfigTest(tf.test.TestCase):
  """Integration tests for gin's TensorFlow external configurables."""

  def tearDown(self):
    # Reset gin's global bindings so the tests stay independent.
    config.clear_config()
    super(TFConfigTest, self).tearDown()

  def testConfigureOptimizerAndLearningRate(self):
    config_str = """
      fake_train_model.learning_rate = @piecewise_constant
      piecewise_constant.boundaries = [200000]
      piecewise_constant.values = [0.01, 0.001]
      fake_train_model.optimizer = @MomentumOptimizer
      MomentumOptimizer.momentum = 0.95
    """
    config.parse_config(config_str)
    lr, opt = fake_train_model()  # pylint: disable=no-value-for-parameter
    self.assertIsInstance(opt, tf.compat.v1.train.MomentumOptimizer)
    self.assertAlmostEqual(opt._momentum, 0.95)
    global_step = tf.compat.v1.train.get_or_create_global_step()
    self.evaluate(tf.compat.v1.global_variables_initializer())
    # The piecewise schedule should switch values at the 200000 boundary.
    self.assertAlmostEqual(self.evaluate(lr), 0.01)
    self.evaluate(global_step.assign(300000))
    self.assertAlmostEqual(self.evaluate(lr), 0.001)

  def testOptimizersWithDefaults(self):
    # Plain entries need no extra bindings; tuples carry required ones.
    optimizers = [
        tf.compat.v1.train.GradientDescentOptimizer,
        tf.compat.v1.train.AdadeltaOptimizer,
        tf.compat.v1.train.AdagradOptimizer,
        (tf.compat.v1.train.AdagradDAOptimizer, {
            'global_step': '@get_global_step()'
        }),
        (tf.compat.v1.train.MomentumOptimizer, {
            'momentum': 0.9
        }),
        tf.compat.v1.train.AdamOptimizer,
        tf.compat.v1.train.FtrlOptimizer,
        tf.compat.v1.train.ProximalGradientDescentOptimizer,
        tf.compat.v1.train.ProximalAdagradOptimizer,
        tf.compat.v1.train.RMSPropOptimizer,
    ]
    constant_lr = lambda global_step: 0.01
    for optimizer in optimizers:
      extra_bindings = {}
      if isinstance(optimizer, tuple):
        optimizer, extra_bindings = optimizer
      config.clear_config()
      config_lines = ['fake_train_model.optimizer = @%s' % optimizer.__name__]
      for param, val in extra_bindings.items():
        config_lines.append('%s.%s = %s' % (optimizer.__name__, param, val))
      config.parse_config(config_lines)
      # pylint: disable=no-value-for-parameter
      _, configed_optimizer = fake_train_model(constant_lr)
      # pylint: enable=no-value-for-parameter
      self.assertIsInstance(configed_optimizer, optimizer)

  def testDtypes(self):
    # Spot check a few.
    config_str = """
      # Test without tf prefix, but using the prefix is strongly recommended!
      configurable.float32 = %float32
      # Test with tf prefix.
      configurable.string = %tf.string
      configurable.qint8 = %tf.qint8
    """
    config.parse_config(config_str)
    vals = configurable()
    self.assertIs(vals['float32'], tf.float32)
    self.assertIs(vals['string'], tf.string)
    self.assertIs(vals['qint8'], tf.qint8)

  def testDynamicRegistrationImportAsGinError(self):
    # Importing the top-level `gin` package under dynamic registration is
    # reserved and must be rejected.
    config_str = """
      from __gin__ import dynamic_registration
      import gin.tf.external_configurables
      import __main__
      __main__.configurable.arg = %gin.REQUIRED
    """
    expected_msg = 'The `gin` symbol is reserved; cannot bind import statement '
    with self.assertRaisesRegex(ValueError, expected_msg):
      config.parse_config(config_str)

  def testCompatibilityWithDynamicRegistration(self):
    config_str = """
      from __gin__ import dynamic_registration
      from gin.tf import external_configurables
      import __main__
      __main__.configurable.arg = %tf.float32
    """
    config.parse_config(config_str)
    self.assertEqual(configurable(), {'arg': tf.float32})
if __name__ == '__main__':
  # Run the suite via the TensorFlow test runner.
  tf.test.main()
|
992,090 | 1aa3b7cba489d71dff5f0cf1a89804e344edc463 | from django.contrib import admin
import models as core
class CandidateAdmin(admin.ModelAdmin):
    # Restrict the Candidate edit form to these fields.
    # NOTE(review): this class is never passed to admin.site.register below,
    # so it currently has no effect — confirm whether
    # admin.site.register(core.Candidate, CandidateAdmin) was intended.
    fields = ['username', 'email', 'domain']
# Expose the core models in the Django admin with default ModelAdmin options.
admin.site.register(core.Entry)
admin.site.register(core.Block)
admin.site.register(core.Domain)
admin.site.register(core.Candidate)
admin.site.register(core.CandidateEntry)
admin.site.register(core.CandidateTeq)
|
992,091 | 0cd00038134d59e857d2e4205519e777c8557b8e | import os
from dataclasses import dataclass, field
import pandas as pd
import s3
from utils import unpickle, make_pickle
def format_jira(jid: str) -> str:
    """
    Transforms jira ID to keep same notation across the project:
    lowercase, trimmed, description suffix after '_' dropped, '-' removed.
    :param jid: Jira ID (ex: SGDS-123, or OMICS-456_do_something)
    """
    base = jid.lower().strip().split('_')[0]
    return base.replace('-', '')
def _build_s3_path(s3_basepath: str, jira_issue: str, loc_path: str, subfolder: str = None) -> str:
    """
    Build an s3 path with optional subfolder.

    :param s3_basepath: Target dir on S3
    :param jira_issue: Unformatted jira ID
    :param loc_path: Local path of file
    :param subfolder: Name of optional subfolder
    :return: joined S3 key path
    """
    parts = [s3_basepath, format_jira(jira_issue)]
    # BUG FIX: os.path.join raises TypeError on a None component, so the
    # original crashed whenever subfolder was left at its default.
    if subfolder:
        parts.append(subfolder)
    parts.append(os.path.basename(loc_path))
    return os.path.join(*parts)
@dataclass
class Catalog:
    """
    Catalog maps jira issue IDs to s3 outputs to facilitate project management. A catalog belongs to
    a specific repository, project, & S3 location. It is pickled and saved to its repository.

    :param cat_path: path to the pickled catalog in the project repo
    :param s3_basepath: S3 prefix path to the project files
    :param contents: dict of DataSet objects
    :param fresh: If True, create a new Catalog from scratch, if False, read in the pickle from cat_path
    """
    # get repo paths
    cat_path: str
    s3_basepath: str
    contents: dict = field(init=False)  # {jira_issue: {name: DataSet | {ext: DataSet}}}
    fresh: bool = False
    verbose: bool = False

    def __post_init__(self):
        # Either crawl S3 to build the catalog or restore it from the local pickle.
        if self.fresh:
            self.create()
        else:
            try:
                print(f"Unpickling {self.cat_path}. Exists? {os.path.exists(self.cat_path)}")
                self.contents = unpickle(self.cat_path, verbose=self.verbose)
            except Exception as e:
                # Best-effort: a missing/corrupt pickle degrades to an empty catalog.
                print(f'ERROR Reading in Catalog pickle from {self.cat_path}. {e}.'
                      f'Generating empty catalog.')
                self.contents = {}
        if self.verbose:
            print(f'Initialized Catalog with {len(self.contents)} records.')

    def __repr__(self):
        return f'Catalog for {self.s3_basepath}: {len(self.contents)} records.'

    def create(self):
        """
        Create a new catalog from scratch. Crawls the s3_basepath to collect all DataSet objects.
        """
        self.contents = {}
        for jira_path in s3.ls(self.s3_basepath):
            # assumes s3.ls returns prefixes ending in '/' so [-2] is the issue dir — TODO confirm
            jira_issue = format_jira(jira_path.split('/')[-2])
            if self.verbose:
                print(f'Creating JIRA issue {jira_issue}...')
            self.contents[jira_issue] = {}
            self._update(s3.ls(jira_path), jira_issue)
        self._save()

    def update(self, jira_issue: str = None, format_jid: bool = True, arrays: bool = False) -> None:
        """
        Crawls S3 to update catalog file.

        :param jira_issue: specfic Jira issue to update. If None, look for Jira IDs that exist on S3
            but are not present in the Catalog.
        :param format_jid: whether to format `jira_issue`. Useful for files/paths not following
            Catalog nomenclature
        :param arrays: whether to create DataSet arrays or force individual files
        """
        if jira_issue:
            jira_issue = format_jira(jira_issue) if format_jid else jira_issue
            # Look for the s3 path corresponding to this jira item
            jira_path = s3.ls(self.s3_basepath, pattern=jira_issue)
            if len(jira_path) != 1:
                # Ambiguous or missing prefix: refuse rather than guess.
                raise ValueError(f"Found {len(jira_path)} prefixes matching {jira_issue}.")
            else:
                jira_path = jira_path[0]
                s3_list = s3.ls(jira_path)
                # Rebuild this issue's records from scratch.
                self.contents[jira_issue] = {}
                self._update(s3_list, jira_issue, arrays=arrays)
        else:  # update all not yet in Catalog
            print('Scanning for new records...')
            for jira_path in s3.ls(self.s3_basepath):
                jira_issue = format_jira(jira_path.split('/')[-2])
                if jira_issue not in self.contents:
                    print(f'Creating JIRA issue {jira_issue}...')
                    s3_list = s3.ls(jira_path)
                    self.contents[jira_issue] = {}
                    self._update(s3_list, jira_issue, arrays=arrays)
        self._save()

    def search(self, **kwargs: dict) -> list:
        """Return records whose fields contain every given search value.

        NOTE(review): `cont` is a dict, so the comprehension iterates its
        *keys* (strings) and `i[k]` then indexes a string with a string key,
        which raises TypeError. This looks like it predates `contents`
        becoming a nested dict — verify against callers before relying on it.
        """
        results = []
        for jira, cont in self.contents.items():
            results.extend([i for i in cont if all(kwargs[k] in i[k] for k in kwargs)])
        if results:
            print(f'Found {len(results)} results for search: {str(kwargs)}')
        return results

    def _save(self) -> None:
        # Persist the contents dict back to the repo-local pickle.
        make_pickle(self.contents, self.cat_path, verbose=self.verbose)
        if self.verbose:
            print(f'Catalog was saved to {self.cat_path}')

    def _update(self, s3_list: list, jira_issue: str, arrays: bool = True):
        """Fold one jira prefix's S3 listing into contents[jira_issue]."""
        # add any subfolder contents
        folders = [p for p in s3_list if s3.is_prefix(p)]
        for prefix in folders:
            self._gen_array_records(s3.ls(prefix), jira_issue)
        # add any lone files
        file_list = [p for p in s3_list if p not in folders]
        # Many sibling files collapse into one array record when allowed.
        if arrays and len(file_list) > 10:
            self._gen_array_records(file_list, jira_issue)
        else:
            for fp in file_list:
                name, ext = os.path.basename(fp).rsplit('.', 1)
                dataset = DataSet(jira_issue, fp, format=ext.upper(), dtype='file')
                # Look for an existing dataset or set of datasets with this name
                if name in self.contents[jira_issue]:
                    ds = self.contents[jira_issue].pop(name)
                    if type(ds) is dict:
                        # Already a per-extension dict: add this extension.
                        ds[ext] = dataset
                        dataset = ds
                    else:
                        # Same stem, two extensions: promote to a dict keyed by ext.
                        dataset = {
                            ds.format.lower(): ds,
                            ext.lower(): dataset
                        }
                # Update contents
                self.contents[jira_issue][name] = dataset

    def _gen_array_records(self, array_list, jira_issue):
        # Collapse a folder of files into one DataSet per file extension.
        bpath = os.path.dirname(array_list[0])
        # define arrays by file extensions, skip subfolders
        ext = set([c.split('.')[-1] for c in array_list if not c.endswith('/')])
        for e in ext:
            # make a distinct array for each file format/extension, even if they share the
            # same subfolder
            if len(ext) > 1:
                name = '_'.join([os.path.basename(bpath), e.upper(), 'array'])
            else:
                name = '_'.join([os.path.basename(bpath), 'array'])
            r = [os.path.basename(rec) for rec in array_list if rec.endswith('.' + e)]
            if len(r) == 0:  # single file no extension
                # Here `e` is actually the whole extensionless basename.
                meta = {'format': 'NA', 'dtype': 'file'}
                fp = e
            elif len(r) == 1:  # single file
                fp = os.path.join(bpath, r[0])
                meta = {'format': e.upper(), 'dtype': 'file'}
            else:
                # make a dummy path to show regex pattern of array files
                repr_path = os.path.join(bpath, self._gen_repr_path(r))
                fp = bpath + '/'
                meta = {
                    'count': len(r),
                    'format': e.upper(),
                    'dtype': 'array',
                    'regex': repr_path,
                    'example': os.path.join(bpath, r[0])
                }
            dataset = DataSet(jira_issue, fp, **meta)
            self.contents[jira_issue][name] = dataset

    @staticmethod
    def _gen_repr_path(arr):
        """Return 'commonprefix*commonsuffix' summarizing the array filenames."""
        temp = arr[0]
        root = ''
        # Grow the shared prefix one character at a time.
        for char in temp:
            if all(a.startswith(root + char) for a in arr):
                root += char
            else:
                break
        end = ''
        # Grow the shared suffix from the right.
        for char in temp[::-1]:
            if all(v.endswith(char + end) for v in arr):
                end = char + end
            else:
                break
        return root + '*' + end
@dataclass
class DataSet:
    """
    DataSet object facilitates read/writes of analysis outputs.

    :param jira_issue: The jid this output relates to
    :param s3_path: S3 location of this output
    :param format: file format
    :param count: For arrays, number of files in array
    :param dtype: array or file
    :param regex: path regex for array DataSets
    :param example: example path for an array DataSet member
    """
    jira_issue: str
    s3_path: str
    format: str = None
    count: int = None
    dtype: str = None
    regex: str = None
    example: str = None

    def __repr__(self):
        # NOTE(review): local `repr` shadows the builtin; harmless here.
        repr = f'DataSet object from {self.jira_issue.upper()}:'
        for attr, val in self.__dict__.items():
            if val and attr != 'jira_issue':
                repr += f'\n\t- {attr}: {val}'
        return repr

    def __post_init__(self):
        # Normalize the issue id so lookups agree with Catalog keys.
        self.jira_issue = format_jira(self.jira_issue)

    def _get_object_path(self, key):
        """Resolve the concrete S3 object path, expanding array members."""
        if self.dtype == 'array':  # build s3 path using key
            if not key or not self.format:
                raise ValueError('Unable to download DataSet array member without key or format.')
            return os.path.join(self.s3_path, f'{key}.{self.format.lower()}')
        return self.s3_path

    @property
    def keys(self):
        """Get existing keys for an array DataSet, which can be used in `read` and
        `download` functions to access an array member"""
        if self.dtype != 'array':
            raise TypeError('Property `keys` only exists for DataSet arrays')
        # Key = filename stem of each member matching this array's extension.
        return [os.path.basename(p).split('.')[0] for p in
                s3.ls(self.s3_path, suffix=self.format.lower())]

    def download(self, key: str = None, tmp: str = '/tmp/') -> str:
        """
        Download data from S3 to temp folder, return local path

        :param key: For array DataSet objects, specifies name of array member to download.
        :param tmp: Local dir files are written to
        """
        path = self._get_object_path(key)
        return s3.copy(path, dest=tmp, verbose=False)

    def read(self, idx: (int, str) = 0, key: str = None, **kwargs) -> pd.DataFrame:
        """
        Stream data from S3. Uses pandas.read method if file is a CSV

        :param key: For array DataSet objects, specifies name of array member to stream.
        :param idx: `index_col` kwarg for pandas.read method
        """
        path = self._get_object_path(key)
        if self.format == 'CSV':
            # assumes pandas can read the s3:// path directly — TODO confirm fsspec/s3fs setup
            return pd.read_csv(path, index_col=idx, **kwargs)
        return s3.read(path, **kwargs)

    @classmethod
    def from_local_file(cls, loc_path: str, jira_issue: str, s3_basepath: str,
                        subfolder: str = None):
        """Create a DataSet object from a local path"""
        s3_path = _build_s3_path(s3_basepath, jira_issue, loc_path, subfolder=subfolder)
        print(s3_path)
        s3.copy(loc_path, dest=s3_path, verbose=False)
        return cls(jira_issue, s3_path, dtype='file', format=loc_path.rsplit('.', 1)[1].upper())

    @classmethod
    def from_df(cls, dataframe: pd.DataFrame, name: str, jira_issue: str, s3_basepath: str,
                subfolder: str = None, tmp: str = '/tmp/'):
        """Create a DataSet object from a pandas data frame"""
        s3_path = _build_s3_path(s3_basepath, jira_issue, name + '.csv', subfolder=subfolder)
        print(s3_path)
        # dataframe.to_csv(s3_path) # this fails silently when using credentials
        # (as opposed to with an instance role). Filed LUCHA-1780
        dataframe.to_csv(f'{tmp}{name}.csv')
        s3.copy(f'{tmp}{name}.csv', dest=s3_path, verbose=False)
        return cls(jira_issue, s3_path, format='CSV', dtype='file')
|
992,092 | 5167f44f8eb8d6fe3181d78321a1a981f2bc6f98 | #!/usr/bin/python
import random
import os
import numpy as np
import time
import sys
# Numeric team ids used throughout the model, in the original if/elif order:
# AL teams 0-14, NL teams 15-29.
_TEAM_NAME_TO_ID = {
    "Baltimore Orioles": 0,
    "Boston Red Sox": 1,
    "Chicago White Sox": 2,
    "Cleveland Indians": 3,
    "Detroit Tigers": 4,
    "Houston Astros": 5,
    "Kansas City Royals": 6,
    "Los Angeles Angels": 7,
    "Minnesota Twins": 8,
    "New York Yankees": 9,
    "Oakland Athletics": 10,
    "Seattle Mariners": 11,
    "Tampa Bay Rays": 12,
    "Texas Rangers": 13,
    "Toronto Blue Jays": 14,
    "Arizona Diamondbacks": 15,
    "Atlanta Braves": 16,
    "Chicago Cubs": 17,
    "Cincinnati Reds": 18,
    "Colorado Rockies": 19,
    "Los Angeles Dodgers": 20,
    "Miami Marlins": 21,
    "Milwaukee Brewers": 22,
    "New York Mets": 23,
    "Philadelphia Phillies": 24,
    "Pittsburgh Pirates": 25,
    "San Diego Padres": 26,
    "San Francisco Giants": 27,
    "St.Louis Cardinals": 28,
    "Washington Nationals": 29,
}


def Get_Nid(name):
    """Return the numeric id for an MLB team name, or -1 if unknown.

    Replaces a 60-line if/elif chain with a dict lookup; behavior is
    identical, including -1 for unrecognized names.
    """
    return _TEAM_NAME_TO_ID.get(name, -1)
class Team3():
    # Class-level defaults/tuning constants. NOTE(review): the mutable
    # game_list default is shadowed by a per-instance list in __init__,
    # so it is never actually shared in practice — but it is a footgun.
    Name=""
    odds_record_interval = 10
    record_const = 10       # rolling-window length for all history buffers
    score_mid = 3           # margin above which a win counts as "big"
    score_tight = 1         # margin that counts as a one-run ("tight") game
    odds_mid = 1.92
    odds_low = 1.60
    odds_high= 2.55
    s_n = 0                 # current consecutive-series count
    s_nn = ""               # opponent the current series is against
    game_list = []
    nid = 0
def __init__(self,name):
self.Name = name
self.score_w_w_h = []
self.score_w_w_a = []
self.score_w_t_h = []
self.score_w_t_a = []
self.score_w_m_h = []
self.score_w_m_a = []
self.score_l_w_h = []
self.score_l_w_a = []
self.score_l_t_h = []
self.score_l_t_a = []
self.score_l_m_h = []
self.score_l_m_a = []
self.win_j_h = []
self.win_y_h = []
self.lose_j_h = []
self.lose_y_h = []
self.win_j_a = []
self.win_y_a = []
self.lose_j_a = []
self.lose_y_a = []
self.odds_record = []
self.score_home_hist = []
self.score_away_hist = []
self.odds_home_hist = []
self.odds_away_hist = []
self.game_list = []
self.chromo = []
self.chromo_odds = []
self.chromo_opp = []
self.chromo_info = []
self.nid = 0
def initial(self):
self.score_w_w_h = []
self.score_w_w_a = []
self.score_w_t_h = []
self.score_w_t_a = []
self.score_w_m_h = []
self.score_w_m_a = []
self.score_l_w_h = []
self.score_l_w_a = []
self.score_l_t_h = []
self.score_l_t_a = []
self.score_l_m_h = []
self.score_l_m_a = []
self.win_j_h = []
self.win_y_h = []
self.lose_j_h = []
self.lose_y_h = []
self.win_j_a = []
self.win_y_a = []
self.lose_j_a = []
self.lose_y_a = []
self.odds_record = []
self.score_home_hist = []
self.score_away_hist = []
self.odds_home_hist = []
self.odds_away_hist = []
self.s_n = 0
self.s_nn = ""
def set_chromo_info(self,game_info):
    # Append one encoded game-info integer (see set_chromo modes 7-10).
    self.chromo_info.append(game_info)
def get_chromo_info(self):
    # Accessor for the encoded per-game info list (mutable reference).
    return self.chromo_info
def set_chromo_opp(self,nid,game_num):
self.chromo_opp.append(game_num*100+nid)
def get_chromo_opp(self):
    # Accessor for the packed opponent list (game_num*100 + nid entries).
    return self.chromo_opp
def get_game_num(self):
    # Zero-based index of the latest recorded game (-1 when chromo is empty).
    return len(self.chromo)-1
def set_chromo(self,game,over_under,homeaway,mode):
    """Encode one finished game as a 'nucleotide' letter plus a score value.

    Appends one letter to self.chromo and one float to self.chromo_odds
    (modes 3-8 accumulate a running total in chromo_odds; modes 7-10 also
    pack the scores into self.chromo_info).

    :param game: (score_h, score_a, odds_h, odds_a) for the game
    :param over_under: (ou, over, under, score_mid_h, score_mid_a); only the
        two score_mid_* entries are read here
    :param homeaway: 1 when this team was the home side, otherwise away
    :param mode: selects one of the encodings below (0-10); any other
        value returns -1
    NOTE(review): the A/G/C/T letters appear to encode combinations of
    win/loss and favorite/underdog that differ per mode — confirm the exact
    semantics with the downstream GA code before relying on them.
    """
    ou,over,under,score_mid_h,score_mid_a = over_under
    score_h,score_a,odds_h,odds_a = game
    if mode == 0:
        # Mode 0: compare the actual margin against the expected margin.
        odds_cali = 0.0  # unused
        if score_h > score_a :
            if score_h-score_a > (score_mid_h-score_mid_a):
                self.chromo.append('A')
                self.chromo_odds.append(1.0)
            else:
                self.chromo.append('G')
                self.chromo_odds.append(2.0)
        else:
            if score_a-score_h > (score_mid_a-score_mid_h):
                self.chromo.append('C')
                self.chromo_odds.append(3.0)
            else:
                self.chromo.append('T')
                self.chromo_odds.append(4.0)
    elif mode == 1:
        # Mode 1: home-win by 2+ vs otherwise, split on how the away score
        # compares with its expectation / the loss margin.
        odds_cali = 0.0  # unused
        if score_h-1 > score_a :
            if score_a-score_mid_a < 1:
                self.chromo.append('A')
                self.chromo_odds.append(1.0)
            else:
                self.chromo.append('G')
                self.chromo_odds.append(2.0)
        else:
            if score_a-score_h < 2:
                self.chromo.append('C')
                self.chromo_odds.append(3.0)
            else:
                self.chromo.append('T')
                self.chromo_odds.append(4.0)
    elif mode == 2:
        # Mode 2: like mode 0 but splits on each side's score vs expectation.
        odds_cali = 0.0  # unused
        if score_h > score_a :
            if score_a - score_mid_a < 1:
                self.chromo.append('A')
                self.chromo_odds.append(1.0)
            else:
                self.chromo.append('G')
                self.chromo_odds.append(2.0)
        else:
            if score_h - score_mid_h < 2:
                self.chromo.append('C')
                self.chromo_odds.append(3.0)
            else:
                self.chromo.append('T')
                self.chromo_odds.append(4.0)
    elif mode == 3:
        # Mode 3: letter fixed to 'A'; chromo_odds is a cumulative
        # half-payout: +avg(odds)/winner's odds on a win, - on a loss.
        self.chromo.append('A')
        if len(self.chromo_odds)>0:
            last_value = self.chromo_odds[len(self.chromo_odds)-1]
        else:
            last_value = 0
        if homeaway == 1:
            if score_h > score_a:
                self.chromo_odds.append((odds_a+odds_h)/odds_a/2.0+last_value)
            else:
                self.chromo_odds.append(-(odds_a+odds_h)/odds_h/2.0+last_value)
        else:
            if score_a > score_h:
                self.chromo_odds.append((odds_a+odds_h)/odds_h/2.0+last_value)
            else:
                self.chromo_odds.append(-(odds_a+odds_h)/odds_a/2.0+last_value)
    elif mode == 4:
        # Mode 4: cumulative full payout, letter encodes win/loss crossed
        # with favorite/underdog status (lower odds = favorite).
        if len(self.chromo_odds) > 0:
            last_value = self.chromo_odds[len(self.chromo_odds)-1]
        else:
            last_value = 0
        if homeaway == 1:
            if score_h > score_a:
                self.chromo_odds.append((odds_a+odds_h)/odds_a+last_value)
                if odds_h < odds_a:
                    self.chromo.append('A')
                else:
                    self.chromo.append('G')
            else:
                self.chromo_odds.append(-(odds_a+odds_h)/odds_h+last_value)
                if odds_h > odds_a:
                    self.chromo.append('C')
                else:
                    self.chromo.append('T')
        else:
            if score_a > score_h:
                self.chromo_odds.append((odds_a+odds_h)/odds_h+last_value)
                if odds_a < odds_h:
                    self.chromo.append('C')
                else:
                    self.chromo.append('T')
            else:
                self.chromo_odds.append(-(odds_a+odds_h)/odds_a+last_value)
                if odds_a > odds_h:
                    self.chromo.append('A')
                else:
                    self.chromo.append('G')
    elif mode == 5:
        # Mode 5: letter fixed to 'A'; cumulative run differential.
        self.chromo.append('A')
        if len(self.chromo_odds)>0:
            last_value = self.chromo_odds[len(self.chromo_odds)-1]
        else:
            last_value = 0
        if homeaway == 1:
            self.chromo_odds.append((score_h-score_a)+last_value)
        else:
            self.chromo_odds.append((score_a-score_h)+last_value)
    elif mode == 6:
        # Mode 6: letter uses a 0.15 odds margin for favorite status;
        # chromo_odds accumulates fixed +-1/+-2 steps.
        if len(self.chromo_odds) > 0:
            last_value = self.chromo_odds[len(self.chromo_odds)-1]
        else:
            last_value = 0
        if score_h > score_a:
            if odds_h - 0.15 < odds_a:
                self.chromo.append('A')
            else:
                self.chromo.append('G')
        else:
            if odds_a < odds_h - 0.15:
                self.chromo.append('C')
            else:
                self.chromo.append('T')
        if homeaway == 1:
            if score_h > score_a:
                #self.chromo_odds.append((odds_a+odds_h)/odds_a+last_value)
                if odds_h < odds_a:
                    self.chromo_odds.append(1.0+last_value)
                else:
                    self.chromo_odds.append(2.0+last_value)
            else:
                if odds_h < odds_a:
                    self.chromo_odds.append(-2.0+last_value)
                else:
                    self.chromo_odds.append(-1.0+last_value)
                #self.chromo_odds.append(-(odds_a+odds_h)/odds_h+last_value)
        else:
            if score_a > score_h:
                if odds_h < odds_a:
                    self.chromo_odds.append(2.0+last_value)
                else:
                    self.chromo_odds.append(1.0+last_value)
                #self.chromo_odds.append((odds_a+odds_h)/odds_h+last_value)
            else:
                if odds_h < odds_a:
                    self.chromo_odds.append(-1.0+last_value)
                else:
                    self.chromo_odds.append(-2.0+last_value)
                #self.chromo_odds.append(-(odds_a+odds_h)/odds_a+last_value)
    elif mode == 7:
        # Mode 7: like mode 4 but the payout is scaled by a bucketed margin
        # (1.0/1.5/2.0); also packs scores into chromo_info as
        # own*1e6 + opp*1e4 + own_expected*1e2 + opp_expected.
        if len(self.chromo_odds) > 0:
            last_value = self.chromo_odds[len(self.chromo_odds)-1]
        else:
            last_value = 0
        score_diff = abs(score_h-score_a)
        if score_diff > 3:
            score_diff = 2.0
        elif score_diff > 1:
            score_diff = 1.5
        else:
            score_diff = 1.0
        if homeaway == 1:
            info = score_h*1000000
            info += score_a*10000
            info += score_mid_h*100
            info += score_mid_a*1
            self.chromo_info.append(int(info))
            if score_h > score_a:
                self.chromo_odds.append(score_diff*(odds_a+odds_h)/odds_a+last_value)
                if odds_h < odds_a:
                    self.chromo.append('A')
                else:
                    self.chromo.append('G')
            else:
                self.chromo_odds.append(-score_diff*(odds_a+odds_h)/odds_h+last_value)
                if odds_h > odds_a:
                    self.chromo.append('C')
                else:
                    self.chromo.append('T')
        else:
            info = score_a*1000000
            info += score_h*10000
            info += score_mid_a*100
            info += score_mid_h*1
            self.chromo_info.append(int(info))
            if score_a > score_h:
                self.chromo_odds.append(score_diff*(odds_a+odds_h)/odds_h+last_value)
                if odds_a < odds_h:
                    self.chromo.append('A')
                else:
                    self.chromo.append('G')
            else:
                self.chromo_odds.append(-score_diff*(odds_a+odds_h)/odds_a+last_value)
                if odds_a > odds_h:
                    self.chromo.append('C')
                else:
                    self.chromo.append('T')
    elif mode == 8:
        # Mode 8: amplification 1/2/4 by how lopsided the odds were
        # (0.35 margin); wins add +amp, losses add -4/amp.
        if len(self.chromo_odds) > 0:
            last_value = self.chromo_odds[len(self.chromo_odds)-1]
        else:
            last_value = 0
        score_diff = abs(score_h-score_a)
        if score_diff > 3:
            score_diff = 3.0
        elif score_diff > 1:
            score_diff = 2.0
        else:
            score_diff = 1.0
        if homeaway == 1:
            info = score_h*1000000
            info += score_a*10000
            info += score_mid_h*100
            info += score_mid_a*1
            self.chromo_info.append(int(info))
            if odds_h + 0.35 < odds_a:
                amp = 1.0
            elif abs(odds_h - odds_a) < 0.35:
                amp = 2.0
            else:
                amp = 4.0
            if score_h > score_a:
                # self.chromo_odds.append(amp*(odds_a+odds_h)/odds_a+last_value)
                self.chromo_odds.append(amp+last_value)
                if odds_h < odds_a:
                    self.chromo.append('A')
                else:
                    self.chromo.append('G')
            else:
                # self.chromo_odds.append(-amp*(odds_a+odds_h)/odds_h+last_value)
                self.chromo_odds.append(-4.0/amp+last_value)
                if odds_h > odds_a:
                    self.chromo.append('C')
                else:
                    self.chromo.append('T')
        else:
            info = score_a*1000000
            info += score_h*10000
            info += score_mid_a*100
            info += score_mid_h*1
            self.chromo_info.append(int(info))
            if odds_a + 0.35 < odds_h:
                amp = 1.0
            elif abs(odds_a - odds_h) < 0.35:
                amp = 2.0
            else:
                amp = 4.0
            if score_a > score_h:
                # self.chromo_odds.append(amp*(odds_a+odds_h)/odds_h+last_value)
                self.chromo_odds.append(amp+last_value)
                if odds_a < odds_h:
                    self.chromo.append('A')
                else:
                    self.chromo.append('G')
            else:
                self.chromo_odds.append(-4.0/amp+last_value)
                # self.chromo_odds.append(-amp*(odds_a+odds_h)/odds_a+last_value)
                if odds_a > odds_h:
                    self.chromo.append('C')
                else:
                    self.chromo.append('T')
    elif mode == 9:
        # Mode 9: non-cumulative; chromo_odds stores this team's raw odds.
        if homeaway == 1:
            info = score_h*1000000
            info += score_a*10000
            info += score_mid_h*100
            info += score_mid_a*1
            self.chromo_info.append(int(info))
            if score_h > score_a:
                if odds_h < odds_a:
                    self.chromo.append('A')
                else:
                    self.chromo.append('G')
            else:
                if odds_h > odds_a:
                    self.chromo.append('C')
                else:
                    self.chromo.append('T')
            self.chromo_odds.append(odds_h)
        else:
            info = score_a*1000000
            info += score_h*10000
            info += score_mid_a*100
            info += score_mid_h*1
            self.chromo_info.append(int(info))
            if score_a > score_h:
                if odds_a < odds_h:
                    self.chromo.append('A')
                else:
                    self.chromo.append('G')
            else:
                if odds_a > odds_h:
                    self.chromo.append('C')
                else:
                    self.chromo.append('T')
            self.chromo_odds.append(odds_a)
    elif mode == 10:
        # Mode 10: like mode 9 but requires a 2+ margin for a "win" letter
        # and stores this team's odds scaled by 1.40 (handicap payout?).
        if homeaway == 1:
            info = score_h*1000000
            info += score_a*10000
            info += score_mid_h*100
            info += score_mid_a*1
            self.chromo_info.append(int(info))
            if score_h-1 > score_a:
                if odds_h < odds_a:
                    self.chromo.append('A')
                else:
                    self.chromo.append('G')
            else:
                if odds_h > odds_a:
                    self.chromo.append('C')
                else:
                    self.chromo.append('T')
            self.chromo_odds.append(odds_h*1.40)
        else:
            info = score_a*1000000
            info += score_h*10000
            info += score_mid_a*100
            info += score_mid_h*1
            self.chromo_info.append(int(info))
            if score_a-1 > score_h:
                if odds_a < odds_h:
                    self.chromo.append('A')
                else:
                    self.chromo.append('G')
            else:
                if odds_a > odds_h:
                    self.chromo.append('C')
                else:
                    self.chromo.append('T')
            self.chromo_odds.append(odds_a*1.40)
    else:
        # Unknown mode: signal failure without touching any buffer.
        return -1
def get_chromo(self):
    # Accessor for the per-game letter sequence (mutable reference).
    return self.chromo
def get_chromo_odds(self):
    # Accessor for the per-game odds/score series (mutable reference).
    return self.chromo_odds
def set_Game(self,game):
self.game_list.insert(0,game)
return None
def get_Game(self,kmer):
    """Consume the oldest game and return kmer-1 of the newest games.

    set_Game inserts at index 0, so index 0 is the most recent game and
    pop() discards the oldest. kmer==2 returns (and removes) the oldest
    game itself; kmer 3/4 return the 2/3 newest games while still popping
    the oldest. Returns None when there are not enough games.
    NOTE(review): kmer values outside 2-4 fall through and return None
    implicitly — confirm that is intended.
    """
    if not self.game_list:
        return None
    else:
        if kmer == 2:
            return self.game_list.pop()
        elif kmer == 3:
            if len(self.game_list) < 2:
                return None
            else:
                ret = [self.game_list[0],self.game_list[1]]
                self.game_list.pop()
                return ret
        elif kmer == 4:
            if len(self.game_list) < 3:
                return None
            else:
                ret = [self.game_list[0],self.game_list[1],self.game_list[2]]
                self.game_list.pop()
                return ret
def set_Series(self,team):
if self.s_nn == team:
self.s_n += 1
else:
self.s_nn = team
self.s_n = 1
def get_Series(self,team):
if self.s_nn == team:
return self.s_n
else:
return 0
def update_diff_10_a(self,hometeam,score_w_w,score_w_m,score_w_t,score_l_w,score_l_m,score_l_t):
self.score_away_hist.append(hometeam)
self.score_w_w_a.append(score_w_w)
self.score_w_t_a.append(score_w_t)
self.score_l_w_a.append(score_l_w)
self.score_l_t_a.append(score_l_t)
self.score_w_m_a.append(score_w_m)
self.score_l_m_a.append(score_l_m)
if len(self.score_away_hist) > self.record_const:
self.score_away_hist.remove(self.score_away_hist[0])
if len(self.score_w_w_a) > self.record_const:
self.score_w_w_a.remove(self.score_w_w_a[0])
if len(self.score_w_t_a) > self.record_const:
self.score_w_t_a.remove(self.score_w_t_a[0])
if len(self.score_l_w_a) > self.record_const:
self.score_l_w_a.remove(self.score_l_w_a[0])
if len(self.score_l_t_a) > self.record_const:
self.score_l_t_a.remove(self.score_l_t_a[0])
if len(self.score_w_m_a) > self.record_const:
self.score_w_m_a.remove(self.score_w_m_a[0])
if len(self.score_l_m_a) > self.record_const:
self.score_l_m_a.remove(self.score_l_m_a[0])
def update_diff_10_h(self,awayteam,score_w_w,score_w_m,score_w_t,score_l_w,score_l_m,score_l_t):
self.score_home_hist.append(awayteam)
self.score_w_w_h.append(score_w_w)
self.score_w_t_h.append(score_w_t)
self.score_l_w_h.append(score_l_w)
self.score_l_t_h.append(score_l_t)
self.score_w_m_h.append(score_w_m)
self.score_l_m_h.append(score_l_m)
if len(self.score_home_hist) > self.record_const:
self.score_home_hist.remove(self.score_home_hist[0])
if len(self.score_w_w_h) > self.record_const:
self.score_w_w_h.remove(self.score_w_w_h[0])
if len(self.score_w_t_h) > self.record_const:
self.score_w_t_h.remove(self.score_w_t_h[0])
if len(self.score_l_w_h) > self.record_const:
self.score_l_w_h.remove(self.score_l_w_h[0])
if len(self.score_l_t_h) > self.record_const:
self.score_l_t_h.remove(self.score_l_t_h[0])
if len(self.score_w_m_h) > self.record_const:
self.score_w_m_h.remove(self.score_w_m_h[0])
if len(self.score_l_m_h) > self.record_const:
self.score_l_m_h.remove(self.score_l_m_h[0])
def update_score_10_a(self,hometeam,win_j_10,win_y_10,lose_j_10,lose_y_10):
self.odds_away_hist.append(hometeam)
self.win_j_a.append(win_j_10)
self.win_y_a.append(win_y_10)
self.lose_j_a.append(lose_j_10)
self.lose_y_a.append(lose_y_10)
if len(self.odds_away_hist)>self.record_const:
self.odds_away_hist.remove(self.odds_away_hist[0])
if len(self.win_j_a) > self.record_const:
self.win_j_a.remove(self.win_j_a[0])
if len(self.win_y_a) > self.record_const:
self.win_y_a.remove(self.win_y_a[0])
if len(self.lose_j_a) > self.record_const:
self.lose_j_a.remove(self.lose_j_a[0])
if len(self.lose_y_a) > self.record_const:
self.lose_y_a.remove(self.lose_y_a[0])
def update_score_10_h(self,awayteam,win_j_10,win_y_10,lose_j_10,lose_y_10):
self.odds_home_hist.append(awayteam)
self.win_j_h.append(win_j_10)
self.win_y_h.append(win_y_10)
self.lose_j_h.append(lose_j_10)
self.lose_y_h.append(lose_y_10)
if len(self.odds_home_hist)>self.record_const:
self.odds_home_hist.remove(self.odds_home_hist[0])
if len(self.win_j_h) > self.record_const:
self.win_j_h.remove(self.win_j_h[0])
if len(self.win_y_h) > self.record_const:
self.win_y_h.remove(self.win_y_h[0])
if len(self.lose_j_h) > self.record_const:
self.lose_j_h.remove(self.lose_j_h[0])
if len(self.lose_y_h) > self.record_const:
self.lose_y_h.remove(self.lose_y_h[0])
def update_score(self,hometeam,awayteam,score_diff,odds,home_away):
    """Classify one game result and feed the rolling history buffers.

    :param hometeam: opponent name used when this team played away
    :param awayteam: opponent name used when this team played at home
    :param score_diff: home score minus away score
    :param odds: this team's decimal odds for the game
    :param home_away: truthy when this team was the home side
    Wins record positive odds-scaled samples (split favorite/underdog at
    odds_mid, and big/tight/medium margin at score_mid/score_tight);
    losses record the negative reciprocal-odds samples.
    NOTE(review): score_diff == 0 records nothing — presumably no ties in
    the data; confirm.
    """
    if(home_away):
        if score_diff > 0:
            # Home win: favorite (odds <= odds_mid) vs underdog sample.
            if odds <= self.odds_mid:
                self.update_score_10_h(awayteam,1.0*odds/self.record_const,0,0,0)
            elif odds > self.odds_mid:
                self.update_score_10_h(awayteam,0,2.0*odds/self.record_const,0,0)
            # Margin bucket: big / tight / medium.
            if score_diff > self.score_mid:
                self.update_diff_10_h(awayteam,3.0*odds/self.record_const,0,0,0,0,0)
            elif score_diff == self.score_tight:
                self.update_diff_10_h(awayteam,0,0,2.0*odds/self.record_const,0,0,0)
            else:
                self.update_diff_10_h(awayteam,0,1.0*odds/self.record_const,0,0,0,0)
        elif score_diff < 0:
            # Home loss: negative samples scaled by 1/odds.
            if odds >= self.odds_mid:
                self.update_score_10_h(awayteam,0,0,-1.0/odds/self.record_const,0)
            elif odds < self.odds_mid:
                self.update_score_10_h(awayteam,0,0,0,-2.0/odds/self.record_const)
            if -1*score_diff > self.score_mid:
                self.update_diff_10_h(awayteam,0,0,0,-3.0/odds/self.record_const,0,0)
            elif -1*score_diff == self.score_tight:
                self.update_diff_10_h(awayteam,0,0,0,0,0,-2.0/odds/self.record_const)
            else:
                self.update_diff_10_h(awayteam,0,0,0,0,-1.0/odds/self.record_const,0)
    else:
        if score_diff < 0:
            # Away win (home side lost): mirror of the home-win branch.
            if odds <= self.odds_mid:
                self.update_score_10_a(hometeam,1.0*odds/self.record_const,0,0,0)
            elif odds > self.odds_mid:
                self.update_score_10_a(hometeam,0,2.0*odds/self.record_const,0,0)
            if -1*score_diff > self.score_mid:
                self.update_diff_10_a(hometeam,3.0*odds/self.record_const,0,0,0,0,0)
            elif -1*score_diff == self.score_tight:
                self.update_diff_10_a(hometeam,0,0,2.0*odds/self.record_const,0,0,0)
            else:
                self.update_diff_10_a(hometeam,0,1.0*odds/self.record_const,0,0,0,0)
        elif score_diff > 0:
            # Away loss.
            if odds >= self.odds_mid:
                self.update_score_10_a(hometeam,0,0,-1.0/odds/self.record_const,0)
            elif odds < self.odds_mid:
                self.update_score_10_a(hometeam,0,0,0,-2.0/odds/self.record_const)
            if score_diff > self.score_mid:
                self.update_diff_10_a(hometeam,0,0,0,-3.0/odds/self.record_const,0,0)
            elif score_diff == self.score_tight:
                self.update_diff_10_a(hometeam,0,0,0,0,0,-2.0/odds/self.record_const)
            else:
                self.update_diff_10_a(hometeam,0,0,0,0,-1.0/odds/self.record_const,0)
def get_score_home_hist(self,awayteam):
    """Summarize the home score-diff history, emphasizing the most recent
    streak of games against *awayteam*.

    Walks the history newest-first: samples from the leading streak vs
    *awayteam* are added at full weight; once an older opponent appears,
    the remaining samples are divided by `dom` (fraction of non-streak
    games). If the newest game is not vs *awayteam* at all, returns the
    plain per-list averages. Empty history yields all 1.0 (neutral).
    :return: [w_w, w_t, w_m, l_w, l_t, l_m] aggregates
    """
    trg = 0      # 1 once a vs-awayteam sample has been seen
    cnt = 0      # games walked so far (from newest)
    cnt_a = 0    # games in the leading vs-awayteam streak
    res = [0.0,0.0,0.0,0.0,0.0,0.0]
    if len(self.score_home_hist) == 0:
        return [1.0,1.0,1.0,1.0,1.0,1.0]
    for ii in reversed(self.score_home_hist):
        cnt += 1
        if ii != awayteam:
            if trg == 0:
                # Newest game is vs a different opponent: fall back to
                # simple averages over the whole window and return early.
                leng = len(self.score_home_hist)
                res[0] = sum(self.score_w_w_h)/leng
                res[1] = sum(self.score_w_t_h)/leng
                res[2] = sum(self.score_w_m_h)/leng
                res[3] = sum(self.score_l_w_h)/leng
                res[4] = sum(self.score_l_t_h)/leng
                res[5] = sum(self.score_l_m_h)/leng
                return res
            else:
                # Down-weight games outside the leading streak.
                dom = (len(self.score_home_hist)-cnt_a)*1.0/len(self.score_home_hist)
                res[0] += self.score_w_w_h[len(self.score_home_hist)-cnt]/dom
                res[1] += self.score_w_t_h[len(self.score_home_hist)-cnt]/dom
                res[2] += self.score_w_m_h[len(self.score_home_hist)-cnt]/dom
                res[3] += self.score_l_w_h[len(self.score_home_hist)-cnt]/dom
                res[4] += self.score_l_t_h[len(self.score_home_hist)-cnt]/dom
                res[5] += self.score_l_m_h[len(self.score_home_hist)-cnt]/dom
        else:
            # Game in the leading streak vs awayteam: full weight.
            cnt_a += 1
            trg = 1
            res[0] += self.score_w_w_h[len(self.score_home_hist)-cnt]
            res[1] += self.score_w_t_h[len(self.score_home_hist)-cnt]
            res[2] += self.score_w_m_h[len(self.score_home_hist)-cnt]
            res[3] += self.score_l_w_h[len(self.score_home_hist)-cnt]
            res[4] += self.score_l_t_h[len(self.score_home_hist)-cnt]
            res[5] += self.score_l_m_h[len(self.score_home_hist)-cnt]
    return res
def get_score_away_hist(self,hometeam):
    """Away-side counterpart of get_score_home_hist; see that method for
    the streak-weighting scheme.

    :return: [w_w, w_t, w_m, l_w, l_t, l_m] aggregates (all 1.0 when the
        away history is empty)
    """
    trg = 0
    cnt = 0
    cnt_a = 0
    res = [0.0,0.0,0.0,0.0,0.0,0.0]
    if len(self.score_away_hist) == 0:
        return [1.0,1.0,1.0,1.0,1.0,1.0]
    for ii in reversed(self.score_away_hist):
        cnt += 1
        if ii != hometeam:
            if trg == 0:
                # No leading streak vs hometeam: plain averages.
                leng = len(self.score_away_hist)
                res[0] = sum(self.score_w_w_a)/leng
                res[1] = sum(self.score_w_t_a)/leng
                res[2] = sum(self.score_w_m_a)/leng
                res[3] = sum(self.score_l_w_a)/leng
                res[4] = sum(self.score_l_t_a)/leng
                res[5] = sum(self.score_l_m_a)/leng
                return res
            else:
                dom = (len(self.score_away_hist)-cnt_a)*1.0/len(self.score_away_hist)
                res[0] += self.score_w_w_a[len(self.score_away_hist)-cnt]/dom
                res[1] += self.score_w_t_a[len(self.score_away_hist)-cnt]/dom
                res[2] += self.score_w_m_a[len(self.score_away_hist)-cnt]/dom
                res[3] += self.score_l_w_a[len(self.score_away_hist)-cnt]/dom
                res[4] += self.score_l_t_a[len(self.score_away_hist)-cnt]/dom
                res[5] += self.score_l_m_a[len(self.score_away_hist)-cnt]/dom
        else:
            cnt_a += 1
            trg = 1
            res[0] += self.score_w_w_a[len(self.score_away_hist)-cnt]
            res[1] += self.score_w_t_a[len(self.score_away_hist)-cnt]
            res[2] += self.score_w_m_a[len(self.score_away_hist)-cnt]
            res[3] += self.score_l_w_a[len(self.score_away_hist)-cnt]
            res[4] += self.score_l_t_a[len(self.score_away_hist)-cnt]
            res[5] += self.score_l_m_a[len(self.score_away_hist)-cnt]
    return res
def get_odds_home_hist(self,awayteam):
    """Summarize the home odds history with the same streak-weighting as
    get_score_home_hist, over the four win/lose odds buffers.

    :return: [win_j, win_y, lose_j, lose_y] aggregates (all 1.0 when empty)
    """
    trg = 0
    cnt = 0
    cnt_a = 0
    res = [0.0,0.0,0.0,0.0]
    if len(self.odds_home_hist) == 0:
        return [1.0,1.0,1.0,1.0]
    for ii in reversed(self.odds_home_hist):
        cnt += 1
        if ii != awayteam:
            if trg == 0:
                # No leading streak vs awayteam: plain averages.
                leng = len(self.odds_home_hist)
                res[0] = sum(self.win_j_h)/leng
                res[1] = sum(self.win_y_h)/leng
                res[2] = sum(self.lose_j_h)/leng
                res[3] = sum(self.lose_y_h)/leng
                return res
            else:
                dom = (len(self.odds_home_hist)-cnt_a)*1.0/len(self.odds_home_hist)
                res[0] += self.win_j_h[len(self.odds_home_hist)-cnt]/dom
                res[1] += self.win_y_h[len(self.odds_home_hist)-cnt]/dom
                res[2] += self.lose_j_h[len(self.odds_home_hist)-cnt]/dom
                res[3] += self.lose_y_h[len(self.odds_home_hist)-cnt]/dom
        else:
            cnt_a += 1
            trg = 1
            res[0] += self.win_j_h[len(self.odds_home_hist)-cnt]
            res[1] += self.win_y_h[len(self.odds_home_hist)-cnt]
            res[2] += self.lose_j_h[len(self.odds_home_hist)-cnt]
            res[3] += self.lose_y_h[len(self.odds_home_hist)-cnt]
    return res
def get_odds_away_hist(self,hometeam):
    """Away-side counterpart of get_odds_home_hist; same streak-weighting.

    :return: [win_j, win_y, lose_j, lose_y] aggregates (all 1.0 when empty)
    """
    trg = 0
    cnt = 0
    cnt_a = 0
    res = [0.0,0.0,0.0,0.0]
    if len(self.odds_away_hist) == 0:
        return [1.0,1.0,1.0,1.0]
    for ii in reversed(self.odds_away_hist):
        cnt += 1
        if ii != hometeam:
            if trg == 0:
                # No leading streak vs hometeam: plain averages.
                leng = len(self.odds_away_hist)
                res[0] = sum(self.win_j_a)/leng
                res[1] = sum(self.win_y_a)/leng
                res[2] = sum(self.lose_j_a)/leng
                res[3] = sum(self.lose_y_a)/leng
                return res
            else:
                # dom = 1.0/len(self.odds_away_hist)
                dom = (len(self.odds_away_hist)-cnt_a)*1.0/len(self.odds_away_hist)
                res[0] += self.win_j_a[len(self.odds_away_hist)-cnt]/dom
                res[1] += self.win_y_a[len(self.odds_away_hist)-cnt]/dom
                res[2] += self.lose_j_a[len(self.odds_away_hist)-cnt]/dom
                res[3] += self.lose_y_a[len(self.odds_away_hist)-cnt]/dom
        else:
            cnt_a += 1
            trg = 1
            res[0] += self.win_j_a[len(self.odds_away_hist)-cnt]
            res[1] += self.win_y_a[len(self.odds_away_hist)-cnt]
            res[2] += self.lose_j_a[len(self.odds_away_hist)-cnt]
            res[3] += self.lose_y_a[len(self.odds_away_hist)-cnt]
    return res
def get_Name(self):
    # Accessor for the team's display name.
    return self.Name
class Team2():
    # Earlier revision of Team3 without the chromosome/game-list machinery;
    # shares the same rolling-window constants and thresholds.
    Name=""
    odds_record_interval = 10
    record_const = 10       # rolling-window length for all history buffers
    score_mid = 3           # margin above which a win counts as "big"
    score_tight = 1         # margin that counts as a one-run game
    odds_mid = 1.92
    odds_low = 1.60
    odds_high= 2.55
def __init__(self,name):
self.Name = name
self.score_w_w_h = []
self.score_w_w_a = []
self.score_w_t_h = []
self.score_w_t_a = []
self.score_w_m_h = []
self.score_w_m_a = []
self.score_l_w_h = []
self.score_l_w_a = []
self.score_l_t_h = []
self.score_l_t_a = []
self.score_l_m_h = []
self.score_l_m_a = []
self.win_j_h = []
self.win_y_h = []
self.lose_j_h = []
self.lose_y_h = []
self.win_j_a = []
self.win_y_a = []
self.lose_j_a = []
self.lose_y_a = []
self.odds_record = []
self.score_home_hist = []
self.score_away_hist = []
self.odds_home_hist = []
self.odds_away_hist = []
def initial(self):
self.score_w_w_h = []
self.score_w_w_a = []
self.score_w_t_h = []
self.score_w_t_a = []
self.score_w_m_h = []
self.score_w_m_a = []
self.score_l_w_h = []
self.score_l_w_a = []
self.score_l_t_h = []
self.score_l_t_a = []
self.score_l_m_h = []
self.score_l_m_a = []
self.win_j_h = []
self.win_y_h = []
self.lose_j_h = []
self.lose_y_h = []
self.win_j_a = []
self.win_y_a = []
self.lose_j_a = []
self.lose_y_a = []
self.odds_record = []
self.score_home_hist = []
self.score_away_hist = []
self.odds_home_hist = []
self.odds_away_hist = []
def update_diff_10_a(self,hometeam,score_w_w,score_w_m,score_w_t,score_l_w,score_l_m,score_l_t):
self.score_away_hist.append(hometeam)
self.score_w_w_a.append(score_w_w)
self.score_w_t_a.append(score_w_t)
self.score_l_w_a.append(score_l_w)
self.score_l_t_a.append(score_l_t)
self.score_w_m_a.append(score_w_m)
self.score_l_m_a.append(score_l_m)
if len(self.score_away_hist) > self.record_const:
self.score_away_hist.remove(self.score_away_hist[0])
if len(self.score_w_w_a) > self.record_const:
self.score_w_w_a.remove(self.score_w_w_a[0])
if len(self.score_w_t_a) > self.record_const:
self.score_w_t_a.remove(self.score_w_t_a[0])
if len(self.score_l_w_a) > self.record_const:
self.score_l_w_a.remove(self.score_l_w_a[0])
if len(self.score_l_t_a) > self.record_const:
self.score_l_t_a.remove(self.score_l_t_a[0])
if len(self.score_w_m_a) > self.record_const:
self.score_w_m_a.remove(self.score_w_m_a[0])
if len(self.score_l_m_a) > self.record_const:
self.score_l_m_a.remove(self.score_l_m_a[0])
def update_diff_10_h(self,awayteam,score_w_w,score_w_m,score_w_t,score_l_w,score_l_m,score_l_t):
self.score_home_hist.append(awayteam)
self.score_w_w_h.append(score_w_w)
self.score_w_t_h.append(score_w_t)
self.score_l_w_h.append(score_l_w)
self.score_l_t_h.append(score_l_t)
self.score_w_m_h.append(score_w_m)
self.score_l_m_h.append(score_l_m)
if len(self.score_home_hist) > self.record_const:
self.score_home_hist.remove(self.score_home_hist[0])
if len(self.score_w_w_h) > self.record_const:
self.score_w_w_h.remove(self.score_w_w_h[0])
if len(self.score_w_t_h) > self.record_const:
self.score_w_t_h.remove(self.score_w_t_h[0])
if len(self.score_l_w_h) > self.record_const:
self.score_l_w_h.remove(self.score_l_w_h[0])
if len(self.score_l_t_h) > self.record_const:
self.score_l_t_h.remove(self.score_l_t_h[0])
if len(self.score_w_m_h) > self.record_const:
self.score_w_m_h.remove(self.score_w_m_h[0])
if len(self.score_l_m_h) > self.record_const:
self.score_l_m_h.remove(self.score_l_m_h[0])
def update_score_10_a(self,hometeam,win_j_10,win_y_10,lose_j_10,lose_y_10):
self.odds_away_hist.append(hometeam)
self.win_j_a.append(win_j_10)
self.win_y_a.append(win_y_10)
self.lose_j_a.append(lose_j_10)
self.lose_y_a.append(lose_y_10)
if len(self.odds_away_hist)>self.record_const:
self.odds_away_hist.remove(self.odds_away_hist[0])
if len(self.win_j_a) > self.record_const:
self.win_j_a.remove(self.win_j_a[0])
if len(self.win_y_a) > self.record_const:
self.win_y_a.remove(self.win_y_a[0])
if len(self.lose_j_a) > self.record_const:
self.lose_j_a.remove(self.lose_j_a[0])
if len(self.lose_y_a) > self.record_const:
self.lose_y_a.remove(self.lose_y_a[0])
def update_score_10_h(self,awayteam,win_j_10,win_y_10,lose_j_10,lose_y_10):
self.odds_home_hist.append(awayteam)
self.win_j_h.append(win_j_10)
self.win_y_h.append(win_y_10)
self.lose_j_h.append(lose_j_10)
self.lose_y_h.append(lose_y_10)
if len(self.odds_home_hist)>self.record_const:
self.odds_home_hist.remove(self.odds_home_hist[0])
if len(self.win_j_h) > self.record_const:
self.win_j_h.remove(self.win_j_h[0])
if len(self.win_y_h) > self.record_const:
self.win_y_h.remove(self.win_y_h[0])
if len(self.lose_j_h) > self.record_const:
self.lose_j_h.remove(self.lose_j_h[0])
if len(self.lose_y_h) > self.record_const:
self.lose_y_h.remove(self.lose_y_h[0])
def update_score(self,hometeam,awayteam,score_diff,odds,home_away):
if(home_away):
if score_diff > 0:
if odds <= self.odds_mid:
self.update_score_10_h(awayteam,score_diff*odds/self.record_const,0,0,0)
elif odds > self.odds_mid:
self.update_score_10_h(awayteam,0,score_diff*odds/self.record_const,0,0)
if score_diff > self.score_mid:
self.update_diff_10_h(awayteam,score_diff*odds/self.record_const,0,0,0,0,0)
elif score_diff == self.score_tight:
self.update_diff_10_h(awayteam,0,0,score_diff*odds/self.record_const,0,0,0)
else:
self.update_diff_10_h(awayteam,0,score_diff*odds/self.record_const,0,0,0,0)
elif score_diff < 0:
if odds >= self.odds_mid:
self.update_score_10_h(awayteam,0,0,score_diff/odds/self.record_const,0)
elif odds < self.odds_mid:
self.update_score_10_h(awayteam,0,0,0,score_diff/odds/self.record_const)
if -1*score_diff > self.score_mid:
self.update_diff_10_h(awayteam,0,0,0,score_diff/odds/self.record_const,0,0)
elif -1*score_diff == self.score_tight:
self.update_diff_10_h(awayteam,0,0,0,0,0,score_diff/odds/self.record_const)
else:
self.update_diff_10_h(awayteam,0,0,0,0,score_diff/odds/self.record_const,0)
else:
if score_diff < 0:
if odds <= self.odds_mid:
self.update_score_10_a(hometeam,-1*score_diff*odds/self.record_const,0,0,0)
elif odds > self.odds_mid:
self.update_score_10_a(hometeam,0,-1*score_diff*odds/self.record_const,0,0)
if -1*score_diff > self.score_mid:
self.update_diff_10_a(hometeam,-1*score_diff*odds/self.record_const,0,0,0,0,0)
elif -1*score_diff == self.score_tight:
self.update_diff_10_a(hometeam,0,0,-1*score_diff*odds/self.record_const,0,0,0)
else:
self.update_diff_10_a(hometeam,0,-1*score_diff*odds/self.record_const,0,0,0,0)
elif score_diff > 0:
if odds >= self.odds_mid:
self.update_score_10_a(hometeam,0,0,-1*score_diff/odds/self.record_const,0)
elif odds < self.odds_mid:
self.update_score_10_a(hometeam,0,0,0,-1*score_diff/odds/self.record_const)
if score_diff > self.score_mid:
self.update_diff_10_a(hometeam,0,0,0,-1*score_diff/odds/self.record_const,0,0)
elif score_diff == self.score_tight:
self.update_diff_10_a(hometeam,0,0,0,0,0,-1*score_diff/odds/self.record_const)
else:
self.update_diff_10_a(hometeam,0,0,0,0,-1*score_diff/odds/self.record_const,0)
def get_score_home_hist(self,awayteam):
trg = 0
cnt = 0
cnt_a = 0
res = [0.0,0.0,0.0,0.0,0.0,0.0]
if len(self.score_home_hist) == 0:
return [1.0,1.0,1.0,1.0,1.0,1.0]
for ii in reversed(self.score_home_hist):
cnt += 1
if ii != awayteam:
if trg == 0:
leng = len(self.score_home_hist)
res[0] = sum(self.score_w_w_h)/leng
res[1] = sum(self.score_w_t_h)/leng
res[2] = sum(self.score_w_m_h)/leng
res[3] = sum(self.score_l_w_h)/leng
res[4] = sum(self.score_l_t_h)/leng
res[5] = sum(self.score_l_m_h)/leng
return res
else:
dom = (len(self.score_home_hist)-cnt_a)*1.0/len(self.score_home_hist)
res[0] += self.score_w_w_h[len(self.score_home_hist)-cnt]/dom
res[1] += self.score_w_t_h[len(self.score_home_hist)-cnt]/dom
res[2] += self.score_w_m_h[len(self.score_home_hist)-cnt]/dom
res[3] += self.score_l_w_h[len(self.score_home_hist)-cnt]/dom
res[4] += self.score_l_t_h[len(self.score_home_hist)-cnt]/dom
res[5] += self.score_l_m_h[len(self.score_home_hist)-cnt]/dom
else:
cnt_a += 1
trg = 1
res[0] += self.score_w_w_h[len(self.score_home_hist)-cnt]
res[1] += self.score_w_t_h[len(self.score_home_hist)-cnt]
res[2] += self.score_w_m_h[len(self.score_home_hist)-cnt]
res[3] += self.score_l_w_h[len(self.score_home_hist)-cnt]
res[4] += self.score_l_t_h[len(self.score_home_hist)-cnt]
res[5] += self.score_l_m_h[len(self.score_home_hist)-cnt]
return res
def get_score_away_hist(self,hometeam):
trg = 0
cnt = 0
cnt_a = 0
res = [0.0,0.0,0.0,0.0,0.0,0.0]
if len(self.score_away_hist) == 0:
return [1.0,1.0,1.0,1.0,1.0,1.0]
for ii in reversed(self.score_away_hist):
cnt += 1
if ii != hometeam:
if trg == 0:
leng = len(self.score_away_hist)
res[0] = sum(self.score_w_w_a)/leng
res[1] = sum(self.score_w_t_a)/leng
res[2] = sum(self.score_w_m_a)/leng
res[3] = sum(self.score_l_w_a)/leng
res[4] = sum(self.score_l_t_a)/leng
res[5] = sum(self.score_l_m_a)/leng
return res
else:
dom = (len(self.score_away_hist)-cnt_a)*1.0/len(self.score_away_hist)
res[0] += self.score_w_w_a[len(self.score_away_hist)-cnt]/dom
res[1] += self.score_w_t_a[len(self.score_away_hist)-cnt]/dom
res[2] += self.score_w_m_a[len(self.score_away_hist)-cnt]/dom
res[3] += self.score_l_w_a[len(self.score_away_hist)-cnt]/dom
res[4] += self.score_l_t_a[len(self.score_away_hist)-cnt]/dom
res[5] += self.score_l_m_a[len(self.score_away_hist)-cnt]/dom
else:
cnt_a += 1
trg = 1
res[0] += self.score_w_w_a[len(self.score_away_hist)-cnt]
res[1] += self.score_w_t_a[len(self.score_away_hist)-cnt]
res[2] += self.score_w_m_a[len(self.score_away_hist)-cnt]
res[3] += self.score_l_w_a[len(self.score_away_hist)-cnt]
res[4] += self.score_l_t_a[len(self.score_away_hist)-cnt]
res[5] += self.score_l_m_a[len(self.score_away_hist)-cnt]
return res
def get_odds_home_hist(self,awayteam):
trg = 0
cnt = 0
cnt_a = 0
res = [0.0,0.0,0.0,0.0]
if len(self.odds_home_hist) == 0:
return [1.0,1.0,1.0,1.0]
for ii in reversed(self.odds_home_hist):
cnt += 1
if ii != awayteam:
if trg == 0:
leng = len(self.odds_home_hist)
res[0] = sum(self.win_j_h)/leng
res[1] = sum(self.win_y_h)/leng
res[2] = sum(self.lose_j_h)/leng
res[3] = sum(self.lose_y_h)/leng
return res
else:
dom = (len(self.odds_home_hist)-cnt_a)*1.0/len(self.odds_home_hist)
res[0] += self.win_j_h[len(self.odds_home_hist)-cnt]/dom
res[1] += self.win_y_h[len(self.odds_home_hist)-cnt]/dom
res[2] += self.lose_j_h[len(self.odds_home_hist)-cnt]/dom
res[3] += self.lose_y_h[len(self.odds_home_hist)-cnt]/dom
else:
cnt_a += 1
trg = 1
res[0] += self.win_j_h[len(self.odds_home_hist)-cnt]
res[1] += self.win_y_h[len(self.odds_home_hist)-cnt]
res[2] += self.lose_j_h[len(self.odds_home_hist)-cnt]
res[3] += self.lose_y_h[len(self.odds_home_hist)-cnt]
return res
def get_odds_away_hist(self,hometeam):
trg = 0
cnt = 0
cnt_a = 0
res = [0.0,0.0,0.0,0.0]
if len(self.odds_away_hist) == 0:
return [1.0,1.0,1.0,1.0]
for ii in reversed(self.odds_away_hist):
cnt += 1
if ii != hometeam:
if trg == 0:
leng = len(self.odds_away_hist)
res[0] = sum(self.win_j_a)/leng
res[1] = sum(self.win_y_a)/leng
res[2] = sum(self.lose_j_a)/leng
res[3] = sum(self.lose_y_a)/leng
return res
else:
# dom = 1.0/len(self.odds_away_hist)
dom = (len(self.odds_away_hist)-cnt_a)*1.0/len(self.odds_away_hist)
res[0] += self.win_j_a[len(self.odds_away_hist)-cnt]/dom
res[1] += self.win_y_a[len(self.odds_away_hist)-cnt]/dom
res[2] += self.lose_j_a[len(self.odds_away_hist)-cnt]/dom
res[3] += self.lose_y_a[len(self.odds_away_hist)-cnt]/dom
else:
cnt_a += 1
trg = 1
res[0] += self.win_j_a[len(self.odds_away_hist)-cnt]
res[1] += self.win_y_a[len(self.odds_away_hist)-cnt]
res[2] += self.lose_j_a[len(self.odds_away_hist)-cnt]
res[3] += self.lose_y_a[len(self.odds_away_hist)-cnt]
return res
def get_Name(self):
return self.Name
class Team():
    """Rolling per-team record of recent MLB results, split home/away.

    For the last ``record_const`` games, keeps fractional win/loss
    contributions bucketed by odds level (the ``win_j_*``/``win_y_*`` and
    ``lose_j_*``/``lose_y_*`` lists) and by score margin (the ``score_*``
    lists; ``w``/``m``/``t`` appear to mean wide/mid/tight margins -- TODO
    confirm naming against callers).  The ``get_*`` accessors return the
    sum over the current window.

    Bug fixed in this revision: ``get_win_y`` summed ``win_j_a`` instead of
    ``win_y_a`` in its away branch (copy-paste slip).
    """
    # Class-level defaults; per-instance state is built in __init__/initial().
    Name=""
    odds_record_interval = 10  # not referenced inside this class
    record_const = 10          # rolling-window length (number of games kept)
    score_mid = 3              # margins above this count as "wide"
    score_tight = 1            # margins equal to this count as "tight"
    odds_mid = 1.92            # odds threshold splitting the j/y buckets
    odds_low = 1.60            # not referenced inside this class
    odds_high= 2.55            # not referenced inside this class

    def __init__(self, name):
        """Create an empty record set for team *name*."""
        self.Name = name
        self._reset()

    def initial(self):
        """Reset every rolling history to empty (public API, kept as-is)."""
        self._reset()

    def _reset(self):
        """(Re)create all rolling-history lists as empty."""
        # Margin buckets: score_<w:win/l:loss>_<w:wide/t:tight/m:mid>_<h/a>
        self.score_w_w_h = []
        self.score_w_w_a = []
        self.score_w_t_h = []
        self.score_w_t_a = []
        self.score_w_m_h = []
        self.score_w_m_a = []
        self.score_l_w_h = []
        self.score_l_w_a = []
        self.score_l_t_h = []
        self.score_l_t_a = []
        self.score_l_m_h = []
        self.score_l_m_a = []
        # Odds buckets: <win/lose>_<j:low odds/y:high odds>_<h/a>
        self.win_j_h = []
        self.win_y_h = []
        self.lose_j_h = []
        self.lose_y_h = []
        self.win_j_a = []
        self.win_y_a = []
        self.lose_j_a = []
        self.lose_y_a = []
        self.odds_record = []

    def _trim(self, *histories):
        """Drop the oldest entry of any history longer than the window."""
        for hist in histories:
            if len(hist) > self.record_const:
                del hist[0]

    def update_diff_10_a(self, score_w_w, score_w_m, score_w_t, score_l_w, score_l_m, score_l_t):
        """Record one away-game margin entry and trim to the window."""
        self.score_w_w_a.append(score_w_w)
        self.score_w_t_a.append(score_w_t)
        self.score_l_w_a.append(score_l_w)
        self.score_l_t_a.append(score_l_t)
        self.score_w_m_a.append(score_w_m)
        self.score_l_m_a.append(score_l_m)
        self._trim(self.score_w_w_a, self.score_w_t_a, self.score_l_w_a,
                   self.score_l_t_a, self.score_w_m_a, self.score_l_m_a)

    def update_diff_10_h(self, score_w_w, score_w_m, score_w_t, score_l_w, score_l_m, score_l_t):
        """Record one home-game margin entry and trim to the window."""
        self.score_w_w_h.append(score_w_w)
        self.score_w_t_h.append(score_w_t)
        self.score_l_w_h.append(score_l_w)
        self.score_l_t_h.append(score_l_t)
        self.score_w_m_h.append(score_w_m)
        self.score_l_m_h.append(score_l_m)
        self._trim(self.score_w_w_h, self.score_w_t_h, self.score_l_w_h,
                   self.score_l_t_h, self.score_w_m_h, self.score_l_m_h)

    def update_score_10_a(self, win_j_10, win_y_10, lose_j_10, lose_y_10):
        """Record one away-game odds-bucket entry and trim to the window."""
        self.win_j_a.append(win_j_10)
        self.win_y_a.append(win_y_10)
        self.lose_j_a.append(lose_j_10)
        self.lose_y_a.append(lose_y_10)
        self._trim(self.win_j_a, self.win_y_a, self.lose_j_a, self.lose_y_a)

    def update_score_10_h(self, win_j_10, win_y_10, lose_j_10, lose_y_10):
        """Record one home-game odds-bucket entry and trim to the window."""
        self.win_j_h.append(win_j_10)
        self.win_y_h.append(win_y_10)
        self.lose_j_h.append(lose_j_10)
        self.lose_y_h.append(lose_y_10)
        self._trim(self.win_j_h, self.win_y_h, self.lose_j_h, self.lose_y_h)

    def update_score(self, score_diff, odds, home_away):
        """Record the outcome of one game.

        *score_diff* is home score minus away score and *odds* the decimal
        odds.  Home results are stored as weights of +1/record_const, away
        results as -1/record_const (sign convention preserved from the
        original design).
        """
        if home_away:
            if score_diff > 0:
                # Home win: odds bucket (j = at/below odds_mid, y = above).
                if odds <= self.odds_mid:
                    self.update_score_10_h(1.0/self.record_const, 0, 0, 0)
                elif odds > self.odds_mid:
                    self.update_score_10_h(0, 1.0/self.record_const, 0, 0)
                # Margin bucket: wide / tight / mid.
                if score_diff > self.score_mid:
                    self.update_diff_10_h(1.0/self.record_const, 0, 0, 0, 0, 0)
                elif score_diff == self.score_tight:
                    self.update_diff_10_h(0, 0, 1.0/self.record_const, 0, 0, 0)
                else:
                    self.update_diff_10_h(0, 1.0/self.record_const, 0, 0, 0, 0)
            elif score_diff < 0:
                # Home loss (note the odds threshold flips to >= here).
                if odds >= self.odds_mid:
                    self.update_score_10_h(0, 0, 1.0/self.record_const, 0)
                elif odds < self.odds_mid:
                    self.update_score_10_h(0, 0, 0, 1.0/self.record_const)
                if -1*score_diff > self.score_mid:
                    self.update_diff_10_h(0, 0, 0, 1.0/self.record_const, 0, 0)
                elif -1*score_diff == self.score_tight:
                    self.update_diff_10_h(0, 0, 0, 0, 0, 1.0/self.record_const)
                else:
                    self.update_diff_10_h(0, 0, 0, 0, 1.0/self.record_const, 0)
        else:
            if score_diff < 0:
                # Away win (recorded with negative weight).
                if odds <= self.odds_mid:
                    self.update_score_10_a(-1.0/self.record_const, 0, 0, 0)
                elif odds > self.odds_mid:
                    self.update_score_10_a(0, -1.0/self.record_const, 0, 0)
                if -1*score_diff > self.score_mid:
                    self.update_diff_10_a(-1.0/self.record_const, 0, 0, 0, 0, 0)
                elif -1*score_diff == self.score_tight:
                    self.update_diff_10_a(0, 0, -1.0/self.record_const, 0, 0, 0)
                else:
                    self.update_diff_10_a(0, -1.0/self.record_const, 0, 0, 0, 0)
            elif score_diff > 0:
                # Away loss.
                if odds >= self.odds_mid:
                    self.update_score_10_a(0, 0, -1.0/self.record_const, 0)
                elif odds < self.odds_mid:
                    self.update_score_10_a(0, 0, 0, -1.0/self.record_const)
                if score_diff > self.score_mid:
                    self.update_diff_10_a(0, 0, 0, -1.0/self.record_const, 0, 0)
                elif score_diff == self.score_tight:
                    self.update_diff_10_a(0, 0, 0, 0, 0, -1.0/self.record_const)
                else:
                    self.update_diff_10_a(0, 0, 0, 0, -1.0/self.record_const, 0)

    def get_score_w_w(self, home_away):
        """Sum of wide-margin win weights (home if *home_away* is truthy)."""
        return sum(self.score_w_w_h) if home_away else sum(self.score_w_w_a)

    def get_score_w_t(self, home_away):
        """Sum of tight-margin win weights."""
        return sum(self.score_w_t_h) if home_away else sum(self.score_w_t_a)

    def get_score_l_w(self, home_away):
        """Sum of wide-margin loss weights."""
        return sum(self.score_l_w_h) if home_away else sum(self.score_l_w_a)

    def get_score_l_t(self, home_away):
        """Sum of tight-margin loss weights."""
        return sum(self.score_l_t_h) if home_away else sum(self.score_l_t_a)

    def get_score_w_m(self, home_away):
        """Sum of mid-margin win weights."""
        return sum(self.score_w_m_h) if home_away else sum(self.score_w_m_a)

    def get_score_l_m(self, home_away):
        """Sum of mid-margin loss weights."""
        return sum(self.score_l_m_h) if home_away else sum(self.score_l_m_a)

    def get_win_j(self, home_away):
        """Sum of low-odds win weights."""
        return sum(self.win_j_h) if home_away else sum(self.win_j_a)

    def get_win_y(self, home_away):
        """Sum of high-odds win weights.

        Bug fix: the away branch previously returned ``sum(self.win_j_a)``
        (low-odds list) instead of ``sum(self.win_y_a)``.
        """
        return sum(self.win_y_h) if home_away else sum(self.win_y_a)

    def get_lose_j(self, home_away):
        """Sum of low-odds loss weights."""
        return sum(self.lose_j_h) if home_away else sum(self.lose_j_a)

    def get_lose_y(self, home_away):
        """Sum of high-odds loss weights."""
        return sum(self.lose_y_h) if home_away else sum(self.lose_y_a)

    def get_Name(self):
        """Return the name of this team."""
        return self.Name
################################
class Bank():
    """Simple bankroll tracker for the betting simulation."""
    # Class-level defaults; overridden per instance in __init__.
    Name = "Jaemin"
    Budjet = 1000.0   # bankroll ("Budjet" spelling kept for API compatibility)
    Winmoney = 10.0   # target amount to win per bet

    def __init__(self, name, money):
        """Open an account for *name* holding *money*."""
        self.Name = name
        self.Budjet = money

    def set_Winmoney(self, money):
        """Set the per-bet win target."""
        self.Winmoney = money

    def get_Winmoney(self):
        """Return the per-bet win target."""
        return self.Winmoney

    def get_Budjet(self):
        """Return the current bankroll."""
        return self.Budjet

    def print_name(self):
        """Print the account holder's name."""
        print(self.Name)

    def print_Budjet(self):
        """Print the current bankroll."""
        print(self.Budjet)

    def payout(self, bet_money):
        """Deduct a placed stake from the bankroll."""
        self.Budjet = self.Budjet - bet_money

    def buyin(self, money_in):
        """Add collected winnings to the bankroll."""
        self.Budjet = self.Budjet + money_in
###############################
def Gene_File_Parser2(MLB_File_List):
    """Parse a raw MLB results file into per-game records (Team2 variant).

    Each usable line yields
    ``[home_idx, away_idx, odds_h, odds_a, score_h, score_a]`` where the
    indices are positions in the fixed 30-team list below.  Lines with a
    form feed, "canc" (cancelled) or "post" (postponed), lines whose team
    names are not both recognised, and lines whose odds cannot be parsed
    are skipped.

    Fixes vs. the previous version: the input file is closed via a context
    manager; the fractional-odds branch no longer raises
    NameError/ZeroDivisionError on malformed odds (such lines are now
    skipped); the 30 throwaway ``Team2`` objects built only for name
    comparison are replaced by a plain name list.
    """
    # Fixed team order; the emitted indices are positions in this list.
    team_names = [
        "Baltimore Orioles", "Boston Red Sox", "Chicago White Sox",
        "Cleveland Indians", "Detroit Tigers", "Houston Astros",
        "Kansas City Royals", "Los Angeles Angels", "Minnesota Twins",
        "New York Yankees", "Oakland Athletics", "Seattle Mariners",
        "Tampa Bay Rays", "Texas Rangers", "Toronto Blue Jays",
        "Arizona Diamondbacks", "Atlanta Braves", "Chicago Cubs",
        "Cincinnati Reds", "Colorado Rockies", "Los Angeles Dodgers",
        "Miami Marlins", "Milwaukee Brewers", "New York Mets",
        "Philadelphia Phillies", "Pittsburgh Pirates", "San Diego Padres",
        "San Francisco Giants", "St.Louis Cardinals", "Washington Nationals",
    ]
    MLB_List_Out = []
    with open(MLB_File_List, 'r') as fin:
        for fin_line in fin:
            # Skip page breaks and cancelled/postponed games.
            if fin_line.find('\014') != -1:
                continue
            if fin_line.find("canc") != -1 or fin_line.find("post") != -1:
                continue
            # Positional anchors in the fixed-format line.
            dash = fin_line.find("-")          # separator between team names
            endname = max(fin_line.rfind("s"), fin_line.rfind("x"),
                          fin_line.rfind("p"))  # last letter of the away name
            endcolon = fin_line.rfind(":")     # scores sit around this colon
            score_h = fin_line[endcolon - 2:endcolon]
            score_a = fin_line[endcolon + 1:endcolon + 3]
            # Odds appear in decimal, American (+/-) or fractional form.
            odds_h = None
            odds_a = None
            if fin_line.count(".") > 1:
                # Decimal odds, e.g. " 1.85 2.10" at the end of the line.
                enddot = fin_line.rfind(".")
                odds_h = float(fin_line[enddot - 7:enddot - 2])
                odds_a = float(fin_line[enddot - 2:enddot + 3])
            elif fin_line.count("+") + fin_line.count("-") > 1:
                # American odds, e.g. "+150 -120"; convert to decimal odds.
                endpl = max(fin_line.rfind("+"), fin_line.rfind("-"))
                if fin_line[endpl - 5] == '+':
                    odds_h = 1.0 + int(fin_line[endpl - 4:endpl - 1]) / 100.0
                elif fin_line[endpl - 5] == '-':
                    odds_h = 1.0 + 100.0 / int(fin_line[endpl - 4:endpl - 1])
                if fin_line[endpl] == '+':
                    odds_a = 1.0 + int(fin_line[endpl + 1:endpl + 4]) / 100.0
                elif fin_line[endpl] == '-':
                    odds_a = 1.0 + 100.0 / int(fin_line[endpl + 1:endpl + 4])
            else:
                # Fractional odds around "/"; digit groups may be 1-3 wide.
                # ("denom"/"nom" naming is historical and actually reversed.)
                slash = fin_line.find("/")
                endslash = fin_line.rfind("/")
                odds_h_denom = odds_h_nom = 0
                odds_a_denom = odds_a_nom = 0  # previously uninitialised -> NameError
                for width in range(3):
                    odds_h_t = fin_line[slash - 1 - width:slash]
                    odds_h_tt = fin_line[slash + 1:slash + 2 + width]
                    odds_a_t = fin_line[endslash - 1 - width:endslash]
                    odds_a_tt = fin_line[endslash + 1:endslash + 2 + width]
                    if odds_h_t.isdigit():
                        odds_h_denom = odds_h_t
                    if odds_h_tt.isdigit():
                        odds_h_nom = odds_h_tt
                    if odds_a_t.isdigit():
                        odds_a_denom = odds_a_t
                    if odds_a_tt.isdigit():
                        odds_a_nom = odds_a_tt
                if int(odds_h_nom) != 0 and int(odds_a_nom) != 0:
                    odds_h = 1.0 + int(odds_h_denom) * 1.0 / int(odds_h_nom)
                    odds_a = 1.0 + int(odds_a_denom) * 1.0 / int(odds_a_nom)
            if odds_h is None or odds_a is None:
                continue  # malformed odds; the old code crashed here
            hometeam = fin_line[6:dash - 1]
            awayteam = fin_line[dash + 2:endname + 1]
            # Keep only games where both teams are recognised and distinct
            # (the old linear scan could never match both when equal).
            if hometeam not in team_names or awayteam not in team_names:
                continue
            if hometeam == awayteam:
                continue
            MLB_List_Out.append([team_names.index(hometeam),
                                 team_names.index(awayteam),
                                 odds_h, odds_a,
                                 int(score_h), int(score_a)])
    return MLB_List_Out
def Gene_File_Parser(MLB_File_List):
    """Parse a raw MLB results file into per-game records.

    Each usable line yields
    ``[home_idx, away_idx, odds_h, odds_a, score_h, score_a]`` where the
    indices are positions in the fixed 30-team list below.  Lines with a
    form feed, "canc" (cancelled) or "post" (postponed), lines whose team
    names are not both recognised, and lines whose odds cannot be parsed
    are skipped.

    Fixes vs. the previous version: the input file is closed via a context
    manager; the fractional-odds branch no longer raises
    NameError/ZeroDivisionError on malformed odds (such lines are now
    skipped); the 30 throwaway ``Team`` objects built only for name
    comparison are replaced by a plain name list.
    """
    # Fixed team order; the emitted indices are positions in this list.
    team_names = [
        "Baltimore Orioles", "Boston Red Sox", "Chicago White Sox",
        "Cleveland Indians", "Detroit Tigers", "Houston Astros",
        "Kansas City Royals", "Los Angeles Angels", "Minnesota Twins",
        "New York Yankees", "Oakland Athletics", "Seattle Mariners",
        "Tampa Bay Rays", "Texas Rangers", "Toronto Blue Jays",
        "Arizona Diamondbacks", "Atlanta Braves", "Chicago Cubs",
        "Cincinnati Reds", "Colorado Rockies", "Los Angeles Dodgers",
        "Miami Marlins", "Milwaukee Brewers", "New York Mets",
        "Philadelphia Phillies", "Pittsburgh Pirates", "San Diego Padres",
        "San Francisco Giants", "St.Louis Cardinals", "Washington Nationals",
    ]
    MLB_List_Out = []
    with open(MLB_File_List, 'r') as fin:
        for fin_line in fin:
            # Skip page breaks and cancelled/postponed games.
            if fin_line.find('\014') != -1:
                continue
            if fin_line.find("canc") != -1 or fin_line.find("post") != -1:
                continue
            # Positional anchors in the fixed-format line.
            dash = fin_line.find("-")          # separator between team names
            endname = max(fin_line.rfind("s"), fin_line.rfind("x"),
                          fin_line.rfind("p"))  # last letter of the away name
            endcolon = fin_line.rfind(":")     # scores sit around this colon
            score_h = fin_line[endcolon - 2:endcolon]
            score_a = fin_line[endcolon + 1:endcolon + 3]
            # Odds appear in decimal, American (+/-) or fractional form.
            odds_h = None
            odds_a = None
            if fin_line.count(".") > 1:
                # Decimal odds, e.g. " 1.85 2.10" at the end of the line.
                enddot = fin_line.rfind(".")
                odds_h = float(fin_line[enddot - 7:enddot - 2])
                odds_a = float(fin_line[enddot - 2:enddot + 3])
            elif fin_line.count("+") + fin_line.count("-") > 1:
                # American odds, e.g. "+150 -120"; convert to decimal odds.
                endpl = max(fin_line.rfind("+"), fin_line.rfind("-"))
                if fin_line[endpl - 5] == '+':
                    odds_h = 1.0 + int(fin_line[endpl - 4:endpl - 1]) / 100.0
                elif fin_line[endpl - 5] == '-':
                    odds_h = 1.0 + 100.0 / int(fin_line[endpl - 4:endpl - 1])
                if fin_line[endpl] == '+':
                    odds_a = 1.0 + int(fin_line[endpl + 1:endpl + 4]) / 100.0
                elif fin_line[endpl] == '-':
                    odds_a = 1.0 + 100.0 / int(fin_line[endpl + 1:endpl + 4])
            else:
                # Fractional odds around "/"; digit groups may be 1-3 wide.
                # ("denom"/"nom" naming is historical and actually reversed.)
                slash = fin_line.find("/")
                endslash = fin_line.rfind("/")
                odds_h_denom = odds_h_nom = 0
                odds_a_denom = odds_a_nom = 0  # previously uninitialised -> NameError
                for width in range(3):
                    odds_h_t = fin_line[slash - 1 - width:slash]
                    odds_h_tt = fin_line[slash + 1:slash + 2 + width]
                    odds_a_t = fin_line[endslash - 1 - width:endslash]
                    odds_a_tt = fin_line[endslash + 1:endslash + 2 + width]
                    if odds_h_t.isdigit():
                        odds_h_denom = odds_h_t
                    if odds_h_tt.isdigit():
                        odds_h_nom = odds_h_tt
                    if odds_a_t.isdigit():
                        odds_a_denom = odds_a_t
                    if odds_a_tt.isdigit():
                        odds_a_nom = odds_a_tt
                if int(odds_h_nom) != 0 and int(odds_a_nom) != 0:
                    odds_h = 1.0 + int(odds_h_denom) * 1.0 / int(odds_h_nom)
                    odds_a = 1.0 + int(odds_a_denom) * 1.0 / int(odds_a_nom)
            if odds_h is None or odds_a is None:
                continue  # malformed odds; the old code crashed here
            hometeam = fin_line[6:dash - 1]
            awayteam = fin_line[dash + 2:endname + 1]
            # Keep only games where both teams are recognised and distinct
            # (the old linear scan could never match both when equal).
            if hometeam not in team_names or awayteam not in team_names:
                continue
            if hometeam == awayteam:
                continue
            MLB_List_Out.append([team_names.index(hometeam),
                                 team_names.index(awayteam),
                                 odds_h, odds_a,
                                 int(score_h), int(score_a)])
    return MLB_List_Out
|
992,093 | 64fefc6fa98c69f55bb0c22913e3c071f0561bab | try:
from libKMCUDA import kmeans_cuda
_LIBKMCUDA_FOUND = True
except ModuleNotFoundError:
_LIBKMCUDA_FOUND = False
from functools import partial
import logging
import numpy as np
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
from stratification.cluster.utils import silhouette_samples
__all__ = [
'KMeans',
'GaussianMixture',
'FastKMeans',
'AutoKMixtureModel',
'OverclusterModel',
'DummyClusterer',
]
def get_cluster_sils(data, pred_labels, compute_sil=True, cuda=False):
    """Per-cluster and global mean silhouette scores for a clustering.

    When *compute_sil* is False the silhouette computation is skipped and
    every score is zero (keeps the return shape without the cost).
    Returns ``({cluster_id: mean_silhouette}, global_mean)``.
    """
    if compute_sil:
        sample_scores = silhouette_samples(data, pred_labels, cuda=cuda)
    else:
        sample_scores = np.zeros(len(data))
    per_cluster = {}
    for label in sorted(np.unique(pred_labels)):
        per_cluster[int(label)] = float(np.mean(sample_scores[pred_labels == label]))
    return per_cluster, float(np.mean(sample_scores))
def compute_group_sizes(labels):
    """Return ``{label: count}`` for each distinct label, ascending by label."""
    values, counts = np.unique(labels, return_counts=True)
    return {int(v): int(c) for v, c in zip(values, counts)}
class DummyClusterer:
    """Trivial 'clusterer' that assigns every sample to cluster 0."""

    def __init__(self, **kwargs):
        # Accepts (and ignores) arbitrary kwargs so it can stand in for
        # any real clustering model.
        self.n_components = 1

    def fit(self, X):
        """No-op; returns self to mimic the sklearn estimator API."""
        return self

    def predict(self, X):
        """Assign cluster 0 to every row of *X*."""
        return np.zeros(len(X), dtype=np.int32)
class FastKMeans:
    """GPU-backed K-Means (libKMCUDA) wrapped in the sklearn KMeans API.

    Cluster centers are computed with ``kmeans_cuda``; prediction and
    transformation are delegated to an sklearn ``KMeans`` object loaded
    with those centers.
    """

    def __init__(self, n_clusters, random_state=0, init='k-means++', n_init=10, verbose=False):
        self.k = n_clusters
        self.init = init
        # libKMCUDA has no notion of multiple restarts.
        if n_init > 1:
            logging.warning('n_init unsupported for GPU K-Means')
        self.seed = random_state
        self.verbose = verbose
        self.kmeans_obj = KMeans(n_clusters=n_clusters)

    def fit(self, X):
        """Compute centers on the GPU and load them into the sklearn object."""
        logging.info('Using GPU-accelerated K-Means...')
        gpu_result = kmeans_cuda(
            X.astype(np.float32), clusters=self.k, seed=self.seed, init=self.init
        )
        self.cluster_centers_ = gpu_result[0].astype(np.float32)
        self.kmeans_obj.cluster_centers_ = self.cluster_centers_
        # Newer sklearn versions need _check_params before predict works.
        if hasattr(self.kmeans_obj, '_check_params'):
            self.kmeans_obj._check_params(np.zeros_like(X))  # properly initialize
        return self.kmeans_obj

    def fit_predict(self, X):
        """Fit on *X*, then return the predicted cluster labels for *X*."""
        self.fit(X)
        return self.predict(X)

    def predict(self, X):
        """Return the nearest-center label for each row of *X*."""
        return self.kmeans_obj.predict(X.astype(np.float32))

    def transform(self, X):
        """Return the distance of each row of *X* to every center."""
        return self.kmeans_obj.transform(X.astype(np.float32))
class AutoKMixtureModel:
    """Clustering wrapper that searches over k using mean silhouette score.

    Wraps K-Means (GPU-accelerated when libKMCUDA is available and
    requested) or a Gaussian mixture model, fits models for
    ``k in [2, max_k]`` when ``search`` is on, and keeps the one with the
    best mean per-cluster silhouette.

    Bug fixed in this revision: ``predict_proba`` and ``score`` referenced
    an undefined name ``activ`` instead of their ``X`` parameter.
    """

    def __init__(
        self, cluster_method, max_k, n_init=3, seed=None, sil_cuda=False, verbose=0, search=True
    ):
        """Configure the search; *cluster_method* is 'kmeans' or 'gmm'."""
        if cluster_method == 'kmeans':
            cluster_cls = FastKMeans if (sil_cuda and _LIBKMCUDA_FOUND) else KMeans
            k_name = 'n_clusters'
        elif cluster_method == 'gmm':
            cluster_cls = GaussianMixture
            k_name = 'n_components'
        else:
            raise ValueError('Unsupported clustering method')
        self.cluster_cls = cluster_cls
        self.k_name = k_name  # keyword used by cluster_cls for the k value
        self.search = search
        self.max_k = max_k
        self.n_init = n_init
        self.seed = seed
        self.sil_cuda = sil_cuda
        self.verbose = verbose

    def gen_inner_cluster_obj(self, k):
        """Return a fresh clustering object configured for *k* clusters."""
        return self.cluster_cls(
            **{self.k_name: k}, n_init=self.n_init, random_state=self.seed, verbose=self.verbose
        )

    def fit(self, activ):
        """Fit clusterings over the k range and keep the best one.

        When ``search`` is off (or k_min == max_k), a single model at
        ``max_k`` is fitted without scoring.
        """
        logger = logging.getLogger('harness.cluster')
        best_score = -2  # silhouette scores lie in [-1, 1]
        k_min = 2 if self.search else self.max_k
        search = self.search and k_min != self.max_k
        for k in range(k_min, self.max_k + 1):
            logger.info(f'Clustering into {k} groups...')
            cluster_obj = self.gen_inner_cluster_obj(k)
            pred_labels = cluster_obj.fit_predict(activ)
            logger.info('Clustering done, computing score...')
            if search:
                local_sils, _ = get_cluster_sils(
                    activ, pred_labels, compute_sil=True, cuda=self.sil_cuda
                )
                clustering_score = np.mean(list(local_sils.values()))
                logger.info(f'k = {k} score: {clustering_score}')
                if clustering_score >= best_score:
                    logger.info(f'Best model found at k = {k} with score {clustering_score:.3f}')
                    best_score = clustering_score
                    best_model = cluster_obj
                    best_k = k
            else:
                best_score, best_model, best_k = 0, cluster_obj, self.max_k
        self.best_k = best_k
        self.n_clusters = best_k
        self.best_score = best_score
        self.cluster_obj = best_model
        return self

    def predict(self, activ):
        """Return cluster labels for *activ* from the selected model."""
        return self.cluster_obj.predict(activ)

    def fit_predict(self, activ):
        """Fit on *activ*, then return its cluster labels."""
        self.fit(activ)
        return self.predict(activ)

    def predict_proba(self, X):
        """Return per-component membership probabilities for *X*.

        Bug fix: previously passed the undefined name ``activ``.
        """
        return self.cluster_obj.predict_proba(X)

    def score(self, X):
        """Return the selected model's score for *X*.

        Bug fix: previously passed the undefined name ``activ``.
        """
        return self.cluster_obj.score(X)
class OverclusterModel:
def __init__(
self,
cluster_method,
max_k,
oc_fac,
n_init=3,
search=True,
sil_threshold=0.0,
seed=None,
sil_cuda=False,
verbose=0,
sz_threshold_pct=0.005,
sz_threshold_abs=25,
):
self.base_model = AutoKMixtureModel(
cluster_method, max_k, n_init, seed, sil_cuda, verbose, search
)
self.oc_fac = oc_fac
self.sil_threshold = sil_threshold
self.sz_threshold_pct = sz_threshold_pct
self.sz_threshold_abs = sz_threshold_abs
self.requires_extra_info = True
def get_oc_predictions(self, activ, val_activ, orig_preds, val_orig_preds):
# Split each cluster from base_model into sub-clusters, and save each of the
# associated sub-clustering predictors in self.cluster_objs.
# Collate and return the new predictions in oc_preds and val_oc_preds.
self.cluster_objs = []
oc_preds = np.zeros(len(activ), dtype=np.int)
val_oc_preds = np.zeros(len(val_activ), dtype=np.int)
for i in self.pred_vals:
sub_activ = activ[orig_preds == i]
cluster_obj = self.base_model.gen_inner_cluster_obj(self.oc_fac).fit(sub_activ)
self.cluster_objs.append(cluster_obj)
sub_preds = cluster_obj.predict(sub_activ) + self.oc_fac * i
oc_preds[orig_preds == i] = sub_preds
val_sub_activ = val_activ[val_orig_preds == i]
val_sub_preds = cluster_obj.predict(val_sub_activ) + self.oc_fac * i
val_oc_preds[val_orig_preds == i] = val_sub_preds
return oc_preds, val_oc_preds
    def filter_overclusters(self, activ, losses, orig_preds, oc_preds, val_oc_preds):
        """Decide which overclusters are worth keeping.

        An overcluster is kept only if its points have a higher silhouette
        than before overclustering, its average loss is at least that of
        its parent cluster, and it contains enough training AND validation
        points.  Returns the list of overcluster ids to keep.

        NOTE(review): this reads ``self.sil_cuda``, which ``__init__`` as
        written never assigns -- confirm it is set before fit() runs.
        """
        num_oc = np.amax(oc_preds) + 1
        # Compute original per-cluster SIL scores and losses,
        # and the SIL scores and losses after overclustering.
        orig_sample_sils = silhouette_samples(activ, orig_preds, cuda=self.sil_cuda)
        orig_losses = [np.mean(losses[orig_preds == i]) for i in self.pred_vals]
        new_sample_sils = silhouette_samples(activ, oc_preds, cuda=self.sil_cuda)
        oc_orig_sils = [np.mean(orig_sample_sils[oc_preds == i]) for i in range(num_oc)]
        oc_new_sils = [np.mean(new_sample_sils[oc_preds == i]) for i in range(num_oc)]
        new_losses = [np.mean(losses[oc_preds == i]) for i in range(num_oc)]
        # Count number of points in each cluster after overclustering. Drop tiny clusters as these
        # will lead to unreliable optimization.
        oc_counts = np.bincount(oc_preds)
        # If val clusters are too small, we will get unreliable estimates - so need to threshold these too
        val_oc_counts = np.bincount(val_oc_preds)
        tr_sz_threshold = max(len(activ) * self.sz_threshold_pct, self.sz_threshold_abs)
        val_sz_threshold = self.sz_threshold_abs
        # Decide which overclusters to keep; i // oc_fac recovers the id of
        # overcluster i's parent cluster.
        oc_to_keep = []
        for i in range(num_oc):
            if (
                oc_new_sils[i] > max(oc_orig_sils[i], self.sil_threshold)
                and new_losses[i] >= orig_losses[i // self.oc_fac]
                and oc_counts[i] >= tr_sz_threshold
                and val_oc_counts[i] >= val_sz_threshold
            ):
                oc_to_keep.append(i)
        return oc_to_keep
def create_label_map(self, num_orig_preds, oc_to_keep, oc_preds):
# Map raw overclustering outputs to final "cluster labels," accounting for the
# fact that some overclusters are re-merged.
label_map = {}
cur_cluster_ind = -1
oc_to_base_id = {}
for i in range(num_orig_preds):
# For each original cluster, if there were no
# overclusters kept within it, keep the original cluster as-is.
# Otherwise, it needs to be split.
keep_all = True # If we keep all overclusters, we can discard the original cluster
for j in range(self.oc_fac):
index = i * self.oc_fac + j
if index not in oc_to_keep:
keep_all = False
if not keep_all:
cur_cluster_ind += 1
# Updated cluster index corresponding to original cluster
# (points in the original cluster assigned to a non-kept overcluster
# are merged into this cluster)
base_index = cur_cluster_ind
for j in range(self.oc_fac):
index = i * self.oc_fac + j
if index in oc_to_keep:
cur_cluster_ind += 1
oc_index = cur_cluster_ind
else:
assert not keep_all
oc_index = base_index
label_map[index] = oc_index
return label_map
def fit(self, activ, val_activ=None, losses=None):
if val_activ is None or losses is None:
raise ValueError('Must provide losses and val set activations')
logger = logging.getLogger('harness.cluster')
logger.info('Fitting base model...')
orig_preds = self.base_model.fit_predict(activ)
self.pred_vals = sorted(np.unique(orig_preds))
num_orig_preds = len(self.pred_vals)
losses = np.array(losses)
oc_fac = self.oc_fac
num_oc = num_orig_preds * oc_fac
val_orig_preds = self.base_model.predict(val_activ)
logger.info('Fitting overclustering model...')
oc_preds, val_oc_preds = self.get_oc_predictions(
activ, val_activ, orig_preds, val_orig_preds
)
oc_to_keep = self.filter_overclusters(activ, losses, orig_preds, oc_preds, val_oc_preds)
self.label_map = self.create_label_map(num_orig_preds, oc_to_keep, oc_preds)
new_preds = np.zeros(len(activ), dtype=np.int)
for i in range(num_oc):
new_preds[oc_preds == i] = self.label_map[i]
self.n_clusters = max(self.label_map.values()) + 1 # Final number of output predictions
logger.info(f'Final number of clusters: {self.n_clusters}')
return self
def predict(self, activ):
# Get clusters from base model
base_preds = self.base_model.predict(activ)
# Get overclusters
oc_preds = np.zeros(len(activ), dtype=np.int)
for i in self.pred_vals:
subfeats = activ[base_preds == i]
subpreds = self.cluster_objs[i].predict(subfeats) + self.oc_fac * i
oc_preds[base_preds == i] = subpreds
# Merge overclusters appropriately and return final predictions
new_preds = np.zeros(len(activ), dtype=np.int)
for i in range(len(self.pred_vals) * self.oc_fac):
new_preds[oc_preds == i] = self.label_map[i]
return new_preds
    @property
    def sil_cuda(self):
        # Silhouette-on-GPU flag; delegated so both models share one setting.
        return self.base_model.sil_cuda
    @property
    def n_init(self):
        # Number of clustering restarts; delegated to the base model.
        return self.base_model.n_init
    @property
    def seed(self):
        # Random seed; delegated to the base model.
        return self.base_model.seed
|
992,094 | 668a60d1327482caea2b80c1ba026fb151bb655d | from rest_framework import pagination
class StandardTweetsPagination(pagination.PageNumberPagination):
    """Page-number pagination for tweet endpoints: 3 items per page by
    default, overridable by the client via the ?page_size= query parameter."""
    page_size = 3
    page_size_query_param = 'page_size'
|
992,095 | b766931038900db4f821f38ac8365e951688abf3 | # coding: utf-8
'''
Created by JamesYi
Created on 2018/10/23
'''
import argparse
import os
import math
import time
import torch.nn.functional as F
from torch import optim
import torch
from torch.nn.utils import clip_grad_norm
from model import Encoder, AttentionDecoder, Seq2Seq
from utils import load_dataset, device, tokenize_de, tokenize_en
start_time = time.time()
def parse_arguments():
    """Parse training hyperparameters from the command line."""
    parser = argparse.ArgumentParser(description='Hyperparams')
    parser.add_argument('-epochs', type=int, default=100, help='Number of epochs for training')
    parser.add_argument('-batch_size', type=int, default=32, help='Number of batch size for training')
    parser.add_argument('-lr', type=float, default=0.0001, help='Initial learning rate for training')
    parser.add_argument('-grad_clip', type=float, default=10.0, help='In case of gradient explosion')
    return parser.parse_args()
def train(model, optimizer, train_iter, vocab_size, grad_clip, DE, EN):
    """Train *model* for one pass over *train_iter*.

    Prints the mean loss of every 100 batches along with elapsed time.
    DE is unused here but kept for signature symmetry with evaluate().
    """
    model.train()
    total_loss = 0
    # Padding positions are excluded from the loss.
    pad = EN.vocab.stoi['<pad>']
    for index, batch in enumerate(train_iter):
        src, len_src = batch.src
        trg, len_trg = batch.trg
        src = src.to(device)
        trg = trg.to(device)
        optimizer.zero_grad()
        output = model(src, trg)
        # Position 0 is the <sos> token, so it is skipped in output and target.
        loss = F.nll_loss(output[1:].view(-1, vocab_size), trg[1:].contiguous().view(-1), ignore_index=pad)
        loss.backward()
        # clip_grad_norm was deprecated and later removed from PyTorch; the
        # in-place clip_grad_norm_ performs the same clipping.
        torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip)
        optimizer.step()
        total_loss += loss.item()
        if index % 100 == 0 and index != 0:
            total_loss = total_loss / 100
            now_time = time.time()
            print("{} [Loss: {}] [Time: {}h{}m{}s]"
                  .format(index, total_loss, (now_time - start_time) // 3600,
                          (now_time - start_time) % 3600 // 60, (now_time - start_time) % 60))
            total_loss = 0
def evaluate(model, val_iter, vocab_size, DE, EN):
    """Return the mean NLL loss of *model* over *val_iter*.

    Runs under torch.no_grad(): validation needs no gradient tracking, so
    skipping it avoids building autograd graphs the original discarded
    (saves memory and time).  DE is unused but kept for signature symmetry.
    """
    model.eval()
    total_loss = 0
    pad = EN.vocab.stoi['<pad>']
    with torch.no_grad():
        for index, batch in enumerate(val_iter):
            src, len_src = batch.src
            trg, len_trg = batch.trg
            src = src.to(device)
            trg = trg.to(device)
            # Teacher forcing disabled so validation mirrors inference.
            output = model(src, trg, teacher_forcing_ratio=0)
            loss = F.nll_loss(output[1:].view(-1, vocab_size), trg[1:].contiguous().view(-1), ignore_index=pad)
            total_loss += loss.item()
    return total_loss / len(val_iter)
def online_translator(model, model_save_path, DE, EN):
    """Interactive German->English translation loop.

    Loads weights from *model_save_path*, then repeatedly reads a German
    sentence from stdin and prints the greedy translation, until the user
    types 'quit'.
    """
    model.load_state_dict(torch.load(model_save_path))
    while True:
        s_de = input("Please input a german sentence:")
        if s_de == 'quit':
            break
        else:
            # Wrap the tokenized sentence in start/end markers and map to vocab ids.
            de_list = ['<sos>'] + tokenize_de(s_de) + ['<eos>']
            input_de = []
            for de_i in de_list:
                input_de.append(DE.vocab.stoi[de_i])
            # Shape (seq_len, 1): a single-sentence batch.
            input_de = torch.Tensor(input_de).unsqueeze(1).long().to(device)
            model.eval()
            # NOTE(review): the source tensor is also passed as the target
            # placeholder; with teacher_forcing_ratio=0 the decoder presumably
            # only uses its length/start token — confirm against Seq2Seq.forward.
            output_en = model(input_de, input_de, teacher_forcing_ratio=0)
            output_en.squeeze_()
            s_en = ''
            pad = EN.vocab.stoi["<pad>"]
            # Greedy decoding: pick the top-scoring token per step, skip padding.
            for en_i in output_en:
                _, top1 = en_i.topk(1)
                if top1.item() == pad:
                    continue
                s_en += EN.vocab.itos[top1.item()] + ' '
            print("Translation result in English: {}".format(s_en))
if __name__ == "__main__":
    args = parse_arguments()
    # Model dimensions (fixed; only optimizer settings come from the CLI).
    hidden_size = 512
    embed_size = 256
    print(device)
    print('Loading dataset ......')
    train_iter, val_iter, test_iter, DE, EN = load_dataset(args.batch_size)
    de_size, en_size = len(DE.vocab), len(EN.vocab)
    print("[TRAIN]:%d (dataset:%d)\t[TEST]:%d (dataset:%d)\t[VALUATE]:%d (dataset:%d)"
          % (len(train_iter), len(train_iter.dataset),
             len(test_iter), len(test_iter.dataset),
             len(val_iter), len(val_iter.dataset)))
    print("[DE_vocab]:%d [en_vocab]:%d" % (de_size, en_size))
    print("Initialize model ......")
    encoder = Encoder(de_size, embed_size, hidden_size)
    decoder = AttentionDecoder(en_size, embed_size, hidden_size)
    seq2seq = Seq2Seq(encoder, decoder).to(device)
    optimizer = optim.Adam(seq2seq.parameters(), lr=args.lr)
    print(seq2seq)
    best_val_loss = None
    # Train/validate loop; checkpoint whenever validation loss improves.
    for epoch in range(0, args.epochs):
        train(seq2seq, optimizer,train_iter, en_size, args.grad_clip, DE, EN)
        val_loss = evaluate(seq2seq, val_iter, en_size, DE, EN)
        now_time = time.time()
        print("[Epoch:{}] val_loss:{} | val_pp:{} | Time: {}h{}m{}s".format(epoch, val_loss, math.exp(val_loss), (now_time - start_time) // 3600,
                                                                            (now_time - start_time) % 3600 // 60, (now_time - start_time) % 60))
        if not best_val_loss or val_loss < best_val_loss:
            print("Saving model ......")
            # One checkpoint per improving epoch, under ./.save/
            if not os.path.isdir(".save"):
                os.makedirs(".save")
            torch.save(seq2seq.state_dict(), './.save/seq2seq_%d.pt' % (epoch))
            best_val_loss = val_loss
    test_loss = evaluate(seq2seq, test_iter, en_size, DE, EN)
    print("[TEST] loss:{}".format(test_loss))
|
992,096 | a5d630b9aa20db732322c8cde5e52e6945e2ffe3 | #########################################################################################
#############################--- SOLUCION DE RETO 2--- ##################################
#########################################################################################
try:
    # Student name.
    estudiante = input("Ingrese el nombre del estudiante: \n")
    # Number of grades to register.
    numero_notas = int(input("ingrese el numero de notas del estudiante: \n"))
    # Read each grade; appending to a list replaces the original's artificial
    # pre-allocation via list(range(...)) and manual index counter.
    notas = []
    for i in range(numero_notas):
        notas.append(int(input(f'Ingrese Nota {i+1} :')))
    # Basic statistics (a ZeroDivisionError for 0 grades is caught below,
    # matching the original behavior).
    promedio = sum(notas) / len(notas)
    nota_max = max(notas)
    nota_min = min(notas)
    print("===================================================")
    print(f'Las notas ingresadas del estudiante {estudiante} son: \n')
    print("===================================================")
    # List the grades, numbered from 1; enumerate replaces the two manual
    # counters (contador, j) the original kept in lockstep.
    for j, nota in enumerate(notas, start=1):
        print(f'Nota {j} : {nota}')
    print(f'El promedio de las {numero_notas} notas ingresadas es : --> {promedio}')
    print(f'La nota mas alta de las {numero_notas} notas ingresadas es : --> {nota_max}')
    print(f'La nota mas baja de las {numero_notas} notas ingresadas es : --> {nota_min}')
except Exception as ex:
    print("Ha ocurrido un inconveniente, codigo de error: " + str(ex))
|
992,097 | 91e5d1f9a0d94ad75348d13767a6717a249a5785 | from generator import *
from backend import banner, Backend
headerCode = r'''
let _bytesToString = bytes => bytes.map(c => String.fromCharCode(c)).join('');
let _bytesToCString = bytes => {
let nind = bytes.indexOf(0);
if(nind == -1)
return _bytesToString(bytes);
else
return _bytesToString(bytes.slice(0, nind));
};
class BinaryReader {
constructor(data) {
if(Array.isArray(data))
this.dataView = new DataView(new ArrayBuffer(data));
else if(ArrayBuffer.isView(data))
this.dataView = new DataView(data.buffer, data.byteOffset, data.byteLength);
else if(data instanceof ArrayBuffer)
this.dataView = new DataView(data);
else
throw 'Unknown data type for BinaryReader'
this.length = this.dataView.byteLength;
this.position = 0;
}
skip(amount, value) {
this.position += amount;
return value;
}
uint8 = () => this.skip(1, this.dataView.getUint8(this.position, true));
uint16 = () => this.skip(2, this.dataView.getUint16(this.position, true));
uint32 = () => this.skip(4, this.dataView.getUint32(this.position, true));
int8 = () => this.skip(1, this.dataView.getInt8(this.position, true));
int16 = () => this.skip(2, this.dataView.getInt16(this.position, true));
int32 = () => this.skip(4, this.dataView.getInt32(this.position, true));
float32 = () => this.skip(4, this.dataView.getFloat32(this.position, true));
float64 = () => this.skip(8, this.dataView.getFloat64(this.position, true));
readBytes = count => this.skip(count, Array.from(new Uint8Array(this.dataView.buffer.slice(this.dataView.byteOffset + this.position, this.dataView.byteOffset + this.position + count))));
readString = len => _bytesToString(this.readBytes(len));
}
'''.strip()
# JavaScript reserved words that may not be used as identifiers.
kwlist = 'break', 'case', 'catch', 'class', 'const', 'continue', 'debugger', 'default', 'delete', 'do', 'else', 'export', 'extends', 'finally', 'for', 'function', 'if', 'import', 'in', 'instanceof', 'new', 'return', 'super', 'switch', 'this', 'throw', 'try', 'typeof', 'var', 'void', 'while', 'with', 'yield'
def sanitize(name):
    """Turn *name* into a legal JS identifier: prefix reserved words and
    names starting with a digit with '_', and rewrite template brackets
    ('<' becomes '_', '>' is dropped), repeating until the name is clean."""
    while True:
        if name in kwlist or name[0] in '0123456789':
            name = '_' + name
        elif '<' in name or '>' in name:
            name = name.replace('<', '_').replace('>', '')
        else:
            return name
isPublic = lambda name: name[0] != '_' and name[0].upper() == name[0]
def genExpr(tree, struct):
    # Render a parsed expression tree into a JavaScript expression string.
    # References to fields of *struct* are emitted as `this.<name>`; other
    # variables keep their (sanitized) names.
    def sub(tree):
        if tree is None:
            return None
        if tree[0] == 'compare':
            return '(%s) %s (%s)' % (sub(tree[1]), tree[2], sub(tree[3]))
        elif tree[0] == 'binary_op':
            return '(%s) %s (%s)' % (sub(tree[1]), tree[2], sub(tree[3]))
        elif tree[0] == 'variable':
            if tree[1] in struct.fields:
                return 'this.%s' % sanitize(tree[1])
            else:
                return sanitize(tree[1])
        elif tree[0] == 'value':
            return repr(tree[1])
        elif tree[0] == 'property':
            return '(%s).%s' % (sub(tree[1]), sanitize(tree[2]))
        elif tree[0] == 'cast-to':
            assert isinstance(tree[1], CStringType) # TODO: Add more casts
            return "_bytesToCString(%s)" % sub(tree[2])
        elif tree[0] == 'subscript':
            base = sub(tree[1])
            index = sub(tree[2])
            # A slice subscript arrives as a (start, end) tuple from the
            # 'slice' branch below; a plain index arrives as a string.
            if isinstance(index, tuple):
                start, end = index
                if start is None:
                    if end is None:
                        return base
                    else:
                        return '%s.slice(0, %s)' % (base, end)
                elif end is None:
                    return '%s.slice(%s)' % (base, start)
                else:
                    return '%s.slice(%s, %s)' % (base, start, end)
            else:
                return '%s[%s]' % (base, index)
        elif tree[0] == 'slice':
            # Either bound may be None (open-ended slice).
            return (sub(tree[1]), sub(tree[2]))
        else:
            # Unknown node kind: dump it for debugging and fail hard.
            print tree
            assert False
    return sub(tree)
class JsBackend(Backend):
    """Fluffy code-generation backend that emits ES6 JavaScript parsers."""
    LANGUAGE = 'javascript'
    EXTENSION = 'js'
    def generate(self, spec):
        """Emit one exported ES6 class per struct in *spec*.

        Each class constructor takes a BinaryReader (or raw bytes, which are
        wrapped) plus any struct dependencies, and decodes the fields inline.
        """
        self.spec = spec
        for line in banner:
            self.writeLine('//', line)
        self.writeLine('//')
        self.writeLine('// DO NOT EDIT')
        self.writeLine('// Generated automatically by Fluffy')
        self.writeLine(headerCode)
        for name, struct in spec.structs.items():
            self.writeLine()
            self.writeLine('export class %s {' % sanitize(name))
            self.indent()
            self.writeLine('constructor(br%s) {' % (', ' + u', '.join(map(sanitize, sorted(struct.dependencies.keys()))) if struct.dependencies else ''))
            self.indent()
            self.writeLine('if(!(br instanceof BinaryReader)) br = new BinaryReader(br);')
            for fn in struct.fields:
                # Non-public fields stay on the instance but are hidden from enumeration.
                if not isPublic(fn):
                    self.writeLine('Object.defineProperty(this, \'%s\', {enumerable: false, writable: true});' % sanitize(fn))
            def recur(steps):
                # Walk the unpack-step tree, emitting one JS statement per step.
                for step in steps:
                    if step[0] == 'magic':
                        self.writeLine('if(br.readString(%i) != %r) throw \'Magic mismatch in %s\';' % (len(step[1]), str(step[1]), struct.name))
                    elif step[0] == 'unsupported':
                        self.writeLine('throw \'Unsupported\';')
                    elif step[0] == 'unpack':
                        unpacker = self.genUnpack(step[1], struct)
                        for var in step[2]:
                            self.writeLine('this.%s = %s;' % (sanitize(var), unpacker))
                    elif step[0] == 'assign':
                        self.writeLine('this.%s = %s;' % (sanitize(step[1]), genExpr(step[3], struct)))
                    elif step[0] == 'mark_position':
                        self.writeLine('this.%s = br.position;' % (sanitize(step[1])))
                    elif step[0] == 'seek_abs_scoped':
                        # Save and restore the read position around the nested steps.
                        oldPos = self.tempvar()
                        self.writeLine('let %s = br.position;' % oldPos)
                        self.writeLine('br.position = %s;' % genExpr(step[1], struct))
                        recur(step[2])
                        self.writeLine('br.position = %s;' % oldPos)
                    elif step[0] == 'seek_abs':
                        self.writeLine('br.position = %s;' % genExpr(step[1], struct))
                    elif step[0] == 'match':
                        if len(step[2]) == 0:
                            continue
                        elif len(step[2]) == 1 and step[2][0][0] is None:
                            # Only a default arm: no switch statement needed.
                            recur(step[2][0][1])
                        else:
                            self.writeLine('switch(%s) {' % genExpr(step[1], struct))
                            for case, body in step[2]:
                                if case is not None:
                                    self.writeLine('case %s:' % genExpr(case, struct))
                                else:
                                    self.writeLine('default:')
                                self.indent()
                                recur(body)
                                self.writeLine('break;')
                                self.dedent()
                                first = False
                            self.writeLine('}')
                    elif step[0] == 'if':
                        self.writeLine('if(%s) {' % genExpr(step[1], struct))
                        self.indent()
                        if step[2]:
                            recur(step[2])
                        self.dedent()
                        if step[3]:
                            self.writeLine('} else {')
                            self.indent()
                            recur(step[3])
                            self.dedent()
                        self.writeLine('}')
                    else:
                        # Unknown step kind: dump it for debugging and fail hard.
                        print step
                        assert False
            recur(struct.unpackSteps)
            self.dedent()
            self.writeLine('}')
            self.dedent()
            self.writeLine('}')
    def genUnpack(self, type, struct):
        """Return a JS expression that reads one value of *type* from `br`."""
        if isinstance(type, IntType) or isinstance(type, FloatType):
            return 'br.%r()' % type
        elif isinstance(type, ArrayType):
            bt = type.base
            # Resolve typedef chains to the underlying element type.
            while isinstance(bt, Typedef):
                bt = bt.otype
            rank = genExpr(type.rankExpr, struct)
            # Unsigned byte arrays use the fast readBytes path.
            if isinstance(bt, IntType) and bt.bits == 8 and not bt.signed:
                return 'br.readBytes(%s)' % rank
            try:
                if type.rankExpr[0] == 'value':
                    # Constant rank: unroll into a literal array.
                    irank = int(type.rankExpr[1])
                    up = self.genUnpack(bt, struct)
                    return '[%s]' % u', '.join([up] * irank)
            except:
                pass
            return '[...Array(%s).keys()].map(_ => %s)' % (rank, self.genUnpack(bt, struct))
        elif isinstance(type, Typedef):
            return self.genUnpack(type.otype, struct)
        elif isinstance(type, Struct) or isinstance(type, SpecializedType):
            ts = self.spec.structs[type.name]
            depMatch = ''
            # Forward dependencies: use the constructor argument if this struct
            # shares the dependency, otherwise read it from a decoded field.
            if ts.dependencies:
                depMatch = ', ' + u', '.join(sanitize(key if key in struct.dependencies else 'this.' + key) for key in sorted(ts.dependencies.keys()))
            return 'new %s(br%s)' % (sanitize(type.name), depMatch)
        # Unknown type kind: dump it for debugging and fail hard.
        print '%r %s' % (type.__class__, type)
        assert False
|
992,098 | 6399b709bcefed0c806e5ed3bbc730ccc4d571d4 | """
In this simulation, the fog device moves in different nodes. There are linked to another nodes.
@author: Isaac Lera
"""
import os
import time
import json
import random
import logging.config
import networkx as nx
import matplotlib.pyplot as plt
from pathlib import Path
from yafs.core import Sim
from yafs.application import create_applications_from_json
from yafs.topology import Topology
from yafs.placement import JSONPlacement
from yafs.path_routing import DeviceSpeedAwareRouting
from yafs.distribution import deterministic_distribution, deterministicDistributionStartPoint
class CustomStrategy():
    """DES monitor that, on each activation, randomly creates a new mobile
    fog-node user or moves an existing one to another topology node."""

    def __init__(self, pathResults, listIdApps):
        self.activations = 0
        self.pathResults = pathResults
        self.listUsers = []
        self.numberMaxUsers = 100  # 100
        self.listIdApps = listIdApps
        # Maps DES process id -> node id where that user currently sits.
        self.placeAt = {}

    def createUser(self, sim):
        """Deploy a source (user) for a random app on a random node;
        returns the new DES process id."""
        app_name = random.sample(self.listIdApps, 1)[0]
        app = sim.apps[app_name]
        msg = app.get_message("Fog.Node.%i" % app_name)
        dist = deterministic_distribution(30, name="Deterministic")  # 30
        node = random.sample(sim.topology.G.nodes(), 1)[0]
        idDES = sim.deploy_source(app_name, id_node=node, msg=msg, distribution=dist)
        self.listUsers.append(idDES)
        self.placeAt[idDES] = node
        return idDES

    def __call__(self, sim, routing):
        logging.info("Activating Custom process - number %i " % self.activations)  #
        self.activations += 1
        # In this case, the new users do not change the topology
        routing.invalid_cache_value = True  # when the service changes, the cache of the Path.routing is outdated.
        # We can introduce a new user or we move it
        if len(self.listUsers) == 0:
            self.createUser(sim)
        if random.random() < 0.6:
            # we create a new user
            idDES = self.createUser(sim)
            logging.info(" Creating a FogNode %i on node %i" % (idDES, self.placeAt[idDES]))
        elif random.random() < 0.8:
            # we move a user from one node to another
            userDES = random.sample(self.listUsers, 1)[0]
            newNode = random.sample(sim.topology.G.nodes(), 1)[0]
            logging.info(" Moving a FogNode %i from node %i to %i" % (userDES, self.placeAt[userDES], newNode))
            # FIX: alloc_DES appears to be keyed by the DES process id (the
            # value deploy_source returns), not by the node the user was on —
            # the original indexed it with placeAt[userDES]. TODO confirm
            # against yafs.core.Sim.alloc_DES.
            sim.alloc_DES[userDES] = newNode
            # Keep our own bookkeeping in sync so later moves log the right origin.
            self.placeAt[userDES] = newNode
        #else:
        #    # we remove an user
        #    userDES = random.sample(self.listUsers, 1)[0]
        #    sim.undeploy_source(userDES)
        #    self.listUsers.remove(userDES)
        #    logging.info(" Removing a user %i on node %i" % (userDES, self.placeAt[userDES]))
def main(stop_time, it):
    """Build a random topology, deploy apps/placements/users, and run one
    YAFS simulation of *stop_time* units (iteration number *it*)."""
    folder_results = Path("results/")
    folder_results.mkdir(parents=True, exist_ok=True)
    folder_results = str(folder_results) + "/"
    """
    TOPOLOGY
    """
    t = Topology()
    # You also can create a topology using JSONs files. Check out examples folder
    size = 200
    #t.G = nx.binomial_tree(size) # In NX-lib there are a lot of Graphs generators
    t.G = nx.gnp_random_graph(size, 0.025)
    # Node ids highlighted in the plot below (orange = initial, green = final).
    initial = [55,61,67,73,81,89,96,109,176]
    finial = [105,93,187,92,185,45,59,31,9,43]
    color_map = []
    node_size = []
    for node in t.G:
        #print(node)
        if node in initial:
            color_map.append('orange')
            node_size.append(200)
        elif node in finial:
            color_map.append('green')
            node_size.append(300)
        else:
            color_map.append('gray')
            node_size.append(100)
    print(color_map)
    # Definition of mandatory attributes of a Topology
    # Attr. on edges
    # PR and BW are 1 unit
    attPR_BW = {x: 1 for x in t.G.edges()}
    nx.set_edge_attributes(t.G, name="PR", values=attPR_BW)
    nx.set_edge_attributes(t.G, name="BW", values=attPR_BW)
    # Attr. on nodes
    # IPT
    attIPT = {x: 100 for x in t.G.nodes()} #100 service
    nx.set_node_attributes(t.G, name="IPT", values=attIPT)
    #nx.write_gexf(t.G, folder_results + "graph_binomial_tree_%i" % size) # you can export the Graph in multiples format to view in tools like Gephi, and so on.
    nx.draw(t.G, node_color = color_map, with_labels=False, node_size = node_size, alpha = 0.7) # Draw
    plt.show()
    print(t.G.nodes ()) # nodes id can be str or int
    """
    APPLICATION or SERVICES
    """
    dataApp = json.load(open('data/appDefinition.json'))
    apps = create_applications_from_json(dataApp)
    """
    SERVICE PLACEMENT
    """
    placementJson = json.load(open('data/allocDefinition.json'))
    placement = JSONPlacement(name="Placement", json=placementJson)
    """
    Defining ROUTING algorithm to define how path messages in the topology among modules
    """
    selectorPath = DeviceSpeedAwareRouting()
    """
    SIMULATION ENGINE
    """
    s = Sim(t, default_results_path=folder_results + "sim_trace")
    """
    Deploy services == APP's modules
    """
    for aName in apps.keys():
        s.deploy_app(apps[aName], placement, selectorPath)
    """
    Deploy users
    """
    ### IN THIS CASE, We control the users from our custom strategy
    userJSON = json.load(open('data/fognodesDefinition.json'))
    for user in userJSON["sources"]:
        app_name = user["app"]
        app = s.apps[app_name]
        msg = app.get_message(user["message"])
        node = user["id_resource"]
        dist = deterministic_distribution(200, name="Deterministic") #100
        idDES = s.deploy_source(app_name, id_node=node, msg=msg, distribution=dist)
    """
    This internal monitor in the simulator (a DES process) changes the sim's behaviour.
    You can have multiples monitors doing different or same tasks.
    In this case, it changes the number or movement of users.
    """
    listIdApps = [x["id"] for x in dataApp]
    # First activation at stop_time/4, then every stop_time/20 units.
    dist = deterministicDistributionStartPoint(stop_time / 4., stop_time / 20, name="Deterministic") #4 20
    evol = CustomStrategy(folder_results, listIdApps)
    s.deploy_monitor("RandomAllocation",
                     evol,
                     dist,
                     **{"sim": s, "routing": selectorPath}) # __call__ args
    """
    RUNNING - last step
    """
    logging.info(" Performing simulation: %i " % it)
    s.run(stop_time)  # To test deployments put test_initial_deploy a TRUE
    s.print_debug_assignaments()
    print("Number of new FogNodes: %i" % len(evol.listUsers))
if __name__ == '__main__':
    # Logging configuration is read from ./logging.ini next to the CWD.
    logging.config.fileConfig(os.getcwd() + '/logging.ini')
    nIterations = 1  # iteration for each experiment
    simulationDuration = 2000  # 20000
    # Iteration for each experiment changing the seed of randoms
    for iteration in range(nIterations):
        random.seed(iteration)
        logging.info("Running experiment it: - %i" % iteration)
        start_time = time.time()
        main(stop_time=simulationDuration,
             it=iteration)
        print("\n--- %s seconds ---" % (time.time() - start_time))
    print("Simulation Done!")
|
992,099 | cc40170e4a7d7ca564229cc27806bdec4dfb0d4b | """
Создать матрицу случайных чисел и сохранить ее в json файл.
После прочесть ее, обнулить все четные элементы и сохранить в другой файл.
"""
import json
from random import randint as rd
def matrix_to_json():
    """Build a 5x5 matrix of random ints in [-50, 50] and dump it to
    matrix.json, keyed by row index."""
    matrix = [[rd(-50, 50) for _ in range(5)] for _ in range(5)]
    data = dict(enumerate(matrix))
    with open('matrix.json', 'w') as out:
        json.dump(data, out, indent=6)
def from_json_to_another_file():
    """Read matrix.json, zero out every even element, and write each row
    (as its list repr) on its own line of even_to_zero.txt."""
    with open('matrix.json') as src, open('even_to_zero.txt', 'w') as dst:
        matrix = json.load(src)
        for row in matrix.values():
            zeroed = [0 if value % 2 == 0 else value for value in row]
            dst.write(f'{zeroed}\n')
def main():
    """Create the random matrix file, then derive the even-zeroed copy."""
    matrix_to_json()
    from_json_to_another_file()
if __name__ == "__main__":
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.