index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
994,700 | 550cfc3b4a4d514a5ddda4392c042927648f00d7 | import calendar
import datetime
'''
You are given the following information, but you may prefer to do some research for yourself.
1 Jan 1900 was a Monday.
Thirty days has September,
April, June and November.
All the rest have thirty-one,
Saving February alone,
Which has twenty-eight, rain or shine.
And on leap years, twenty-nine.
A leap year occurs on any year evenly divisible by 4, but not on a century unless it is divisible by 400.
How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)?
'''
# def getSundays():
# startDate = datetime.date(1901,1,1)
# endDate = datetime.date(2000,1,1)
# count = 0
# while (startDate < endDate):
# if (startDate.day == 1 && startDate.today.weekday == 6):
# count++
# startDate
# return count
# print getSundays()
from datetime import timedelta
def subtract_one_month(dt0):
    """Return the date one month before ``dt0``, clamping the day.

    BUG FIX: the original called ``dt0.replace(days=+1)`` — that is the
    arrow library's shift API; ``datetime.date.replace`` only accepts
    ``year``/``month``/``day``, so the original raised TypeError.

    The day is clamped to the previous month's length, e.g.
    2004-03-30 -> 2004-02-29 (leap year).
    """
    # Step back to the last day of the previous month...
    end_of_prev_month = dt0.replace(day=1) - timedelta(days=1)
    # ...then restore the original day-of-month where possible.
    return end_of_prev_month.replace(day=min(dt0.day, end_of_prev_month.day))
a = datetime.date(2004,3,30)
subtract_one_month(a)
print a |
994,701 | 7029ea2ac084a36220e4cdbe9fffcf99be107c86 | # coding:utf-8
import re
import time
from urllib import request
class PL:
    """Scrape comments for a NetEase Cloud Music song via its public API
    and append the unique comment texts to a local file."""

    def __init__(self):
        # HTTP headers: impersonate a desktop Chrome browser so the API
        # endpoint does not reject the request.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) "
                          "Chrome/55.0.2883.87 Safari/537.36",
            "referer": "http://music.163.com/song?id=4466775&market=baiduqk"
        }

    def single(self, song_id):
        """Fetch up to 28 pages x 35 comments for ``song_id`` and write the
        de-duplicated comment bodies to a text file."""
        offset = 0
        comment = []
        for _ in range(28):
            # In the API, "limit" is comments per page and "offset" the page start.
            # BUG FIX: the original URL said "limit35" (missing "="), so the
            # malformed parameter was ignored and the offset stride of 35
            # no longer matched the server's page size.
            single_url = 'http://music.163.com/api/v1/resource/comments/R_SO_4_' + str(song_id) + '?limit=35&offset=' + str(offset)
            offset += 35
            html_url = request.Request(url=single_url, headers=self.headers)
            html = request.urlopen(html_url)
            data = html.read()
            str_data = data.decode()
            # The payload is split on the literal '"content":"' marker rather
            # than parsed as JSON; each fragment begins with one comment body.
            split_data = str_data.split('"content":"')
            for fragment in split_data:
                body = fragment.split('","')[0]
                if body not in comment:
                    comment.append(body)
        pl = open(r'C:\Users\liushipeng\Documents\pl.txt', 'a+')
        # Some comments contain emoji the target encoding cannot store.
        # Strip the offending character reported by each failure and retry,
        # up to three cleanups, before giving up on the comment.
        for text in comment:
            attempt = text
            for retries_left in range(3, -1, -1):
                try:
                    pl.write(attempt + '\n')
                    break
                except Exception as error:
                    if retries_left == 0:
                        break  # unrecoverable after three cleanups; skip it
                    attempt = self.error_gbk(error, attempt)
            time.sleep(2)
        pl.close()

    def error_gbk(self, error, content):
        """Remove from ``content`` the character that the encode error
        ``error`` complained about (parsed out of the error message)."""
        u = str(error).split(" '")
        u_error = str(u[1]).split("' ")
        result = re.sub(u_error[0], '', content)
        return result
if __name__ == '__main__':
# 通过歌曲id来访问歌曲对应的API接口
song_id = '1365221826'
p = PL()
p.single(song_id) |
994,702 | d4f18e60460ee250581a530e2f5896bc0b88540e | # -*- coding: utf-8 -*-
from tccli.services.mna.mna_client import action_caller
|
994,703 | 56a10646f4a62d804e28f985ddd42e492f8a4d3c | # Step1
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import json
import time
# Step2
def PrintSetUp():
    """Return ChromeOptions configured to silently save pages as PDF."""
    options = webdriver.ChromeOptions()
    # Print-preview state: preselect "Save as PDF" as the destination so no
    # printer is involved.
    print_state = {
        "recentDestinations": [
            {
                "id": "Save as PDF",
                "origin": "local",
                "account": ""
            }
        ],
        "selectedDestinationId": "Save as PDF",
        "version": 2,
        "isLandscapeEnabled": True,   # True = landscape, False = portrait
        "pageSize": 'A4',             # paper type (A3, A4, A5, Legal, Letter, Tabloid, ...)
        "isHeaderFooterEnabled": False,  # no page header/footer
        "isCssBackgroundEnabled": True,  # keep background graphics
    }
    prefs = {
        'printing.print_preview_sticky_settings.appState': json.dumps(print_state),
        "download.default_directory": "~/Downloads"
    }
    options.add_experimental_option('prefs', prefs)
    # Kiosk printing confirms the print dialog automatically as soon as it opens.
    options.add_argument('--kiosk-printing')
    return options
# Step3
def main_WebToPDF(BlogURL):
    """Open ``BlogURL`` in Chrome (via Selenium) and save it as a PDF."""
    options = PrintSetUp()
    driver = webdriver.Chrome(executable_path="./chromedriver", options=options)
    driver.implicitly_wait(10)  # implicit wait, in seconds
    driver.get(BlogURL)
    # Wait until every element on the page is present (15 s timeout).
    WebDriverWait(driver, 15).until(EC.presence_of_all_elements_located)
    driver.execute_script('return window.print()')  # triggers the silent PDF save
    time.sleep(10)  # give the file download time to finish
    driver.quit()
# Step4
if __name__ == '__main__':
    # Pages (and one local HTML file) to convert to PDF, one at a time.
    targets = ['https://degitalization.hatenablog.jp/entry/2020/05/15/084033',
               'https://note.com/makkynm/n/n1343f41c2fb7',
               "file:///Users/makky/Documents/Python/Sample.html"]
    for url in targets:
        main_WebToPDF(url)
|
994,704 | 2cff03eda00dc3831ce7270731c34560db139055 | from kafka import KafkaConsumer
from kafka.admin import KafkaAdminClient, NewTopic
REQUIRED_SUFFIXES = ["_events", "_sampleEnv", "_runInfo", "_epicsForwarderConfig", "_detSpecMap"]
def get_existing_topics(kafka_broker):
    """Return the set of topic names currently present on the broker.

    BUG FIX: the original constructed a KafkaConsumer and never closed it,
    leaking the broker connection on every call.
    """
    consumer = KafkaConsumer(bootstrap_servers=[kafka_broker])
    try:
        return consumer.topics()
    finally:
        consumer.close()
def add_required_topics(kafka_broker, instrument):
    """
    Adds required Kafka topics for the instrument

    Args:
        kafka_broker: the broker to add the topics to
        instrument: the name of the instrument for which to add the topics
    """
    required_topics = set(instrument + suffix for suffix in REQUIRED_SUFFIXES)
    existing_topics = set(filter(lambda topic: topic.startswith(instrument), get_existing_topics(kafka_broker)))
    if required_topics != existing_topics:
        # Only create what is missing; creating an existing topic would fail.
        topic_names_to_add = required_topics - existing_topics
        if not topic_names_to_add:
            # Extra instrument topics exist but nothing is missing — nothing to do.
            return
        topic_list = [NewTopic(name=name, num_partitions=1, replication_factor=3) for name in topic_names_to_add]
        admin_client = KafkaAdminClient(bootstrap_servers=kafka_broker, client_id='install_script')
        try:
            admin_client.create_topics(new_topics=topic_list, validate_only=False)
        finally:
            # BUG FIX: the admin client was never closed, leaking the connection.
            admin_client.close()
|
994,705 | aeb7407fcaf2778f5aad1167d3fba7d4e968e7b8 | from __future__ import annotations
import falcon
from app import auth_backend
from app.schemas.login import login_schema
class LoginResource:
    """Falcon resource for POST /login: authenticates a user and returns a JWT."""

    # Flag consumed by the auth middleware: this endpoint must be reachable
    # without a token (you have to be able to log in while logged out).
    auth = {"auth_disabled": True}
    # Request-body schema applied by the deserialization middleware on POST.
    deserializers = {"post": login_schema}

    def on_post(self, req: falcon.Request, resp: falcon.Response):
        """
        ---
        summary: Login into user account and generate JWT
        tags:
            - Login
        parameters:
            - in: body
              schema: LoginSchema
        consumes:
            - application/json
        produces:
            - application/json
        responses:
            200:
                description: Login Successful
                schema:
                    type: object
                    properties:
                        message:
                            type: string
                        jwt:
                            type: string
            401:
                description: Login Unsuccessful
            422:
                description: Input body formatting issue
        """
        # The deserialization middleware has already validated the body and
        # attached the resolved user object here (private falcon request attr).
        user = req._deserialized
        # Token payload carries only the user id — presumably enough for the
        # auth backend to resolve the account; TODO confirm token expiry policy.
        jwt_token = auth_backend.get_auth_token({"id": user.id})
        resp.status = falcon.HTTP_OK
        resp.media = {"message": "login successful!", "jwt": jwt_token}
|
994,706 | ebd364f2111c211aa734031ecb2c4653442b234c | """Tests for src/f_cli/resources/click_options.py."""
import json
from typing import Any, Dict
from unittest import TestCase
import click
from click.testing import CliRunner, Result
from f_cli.resources.click_options import (click_common_options,
click_debug_option,
click_verbose_option)
class TestSingleOptions(TestCase):
    """Unit tests for each individual click option decorator."""

    runner: CliRunner = CliRunner()

    def test_click_debug_option(self) -> None:
        """The --debug flag defaults off, and turns on via flag or env var."""
        @click.command('test_func')
        @click_debug_option
        def test_func(debug: bool) -> None:
            """Function for testing options."""
            click.echo(debug)

        by_default: Result = self.runner.invoke(test_func, [])
        with_flag: Result = self.runner.invoke(test_func, args=['--debug'])
        with_envvar: Result = self.runner.invoke(test_func, env={'F_DEBUG': '1'})
        self.assertEqual(by_default.output, 'False\n',
                         'output should be `False` when flag is not '
                         'present')
        self.assertEqual(with_flag.output, 'True\n',
                         'output should be `True` when flag is '
                         'present')
        self.assertEqual(with_envvar.output, 'True\n',
                         'output should be `True` when flag is not'
                         'present but env var is set')

    def test_click_verbose_option(self) -> None:
        """The --verbose flag defaults off, and turns on via flag or env var."""
        @click.command('test_func')
        @click_verbose_option
        def test_func(verbose: bool) -> None:
            """Function for testing options."""
            click.echo(verbose)

        by_default: Result = self.runner.invoke(test_func, [])
        with_flag: Result = self.runner.invoke(test_func, args=['--verbose'])
        with_envvar: Result = self.runner.invoke(test_func, env={'F_VERBOSE': '1'})
        self.assertEqual(by_default.output, 'False\n',
                         'output should be `False` when flag is not '
                         'present')
        self.assertEqual(with_flag.output, 'True\n',
                         'output should be `True` when flag is '
                         'present')
        self.assertEqual(with_envvar.output, 'True\n',
                         'output should be `True` when flag is not'
                         'present but env var is set')
class TestMultipleOptionDecorators(TestCase):
    """Tests for the combined option decorator."""

    runner: CliRunner = CliRunner()

    def test_click_common_options(self) -> None:
        """Both common flags should come through as keyword arguments."""
        cli_args = ['--debug', '--verbose']

        @click.command('test_func')
        @click_common_options
        def test_func(**kwargs: Dict[str, Any]) -> None:
            """Function for testing options."""
            click.echo(json.dumps(kwargs))

        raw_output = self.runner.invoke(test_func, cli_args).output
        parsed = json.loads(raw_output.replace('\n', ''))
        self.assertEqual(parsed, {'debug': True, 'verbose': True})
|
994,707 | 494c5df655ba604ef3183c68f7b4eac9244d71a6 |
# date: 2019.08.01
# https://stackoverflow.com/questions/57308598/rectangle-moving/57309451#57309451
# https://www.qtcentre.org/threads/32958-multiple-QPropertyAnimations-after-each-other-how
# https://doc.qt.io/qt-5/qpropertyanimation.html
# https://doc.qt.io/qt-5/qtimer.html
# https://doc.qt.io/qtforpython/PySide2/QtCore/QTimer.html
from PyQt5.QtWidgets import QWidget, QApplication, QFrame, QPushButton
from PyQt5.QtCore import QPropertyAnimation, QRect
from PyQt5.QtGui import QFont
import sys
class Example(QWidget):
    """Demo window in which a green frame loops around the window edges.

    The four ``doAnimation_N`` slots remain the public interface (the
    start button and each animation's ``finished`` signal connect to
    them); each one now delegates to a single ``_animate`` helper instead
    of repeating the QPropertyAnimation boilerplate four times.
    """

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        """Create the window, build its widgets and show it."""
        self.setWindowTitle("Animation Window")
        self.setGeometry(100, 100, 400, 400)
        self.widgets()
        self.show()

    def widgets(self):
        """Build the start button and the frame that gets animated."""
        font = QFont("Times New Roman")
        font.setPixelSize(20)
        self.start = QPushButton("Start", self)
        self.start.setFont(font)
        self.start.setGeometry(100, 100, 100, 50)
        self.start.clicked.connect(self.doAnimation_1)
        self.frame = QFrame(self)
        self.frame.setStyleSheet("background-color:darkGreen;")
        self.frame.setFrameStyle(QFrame.Panel | QFrame.Raised)
        self.frame.setGeometry(250, 100, 100, 100)

    def _animate(self, start_rect, end_rect, on_finished):
        """Run a 1-second geometry animation, then invoke ``on_finished``.

        Stored on ``self.anim`` so the animation is not garbage-collected
        while it is running.
        """
        self.anim = QPropertyAnimation(self.frame, b"geometry")
        self.anim.setDuration(1000)
        self.anim.setStartValue(start_rect)
        self.anim.setEndValue(end_rect)
        self.anim.finished.connect(on_finished)
        self.anim.start()

    def doAnimation_1(self):
        """Top edge: top-left corner to top-right corner."""
        self._animate(QRect(0, 0, 100, 100), QRect(300, 0, 100, 100), self.doAnimation_2)

    def doAnimation_2(self):
        """Right edge: top-right corner to bottom-right corner."""
        self._animate(QRect(300, 0, 100, 100), QRect(300, 300, 100, 100), self.doAnimation_3)

    def doAnimation_3(self):
        """Bottom edge: bottom-right corner to bottom-left corner."""
        self._animate(QRect(300, 300, 100, 100), QRect(0, 300, 100, 100), self.doAnimation_4)

    def doAnimation_4(self):
        """Left edge: bottom-left corner back to the start, then loop."""
        self._animate(QRect(0, 300, 100, 100), QRect(0, 0, 100, 100), self.doAnimation_1)
if __name__ == "__main__":
    # Standard Qt bootstrap: keep a reference to the window so it is not
    # garbage-collected before the event loop exits.
    app = QApplication(sys.argv)
    window = Example()
    sys.exit(app.exec_())
|
994,708 | b3b79b80a7ea3fe4cee9e319c5a057ac6df8acd3 | import os
from demo_env.envs.demo_env import DemoEnv
from stable_baselines.common.policies import MlpPolicy
from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv
from stable_baselines import PPO2
def train():
    """Train PPO2 on DemoEnv across all CPU cores and save the model."""
    workers = os.cpu_count()
    env = SubprocVecEnv([lambda: DemoEnv() for _ in range(workers)])
    # Tiny shared-nothing value/policy heads: one hidden layer of 4 units each.
    arch = {'net_arch': [dict(vf=[4], pi=[4])]}
    model = PPO2(MlpPolicy, env, verbose=1, policy_kwargs=arch)
    model.learn(total_timesteps=int(1e6))
    model.save("ppo2_DemoEnv")
    env.close()
    del model
def run():
    """Load the trained PPO2 model, roll out one episode with rendering,
    and print the episode's total reward."""
    env = DummyVecEnv([lambda: DemoEnv()])
    model = PPO2.load("ppo2_DemoEnv", env)
    obs = env.reset()
    sum_rew = 0
    while True:
        action, _states = model.predict(obs)
        obs, rewards, dones, info = env.step(action)
        env.render()
        # Vectorized envs return arrays; index 0 is our single environment.
        sum_rew += rewards[0]
        if dones[0]:  # idiom fix: was `dones[0] == True`
            print("Total reward: ", sum_rew)
            break
    # BUG FIX: the env was never closed here (train() closes its own).
    env.close()
if __name__ == '__main__':
train()
run() |
994,709 | 3b0922bc08da4fd3ee20c892e9b2786bd920ca06 | def hotdogify(my_string):
"""
This functions adds "_hotdog" to any string.
"""
return '{}_hotdog'.format(my_string)
|
994,710 | 8905b88c84f0b6fcc5e1d8ccc4bd929db36d9ee0 | import pytest
def one():
    """Return the integer constant 1 (trivial target for the tests below)."""
    return 1
def test_one():
    """one() is expected to return exactly 1 (this test expects success)."""
    assert one() == 1
class DanielPracticeError(Exception):
    """Custom exception used to practice pytest.raises assertions."""
# error_we_raise=None gives the argument a default value, so a bare call
# failing_function() leaves error_we_raise as None and falls through to the
# generic `raise Exception` branch.
def failing_function(error_we_raise=None):
    """Raise the exception type requested by *error_we_raise*."""
    if error_we_raise == TypeError:
        raise TypeError
    if error_we_raise == DanielPracticeError:
        raise DanielPracticeError
    raise Exception
def test_failing_function():
    """pytest.raises must see exactly the type failing_function raises."""
    # Same pattern for all three branches: request a type, call the function
    # under test inside the context manager, and let pytest assert the raise.
    for exc_type in (TypeError, DanielPracticeError, Exception):
        with pytest.raises(exc_type):
            failing_function(exc_type)
|
994,711 | 8fb2dc483efc385c77cad6b1ebcf7427813781c6 |
from tensorflow.python.ops.gen_math_ops import maximum
from examples import creare_padding_mask, create_look_ahead_mask, Transformer
import logging
import tensorflow as tf
from tensorflow.python.autograph.pyct import transformer
import tensorflow_datasets as tfds
from loss import CustomSchedule
import matplotlib.pyplot as plt
import time
# supppress warnings
logging.getLogger("tensorflow").setLevel(logging.ERROR)
"""
train set 50000
val set 1100
test set 2000
"""
examples, metadata = tfds.load("ted_hrlr_translate/pt_to_en", with_info = True,
as_supervised=True)
train_examples, val_examples = examples["train"], examples["validation"]
# down loading text tokenizer...
model_name = "ted_hrlr_translate_pt_en_converter"
tf.keras.utils.get_file( f"{model_name}.zip",
f"https://storage.googleapis.com/download.tensorflow.org/models/{model_name}.zip",
cache_dir=".", cache_subdir="", extract=True
)
tokenizers = tf.saved_model.load(model_name)
BUFFER_SIZE = 20000
BATCH_SIZE = 64
# tokenize and batches make
def tokenize_pairs(pt, en):
    """Tokenize one (portuguese, english) sentence batch.

    Uses the saved-model tokenizers loaded at module level; the ragged
    tokenizer output is densified into a zero-padded tensor (padding id 0
    is what the loss/accuracy masks rely on).
    """
    pt = tokenizers.pt.tokenize(pt).to_tensor()
    en = tokenizers.en.tokenize(en).to_tensor()
    return pt, en
def make_batches(ds):
    """Cache, shuffle, batch and tokenize a dataset, prefetching for training.

    Caching first means epochs after the first skip the source reads;
    tokenization is parallelized and prefetched via AUTOTUNE.
    """
    return(ds.cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
           .map(tokenize_pairs, num_parallel_calls=tf.data.AUTOTUNE).prefetch(tf.data.AUTOTUNE)
           )
def create_masks(inp, tar):
    """Build the three attention masks for one (input, target) batch.

    Returns (enc_padding_mask, combined_mask, dec_padding_mask); the
    combined mask is the element-wise maximum of the target padding mask
    and the triangular look-ahead mask.
    """
    # Encoder padding mask
    enc_padding_mask = creare_padding_mask(inp)
    # Used in the 2nd attention block in the decoder.
    # This padding mask is used to mask the encoder outputs.
    dec_padding_mask = creare_padding_mask(inp)
    # Used in the 1st attention block in the decoder.
    # It is used to pad and mask future tokens in the input received by
    # the decoder.
    look_ahead_mask = create_look_ahead_mask(tf.shape(tar)[1])
    dec_target_padding_mask = creare_padding_mask(tar)
    combined_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask)
    return enc_padding_mask, combined_mask, dec_padding_mask
def loss_function(real, pred):
    """Masked sparse-categorical cross-entropy.

    Padding positions (token id 0 in `real`) are excluded from both the
    loss sum and the normalizer, so the result is the mean loss over
    non-padding tokens. Relies on the module-level `loss_object`
    (constructed with reduction="none").
    """
    mask = tf.math.logical_not(tf.math.equal(real, 0))
    loss_ = loss_object(real, pred)
    # Cast the boolean mask to the loss dtype so it can zero padded positions.
    mask = tf.cast(mask, dtype=loss_.dtype)
    loss_ *= mask
    return tf.reduce_sum(loss_)/tf.reduce_sum(mask)
def accuracy_function(real, pred):
    """Token-level accuracy over non-padding positions (padding id == 0)."""
    predicted_ids = tf.argmax(pred, axis=2)
    correct = tf.equal(real, predicted_ids)
    not_padding = tf.math.logical_not(tf.math.equal(real, 0))
    # Count a position only when it is both non-padding and predicted right.
    correct = tf.cast(tf.math.logical_and(not_padding, correct), dtype=tf.float32)
    valid = tf.cast(not_padding, dtype=tf.float32)
    return tf.reduce_sum(correct)/tf.reduce_sum(valid)
# Parameter
# num_layers = 4
num_layers = 8
d_model = 128
dff = 512
num_heads = 8
dropout_rate = 0.1
train_batches = make_batches(train_examples)
val_batches = make_batches(val_examples)
learning_rate = CustomSchedule(d_model)
optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9)
temp_learning_rate_schedule = CustomSchedule(d_model)
# plt.plot(temp_learning_rate_schedule(tf.range(40000, dtype=tf.float32)))
# plt.ylabel("Learning Rate")
# plt.xlabel("Train Step")
# plt.show()
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction="none"
)
# Build the pt->en translation model.
# BUG FIX: target_vocab_size previously used the *Portuguese* tokenizer's
# vocabulary size; the decoder emits English tokens, so it must be sized by
# the English tokenizer (as in the official TensorFlow transformer tutorial).
transformer = Transformer(
    num_layers=num_layers,
    d_model=d_model,
    num_heads=num_heads,
    dff=dff,
    input_vocab_size=tokenizers.pt.get_vocab_size(),
    target_vocab_size=tokenizers.en.get_vocab_size(),
    pe_input=1000,
    pe_target=1000,
    rate=dropout_rate
)
checkporint_path = "./checkpoints/train"
ckpt = tf.train.Checkpoint(transformer=transformer, optimizer= optimizer)
ckpt_manager = tf.train.CheckpointManager(ckpt, checkporint_path, max_to_keep=5 )
# if a checkpoint exists, restore the latest checkpoint.
if ckpt_manager.latest_checkpoint:
ckpt.restore(ckpt_manager.latest_checkpoint)
print("Latest checkpoint restored!!")
# The target is divided into tar_inp and tar_real.
# tar_inp is passed as an input to the decoder.
# tar_real is that same input shifted by 1: At each location in tar_input, tar_real contains the next token that should be predicted.
# For example, sentence = "SOS A lion in the jungle is sleeping EOS"
# tar_inp = "SOS A lion in the jungle is sleeping"
# tar_real = "A lion in the jungle is sleeping EOS"
EPOCHS = 40
train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.Mean(name='train_accuracy')
# The @tf.function trace-compiles train_step into a TF graph for faster
# execution. The function specializes to the precise shape of the argument
# tensors. To avoid re-tracing due to the variable sequence lengths or variable
# batch sizes (the last batch is smaller), use input_signature to specify
# more generic shapes.
train_step_signature = [
tf.TensorSpec(shape=(None, None), dtype=tf.int64),
tf.TensorSpec(shape=(None, None), dtype=tf.int64),
]
@tf.function(input_signature=train_step_signature)
def train_step(inp, tar):
    """Run one optimizer step on a single (pt, en) batch.

    Teacher forcing: tar_inp (all but last token) feeds the decoder, and
    tar_real (shifted left by one) is what each position must predict.
    The fixed input_signature prevents tf.function retracing on varying
    sequence lengths and the smaller final batch.
    """
    tar_inp = tar[:, :-1]
    tar_real = tar[:, 1:]
    enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp, tar_inp)
    with tf.GradientTape() as tape:
        # Training=True enables dropout inside the transformer.
        predictions, _ = transformer(inp, tar_inp,
                                     True,
                                     enc_padding_mask,
                                     combined_mask,
                                     dec_padding_mask)
        loss = loss_function(tar_real, predictions)
    gradients = tape.gradient(loss, transformer.trainable_variables)
    optimizer.apply_gradients(zip(gradients, transformer.trainable_variables))
    # Accumulate running metrics for the per-epoch report.
    train_loss(loss)
    train_accuracy(accuracy_function(tar_real, predictions))
def evaluate(sentence, max_length=40):
    """Greedily translate one Portuguese sentence into English.

    Returns (text, tokens, attention_weights): the detokenized English
    string, the token strings, and the attention maps from the final
    decoding step. Decoding stops at the end token or after max_length steps.
    """
    # inp sentence is portuguese, hence adding the start and end token
    sentence = tf.convert_to_tensor([sentence])
    sentence = tokenizers.pt.tokenize(sentence).to_tensor()
    encoder_input = sentence
    # as the target is english, the first word to the transformer should be the
    # english start token. Tokenizing '' yields exactly [START, END].
    start, end = tokenizers.en.tokenize([''])[0]
    output = tf.convert_to_tensor([start])
    output = tf.expand_dims(output, 0)
    for i in range(max_length):
        enc_padding_mask, combined_mask, dec_padding_mask = create_masks(
            encoder_input, output)
        # predictions.shape == (batch_size, seq_len, vocab_size)
        predictions, attention_weights = transformer(encoder_input,
                                                     output,
                                                     False,
                                                     enc_padding_mask,
                                                     combined_mask,
                                                     dec_padding_mask)
        # select the last word from the seq_len dimension
        predictions = predictions[:, -1:, :]  # (batch_size, 1, vocab_size)
        predicted_id = tf.argmax(predictions, axis=-1)
        # concatentate the predicted_id to the output which is given to the decoder
        # as its input.
        output = tf.concat([output, predicted_id], axis=-1)
        # return the result if the predicted_id is equal to the end token
        if predicted_id == end:
            break
    # output.shape (1, tokens)
    text = tokenizers.en.detokenize(output)[0]  # shape: ()
    tokens = tokenizers.en.lookup(output)[0]
    return text, tokens, attention_weights
def print_translation(sentence, tokens, ground_truth):
    """Pretty-print the input sentence, model prediction and reference.

    `tokens` is a scalar string tensor (the detokenized text returned by
    evaluate()), hence the .numpy().decode() round trip.
    """
    print(f'{"Input:":15s}: {sentence}')
    print(f'{"Prediction":15s}: {tokens.numpy().decode("utf-8")}')
    print(f'{"Ground truth":15s}: {ground_truth}')
for epoch in range(EPOCHS):
start = time.time()
train_loss.reset_states()
train_accuracy.reset_states()
# inp -> portuguese, tar -> english
for (batch, (inp, tar)) in enumerate(train_batches):
train_step(inp, tar)
if batch % 50 == 0:
print(f'Epoch {epoch + 1} Batch {batch} Loss {train_loss.result():.4f} Accuracy {train_accuracy.result():.4f}')
if (epoch + 1) % 5 == 0:
ckpt_save_path = ckpt_manager.save()
print(f'Saving checkpoint for epoch {epoch+1} at {ckpt_save_path}')
print(f'Epoch {epoch + 1} Loss {train_loss.result():.4f} Accuracy {train_accuracy.result():.4f}')
print(f'Time taken for 1 epoch: {time.time() - start:.2f} secs\n')
# test
sentence = "este é um problema que temos que resolver."
ground_truth = "this is a problem we have to solve ."
translated_text, translated_tokens, attention_weights = evaluate(sentence)
print_translation(sentence, translated_text, ground_truth)
sentence = "os meus vizinhos ouviram sobre esta ideia."
ground_truth = "and my neighboring homes heard about this idea ."
translated_text, translated_tokens, attention_weights = evaluate(sentence)
print_translation(sentence, translated_text, ground_truth)
sentence = "vou então muito rapidamente partilhar convosco algumas histórias de algumas coisas mágicas que aconteceram."
ground_truth = "so i \'ll just share with you some stories very quickly of some magical things that have happened ."
translated_text, translated_tokens, attention_weights = evaluate(sentence)
print_translation(sentence, translated_text, ground_truth)
sentence = "este é o primeiro livro que eu fiz."
ground_truth = "this is the first book i've ever done."
translated_text, translated_tokens, attention_weights = evaluate(sentence)
print_translation(sentence, translated_text, ground_truth)
def plot_attention_head(in_tokens, translated_tokens, attention):
    """Draw one head's attention matrix on the current matplotlib axes.

    Columns are input (Portuguese) tokens, rows are generated (English)
    tokens; token byte strings are decoded for the tick labels.
    """
    # The plot is of the attention when a token was generated.
    # The model didn't generate `<START>` in the output. Skip it.
    translated_tokens = translated_tokens[1:]
    ax = plt.gca()
    ax.matshow(attention)
    ax.set_xticks(range(len(in_tokens)))
    ax.set_yticks(range(len(translated_tokens)))
    labels = [label.decode('utf-8') for label in in_tokens.numpy()]
    ax.set_xticklabels(
        labels, rotation=90)
    labels = [label.decode('utf-8') for label in translated_tokens.numpy()]
    ax.set_yticklabels(labels)
head = 0
# shape: (batch=1, num_heads, seq_len_q, seq_len_k)
attention_heads = tf.squeeze(
attention_weights['decoder_layer4_block2'], 0)
attention = attention_heads[head]
print(attention.shape)
in_tokens = tf.convert_to_tensor([sentence])
in_tokens = tokenizers.pt.tokenize(in_tokens).to_tensor()
in_tokens = tokenizers.pt.lookup(in_tokens)[0]
print(in_tokens)
print(translated_tokens)
plot_attention_head(in_tokens, translated_tokens, attention)
def plot_attention_weights(sentence, translated_tokens, attention_heads):
    """Plot one subplot (2x4 grid) per attention head for the sentence.

    The sentence is re-tokenized here so the x-axis labels match the
    encoder input. (Removed a leftover bare ``in_tokens`` expression
    statement — a notebook-cell echo with no effect in a script.)
    """
    in_tokens = tf.convert_to_tensor([sentence])
    in_tokens = tokenizers.pt.tokenize(in_tokens).to_tensor()
    in_tokens = tokenizers.pt.lookup(in_tokens)[0]
    fig = plt.figure(figsize=(16, 8))
    for h, head in enumerate(attention_heads):
        ax = fig.add_subplot(2, 4, h+1)
        plot_attention_head(in_tokens, translated_tokens, head)
        ax.set_xlabel(f'Head {h+1}')
    plt.tight_layout()
    plt.show()
plot_attention_weights(sentence, translated_tokens,
attention_weights['decoder_layer4_block2'][0])
sentence = "Eu li sobre triceratops na enciclopédia."
ground_truth = "I read about triceratops in the encyclopedia."
translated_text, translated_tokens, attention_weights = evaluate(sentence)
print_translation(sentence, translated_text, ground_truth)
plot_attention_weights(sentence, translated_tokens,
attention_weights['decoder_layer4_block2'][0]) |
994,712 | d8e1379c43a70b9c2ec4a5046c71d0fbb2968c07 | from .sortingcomparison import SortingComparison, MappedSortingExtractor, compute_performance, confusion_matrix
from .multisortingcomparison import MultiSortingComparison
|
994,713 | c71123b60fd7098fa8c550a724b177f23b72b8d6 | import numpy as np
import numpy.random as rd
def int_plus(params):
num = params['num']
if num == 0:
return False
low = params['low']
high = params['high']
maximum = params['maximum']
if low > high:
raise Exception('[Integer Plus] high must be lager than low.')
if low == high:
raise Exception('[Integer Plus] it is meaningless if low equals high.')
if maximum < high or maximum < low*2:
raise Exception('[Integer Plus] too small maximum.')
problem_list = []
ans_list = []
for idx in range(1,1+num):
while True:
num1 = rd.randint(low,high)
num2 = rd.randint(low,high)
if num1 + num2 > maximum:
continue
else:
if 0 < num < 100:
head = '[P%02d]'%idx
elif 100 <= num < 1000:
head = '[P%03d]'%idx
else:
head = '[P%d]'%idx
item1 = '%d'%num1
item2 = '%d'%num2
if num1 < 0:
item1 = '(%s)'%item1
if num2 < 0:
item2 = '(%s)'%item2
problem_list.append('%s %s + %s = \n'%(head,item1,item2))
ans_list.append('%s %d\n'%(head,num1+num2))
break
return problem_list, ans_list
def int_subtract(params):
num = params['num']
if num == 0:
return False
low = params['low']
high = params['high']
neg = params['neg']
if neg == 'False' or neg == 'false':
neg = False
if low > high:
raise Exception('[Integer Subtract] high must be lager than low.')
if low == high:
raise Exception('[Integer Subtract] it is meaningless if low equals high.')
if neg is False:
if low < 0:
raise Exception('[Integer Subtract] negative low while neg is false.')
if high < 0:
raise Exception('[Integer Subtract] negative high while neg is false.')
problem_list = []
ans_list = []
for idx in range(1,1+num):
num1 = rd.randint(low,high)
num2 = rd.randint(low,high)
if neg is False and num1 < num2:
num1, num2 = num2, num1
if 0 < num < 100:
head = '[S%02d]'%idx
elif 100 <= num < 1000:
head = '[S%03d]'%idx
else:
head = '[S%d]'%idx
item1 = '%d'%num1
item2 = '%d'%num2
if num1 < 0:
item1 = '(%s)'%item1
if num2 < 0:
item2 = '(%s)'%item2
problem_list.append('%s %s - %s = \n'%(head,item1,item2))
ans_list.append('%s %d\n'%(head,num1-num2))
return problem_list, ans_list
def int_multiply(params):
num = params['num']
if num == 0:
return False
low = params['low']
high = params['high']
maximum = params['maximum']
marker = params['marker']
if low > high:
raise Exception('[Integer Multiply] high must be lager than low.')
if low == high:
raise Exception('[Integer Multiply] it is meaningless if low equals high.')
if maximum < high or maximum < low*low:
raise Exception('[Integer Multiply] too small maximum.')
problem_list = []
ans_list = []
for idx in range(1,1+num):
while True:
num1 = rd.randint(low,high)
num2 = rd.randint(low,high)
if num1 * num2 > maximum:
continue
else:
if 0 < num < 100:
head = '[M%02d]'%idx
elif 100 <= num < 1000:
head = '[M%03d]'%idx
else:
head = '[M%d]'%idx
item1 = '%d'%num1
item2 = '%d'%num2
if num1 < 0:
item1 = '(%s)'%item1
if num2 < 0:
item2 = '(%s)'%item2
problem_list.append('%s %s %s %s = \n'%(head,item1,marker,item2))
ans_list.append('%s %d\n'%(head,num1*num2))
break
return problem_list, ans_list
def int_divide(params):
num = params['num']
if num == 0:
return False
maximum = params['maximum']
maximum_divisor = params['maximum_divisor']
marker = params['marker']
#decimal = params['decimal']
mod_ = params['mod']
if maximum <= 3:
raise Exception('[Integer Divide] maximum must be larger than 3.')
problem_list = []
ans_list = []
for idx in range(1,1+num):
while True:
num1 = rd.randint(2,maximum)
num2 = rd.randint(2,maximum_divisor)
if mod_ is True or mod_ == 'True' or mod_ == 'true':
res = num1 // num2
if rd.randint(2) == 1:#p = 1/2
res, num2 = num2, res
offset = num1 - res*num2
if offset == 0 or offset >= num2:
continue
else:
tail_problem = '%d %s %d = \n'%(num1,marker,num2)
tail_ans = '%d ... %d\n'%(res,offset)
elif mod_ is False or mod_ == 'False' or mod_ == 'false':
res = num1 // num2
num1 = res * num2
if rd.randint(2) == 1:#p = 1/2
res, num2 = num2, res
tail_problem = '%d %s %d = \n'%(num1,marker,num2)
tail_ans = '%d\n'%(res)
else:
raise Exception('[Integer Divide] invalid mod.')
if 0 < num < 100:
head = '[D%02d]'%idx
elif 100 <= num < 1000:
head = '[D%03d]'%idx
else:
head = '[D%d]'%idx
problem_list.append('%s %s'%(head,tail_problem))
ans_list.append('%s %s'%(head,tail_ans))
break
return problem_list, ans_list
|
994,714 | 395374bcebdc4102dd90dd286dc8e560fd54463f | from django.urls import path
from django.contrib.auth.views import (LoginView, LogoutView,
PasswordResetView,
PasswordResetConfirmView,
PasswordResetDoneView,
PasswordResetCompleteView
)
from users.views import registeruser, userProfile
# URL routes for the users app: registration, profile, session login/logout,
# and the four-step password-reset flow. All auth views are the stock Django
# class-based views, pointed at app-specific templates.
urlpatterns = [
    path('register/', registeruser, name="register"),
    path('profile/', userProfile, name="profile"),
    path('login/', LoginView.as_view(template_name="user/login.html"), name="login"),
    path('logout/', LogoutView.as_view(template_name="user/log_out.html"), name="logout"),
    # Password reset: request form -> "email sent" page -> token link -> done.
    path('password-reset/', PasswordResetView.as_view(
        template_name="user/password_reset_form.html"), name="password_reset"),
    path('password-reset/done/', PasswordResetDoneView.as_view(
        template_name="user/password_reset_done.html"), name="password_reset_done"),
    path('password-reset-confirm/<uidb64>/<token>/', PasswordResetConfirmView.as_view(
        template_name="user/password_reset_confirm.html"), name="password_reset_confirm"),
    path('password-reset-complete/', PasswordResetCompleteView.as_view(
        template_name="user/password_reset_complete.html"), name="password_reset_complete"),
]
|
994,715 | b847126fa05e82ea69ce8d0bf82305679bdc772e | import pandas as pd
import numpy as np
import helpers
train_data=helpers.train_data
test_data=helpers.test_data
pd.set_option('display.max_rows',train_data.shape[0]+1)
pd.set_option('display.max_columns',train_data.shape[1]+1)
# data shapes
#print(train_data.shape)
satisfied_number=len(train_data.where(train_data['satisfaction'] == 'satisfied').dropna())
dis_neur_number=len(train_data.where(train_data['satisfaction'] == 'neutral or dissatisfied').dropna())
#print(f"satisfied : {satisfied_number} , neural or disatisfied : {dis_neur_number}")
# ploting
# understanding data
data=pd.concat([train_data,test_data])
#print(data.describe())
#print(data.shape)
# spliting data function (x2)
def split_data(donne):
    """Split a satisfaction-survey DataFrame into features X and labels y.

    y is 1 for 'satisfied' rows and 0 otherwise. The first two columns and
    the trailing 'satisfaction' column are dropped, missing
    'Arrival Delay in Minutes' values are imputed with the column mean,
    and categorical columns are one-hot encoded.

    BUG FIX: the original wrote the imputed column back into the caller's
    DataFrame; the imputation is now done on a local copy so the input
    frame is left untouched.
    """
    labels = donne.get('satisfaction')
    y = np.asarray([1 if e == 'satisfied' else 0 for e in labels])
    delays = donne["Arrival Delay in Minutes"]
    donne = donne.assign(**{"Arrival Delay in Minutes": delays.fillna(delays.mean())})
    x = pd.get_dummies(donne.iloc[:, 2:-1])
    return x, y
X_train_data,Y_train_data=split_data(train_data)
X_test_data,Y_test_data=split_data(test_data)
|
994,716 | ffd154d9f3ec999bc5433404fb4a18bf9d9344a5 | # ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traits.api import Any, Bool
from pychron.envisage.tasks.base_editor import BaseTraitsEditor
# ============= standard library imports ========================
# ============= local library imports ==========================
class LaserEditor(BaseTraitsEditor):
    """Base editor for laser tasks.

    Subclasses implement ``_do_execute``; ``do_execute`` resets the
    completion flag, stores the laser manager and delegates to it.
    """
    component = Any
    _execute_thread = None
    was_executed = False
    _laser_manager = Any
    completed = Bool(False)

    def stop(self):
        """Hook for subclasses; the default implementation does nothing."""
        pass

    def do_execute(self, lm):
        """Remember the laser manager *lm*, clear ``completed`` and run."""
        self.completed = False
        self._laser_manager = lm
        return self._do_execute()

    def _do_execute(self):
        """Subclass hook that performs the actual work."""
        pass

    def block(self):
        """Wait for the worker thread (if any), then mark completion."""
        thread = self._execute_thread
        if thread:
            thread.join()
        self.completed = True
|
994,717 | f4baecc1c0b3682df5e768b3845ed2e7b62f1044 | from bigO import BigO
from random import randint
from sorting_algorithms import *
lib = BigO()
# Estimate the time complexity of bucketsort on randomly ordered input.
complexity = lib.test(bucketsort, "random")
# complexity = lib.test(countsort_neg, "sorted")
# complexity = lib.test(countsort_neg, "reversed")
# complexity = lib.test(countsort_neg, "partial")
# complexity = lib.test(countsort_neg, "Ksorted")
# lib.test_all(countsort)
994,718 | 8f9078ead1a42e260c505fe177d86acd53f41d54 | print('a) Apresente a imagem e as informações de número de linhas e colunas; número de canais e número total de pixels;')
import cv2
import numpy as np
nome_arquivo = "arroz.png"
imgbgr = cv2.imread(nome_arquivo,1) # Carrega imagem (0 - Binária e Escala de Cinza; 1 - Colorida (BGR))
img_arroz = cv2.cvtColor(imgbgr,cv2.COLOR_BGR2RGB) #transformar em rgb
lin, col, canais = np.shape(img_arroz)
print('Tipo: ',img_arroz.dtype)
print('Número de linhas: ' + str(lin))
print('Número de colunas: ' + str(col))
print('Número de canais: ' + str(canais))
print('Dimensão:' + str (lin) + 'x' + str (col))
print('Portanto, número de pixels é: ' + str(lin*col))
print(img_arroz) #matriz da imagem #cada posição representa um pixel e recebe um valor
#Imagem
from matplotlib import pyplot as plt
import os
plt.figure('Imagem arroz')
plt.imshow(img_arroz,cmap="gray") # https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html
plt.title("Imagem arroz")
plt.show()
print('-'*100)
print('b)Faça um recorte da imagem para obter somente a área de interesse. Utilize esta imagem para a solução das próximas alternativas;')
# Recortar uma imagem
img_recorte = img_arroz[109:370,160:433]
print('INFORMAÇÕES IMAGEM RECORTE')
lin_r, col_r, canais_r = np.shape(img_recorte)
print('Dimensão: ' + str(lin_r) +' x '+ str(col_r))
print('Número de canais :' + str(canais_r))
plt.figure('Imagem Recortada')
fig = plt.figure('Imagem Recortada')
plt.imshow(img_recorte,cmap="gray") # https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html
plt.title("Imagem recortada")
plt.xticks([]) # Eliminar o eixo X
plt.yticks([]) # Eliminar o eixo Y
plt.show()
#nome = 'folha_recortada'
#fig.savefig((nome+'.png'), bbox_inches="tight")
#os.startfile(nome+'.png')
plt.figure('Comparação sem e com recorte')
plt.subplot(1,2,1)
plt.imshow(img_arroz) # https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html
plt.title("Sem recorte")
plt.colorbar(orientation = 'horizontal')
plt.subplot(1,2,2)
plt.imshow(img_recorte) # https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html
plt.title("Com recorte")
plt.colorbar(orientation = 'horizontal')
plt.show()
#nome = 'Comparação sem e com recorte'
#fig.savefig((nome+'.png'), bbox_inches="tight")
#os.startfile(nome+'.png')
print('-'*100)
print('c)Converta a imagem colorida para uma de escala de cinza (intensidade) e a apresente utilizando os mapas de cores “Escala de Cinza” e “JET”;')
# Display images with matplotlib.
# BUG FIX: img_recorte is already RGB (converted from BGR right after loading),
# so the grayscale conversion must use COLOR_RGB2GRAY; the original
# COLOR_BGR2GRAY applied the R and B luminance weights to the wrong channels.
img_arrozcinza = cv2.cvtColor(img_recorte,cv2.COLOR_RGB2GRAY)
print(img_arrozcinza)
#nome = 'img_arrozcinza'
#fig.savefig((nome+'.png'), bbox_inches="tight")
#os.startfile(nome+'.png')
plt.figure('Imagens')
plt.subplot(1,2,1)
plt.imshow(img_arrozcinza,cmap="gray") # https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html
plt.title("Escala de Cinza")
plt.colorbar(orientation = 'horizontal')
plt.subplot(1,2,2)
plt.imshow(img_arrozcinza,cmap="jet") # same intensities rendered with the JET colormap
plt.title("JET")
plt.colorbar(orientation = 'horizontal')
plt.show()
print('-'*100)
print('d) Apresente a imagem em escala de cinza e o seu respectivo histograma; Relacione o histograma e a imagem')
# 256-bin intensity histogram of the grayscale crop.
histograma = cv2.calcHist([img_arrozcinza],[0],None,[256],[0,256])
dim = len(histograma)
print ('Dimensão do histograma: ' + str(dim))
plt.figure('Imagens')
plt.subplot(1,2,1)
plt.imshow(img_arrozcinza,cmap="gray")
plt.title("Escala de Cinza")
plt.subplot(1,2,2)
plt.plot(histograma,color = 'black')
plt.title("Histograma")
plt.xlabel("Valores de pixels")
plt.ylabel("Número de pixels")
plt.show()
dimen = len(histograma)
print('-'*100)
print('e) Utilizando a imagem em escala de cinza (intensidade) realize a segmentação da imagem de modo a remover o fundo da imagem utilizando um limiar manual e o limiar obtido pela técnica de Otsu. Nesta questão apresente o histograma com marcação dos limiares utilizados, a imagem limiarizada (binarizada) e a imagem colorida final obtida da segmentação. Explique os resultados')
# Images used in this question.
imgbgr = cv2.imread(nome_arquivo,1) # reload original (flag 1 - colour, BGR)
img_arroz = cv2.cvtColor(imgbgr,cv2.COLOR_BGR2RGB)
histograma = cv2.calcHist([img_arrozcinza],[0],None,[256],[0,256])
# Histogram of the grayscale image.
histo_cinza = cv2.calcHist([img_arrozcinza],[0], None, [256], [0, 256])
# Manual thresholding.
valor_limiar = 130 # threshold value
(L, img_limiar) = cv2.threshold(img_arrozcinza, valor_limiar, 255, cv2.THRESH_BINARY)
(LI, img_limiar_invertida) = cv2.threshold(img_arrozcinza, valor_limiar, 255, cv2.THRESH_BINARY_INV)
# Inverted: pixels below the threshold receive 255.
# Thresholding of the grayscale image by Otsu's technique.
(LO, img_otsu) = cv2.threshold(img_arrozcinza, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
# Displaying the images.
plt.figure('Limiarização e')
plt.subplot(3,3,1)
plt.imshow(img_recorte) # plot the RGB crop
plt.xticks([])
plt.yticks([])
plt.title('Imagem em RGB')
plt.subplot(3,3,2)
plt.imshow(img_arrozcinza, cmap="gray")
plt.xticks([])
plt.yticks([])
plt.title('Imagem em escala de cinza')
plt.subplot(3,3,3)
plt.plot(histo_cinza, color="black")
plt.axvline(x=valor_limiar, color="red") # vertical line marking the manual threshold
plt.title('Histograma em escala de cinza',)
plt.xlim([0, 256]) # X axis spans 0-256
plt.xlabel('Valores de pixels')
plt.ylabel('Número de pixels')
# Manually thresholded image and its histogram.
plt.subplot(3, 3, 4)
plt.imshow(img_limiar) # plot the binary image
plt.title('Imagem binária')
plt.subplot(3, 3, 5)
plt.imshow(img_limiar_invertida) # plot the inverted binary image
plt.title ('Imagem binária invertida')
# Image obtained by the Otsu technique and its histogram.
plt.subplot(3, 3, 6)
plt.imshow(img_otsu)
plt.title('Imagem OTSU')
plt.subplot(3, 3, 7)
plt.plot(histo_cinza, color="black")
plt.axvline(x=LO, color="red") # red line marking the Otsu threshold
plt.title('Histograma - OTSU')
plt.xlim([0, 256])
plt.xlabel('Valores de pixels')
plt.ylabel('Número de pixels')
plt.show()
print('-' * 100)
print('f)Apresente uma figura contendo a imagem selecionada nos sistemas RGB, Lab, HSV e YCrCb.')
# BUG FIX: img_recorte is RGB (it was converted from BGR right after loading),
# so the conversions must start from RGB.  The original COLOR_BGR2* flags
# silently swapped the R and B channels before converting.
imgLAB = cv2.cvtColor(img_recorte, cv2.COLOR_RGB2Lab)    # to Lab
imgHSV = cv2.cvtColor(img_recorte, cv2.COLOR_RGB2HSV)    # to HSV
imgYCR = cv2.cvtColor(img_recorte, cv2.COLOR_RGB2YCrCb)  # to YCrCb
plt.figure('Imagens em RGB, Lab, HSV, YcrCb')
plt.subplot(2, 2, 1)
plt.imshow(img_recorte)
plt.xticks([])
plt.yticks([])
plt.title('Imagem do arroz em RGB')
plt.subplot(2, 2, 2)
# NOTE: imshow treats 3-channel arrays as RGB, so the Lab/HSV/YCrCb panels
# are false-colour visualisations of those channel values.
plt.imshow(imgLAB)
plt.xticks([])
plt.yticks([])
plt.title('Imagem do arroz em Lab')
plt.subplot(2, 2, 3)
plt.imshow(imgHSV)
plt.xticks([])
plt.yticks([])
plt.title('Imagem do arroz em HSV')
plt.subplot(2, 2, 4)
plt.imshow(imgYCR)
plt.xticks([])
plt.yticks([])
plt.title('Imagem do arroz em YCrCb')
plt.show()
print('g)Apresente uma figura para cada um dos sistemas de cores (RGB, HSV, Lab e YCrCb) contendo a imagem de cada um dos canais e seus respectivos histogramas.')
# Per-channel histograms of the RGB crop.
hist_red = cv2.calcHist([img_recorte], [0], None, [256], [0,256])
hist_green = cv2.calcHist([img_recorte], [1], None, [256], [0,256])
hist_blue = cv2.calcHist([img_recorte], [2], None, [256], [0,256])
plt.figure('Sistemas de cores RGB')
plt.subplot(3, 4, 1)
plt.imshow(img_recorte) # plot the RGB image
plt.xticks([]) # hide the X axis
plt.yticks([]) # hide the Y axis
plt.title('Imagem RGB')
plt.subplot(3, 4, 2)
plt.imshow(img_recorte[:, :, 0])
plt.xticks([])
plt.yticks([])
plt.title('Canal Red - Vermelho')
plt.subplot(3, 4, 3)
plt.imshow(img_recorte[:, :, 1])
plt.xticks([])
plt.yticks([])
plt.title('Canal Green - Verde')
plt.subplot(3, 4, 4)
plt.imshow(img_recorte[:, :, 2]) # image of the B "blue" channel
plt.xticks([])
plt.yticks([])
plt.title('Canal Blue - Azul')
plt.subplot(3, 4, 5)
plt.plot(hist_red, color="red") # histogram of the R "red" channel
plt.title("Histograma - Red")
plt.xlim([0, 256])
plt.xlabel("Valores Pixels")
plt.ylabel("Número de Pixels")
plt.subplot(3, 4, 6)
plt.plot(hist_green, color="green") # histogram of the Green channel
plt.title("Histograma - Green")
plt.xlim([0, 256])
plt.xlabel("Valores Pixels")
plt.ylabel("Número de Pixels")
plt.subplot(3, 4, 7)
plt.plot(hist_blue, color="blue") # histogram of the Blue channel
plt.title("Histograma - Blue")
plt.xlim([0, 256])
plt.xlabel("Valores Pixels")
plt.ylabel("Número de Pixels")
plt.show()
#### Lab
hist_cL = cv2.calcHist([imgLAB], [0], None, [256], [0,256])
hist_cA = cv2.calcHist([imgLAB], [1], None, [256], [0,256])
hist_cB = cv2.calcHist([imgLAB], [2], None, [256], [0,256])
plt.figure('Lab')
plt.subplot(3, 4, 1)
plt.imshow(imgLAB) # plot the Lab image (false-colour)
plt.xticks([])
plt.yticks([])
plt.title('Imagem em Lab')
plt.subplot(3, 4, 2)
plt.imshow(imgLAB[:, :, 0]) # image of the L channel
plt.xticks([])
plt.yticks([])
plt.title('Canal L')
plt.subplot(3, 4, 3)
plt.imshow(imgLAB[:, :, 1]) # image of the a channel
plt.xticks([])
plt.yticks([])
plt.title('Canal A')
plt.subplot(3, 4, 4)
plt.imshow(imgLAB[:, :, 2]) # image of the b channel
plt.xticks([]) # hide the X axis
plt.yticks([]) # hide the Y axis
plt.title('Canal B')
plt.subplot(3, 4, 5)
plt.plot(hist_cL, color="black") # histogram of the L channel
plt.title("Histograma do canal L")
plt.xlim([0, 256])
plt.xlabel("Valores Pixels")
plt.ylabel("Número de Pixels")
plt.subplot(3, 4, 6)
plt.plot(hist_cA, color="black") # histogram of the a channel
plt.title("Histograma - a")
plt.xlim([0, 256])
plt.xlabel("Valores Pixels")
plt.ylabel("Número de Pixels")
plt.subplot(3, 4, 7)
plt.plot(hist_cB, color="black") # histogram of the b channel
plt.title("Histograma - b")
plt.xlim([0, 256])
plt.xlabel("Valores Pixels")
plt.ylabel("Número de Pixels")
plt.show()
# HSV
hist_H = cv2.calcHist([imgHSV], [0], None, [256], [0,256])
hist_S = cv2.calcHist([imgHSV], [1], None, [256], [0,256])
hist_V = cv2.calcHist([imgHSV], [2], None, [256], [0,256])
plt.figure('HSV')
plt.subplot(3, 4, 1)
plt.imshow(imgHSV)
plt.xticks([])
plt.yticks([])
plt.title('Imagem HSV')
plt.subplot(3, 4, 2)
plt.imshow(imgHSV[:, :, 0])
plt.xticks([])
plt.yticks([])
plt.title('Canal H')
plt.subplot(3, 4, 3)
plt.imshow(imgHSV[:, :, 1]) # image of the S channel
plt.xticks([])
plt.yticks([])
plt.title('Canal S')
plt.subplot(3, 4, 4)
plt.imshow(imgHSV[:, :, 2])
plt.xticks([])
plt.yticks([])
plt.title('Canal V')
plt.subplot(3, 4, 5)
plt.plot(hist_H, color="black")
plt.title("Histograma - H")
plt.xlim([0, 256])
plt.xlabel("Valores Pixels")
plt.ylabel("Número de Pixels")
plt.subplot(3, 4, 6)
plt.plot(hist_S, color="black")
plt.title("Histograma - S")
plt.xlim([0, 256])
plt.xlabel("Valores Pixels")
plt.ylabel("Número de Pixels")
plt.subplot(3, 4, 7)
plt.plot(hist_V, color="black")
plt.title("Histograma - V")
plt.xlim([0, 256])
plt.xlabel("Valores Pixels")
plt.ylabel("Número de Pixels")
plt.show()
# YCrCb
hist_Y = cv2.calcHist([imgYCR], [0], None, [256], [0, 256])
hist_Cr = cv2.calcHist([imgYCR], [1], None, [256], [0, 256])
hist_Cb = cv2.calcHist([imgYCR], [2], None, [256], [0, 256])
plt.figure('YCrCb')
plt.subplot(3, 4, 1)
plt.imshow(imgYCR) # plot the YCrCb image (false-colour)
plt.xticks([])
plt.yticks([])
plt.title('Imagem YCrCb')
plt.subplot(3, 4, 2)
plt.imshow(imgYCR[:, :, 0]) # image of the Y channel
plt.xticks([])
plt.yticks([])
plt.title('Canal Y')
plt.subplot(3, 4, 3)
plt.imshow(imgYCR[:, :, 1]) # image of the Cr channel
plt.xticks([])
plt.yticks([])
plt.title('Canal Cr')
plt.subplot(3, 4, 4)
plt.imshow(imgYCR[:, :, 2]) # image of the Cb channel
plt.xticks([])
plt.yticks([])
plt.title('Canal Cb')
plt.subplot(3, 4, 5)
plt.plot(hist_Y, color="black") # histogram of the Y channel
plt.title("Histograma - Y")
plt.xlim([0, 256])
plt.xlabel("Valores Pixels")
plt.ylabel("Número de Pixels")
plt.subplot(3, 4, 6)
plt.plot(hist_Cr, color="black") # histogram of the Cr channel
plt.title("Histograma - Cr")
plt.xlim([0, 256])
plt.xlabel("Valores Pixels")
plt.ylabel("Número de Pixels")
plt.subplot(3, 4, 7)
plt.plot(hist_Cb, color="black")
plt.title("Histograma - Cb")
plt.xlim([0, 256])
plt.xlabel("Valores Pixels")
plt.ylabel("Número de Pixels")
plt.show()
print('-'*100)
print('h) Encontre o sistema de cor e o respectivo canal que propicie melhor segmentação da imagem de modo a remover o fundo a imagem utilizando limiar manual e limiar obtido pela técnica de Otsu. Nesta questão apresente o histograma com marcação dos limiares utilizados, a imagem limiarizada (binarizada) e a imagem colorida final obtida da segmentação.Explique os resultados e sua escolha pelo sistema de cor e canal utilizado na segmentação. Nesta questão apresente a imagem limiarizada (binarizada) e a imagem colorida final obtida da segmentação.')
# Work on the R channel of the RGB crop.
r,g,b = cv2.split(img_recorte)
hist_r = cv2.calcHist([r],[0], None, [256],[0,256])
# Manual thresholding.
vl = 140 # threshold value
(LL, img_limiar2) = cv2.threshold(r, vl, 255, cv2.THRESH_BINARY)
# Thresholding of the image by Otsu's technique.
(LOO, img_otsu2) = cv2.threshold(r, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
# BUG FIX: the two masks were swapped relative to the variable names and the
# plot titles below — img_segmentada_r (shown as "OTSU") used the manual mask
# and img_segmentada_rmanual (shown as "MANUAL") used the Otsu mask.
img_segmentada_r = cv2.bitwise_and(img_recorte,img_recorte,mask=img_otsu2)
img_segmentada_rmanual = cv2.bitwise_and(img_recorte,img_recorte,mask=img_limiar2)
plt.figure('Letra g')
plt.subplot(4,4,1)
plt.imshow(img_recorte)
plt.title('RGB')
plt.xticks([])
plt.yticks([])
plt.subplot(4,4,2)
plt.imshow(r,cmap='gray')
plt.title('RGB - r')
plt.xticks([])
plt.yticks([])
plt.subplot(4,4,3)
plt.plot(hist_r,color = 'black')
plt.axvline(x=LL,color = 'r') # mark the manual threshold
plt.title("Histograma - Manual")
plt.xlim([0,256])
plt.xlabel("Valores Pixels")
plt.ylabel("Número de Pixels")
plt.subplot(4,4,4)
plt.plot(hist_r,color = 'black')
plt.axvline(x=LOO,color = 'r') # mark the Otsu threshold
plt.title("Histograma - Otsu")
plt.xlim([0,256])
plt.xlabel("Valores Pixels")
plt.ylabel("Número de Pixels")
plt.subplot(4,4,5)
# BUG FIX: this panel showed img_limiar / L from question (e) (grayscale
# threshold) instead of this question's R-channel binary image and threshold.
plt.imshow(img_limiar2,cmap='gray')
plt.title('Limiar: ' + str(LL))
plt.xticks([])
plt.yticks([])
plt.subplot(4,4,6)
plt.imshow(img_segmentada_r)
plt.title('Imagem segmentada (OTSU- canal r)')
plt.xticks([])
plt.yticks([])
plt.subplot(4,4,7)
plt.imshow(img_segmentada_rmanual)
plt.title('Imagem segmentada (MANUAL- canal r)')
plt.xticks([])
plt.yticks([])
plt.show()
print('i) Obtenha o histograma de cada um dos canais da imagem em RGB, utilizando como mascara a imagem limiarizada (binarizada) da letra h.')
# Per-channel histograms restricted to the mask pixels from question (h).
hist_red = cv2.calcHist([img_segmentada_r], [0], img_limiar2, [256], [0,256])
hist_green = cv2.calcHist([img_segmentada_r], [1], img_limiar2, [256], [0,256])
hist_blue = cv2.calcHist([img_segmentada_r], [2], img_limiar2, [256], [0,256])
plt.figure('Sistemas de cores RGB')
plt.subplot(3, 4, 5)
plt.plot(hist_red, color="red") # masked histogram of the R "red" channel
plt.title("Histograma - Red")
plt.xlim([0, 256])
plt.xlabel("Valores Pixels")
plt.ylabel("Número de Pixels")
plt.subplot(3, 4, 6)
plt.plot(hist_green, color="green") # masked histogram of the Green channel
plt.title("Histograma - Green")
plt.xlim([0, 256])
plt.xlabel("Valores Pixels")
plt.ylabel("Número de Pixels")
plt.subplot(3, 4, 7)
plt.plot(hist_blue, color="blue") # masked histogram of the Blue channel
plt.title("Histograma - Blue")
plt.xlim([0, 256])
plt.xlabel("Valores Pixels")
plt.ylabel("Número de Pixels")
plt.show()
print('j) Realize operações aritméticas na imagem em RGB de modo a realçar os aspectos de seu interesse. Exemplo (2*R-0.5*G')
# Channel arithmetic on the RGB image (float result: 1.7*R - 1.2*G).
img_j = 1.7*img_arroz[:, :, 0] - 1.2 * img_arroz[:, :, 1]
# Convert the result to unsigned 8-bit (negative values wrap around).
img_j2 = img_j.astype(np.uint8)
# Histogram of the combined channel.
histj = cv2.calcHist([img_j2], [0], None, [256], [0, 256])
# Otsu thresholding.
# BUG FIX: maxval was 256, which exceeds the uint8 range; use 255.
(LLL, img_otsu3) = cv2.threshold(img_j2, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
# Segment the image using the Otsu mask.
img_segmentadaj = cv2.bitwise_and(img_arroz, img_arroz, mask=img_otsu3)
# Display the images.
plt.figure('Imagens letra J')
plt.subplot(2, 3, 1)
plt.imshow(img_arroz, cmap='gray')
plt.title('RGB')
plt.subplot(2, 3, 2)
plt.imshow(img_j2, cmap='gray')
# BUG FIX: the title said 'B - 1,2*G' but the operation above is 1.7*R - 1.2*G.
plt.title('1,7*R - 1,2*G')
plt.subplot(2, 3, 3)
plt.plot(histj, color='black')
# plt.axline(x=LLL color='black')
plt.xlim([0, 256])
plt.xlabel('Valores de pixels')
# BUG FIX: plt.xlabel was called twice, overwriting the first label; the
# second call was meant to set the Y-axis label.
plt.ylabel('Número de pixels')
plt.subplot(2, 3, 4)
plt.imshow(img_otsu3, cmap='gray')
plt.title('Imagem binária')
plt.subplot(2, 3, 5)
plt.imshow(img_segmentadaj, cmap='gray')
plt.title('Imagem segmentada com mascara')
plt.xticks([])
plt.yticks([])
plt.show()
|
994,719 | 2e7490ea57f6dfd6d4c86536cba8d289215638cb | from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField, BooleanField, SubmitField, TimeField, FileField
from wtforms.fields.html5 import EmailField
from wtforms.validators import DataRequired, Email, Length, NumberRange
class formCentros(FlaskForm):
    """WTForms form for creating/editing a "centro" (centre) record.

    CSRF is disabled via the inner Meta class; all fields are required
    and length-limited to match the backing database columns.
    """
    class Meta:
        csrf = False
    nombre = StringField("Nombre Centro", validators=[DataRequired(), Length(max=255)])
    direccion = StringField("Dirección Centro", validators=[DataRequired(), Length(max=255)])
    municipio = StringField("Municipio Centro",validators=[DataRequired(),Length(max=255)])
    telefono = StringField("Teléfono Centro", validators=[DataRequired(), Length(max=20)])
    hora_apertura = TimeField("Hora de Apertura", validators=[DataRequired()])
    hora_cierre = TimeField("Hora de Cierre", validators=[DataRequired()])
    tipo = StringField("Tipo de Centro", validators=[DataRequired()])
    sitio_web = StringField("Sitio Web de Centro", validators=[DataRequired(), Length(max=60)])
    email = EmailField("Email de Centro", validators=[DataRequired(),Email(), Length(max=255)])
    estado = BooleanField("Centro Habilitado")
    path_pdf = StringField("PDF Visita",validators=[DataRequired(),Length(max=400)])
|
994,720 | f29ff142307b12cbd56b163d9afa1668ab97e5b5 | from skiplistlist import SkiplistList
# Demo: build a skiplist of 100 elements, then truncate it at index 50 and
# check that heights and sizes of both halves are consistent.
lista = SkiplistList()
for i in range(100):
    lista.add(i, i)
print(lista)
print("Altura: ", lista.h) # should be the maximum height; if it differs from the new or the truncated list's height, something went wrong
segunda = lista.truncate(50)
print("Segunda lista:", segunda, " de altura: ", segunda.h, " e tamanho: ", segunda.n)
print("Antiga Lista: ",lista, " de altura: ", lista.h, " e tamanho: ", lista.n)
|
994,721 | 0a94b3a400baaab6689ed3e412344d9315f42516 | import web
urls = (
'/', 'app.controllers.index.Index',
'/docker', 'app.controllers.docker.Docker',
'/ubuntu', 'app.controllers.ubuntu.Ubuntu',
)
app = web.application(urls, globals())
if __name__ == "__main__":
app.run()
|
994,722 | 15d1b50f14fc4a6189fc99c2d70db9a74ad69d9f | # 檔名:main.py
# 功能:主程式、監聽訊息、鏈結加入其他功能
# TODO:實作 unload, reload
import discord
from discord.ext import commands
import os
# Bot object; commands are prefixed with '$'.
bot = commands.Bot(command_prefix='$')
# Read the bot token from token.txt, as the original comments intend.
# os.path.join() keeps the path portable across operating systems.
# BUG FIX: os.getenv() was called with the token *itself* as the
# environment-variable name, which returns None (so bot.run() fails) and
# leaked the secret token in source control.
with open(os.path.join("..", "info", "token.txt"), 'r') as f:
    token = f.read().strip()
# Read the available extensions from extensions.txt and load each one.
with open(os.path.join("..", "info", "extensions.txt"), 'r') as f:
    for extension in f:
        # Load the extension (uses the built-in Bot method load_extension).
        bot.load_extension(extension.strip('\n'))
# Fired once the bot is ready.
@bot.event
async def on_ready():
    print("Ready!")
    # Print this bot user's information.
    print("User name:", bot.user.name)
    print("User ID:", bot.user.id)
# Message listener; fired on every incoming message.
@bot.event
async def on_message(message):
    # Ignore messages sent by the bot itself.
    if message.author.id == bot.user.id:
        return
    # Reply to messages containing "hello".
    if "hello" in message.content.lower():
        await message.channel.send("Hello~ Nice to meet you.") # bot sends a message
    # Reply to messages starting with "help".
    if message.content.lower().startswith("help"):
        await message.channel.send("Enter commands starting with $ or enter $help for more information:)")
    # Required so that $-prefixed commands still get processed.
    await bot.process_commands(message)
# load extension
# Triggered when a user types $load <extension>.
@bot.command(help = "Load extension.", brief = "Load extension.")
async def load(ctx, extension): # extension: name of the feature to load
    try:
        bot.load_extension(extension.lower()) # load extension; lower() because the file names are lowercase
        await ctx.send(f"{extension} loaded.") # bot sends a confirmation
    except Exception as e:
        await ctx.send(e) # report the error if loading failed
# unload extension
@bot.command(help = "Un-load extension.", brief = "Un-load extension.")
async def unload(ctx, extension):
    try:
        bot.unload_extension(extension.lower()) # unload extension; lower() because the file names are lowercase
        await ctx.send(f"{extension} unloaded.") # bot sends a confirmation
    except Exception as e:
        await ctx.send(e )
#################################################################
# TODO: implement unload extension  (NOTE: already implemented above)
# Category: class exercise
# HINT: see load extension above; Bot has a built-in method unload_extension
#################################################################
# reload extension
@bot.command(help = "Re-load extension.", brief = "Re-load extension.")
async def reload(ctx, extension):
    try:
        bot.reload_extension(extension.lower()) # reload extension; lower() because the file names are lowercase
        await ctx.send(f"{extension} reloaded.") # bot sends a confirmation
    except Exception as e:
        await ctx.send(e )
#################################################################
# TODO: implement reload extension  (NOTE: already implemented above)
# Category: class exercise
# HINT: see load extension above; Bot has a built-in method reload_extension
#################################################################
bot.run(token) # start the bot
|
994,723 | c5d85abdcde160ce10c287d6042742e05ae0951d | # 表单信息
from flask_wtf import FlaskForm
# 表单由若干输入字段组成,每种字段用一种类接收,组成一种类,每种字段作为组合类的一种属性;
from wtforms import StringField, PasswordField, BooleanField, IntegerField, TextAreaField, SubmitField, MultipleFileField ,SelectField,RadioField
from wtforms.validators import DataRequired, Length, ValidationError, Email,InputRequired,NumberRange
# Login
# Fields composing the login form on the login page.
class LoginForm(FlaskForm):
    account=StringField('Account',validators=[Length(10,10,message=u'长度为10位')]) # text field: account number must be exactly 10 chars
    password=PasswordField('Password',validators=[Length(6,16,message=u'长度为6~16位')]) # password field: 6-16 chars
    submit=SubmitField('Log in') # submit button
# Registration fields on the login page.
class RegisterForm(FlaskForm):
    password=PasswordField('Password',validators=[Length(6,16,message=u'长度为6~16位')]) # password field: 6-16 chars
    name=StringField('Username',validators=[DataRequired(message=u'名字不能为空')]) # required display name
    phonenumber=StringField('Telephone',validators=[Length(11,11,message=u'电话号码长度为11位')]) # 11-digit phone number
    sex=SelectField('Sex',
                    validators=[DataRequired()],
                    choices=[('男','男'),('女','女')]
                    )
    idcard=StringField('IDCard',validators=[DataRequired(message=u'身份证号不能为空')]) # required national ID number
    email=StringField('Email',validators=[Email(message=u'填写正确的Email格式')])
    regist=SubmitField('Regist')
# Fields for the post-registration confirmation page.
class ShowAccount(FlaskForm):
    BACK=SubmitField('back')
class AddRelation(FlaskForm):
    """Form for linking an account as a teacher or student relation."""
    account=StringField('Account',validators=[Length(10,10,message=u'长度为10位')]) # 10-char account number
    T_or_S=RadioField('Relations',choices=[('teacher','teacher'),('student','student')],validators=[DataRequired()]) # relation type
    starttime=StringField('StartTime')
    endtime=StringField('EndTime')
    submit=SubmitField('Submit')
|
994,724 | 374d431b8006e1b763b3f9e25846d7a32b721a6e | import sys
import parameter
import socket
import loadingGif
from PyQt5.QtWidgets import QPushButton
from PyQt5.QtWidgets import QApplication
from PyQt5.QtCore import *
class Base_Button(QPushButton):
    """Two-state toggle button for the 'base' step.

    The button alternates between a green "start/decision" state
    (now_start_button == True, command 'base') and an orange "modify"
    state (now_start_button == False, command 'base_fix'); clicking it
    sends the corresponding command string over a socket and flips the
    enabled state of two widget groups.
    """
    def __init__(self, text):
        QPushButton.__init__(self, text)
        # Visual style and caption for each state (True = start, False = modify).
        self.colors = {True:'green', False:'orange'}
        self.texts = {True: parameter.decision_str, False: parameter.modify_str}
        self.now_start_button = True
        self.setStyleSheet("background-color: green")
        self.able_list = None
        self.disable_list = None
        self.base_opt = 'base'
        self.setCheckable(True)
    def set_state_start(self):
        """Force the button into the green 'start' state ('base' command)."""
        self.now_start_button = True
        self.setStyleSheet("background-color: %s" % (self.colors[self.now_start_button]))
        self.setText(self.texts[self.now_start_button])
        self.base_opt = 'base'
    def set_state_fix(self):
        """Force the button into the orange 'modify' state ('base_fix' command)."""
        self.now_start_button = False
        self.setStyleSheet("background-color: %s" % (self.colors[self.now_start_button]))
        self.setText(self.texts[self.now_start_button])
        self.base_opt = 'base_fix'
    def custom_toggle(self):
        """Flip between the two states, updating colour, caption and command."""
        self.now_start_button = not self.now_start_button
        self.setStyleSheet("background-color: %s" % (self.colors[self.now_start_button]))
        self.setText(self.texts[self.now_start_button])
        if self.now_start_button:
            self.base_opt = 'base'
        else:
            self.base_opt = 'base_fix'
    def set_change_widget_list(self, disable_list, able_list):
        # Widgets toggled by button_click(); see that method's docstring.
        self.able_list = able_list
        self.disable_list = disable_list
    def button_click(self, material:str, process:str, amount:str, sock):
        """Send the current command over *sock*, toggle state and widgets.

        disable_list : disable widgets when button is start button (so, this widgets are able when button is modify button)
        able_list : able widgets when button is start button (so, this widgets area disable when button is modify button)
        """
        # In the start state the command carries the full parameters;
        # in the modify state only the bare 'base_fix' keyword is sent.
        if self.now_start_button:
            msg = (self.base_opt + ' ' + material + ' ' + process+ ' ' + amount)
        else:
            msg = self.base_opt
        msg_byte = msg.encode()
        sock.sendall(msg_byte)
        # Blocking wait for the server acknowledgement (reply is unused).
        recv_msg = sock.recv(1024).decode()
        self.custom_toggle()
        for elem in self.disable_list:
            elem.setEnabled(not self.now_start_button)
        for elem in self.able_list:
            elem.setEnabled(self.now_start_button)
class Detail_Button(QPushButton):
    """Two-state toggle button for the 'detail' step.

    Like Base_Button, but tracks whether a process is already running
    (is_process_wokring — sic, typo kept because it may be read by other
    modules): restarting an in-progress process sends 'restart' instead
    of 'detail'.
    """
    def __init__(self, text):
        QPushButton.__init__(self, text)
        # Visual style and caption for each state (True = start, False = modify).
        self.colors = {True:'green', False:'orange'}
        self.texts = {True: parameter.decision_str, False: parameter.modify_str}
        self.now_start_button = True
        self.setStyleSheet("background-color: green")
        self.able_list = None
        self.disable_list = None
        self.base_opt = 'detail'
        self.is_process_wokring = False
        self.setCheckable(True)
    def set_state_start(self): # used when clearing the UI
        """Reset to the green 'start' state and clear the running flag."""
        self.now_start_button = True
        self.setStyleSheet("background-color: green")
        self.setText(self.texts[self.now_start_button])
        self.base_opt = 'detail'
        self.is_process_wokring = False
    def set_state_fix(self):
        """Force the orange 'modify' state; marks the process as running."""
        self.now_start_button = False
        self.setStyleSheet("background-color: orange")
        self.setText(self.texts[self.now_start_button])
        self.base_opt = 'detail_fix'
        self.is_process_wokring = True
    def custom_toggle(self):
        """Flip state; once a process has started, re-starting sends 'restart'."""
        self.now_start_button = not self.now_start_button
        self.setStyleSheet("background-color: %s" % (self.colors[self.now_start_button]))
        self.setText(self.texts[self.now_start_button])
        ##test function (add base_opt = 'restart')
        if self.now_start_button:
            if self.is_process_wokring:
                self.base_opt = 'restart'
            else:
                self.base_opt = 'detail'
        else:
            self.is_process_wokring = True
            self.base_opt = 'detail_fix'
    def set_change_widget_list(self, disable_list, able_list):
        # Widgets toggled by button_click(); see Base_Button for semantics.
        self.able_list = able_list
        self.disable_list = disable_list
    def button_click(self, gas:str, tempers:list, heattimes:list, staytimes:list, sock:socket.socket, process_info: dict):
        """Serialize the process parameters, send them, and toggle the UI.

        process_info is a mutable mapping: on first start, the server reply
        '<id>+<starttime>' is stored into it.  (The original annotation said
        tuple, but item assignment below requires a dict-like object.)
        """
        count = len(tempers)
        # Stringify each parameter list and join with spaces for the wire format.
        local_tempers = list(map(str, tempers))
        local_heattimes = list(map(str, heattimes))
        local_staytimes = list(map(str, staytimes))
        heattime = ' '.join(local_heattimes)
        staytime = ' '.join(local_staytimes)
        temper = ' '.join(local_tempers)
        if self.now_start_button:
            msg = self.base_opt + ' ' + str(count) + ' ' + temper + ' ' + heattime + ' ' + staytime + ' ' + gas
        else:
            msg = self.base_opt
        msg_byte = msg.encode()
        sock.sendall(msg_byte)
        # NOTE(review): the loader animation is stopped but never started here
        # before the blocking recv — confirm LoadingGif's intended usage.
        loader = loadingGif.LoadingGif()
        recv_msg = sock.recv(1024).decode()
        loader.stopAnimation()
        # On the first start, the server returns "<id>+<starttime>".
        if self.now_start_button and not self.is_process_wokring:
            process_info["id"], process_info["starttime"] = recv_msg.split('+')
        self.custom_toggle()
        for elem in self.disable_list:
            elem.setEnabled(not self.now_start_button)
        for elem in self.able_list:
            elem.setEnabled(self.now_start_button)
994,725 | cd353d3d76f925cf06d5c07d1a37108ff2e72588 | #!/usr/bin/env python3
#Copyright (C) 2011 by Glenn Hickey
#
#Released under the MIT license, see LICENSE.txt
""" Wrap a tree and add some simple partitioning functionality. note
that nameUnlabeledInternalNodes() and computeSubtreeRoots() need to be
called (in that order) for anything to work...
"""
import math
from sonLib.nxtree import NXTree
import networkx as nx
from networkx.algorithms.shortest_paths.weighted import dijkstra_path
class MultiCactusTree(NXTree):
self_suffix = "_self"
def __init__(self, tree = None):
if isinstance(tree, NXTree):
NXTree.__init__(self, tree.nxDg)
else:
NXTree.__init__(self, tree)
# ids of all subtree roots for fast checking
self.subtreeRoots = set()
# map of names to node ids
self.nameToId = dict()
for node in self.breadthFirstTraversal():
if self.hasName(node):
self.nameToId[self.getName(node)] = node
# fill in unlabeled node ids with a breadth-first
# traversal numbering from the root
def nameUnlabeledInternalNodes(self, prefix = "Anc", startIdx = 0):
# make sure we don't duplicate any pre-existing labels
existing_labels = set()
for node in self.breadthFirstTraversal():
if self.hasName(node):
existing_labels.add(self.getName(node))
count = startIdx
numInternal = 0
width = 0
for node in self.breadthFirstTraversal():
if self.isLeaf(node) is False:
numInternal += 1
if numInternal > 0:
width = int(math.log10(numInternal)) + 1
for node in self.breadthFirstTraversal():
if not self.isLeaf(node):
while not self.hasName(node) or not self.getName(node):
new_name = "%s%s" % (prefix, str(count).zfill(width))
if new_name not in existing_labels:
self.setName(node, new_name)
existing_labels.add(new_name)
count += 1
self.nameToId[self.getName(node)] = node
# identify roots of subclades in the tree and
# add them to the self.subtreeRoots set
def computeSubtreeRoots(self):
self.subtreeRoots = set(node for node in self.breadthFirstTraversal() if not self.isLeaf(node))
# blindly read in the roots from given list of names
def assignSubtreeRootNames(self, rootNames):
self.subtreeRoots = set()
for node in self.breadthFirstTraversal():
if self.getName(node) in rootNames:
self.subtreeRoots.add(node)
self.nameToId[self.getName(node)] = node
def getSubtreeRootNames(self):
return [self.getName(x) for x in self.subtreeRoots]
def getSubtreeRoots(self):
return self.subtreeRoots
# generate eall nodes beneath (and including) given
# root
def traverseSubtree(self, root, node):
yield node
if node == root or node not in self.subtreeRoots:
for child in self.getChildren(node):
for i in self.traverseSubtree(root, child):
yield i
# Extracts a tree spanning the nodes with the given names.
def extractSpanningTree(self, nodes):
assert len(nodes) > 1
nodeIds = [self.nameToId[name] for name in nodes]
paths = [dijkstra_path(self.nxDg.to_undirected(), source=nodeIds[0], target=x) for x in nodeIds[1:]]
nodesToInclude = set()
for path in paths:
for node in path:
nodesToInclude.add(node)
cpy = self.nxDg.subgraph(nodesToInclude).copy()
# Get rid of nodes that have only 1 children
graphWasModified = True
while graphWasModified:
graphWasModified = False
for node in nx.nodes(cpy):
if cpy.out_degree(node) == 1 and cpy.in_degree(node) == 1:
if node not in nodeIds:
# This is a spurious node in the species tree,
# we can and should remove
childEdge = list(cpy.out_edges(node, data=True))[0]
parentEdge = list(cpy.in_edges(node, data=True))[0]
child = childEdge[1]
childDist = childEdge[2]['weight']
parent = parentEdge[0]
assert parent != node
parentDist = parentEdge[2]['weight']
cpy.remove_node(node)
cpy.add_edge(parent, child, weight = childDist + parentDist)
graphWasModified = True
break
mcCpy = MultiCactusTree(cpy)
mcCpy.nameUnlabeledInternalNodes(prefix="thisPrefixShouldNeverAppear")
mcCpy.computeSubtreeRoots()
return mcCpy
# get names of children below this node
def getChildNames(self, name):
id = self.nameToId[name]
subtree = [i for i in self.traverseSubtree(id, id)]
# remove the root from the set of children
subtree.remove(id)
names = [self.getName(i) for i in subtree]
return names
    # copy a subtree rooted at node with given name
    def extractSubTree(self, name):
        """Return a new MultiCactusTree copying the subtree rooted at *name*.

        Traversal (and therefore the copy) stops at nested subtree
        roots, per traverseSubtree.  Subtree-root names are carried over
        to the copy.
        """
        root = self.nameToId[name]
        subtree = [i for i in self.traverseSubtree(root, root)]
        cpy = self.nxDg.subgraph(subtree).copy()
        mcCpy = MultiCactusTree(cpy)
        mcCpy.assignSubtreeRootNames(self.getSubtreeRootNames())
        return mcCpy
# find the root of the subtree containing the given node
# as leaf (slowly.. for nwo) (returns event name, not node id)
def getSubtreeRoot(self, name):
assert len(self.nameToId) > 0
node = self.nameToId[name]
if node == self.rootId:
return name
parent = self.getParent(node)
while parent is not None:
if parent in self.subtreeRoots:
return self.getName(parent)
parent = self.getParent(parent)
return None
# safe id to insert is current max + 1
def getNextIndex(self):
return sorted([i for i in self.breadthFirstTraversal()])[-1] + 1
    # insert a new node above a specified node in the tree
    def insertAbove(self, node, newNode, newName = "", newWeight= None):
        """Splice *newNode* between *node* and its parent.

        The old parent->node edge (and its weight, if any) is rerouted
        to newNode; if *node* was the root, newNode becomes the new
        root.  newWeight, when given, is placed on the newNode->node
        edge, and a non-empty newName is registered in nameToId.
        """
        parent = self.getParent(node)
        if parent is not None:
            # Preserve the existing branch length on the rerouted edge.
            oldWeight = self.getWeight(parent, node)
            self.nxDg.remove_edge(parent, node)
            self.nxDg.add_edge(parent, newNode)
            if oldWeight is not None:
                self.setWeight(parent, newNode, oldWeight)
        else:
            # No parent: we are inserting above the root itself.
            assert node == self.rootId
            self.rootId = newNode
            self.nxDg.add_node(newNode)
        self.setName(newNode, newName)
        self.nxDg.add_edge(newNode, node)
        if newWeight is not None:
            self.setWeight(newNode, node, newWeight)
        if len(newName) > 0:
            self.nameToId[newName] = newNode
    # insert a node with id (name_self) directly above
    # every node in the tree
    # should be run after subtreeroots are computed (otherwise
    # won't work
    def addSelfEdges(self):
        nextIndex = self.getNextIndex()
        # Snapshot the traversal first: insertAbove mutates the tree.
        traversal = [i for i in self.breadthFirstTraversal()]
        for node in traversal:
            # Only subtree roots and leaves get a self node; the overall
            # root is skipped.
            if (node in self.subtreeRoots or self.isLeaf(node)) and\
               node != self.rootId:
                newNode = nextIndex
                nextIndex += 1
                parent = self.getParent(node)
                weight = None
                if parent is not None:
                    # Carry the existing branch length onto the new edge.
                    weight = self.getWeight(parent, node)
                    assert weight is not None
                # Guard against inserting a self node twice.
                assert self.self_suffix not in self.getName(node)
                newName = self.getName(node) + self.self_suffix
                self.insertAbove(node, newNode, newName, weight)
                self.subtreeRoots.add(newNode)
    # tack an outgroup onto the root
    # if root is a leaf, we make a new root above.
    def addOutgroup(self, ogName, distance):
        # de-activating assert because new outgroup munging in
        # cactus_createMultiCactusProject will temporarily duplicate
        # a node in the tree (is normal)
        #assert ogName not in self.nameToId
        if self.isLeaf(self.rootId):
            # Root is a leaf: insert an unnamed node above it to serve
            # as the new root, splitting the distance between the two
            # new branches.
            newNode = self.getNextIndex()
            self.insertAbove(self.rootId, newNode, "", distance / 2)
            distance = distance / 2
        newNode = self.getNextIndex()
        # Hang the outgroup directly off the (possibly new) root.
        self.nxDg.add_edge(self.rootId, newNode )
        self.setName(newNode, ogName)
        self.nameToId[ogName] = newNode
        self.setWeight(self.rootId, newNode, distance)
    # make an attempt at keeping nameToId up to date
    def setName(self, node, name):
        # Delegate the actual rename to the base class, then refresh the
        # reverse name -> id index so lookups stay consistent.
        super(MultiCactusTree, self).setName(node, name)
        self.nameToId[name] = node
    # map from event name to networkx node ID
    def getNodeId(self, name):
        # Raises KeyError if the name was never registered.
        return self.nameToId[name]
|
994,726 | e2d4f2e3178e7fd816dd5d2ac0ae732327e0a5f7 | import torch
import os
import io
import codecs
import xml.etree.ElementTree as ET
from collections import defaultdict
from torchtext.utils import (download_from_url, extract_archive,
unicode_csv_reader)
# Download locations for each supported dataset.  Multi30k is served as
# individual gzipped files; WMT14 is a single Google Drive archive; the
# IWSLT entry is a template filled in with (src, tgt, "src-tgt") at call
# time by the IWSLT() constructor below.
URLS = {
    'Multi30k': [
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task1/raw/test_2016_flickr.cs.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task1/raw/test_2016_flickr.de.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task1/raw/test_2016_flickr.en.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task1/raw/test_2016_flickr.fr.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task1/raw/test_2017_flickr.de.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task1/raw/test_2017_flickr.en.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task1/raw/test_2017_flickr.fr.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task1/raw/test_2017_mscoco.de.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task1/raw/test_2017_mscoco.en.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task1/raw/test_2017_mscoco.fr.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task1/raw/test_2018_flickr.en.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task1/raw/train.cs.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task1/raw/train.de.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task1/raw/train.en.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task1/raw/train.fr.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task1/raw/val.cs.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task1/raw/val.de.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task1/raw/val.en.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task1/raw/val.fr.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task2/raw/test_2016.1.de.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task2/raw/test_2016.1.en.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task2/raw/test_2016.2.de.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task2/raw/test_2016.2.en.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task2/raw/test_2016.3.de.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task2/raw/test_2016.3.en.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task2/raw/test_2016.4.de.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task2/raw/test_2016.4.en.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task2/raw/test_2016.5.de.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task2/raw/test_2016.5.en.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task2/raw/train.1.de.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task2/raw/train.1.en.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task2/raw/train.2.de.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task2/raw/train.2.en.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task2/raw/train.3.de.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task2/raw/train.3.en.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task2/raw/train.4.de.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task2/raw/train.4.en.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task2/raw/train.5.de.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task2/raw/train.5.en.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task2/raw/val.1.de.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task2/raw/val.1.en.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task2/raw/val.2.de.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task2/raw/val.2.en.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task2/raw/val.3.de.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task2/raw/val.3.en.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task2/raw/val.4.de.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task2/raw/val.4.en.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task2/raw/val.5.de.gz",
        "https://raw.githubusercontent.com/multi30k/dataset/master/data/task2/raw/val.5.en.gz"
    ],
    'WMT14':
    'https://drive.google.com/uc?export=download&id=0B_bZck-ksdkpM25jRUN2X2UxMm8',
    'IWSLT':
    'https://wit3.fbk.eu/archive/2016-01//texts/{}/{}/{}.tgz'
}
def _read_text_iterator(path):
    """Yield each CSV row of *path* joined into one space-separated string."""
    with io.open(path, encoding="utf8") as src:
        for fields in unicode_csv_reader(src):
            yield " ".join(fields)
def _clean_xml_file(f_xml):
f_txt = os.path.splitext(f_xml)[0]
with codecs.open(f_txt, mode='w', encoding='utf-8') as fd_txt:
root = ET.parse(f_xml).getroot()[0]
for doc in root.findall('doc'):
for e in doc.findall('seg'):
fd_txt.write(e.text.strip() + '\n')
def _clean_tags_file(f_orig):
xml_tags = [
'<url', '<keywords', '<talkid', '<description', '<reviewer',
'<translator', '<title', '<speaker'
]
f_txt = f_orig.replace('.tags', '')
with codecs.open(f_txt, mode='w', encoding='utf-8') as fd_txt, \
io.open(f_orig, mode='r', encoding='utf-8') as fd_orig:
for l in fd_orig:
if not any(tag in l for tag in xml_tags):
# TODO: Fix utf-8 next line mark
# fd_txt.write(l.strip() + '\n')
# fd_txt.write(l.strip() + u"\u0085")
# fd_txt.write(l.lstrip())
fd_txt.write(l.strip() + '\n')
def _construct_filenames(filename, languages):
filenames = []
for lang in languages:
filenames.append(filename + "." + lang)
return filenames
def _construct_filepaths(paths, src_filename, tgt_filename):
src_path = None
tgt_path = None
for p in paths:
src_path = p if src_filename in p else src_path
tgt_path = p if tgt_filename in p else tgt_path
return (src_path, tgt_path)
def _setup_datasets(dataset_name,
train_filenames,
valid_filenames,
test_filenames,
root='.data'):
if not isinstance(train_filenames, tuple) and not isinstance(valid_filenames, tuple) \
and not isinstance(test_filenames, tuple):
raise ValueError("All filenames must be tuples")
src_train, tgt_train = train_filenames
src_eval, tgt_eval = valid_filenames
src_test, tgt_test = test_filenames
extracted_files = []
if isinstance(URLS[dataset_name], list):
for f in URLS[dataset_name]:
dataset_tar = download_from_url(f, root=root)
extracted_files.extend(extract_archive(dataset_tar))
elif isinstance(URLS[dataset_name], str):
dataset_tar = download_from_url(URLS[dataset_name], root=root)
extracted_files.extend(extract_archive(dataset_tar))
else:
raise ValueError(
"URLS for {} has to be in a form or list or string".format(
dataset_name))
# Clean the xml and tag file in the archives
file_archives = []
for fname in extracted_files:
if 'xml' in fname:
_clean_xml_file(fname)
file_archives.append(os.path.splitext(fname)[0])
elif "tags" in fname:
_clean_tags_file(fname)
file_archives.append(fname.replace('.tags', ''))
else:
file_archives.append(fname)
data_filenames = defaultdict(dict)
data_filenames = {
"train": _construct_filepaths(file_archives, src_train, tgt_train),
"valid": _construct_filepaths(file_archives, src_eval, tgt_eval),
"test": _construct_filepaths(file_archives, src_test, tgt_test)
}
for key in data_filenames.keys():
if len(data_filenames[key]) == 0 or data_filenames[key] is None:
raise FileNotFoundError(
"Files are not found for data type {}".format(key))
datasets = []
for key in data_filenames.keys():
src_data_iter = _read_text_iterator(data_filenames[key][0])
tgt_data_iter = _read_text_iterator(data_filenames[key][1])
datasets.append(
RawTranslationIterableDataset(src_data_iter, tgt_data_iter))
return tuple(datasets)
class RawTranslationIterableDataset(torch.utils.data.IterableDataset):
    """Iterable dataset yielding (source, target) line pairs drawn from
    a pair of raw-text iterators.
    """

    def __init__(self, src_iterator, tgt_iterator):
        """Wrap the given source/target iterators."""
        super(RawTranslationIterableDataset, self).__init__()
        self._src_iterator = src_iterator
        self._tgt_iterator = tgt_iterator
        self.has_setup = False
        self.start = 0
        self.num_lines = None

    def setup_iter(self, start=0, num_lines=None):
        # Record the window of pairs to serve; __iter__ applies it.
        self.start = start
        self.num_lines = num_lines
        self.has_setup = True

    def __iter__(self):
        if not self.has_setup:
            self.setup_iter()
        paired = zip(self._src_iterator, self._tgt_iterator)
        for idx, pair in enumerate(paired):
            if idx < self.start:
                continue
            yield pair
            # Mirrors the original bound exactly: iteration stops only
            # *after* the pair at index start + num_lines is yielded.
            if self.num_lines is not None and idx == self.start + self.num_lines:
                break

    def get_iterator(self):
        return (self._src_iterator, self._tgt_iterator)
def Multi30k(train_filenames=("train.de", "train.en"),
             valid_filenames=("val.de", "val.en"),
             test_filenames=("test_2016_flickr.de", "test_2016_flickr.en"),
             root='.data'):
    """ Define translation datasets: Multi30k
    Separately returns train/valid/test datasets as a tuple.

    Available files cover WMT16 task 1 (test_2016_flickr,
    test_2017_flickr, test_2017_mscoco, test_2018_flickr, train, val --
    in cs/de/en/fr where published) and task 2 (test_2016.{1-5},
    train.{1-5}, val.{1-5} in de/en); see the Multi30k entry in URLS
    above for the exact list of downloadable files.

    Arguments:
        train_filenames: the source and target filenames for training.
            Default: ('train.de', 'train.en')
        valid_filenames: the source and target filenames for valid.
            Default: ('val.de', 'val.en')
        test_filenames: the source and target filenames for test.
            Default: ('test_2016_flickr.de', 'test_2016_flickr.en')
            (the previous docstring said 'test2016.de'/'test2016.en',
            which did not match the actual defaults above)
        root: Directory where the datasets are saved. Default: ".data"

    Examples:
        >>> from torchtext.datasets import Multi30k
        >>> train_dataset, valid_dataset, test_dataset = Multi30k()
    """
    return _setup_datasets("Multi30k",
                           train_filenames=train_filenames,
                           valid_filenames=valid_filenames,
                           test_filenames=test_filenames,
                           root=root)
def IWSLT(train_filenames=('train.de-en.de', 'train.de-en.en'),
          valid_filenames=('IWSLT16.TED.tst2013.de-en.de',
                           'IWSLT16.TED.tst2013.de-en.en'),
          test_filenames=('IWSLT16.TED.tst2014.de-en.de',
                          'IWSLT16.TED.tst2014.de-en.en'),
          root='.data'):
    """ Define translation datasets: IWSLT
    Separately returns train/valid/test datasets.

    The archive provides, per language pair, the cleaned train files
    (train.<pair>.<lang>), the raw tagged files (train.tags.<pair>.<lang>)
    and the IWSLT16.TED dev2010 / tst2010-tst2014 evaluation sets (plus
    IWSLT16.TEDX dev2012 / tst2013-tst2014 for de-en), for the language
    pairs {ar,cs,de,fr}-en and en-{ar,cs,de,fr}.

    The source and target languages are inferred from the extensions of
    *train_filenames*, and the matching archive is downloaded.

    Arguments:
        train_filenames: the source and target filenames for training.
            Default: ('train.de-en.de', 'train.de-en.en')
        valid_filenames: the source and target filenames for valid.
            Default: ('IWSLT16.TED.tst2013.de-en.de', 'IWSLT16.TED.tst2013.de-en.en')
        test_filenames: the source and target filenames for test.
            Default: ('IWSLT16.TED.tst2014.de-en.de', 'IWSLT16.TED.tst2014.de-en.en')
        root: Directory where the datasets are saved. Default: ".data"

    Examples:
        >>> from torchtext.datasets.raw import IWSLT
        >>> train_dataset, valid_dataset, test_dataset = IWSLT()
    """
    src_language = train_filenames[0].split(".")[-1]
    tgt_language = train_filenames[1].split(".")[-1]
    languages = "-".join([src_language, tgt_language])
    # Bug fix: rebuild the URL from the pristine template on every call.
    # The original formatted URLS["IWSLT"] in place, so a second call
    # (or a call for a different language pair) found an already-expanded
    # URL and produced a broken download path.
    URLS["IWSLT"] = ('https://wit3.fbk.eu/archive/2016-01//texts/{}/{}/{}.tgz'
                     .format(src_language, tgt_language, languages))
    return _setup_datasets(
        "IWSLT",
        train_filenames=train_filenames,
        valid_filenames=valid_filenames,
        test_filenames=test_filenames,
        root=root,
    )
def WMT14(train_filenames=('train.tok.clean.bpe.32000.de',
                           'train.tok.clean.bpe.32000.en'),
          valid_filenames=('newstest2013.tok.bpe.32000.de',
                           'newstest2013.tok.bpe.32000.en'),
          test_filenames=('newstest2014.tok.bpe.32000.de',
                          'newstest2014.tok.bpe.32000.en'),
          root='.data'):
    """ Define translation datasets: WMT14
    Separately returns train/valid/test datasets.

    The archive provides de/en files for newstest2009-newstest2016 in
    raw, tokenized ('.tok') and BPE-segmented ('.tok.bpe.32000')
    variants, plus the cleaned BPE training corpus
    (train.tok.clean.bpe.32000.{de,en}).

    Arguments:
        train_filenames: the source and target filenames for training.
            Default: ('train.tok.clean.bpe.32000.de', 'train.tok.clean.bpe.32000.en')
        valid_filenames: the source and target filenames for valid.
            Default: ('newstest2013.tok.bpe.32000.de', 'newstest2013.tok.bpe.32000.en')
        test_filenames: the source and target filenames for test.
            Default: ('newstest2014.tok.bpe.32000.de', 'newstest2014.tok.bpe.32000.en')
        root: Directory where the datasets are saved. Default: ".data"

    Examples:
        >>> from torchtext.datasets import WMT14
        >>> train_dataset, valid_dataset, test_dataset = WMT14()
    """
    return _setup_datasets("WMT14",
                           train_filenames=train_filenames,
                           valid_filenames=valid_filenames,
                           test_filenames=test_filenames,
                           root=root)
# Public name -> constructor map used for dataset lookup by name.
DATASETS = {'Multi30k': Multi30k, 'IWSLT': IWSLT, 'WMT14': WMT14}
|
994,727 | 4303419f0bad63581cc0d4b012f3e2a754bd9125 | from Framework import *
import random
import sys
import time
# Four-character wire-protocol message types exchanged with the
# bootstrap node and between peers.
PLAYER_LIST="PLAY"
REPLY = "REPL"
ERROR = "ERRO"
GAME_START="GAME" # send to bootstrap to get game details
DETAILS="DETL" # bootstrap replies with this as first message followed by PLAYER_LIST messages
PLAY_START="PSTA"
INFORM_GAME_END_BOOTSTRAP="OVER"
LEAVING = "LEAV"
DROP_NODE = "DROP"
DEAD_NODE = "DEAD"
# A game is considered full at this many players.
MAX_PLAYER_NUMBER=6
# NOTE(review): listing `object` before `Communicate` is unusual for
# Python 2 -- if Communicate is itself a new-style class this base order
# can raise an MRO TypeError at class-creation time; confirm against
# Framework.Communicate.  Several attributes used below (game_dict,
# game_dict_lock, game_id, gameid_map_dict, available_maps_dict,
# threading via `from Framework import *`) are presumably provided by
# Communicate.__init__ -- verify.
class Game(object,Communicate):
    """
    This constructor does most of the bootstrapping. All nodes first contact the bootstrap node and get information
    like game-id, number of peers , list of peers and their player_num from it. Once thats done, they contact any peers
    from the list that they get and exchange information about each other. The first person is considered the head and
    he starts the game
    """
    #--------------------------------------------------------------------------
    def __init__( self, firstpeer,maxpeers=5, serverport=12345, master=None ):
    #--------------------------------------------------------------------------
        Communicate.__init__(self, maxpeers, serverport)
        # Map incoming message types to their handler methods.
        # NOTE(review): DROP_NODE is routed to __handle_player_leaving_suddenly,
        # leaving __handle_drop_node below unregistered -- confirm intended.
        handlers = {
            GAME_START:self.__handle_gamestart,
            INFORM_GAME_END_BOOTSTRAP: self.__handle_game_end_bootstrap,
            LEAVING:self.__handle_player_leaving_gracious,
            DROP_NODE: self.__handle_player_leaving_suddenly
        }
        self.my_peer_name=firstpeer
        for mt in handlers:
            self.add_event_handler(mt, handlers[mt])
        # Serve incoming connections on a background thread.
        self.t = threading.Thread( target = self.mainloop, args = [] )
        self.t.start()
        # peer address -> [timeout thread, game id, player number] for
        # peers that dropped and may rejoin within the grace period.
        self.rejoin_thread_dict={}
    #--------------------------------------------------------------------------
    def __handle_drop_node(self,peerconn,data,peername):
    #--------------------------------------------------------------------------
        # Currently unregistered (see handlers above); kept as a stub.
        print "Got Drop message"
        #This is where you need to deal with the lost node
        #This data contains ip:host of the lost node
        print data
    #--------------------------------------------------------------------------
    def __handle_gamestart(self, peerconn,data,peername):
    #--------------------------------------------------------------------------
        """
        Function that chooses the Map and other details and sends it back to the user
        DETAILS contain - game-id,MAP,Number of peers and X,Y position
        GAME contains list of peers
        This function is used only by the bootstrapping node and is called from "contactbootstrap" function
        This function is used to contact bootstrap for getting initial details and to inform bootstrap once all
        nodes have started play so that it does not allow any more players
        """
        # A peer that dropped recently and is rejoining: restore it to
        # its old game/player slot.
        if data in self.rejoin_thread_dict:
            temp_list=self.rejoin_thread_dict.pop(data)
            game_id=temp_list[1]
            player_number=temp_list[2]
            self.game_dict[int(game_id)].append(data)
            peerconn.send_data(DETAILS,'%d %s %d %d' % (int(game_id),self.gameid_map_dict[int(game_id)],len(self.game_dict[int(game_id)])-1,int(player_number)))
            # Re-send the rest of the roster (everyone but the rejoiner).
            for peer_list in self.game_dict[int(game_id)]:
                if peer_list!=data:
                    peerconn.send_data(PLAYER_LIST,'%s %s %d' % (peer_list,peer_list.split(":")[0],int(peer_list.split(":")[1])))
        else:
            # This condition is hit when nodes require initial set up details
            print "in else of game start" , data
            if not "STARTED" in data:
                self.game_dict_lock.acquire()
                #Check if there is already a game with lesser than 4 users. If so add the user to it. If not create new game
                if(self.game_id in self.game_dict):
                    player_number = len(self.game_dict[self.game_id])+1
                    if(len(self.game_dict[self.game_id])<=MAX_PLAYER_NUMBER-1):
                        peerconn.send_data(DETAILS,'%d %s %d %d' % (self.game_id,self.gameid_map_dict[self.game_id],len(self.game_dict[self.game_id]),player_number))
                        for peer_list in self.game_dict[self.game_id]:
                            peerconn.send_data(PLAYER_LIST,'%s %s %d' % (peer_list,peer_list.split(":")[0],int(peer_list.split(":")[1])))
                        self.game_dict[self.game_id].append(data)
                        # Game just filled up: future joiners start a new game.
                        if(len(self.game_dict[self.game_id])==MAX_PLAYER_NUMBER):
                            self.game_id=self.game_id+1
                    # NOTE(review): after the increment above, self.game_id
                    # may not yet be a key of game_dict -- the print below
                    # could KeyError in that case; confirm.
                    print "Game dictionary is :"
                    print self.game_dict[self.game_id]
                #create new game for the given game-id and add user to it
                else:
                    # Pick one of the four available maps at random.
                    map_id=random.randint(1, 4)
                    print self.available_maps_dict[map_id]
                    self.game_dict[self.game_id]=[]
                    player_number = len(self.game_dict[self.game_id])+1
                    peerconn.send_data(DETAILS,'%d %s %d %d' % (self.game_id,self.available_maps_dict[map_id],len(self.game_dict[self.game_id]), player_number))
                    self.game_dict[self.game_id].append(data)
                    self.gameid_map_dict[self.game_id]=self.available_maps_dict[map_id]
                    print "Game dictionary is :"
                    print self.game_dict[self.game_id]
                self.game_dict_lock.release()
            #this condition is hit when a game is started with < MAX players
            else:
                message,game_id = data.split(" ")
                # Close off the current (not-yet-full) game so no more
                # players can join it.
                if int(game_id)==int(self.game_id) and len(self.game_dict[self.game_id])!=MAX_PLAYER_NUMBER:
                    self.game_id=self.game_id+1
                    print "GAME ID" , self.game_id
        peerconn.send_data(REPLY,'OK')
    #--------------------------------------------------------------------------
    def __handle_player_leaving_gracious(self,peerconn,data,peername):
    #--------------------------------------------------------------------------
        # data: "<game id> <peer host:port> <player number> <play started?>"
        players_game_id,peer_name,player_num,play_start= data.split(" ")
        if play_start=="False":
            # Player left before play started: just drop it from the
            # roster (and reopen the game if it had been full).
            self.game_dict_lock.acquire()
            if peer_name in self.game_dict[int(players_game_id)]:
                print " GAME DICT BEFORE REMOVING"
                print self.game_dict[int(players_game_id)]
                if len(self.game_dict[int(players_game_id)])==MAX_PLAYER_NUMBER:
                    self.game_id=self.game_id-1
                self.game_dict[int(players_game_id)].remove(peer_name)
                print " GAME DICT AFTER REMOVING"
                print self.game_dict[int(players_game_id)]
            self.game_dict_lock.release()
        elif play_start=="True":
            # Hard-coded test peers exercise the sudden-drop/rejoin path.
            # NOTE(review): __handle_player_leaving_suddenly splits data
            # into three fields but this message has four -- confirm the
            # test path actually works.
            if peer_name=="128.237.224.170:12341" or peer_name=="128.237.224.170:12342":
                print " In rejoin test case"
                print "DATA ",data
                self.__handle_player_leaving_suddenly(peerconn, data, peername)
                return
            print "peer_name"+" popped"
            if peer_name in self.game_dict[int(players_game_id)]:
                self.game_dict[int(players_game_id)].remove(peer_name)
                print " GAME DICT AFTER REMOVING"
                print self.game_dict[int(players_game_id)]
    #--------------------------------------------------------------------------
    def __handle_game_end_bootstrap(self,peerconn,data,peername):
    #--------------------------------------------------------------------------
        # data: "<game id> <winner name> <winner id>" -- drop the game.
        gameid,winnername,winnerid=data.split(" ")
        print "GAME END"
        print self.game_dict
        if int(gameid) in self.game_dict:
            del self.game_dict[int(gameid)]
            print self.game_dict
    #--------------------------------------------------------------------------
    def __handle_player_leaving_suddenly(self,peerconn,data,peername):
    #--------------------------------------------------------------------------
        # data: "<game id> <peer host:port> <player number>".  Remove the
        # peer from the game but start a grace-period timer so it can
        # rejoin with the same slot (see __handle_gamestart).
        print "DATA IN LEAVING",data
        gameid,peer_address,player_number = data.split(" ")
        print gameid,peer_address,player_number
        self.rejoin_thread_dict[peer_address]=[threading.Thread(target=self.rejoin_thread_time,args=[peer_address]),gameid,player_number]
        print self.rejoin_thread_dict[peer_address]
        self.game_dict[int(gameid)].remove(peer_address)
        print self.game_dict
        self.rejoin_thread_dict[peer_address][0].start()
    #--------------------------------------------------------------------------
    def rejoin_thread_time(self,peername):
    #--------------------------------------------------------------------------
        # Grace-period timer: after 40 s, forget the dropped peer so it
        # can no longer reclaim its slot.
        time.sleep(40)
        print 'timeout: delete it from the game', peername
        print self.rejoin_thread_dict
        if peername in self.rejoin_thread_dict:
            temp_list=self.rejoin_thread_dict.pop(peername)
            players_game_id = temp_list[1]
    #--------------------------------------------------------------------------
    def main(self, screen):
    #--------------------------------------------------------------------------
        print "Starting Bootstrap"
if __name__=='__main__':
    # Usage: <script> server-port max-peers peer-ip:port
    if len(sys.argv) < 4:
        print "Syntax: %s server-port max-peers peer-ip:port" % sys.argv[0]
        sys.exit(-1)
    serverport = int(sys.argv[1])
    # NOTE(review): maxpeers stays a string here, unlike serverport --
    # confirm Communicate coerces it where needed.
    maxpeers = sys.argv[2]
    peerid = sys.argv[3]
    appl = Game(firstpeer=peerid, maxpeers=maxpeers, serverport=serverport)
|
994,728 | 06cae3f01468a59cd1860b0974fee41c0e034aab | from selenium import webdriver
from bs4 import BeautifulSoup
import pandas as pd
from datetime import datetime as dt
import time
# Configure a headless Chrome suitable for running inside a container.
option = webdriver.ChromeOptions()
option.add_argument('--headless')
option.add_argument('--no-sandbox')
option.add_argument('--disable-dev-shm-usage')
driver = webdriver.Chrome('chromedriver', options = option)
source = 'http://kolakatimurkab.go.id/pages/kawal-corona'
driver.get(source)
# Wait for the client-side counters to render before scraping.
# NOTE(review): a fixed 50 s sleep is slow and fragile -- a WebDriverWait
# on the counter elements would be more robust; confirm before changing.
time.sleep(50)
content = driver.page_source
soup = BeautifulSoup(content, 'lxml')
# The large white counters carry the ODP / positive-case figures.
div = soup.find_all('div', attrs={'style': 'color:white;font-size:50pt'})
d = [i.get_text() for i in div]
# Table cells 3..5 carry the PDP figures.
# NOTE(review): the index -> meaning mapping below is inferred from the
# dict keys only -- verify against the live page before relying on it.
td = soup.find_all('td')[3:6]
td = [i.get_text() for i in td]
data = {'pdp_dipantau':[td[2]],
        'pdp_sembuh':[td[1]],
        'pdp_meninggal':[td[0]],
        'total_odp':[d[3]],
        'positif_sembuh':[d[1]],
        'positif_dirawat':[d[0]],
        'positif_meninggal':[d[2]],}
df = pd.DataFrame(data)
date = dt.now().strftime("%Y-%m-%d")
# Prepend/append the bookkeeping columns expected by covid19_data.
df.insert(loc=0, column='scrape_date', value = date)
df.insert(loc=1, column='date_update', value = date)
df.insert(loc=2, column='provinsi', value ='Sulawesi Tenggara')
df.insert(loc=3, column='kabkot', value = 'Kolaka Timur')
df.insert(loc=11, column='source_link', value =source)
df.insert(loc=12, column='types', value ='kabkot')
df.insert(loc=13, column='user_pic', value ='Dea')
# Bare expression: only displays in a notebook; no effect as a script.
df
import mysql.connector

# Connection settings.  The credentials were left blank in the original
# source, and the empty `port=` was a literal SyntaxError; fill these in
# (or load them from the environment) before running.
DB_CONFIG = {
    'host': '',
    'port': 3306,  # TODO: real port was scrubbed upstream; 3306 is the MySQL default
    'user': '',
    'passwd': '',
    'database': '',
}

mydb = None  # bound before try so the finally block is always safe
try:
    mydb = mysql.connector.connect(**DB_CONFIG)
    cursor = mydb.cursor()
    # Backtick-quote every DataFrame column name for the INSERT statement.
    cols1 = "`,`".join([str(i) for i in df.columns.tolist()])
    # Insert DataFrame records one by one.
    for i,row in df.iterrows():
        # Values are bound through driver placeholders, not interpolated
        # into the SQL string.
        sql = "INSERT INTO `covid19_data` (`" + cols1 + "`) VALUES (" + "%s,"*(len(row)-1) + "%s)"
        cursor.execute(sql, tuple(row))
        # the connection is not autocommitted by default, so we must commit to save our changes
        mydb.commit()
except mysql.connector.Error as error:
    print("Failed to insert into MySQL table {}".format(error))
finally:
    # Guard against connect() having raised before mydb was assigned.
    if mydb is not None and mydb.is_connected():
        cursor.close()
        mydb.close()
        print(date + " Kabupaten Kolaka Timur Done")
import pymysql.cursors
import sqlalchemy
# NOTE(review): this engine is created but never used above, and the
# credentials in the URL look environment-specific -- confirm whether
# this trailing notebook cell is still needed.
engine = sqlalchemy.create_engine('mysql+pymysql://covid_user5bb6593aa078@db-blanja2:3306/covid')
994,729 | ce8c73c7c6be13fc97de35ab0a0b913dd6d02740 | #https://www.geeksforgeeks.org/turtle-programming-python/
import turtle #Inside_Out
# Set up the drawing surface and pen: black canvas, white pen.
wn = turtle.Screen()
wn.bgcolor("black")
skk = turtle.Turtle()
skk.color("white")
def sqrfunc(size):
    """Draw one growing spiral arc starting at edge length *size*.

    Bug fix: the original wrapped the drawing loop in `while 1==1`, so
    the first call never returned and the later sqrfunc calls plus the
    final input() were unreachable.  Each call now draws 10 segments,
    turning 10 degrees and growing the step by 5 each time.
    """
    for _ in range(10):
        skk.fd(size)
        skk.left(10)
        size = size + 5
# Draw eight spirals with increasing starting edge lengths.
sqrfunc(10)
sqrfunc(20)
sqrfunc(30)
sqrfunc(40)
sqrfunc(50)
sqrfunc(60)
sqrfunc(70)
sqrfunc(80)
# Keep the window open until the user presses Enter.
holdit = input();
|
994,730 | 5915c3f761288151a2f51f4b5f51b06fb137ca4c | import math
import numpy
import numpy.random as nrand
import pandas as pd
from pyquery import PyQuery as pq
import datetime
import time
import enum
def Month2int(monthstr):
    """Return the 1-based month number for a 'Jan'..'Dec' abbreviation.

    Unknown strings return 0, matching the original if/elif chain's
    fall-through behavior.
    """
    months = {'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6,
              'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12}
    return months.get(monthstr, 0)
def ReadData(Frequency):
    """Scrape adjusted-close prices for long-lived commodity ETFs plus SPY
    from Yahoo Finance and return them as a single DataFrame.

    Frequency: 'M' -> monthly mean, 'W' -> weekly mean, anything else ->
    raw daily prices.

    NOTE(review): depends on 'Commodity ETF List (125).csv' being present
    and on Yahoo Finance's HTML layout (scraped with pyquery) -- fragile
    by nature, and slow (one HTTP request per ETF per quarter).
    """
    ETF_File = pd.read_csv('Commodity ETF List (125).csv')
    # Keep only ETFs launched before 2015 so every window below has data.
    ChooseETFSymbol = list()
    for i in range(len(ETF_File['Inception'])):
        if int(ETF_File['Inception'][i][:4]) < 2015:
            ChooseETFSymbol.append(ETF_File['Symbol'][i])
    ChooseETFSymbol.append("SPY")  # market benchmark is always last
    print(ChooseETFSymbol)
    # Quarterly scrape windows from end of 2015 through Q1 2019.
    startTime = [[2015,12,31],[2016,4,1],[2016,7,1],[2016,10,1],[2017,1,1],[2017,4,1],[2017,7,1],[2017,10,1],[2018,1,1],[2018,4,1],[2018,7,1],[2018,10,1],[2019,1,1]]
    endTime = [[2016,3,30],[2016,6,30],[2016,9,30],[2016,12,30],[2017,3,30],[2017,6,30],[2017,9,30],[2017,12,30],[2018,3,30],[2018,6,30],[2018,9,30],[2018,12,30],[2019,3,30]]
    AllETFDataFrame = pd.DataFrame()
    ETFcount = 0
    for ETF in ChooseETFSymbol:
        DateList = list()
        AdjCloseList = list()
        for TimePeriod in range(len(startTime)):
            # Yahoo's history URL takes UNIX timestamps for the window.
            startdatatime = datetime.datetime(startTime[TimePeriod][0], startTime[TimePeriod][1], startTime[TimePeriod][2], 0, 0, 0)
            starttimestamp = time.mktime(startdatatime.timetuple())
            enddatatime = datetime.datetime(endTime[TimePeriod][0], endTime[TimePeriod][1], endTime[TimePeriod][2], 0, 0, 0)
            endtimestamp = time.mktime(enddatatime.timetuple())
            try:
                Website = pq('https://finance.yahoo.com/quote/'+ETF+'/history?period1='+str(int(starttimestamp))+'&period2='+str(int(endtimestamp))+'&interval=1d&filter=history&frequency=1d', headers={'user-agent': 'pyquery'})
            except:
                # NOTE(review): if the request fails, `Website` keeps its
                # value from the previous iteration (or is unbound on the
                # first one) -- the code below would then crash or reuse
                # stale data. Bare except also swallows KeyboardInterrupt.
                print("Cannot send requests")
            Tbody = Website('tbody')
            Trs = Tbody('tr')
            for Tr in Trs.items():
                # Each row: column 0 is the date, column 4 is Adj Close.
                datas = Tr('span')
                datacount = 0
                for data in datas.items():
                    if datacount == 4:
                        pricedata = data.text()
                    if datacount == 0:
                        DateStrings = data.text().replace(",", "").split(" ")
                        dataDatetime = datetime.date(int(DateStrings[2]), Month2int(DateStrings[0]), int(DateStrings[1]))
                    datacount = datacount+1
                AdjCloseList.append(float(pricedata.replace(",", "")))
                DateList.append(dataDatetime)
        ETFDataFrame = pd.DataFrame(AdjCloseList, columns=[str(ETF)], index=DateList).sort_index()
        # First ETF seeds the combined frame; the rest are left-joined on date.
        if ETFcount == 0:
            AllETFDataFrame = ETFDataFrame
        else:
            AllETFDataFrame = AllETFDataFrame.join(ETFDataFrame, how='left', sort=True)
        ETFcount = ETFcount+1
    AllETFDataFrame.index = pd.to_datetime(AllETFDataFrame.index)
    AllETFDataFrame.index.name = 'Date'
    # NOTE(review): drop_duplicates returns a new index; the result is
    # discarded here, so duplicates are NOT actually removed.
    AllETFDataFrame.index.drop_duplicates(keep='first')
    # Resample to the requested frequency (mean of the period's prices).
    if Frequency == 'M':
        ReturnFrame = AllETFDataFrame.resample('M').mean()
    elif Frequency == 'W':
        ReturnFrame = AllETFDataFrame.resample('w').mean()
    else:
        ReturnFrame = AllETFDataFrame
    print(ReturnFrame)
    return ReturnFrame
def sortedDictValues1(adict):
    """Return the dict's values ordered by key.

    Bug fix: on Python 3 ``dict.items()`` returns a view object with no
    ``.sort()`` method, so the original raised AttributeError; sort the
    items with ``sorted()`` instead.
    """
    items = sorted(adict.items())
    return [value for key, value in items]
def sortedDictValues2(adict):
    """Return the dict's values ordered by key.

    Bug fixes: on Python 3 ``dict.keys()`` views have no ``.sort()``
    method, and the original comprehension indexed the *builtin*
    ``dict`` type instead of the ``adict`` argument (TypeError).
    """
    return [adict[key] for key in sorted(adict.keys())]
def vol(returns):
    """Return the volatility (population standard deviation) of a return series."""
    deviation = numpy.std(returns)
    return deviation
def beta(returns, market):
    """Return the CAPM beta of ``returns`` against ``market``.

    Beta is cov(returns, market) / var(market).  Bug fix: the original
    divided the covariance by ``numpy.std(market)`` -- the standard
    deviation rather than the variance, and with a different ddof than
    ``numpy.cov`` uses -- which is dimensionally wrong. Both values are
    now taken from the same sample covariance matrix.
    """
    covmat = numpy.cov(returns, market)
    return covmat[0][1] / covmat[1][1]
def lpm(returns, threshold, order):
    """Lower partial moment: mean of (threshold - r)^order over shortfalls."""
    shortfall = threshold - numpy.asarray(returns)
    clipped = shortfall.clip(min=0)
    return numpy.sum(clipped ** order) / len(returns)
def hpm(returns, threshold, order):
    """Higher partial moment: mean of (r - threshold)^order over gains."""
    excess = numpy.asarray(returns) - threshold
    clipped = excess.clip(min=0)
    return numpy.sum(clipped ** order) / len(returns)
def var(returns, alpha):
    """Historical value-at-risk: magnitude of the alpha-quantile return."""
    ordered = numpy.sort(returns)
    cutoff = int(alpha * len(ordered))
    return abs(ordered[cutoff])
def cvar(returns, alpha):
    """Conditional value-at-risk: mean magnitude of the worst alpha-tail.

    Averages the ``index = int(alpha * len(returns))`` smallest returns.
    Bug fix: the original divided by ``index`` without guarding against
    ``index == 0`` (small samples or small alpha), raising
    ZeroDivisionError; it now falls back to the single worst return.
    """
    sorted_returns = numpy.sort(returns)
    index = int(alpha * len(sorted_returns))
    if index == 0:
        # Tail narrower than one observation: report the worst return.
        return abs(sorted_returns[0])
    return abs(numpy.sum(sorted_returns[:index]) / index)
def prices(returns, base):
    """Turn a return series into a price array starting at ``base``.

    NOTE(review): each point is ``base * (1 + r_i)`` relative to the
    *initial* base -- returns are not compounded. Preserved as-is since
    dd()/max_dd() depend on this behaviour.
    """
    series = [base]
    for r in returns:
        series.append(base * (1 + r))
    return numpy.array(series)
def dd(returns, tau):
    """Magnitude of the worst price change over any window of ``tau`` periods.

    Builds a price path from the returns and scans every pair of points
    ``tau`` apart, keeping the most negative relative change.
    """
    values = prices(returns, 100)
    later = len(values) - 1
    earlier = later - tau
    worst = float('+inf')
    # Slide the tau-wide window from the end of the series to the start.
    while earlier >= 0:
        change = (values[later] / values[earlier]) - 1
        worst = min(worst, change)
        later, earlier = later - 1, earlier - 1
    # Report drawdown as a positive magnitude.
    return abs(worst)
def max_dd(returns):
    """Largest drawdown over any window length (brute force over tau)."""
    candidates = (dd(returns, tau) for tau in range(len(returns)))
    # Max draw-down should be positive.
    return abs(max(candidates, default=float('-inf')))
def average_dd(returns, periods):
    """Mean of the ``periods`` smallest drawdowns.

    NOTE(review): the ascending sort means this averages the *smallest*
    drawdowns; kept as-is to preserve the original behaviour.
    """
    drawdowns = sorted(dd(returns, tau) for tau in range(len(returns)))
    total_dd = sum(abs(drawdowns[i]) for i in range(periods))
    return total_dd / periods
def average_dd_squared(returns, periods):
    """Mean of the ``periods`` smallest squared drawdowns (ascending sort)."""
    squared = sorted(dd(returns, tau) ** 2.0 for tau in range(len(returns)))
    total_dd = sum(abs(squared[i]) for i in range(periods))
    return total_dd / periods
def treynor_ratio(er, returns, market, rf):
    """Excess return per unit of systematic (beta) risk."""
    systematic_risk = beta(returns, market)
    return (er - rf) / systematic_risk
def sharpe_ratio(er, returns, rf):
    """Excess return per unit of total volatility."""
    excess = er - rf
    return excess / vol(returns)
def information_ratio(returns, benchmark):
    """Mean active return over tracking error versus the benchmark."""
    active = returns - benchmark
    return numpy.mean(active) / vol(active)
def modigliani_ratio(er, returns, benchmark, rf):
    """M2 measure: excess return rescaled to the benchmark's risk level."""
    np_rf = numpy.full(len(returns), rf)
    portfolio_excess = returns - np_rf
    benchmark_excess = benchmark - np_rf
    risk_ratio = vol(portfolio_excess) / vol(benchmark_excess)
    return (er - rf) * risk_ratio + rf
def excess_var(er, returns, rf, alpha):
    """Excess return per unit of value-at-risk."""
    tail = var(returns, alpha)
    return (er - rf) / tail
def conditional_sharpe_ratio(er, returns, rf, alpha):
    """Excess return per unit of conditional value-at-risk."""
    tail_risk = cvar(returns, alpha)
    return (er - rf) / tail_risk
def omega_ratio(er, returns, rf, target=0):
    """Omega: excess return over the first-order lower partial moment."""
    shortfall_risk = lpm(returns, target, 1)
    return (er - rf) / shortfall_risk
def sortino_ratio(er, returns, rf, target=0):
    """Excess return over downside deviation (root of the 2nd-order LPM)."""
    downside = math.sqrt(lpm(returns, target, 2))
    return (er - rf) / downside
def kappa_three_ratio(er, returns, rf, target=0):
    """Kappa-3: excess return over the cube root of the 3rd-order LPM."""
    downside = math.pow(lpm(returns, target, 3), float(1/3))
    return (er - rf) / downside
def gain_loss_ratio(returns, target=0):
    """First-order HPM over first-order LPM around the target."""
    gains = hpm(returns, target, 1)
    losses = lpm(returns, target, 1)
    return gains / losses
def upside_potential_ratio(returns, target=0):
    """First-order HPM over downside deviation."""
    upside = hpm(returns, target, 1)
    downside = math.sqrt(lpm(returns, target, 2))
    return upside / downside
def calmar_ratio(er, returns, rf):
    """Excess return over the maximum drawdown."""
    excess = er - rf
    return excess / max_dd(returns)
def sterling_ration(er, returns, rf, periods):
    """Sterling ratio: excess return over the average drawdown.

    (The misspelled name is kept -- callers depend on it.)
    """
    excess = er - rf
    return excess / average_dd(returns, periods)
def burke_ratio(er, returns, rf, periods):
    """Excess return over the root of the average squared drawdown."""
    excess = er - rf
    return excess / math.sqrt(average_dd_squared(returns, periods))
def test_risk_metrics():
    """Smoke demo: print every base risk metric for random return data."""
    r = nrand.uniform(-1, 1, 50)
    m = nrand.uniform(-1, 1, 50)
    metrics = (
        ("vol =", vol(r)),
        ("beta =", beta(r, m)),
        ("hpm(0.0)_1 =", hpm(r, 0.0, 1)),
        ("lpm(0.0)_1 =", lpm(r, 0.0, 1)),
        ("VaR(0.05) =", var(r, 0.05)),
        ("CVaR(0.05) =", cvar(r, 0.05)),
        ("Drawdown(5) =", dd(r, 5)),
        ("Max Drawdown =", max_dd(r)),
    )
    for label, value in metrics:
        print(label, value)
def test_risk_adjusted_metrics():
    """Demo: print risk-adjusted performance ratios for random series.

    Bug fix: the original began with ``ReadData()`` -- but ReadData
    requires a ``Frequency`` argument, so this function always raised
    TypeError before printing anything.  The call's result was unused
    (the demo works on simulated data), so it is removed.
    """
    # Simulated market (m) and portfolio (r) returns.
    r = nrand.uniform(-1, 1, 50)
    m = nrand.uniform(-1, 1, 50)
    # Expected portfolio return.
    e = numpy.mean(r)
    # Risk-free rate.
    f = 0.06
    print("Treynor Ratio =", treynor_ratio(e, r, m, f))
    print("Sharpe Ratio =", sharpe_ratio(e, r, f))
    print("Information Ratio =", information_ratio(r, m))
    # Risk-adjusted return based on Value at Risk
    print("Excess VaR =", excess_var(e, r, f, 0.05))
    print("Conditional Sharpe Ratio =", conditional_sharpe_ratio(e, r, f, 0.05))
    print("")
    print("Omega Ratio =", omega_ratio(e, r, f))
    print("Sortino Ratio =", sortino_ratio(e, r, f))
    print("Kappa 3 Ratio =", kappa_three_ratio(e, r, f))
    print("Gain Loss Ratio =", gain_loss_ratio(r))
    print("Upside Potential Ratio =", upside_potential_ratio(r))
    # Risk-adjusted return based on Drawdown risk
    print("Calmar Ratio =", calmar_ratio(e, r, f))
    print("Sterling Ratio =", sterling_ration(e, r, f, 5))
    print("Burke Ratio =", burke_ratio(e, r, f, 5))
def ComputeOmega():
    """Rank every ETF column of the module-level ``AllData`` frame by its
    Omega ratio (vs a 6% threshold) and print the ranking table.

    NOTE(review): relies on the global ``AllData`` created in
    ``__main__``; the final column is assumed to be the SPY benchmark.
    """
    import pandas as pd
    ETFName = AllData.columns
    ResultDict = dict()
    for etf in ETFName[:-1]:
        Asset = AllData[etf].values
        SPY = AllData["SPY"].values
        # Simple percentage returns from consecutive prices.
        r1 = (Asset[1:]-Asset[:-1])/Asset[:-1]
        # NOTE(review): the benchmark returns `m` are computed but unused.
        m = (SPY[1:]-SPY[:-1])/SPY[:-1]
        # Expected portfolio return.
        e = numpy.mean(r1)
        # Risk-free rate.
        f = 0.06
        ResultDict[etf] = omega_ratio(e, r1, f)
    import operator
    # Sort ETFs by Omega ratio, best first, and present as a ranking table.
    ResultDict_Sort = sorted(ResultDict.items(), key=operator.itemgetter(1), reverse=True)
    Result = pd.DataFrame.from_dict(ResultDict_Sort)
    Result.index.name = 'Ranking'
    Result.columns = ['ETF', 'Value']
    print(Result)
from functools import reduce
def str2float(s):
    """Convert a decimal string to a number without calling float().

    Generalised: the original assumed a '.' was always present and
    raised ValueError on plain integer strings like '42'; both forms
    are now accepted.  Digits are folded with reduce(x*10 + y).
    """
    def fn(x, y):
        return x * 10 + y
    if '.' in s:
        n = s.index('.')
        int_part, frac_part = s[:n], s[n + 1:]
    else:
        int_part, frac_part = s, ''
    result = reduce(fn, map(int, int_part))
    if frac_part:
        digits = list(map(int, frac_part))
        result += reduce(fn, digits) / 10 ** len(digits)
    return result
#print('\'123.4567\'=',str2float('123.4567'))
def MyDataFrameProcess(Strlist):
    """Convert a sequence of decimal strings to a numpy array of numbers."""
    import numpy as np
    return np.array([str2float(s) for s in Strlist])
def ComputeGeneralizedSharpe():
    """Rank the ETFs in the global ``AllData`` frame by a generalized
    (skew/kurtosis-adjusted) Sharpe-like measure and print the table.

    NOTE(review): appears to implement an adjusted Sharpe ratio based on
    the first four return moments -- confirm the formula against its
    source.  Also note the function returns ``asksr`` computed for the
    *last* ETF in the loop only, not the ranking.
    """
    import numpy as np
    import math
    from scipy.stats import skew
    from scipy.stats import kurtosis
    ETFName = AllData.columns
    ResultDict = dict()
    for etf in ETFName[:-1]:
        Asset = AllData[etf].values
        SPY = AllData["SPY"].values
        # Simple percentage returns from consecutive prices.
        Asset_Return = (Asset[1:]-Asset[:-1])/Asset[:-1]
        mean = np.mean(Asset_Return)
        standard = np.std(Asset_Return)
        variance = np.var(Asset_Return)
        S = skew(Asset_Return)
        K = kurtosis(Asset_Return)
        # The adjusted measure is only defined when kurtosis is large
        # enough relative to skewness; otherwise fall back to 0.
        if K > 3+(5/3)*S*S:
            a = 3*math.sqrt(3*K-4*(S**2)-9)/variance*(3*K-5*(S**2)-9)
            b = 3*S/standard*(3*K-5*(S**2)-9)
            n = mean-(3*S*standard)/(3*K-4*(S**2)-9)
            d = 3*standard*math.sqrt(3*K-4*(S**2)-9)/(3*K-5*(S**2)-9)
            phi = math.sqrt((a**2)-(b**2))
            rf = 0.02  # deposit (risk-free) rate
            lamda = 0.1
            astar = 1/lamda*(b+(a*(n-rf)/math.sqrt((d**2)+((n-rf)**2))))
            asksr = math.sqrt(2*(lamda*astar*(n-rf)-d*(phi-math.sqrt((a**2)-((b-lamda*astar)**2)))))
        else:
            a = 0
            b = 3*S/standard*(3*K-5*(S**2)-9)
            n = mean-(3*S*standard)/(3*K-4*(S**2)-9)
            d = 0
            phi = 0
            rf = 0.02  # deposit (risk-free) rate
            lamda = 0.5
            asksr = 0
        ResultDict[etf] = asksr
    import operator
    # Sort ETFs by the adjusted measure, best first.
    ResultDict_Sort = sorted(ResultDict.items(), key=operator.itemgetter(1), reverse=True)
    Result = pd.DataFrame.from_dict(ResultDict_Sort)
    Result.index.name = 'Ranking'
    Result.columns = ['ETF', 'Value']
    print(Result)
    return asksr
def ComputeRiskiness():
    """Rank the ETFs in the global ``AllData`` frame by a riskiness index
    solved from sum(exp(-alpha * g)) == N and print the ranking.

    NOTE(review): this looks like the Aumann-Serrano operational
    riskiness equation E[exp(-alpha*g)] = 1 -- confirm against its
    source. Returns are scaled down by 10000 before solving.
    """
    from scipy.optimize import fsolve, leastsq, root
    import numpy as np
    def f(alpha):
        # Residual of sum(exp(-g_i * alpha)) - N; closes over `gamble`,
        # which is rebound for each ETF in the loop below.
        import numpy as np
        sum = 0
        for i in range(len(gamble)):
            sum = np.exp(-gamble[i]*alpha)+sum
        return sum-len(gamble)
    ETFName = AllData.columns
    ResultDict = dict()
    for etf in ETFName[:-1]:
        Asset = AllData[etf].values
        # NOTE(review): SPY is loaded but unused in this function.
        SPY = AllData["SPY"].values
        # Percentage returns, scaled by 1e-4 to keep exp() well-behaved.
        Asset_Return = ((Asset[1:]-Asset[:-1])/Asset[:-1])/10000
        gamble = Asset_Return
        # Solve f(alpha) = 0 starting from a small positive guess.
        result = fsolve(f, 0.0003)
        ResultDict[etf] = np.exp(-result)
    import operator
    # Sort ETFs by riskiness, largest first.
    ResultDict_Sort = sorted(ResultDict.items(), key=operator.itemgetter(1), reverse=True)
    Result = pd.DataFrame.from_dict(ResultDict_Sort)
    Result.index.name = 'Ranking'
    Result.columns = ['ETF', 'Value']
    print(Result)
if __name__ == "__main__":
    import pandas as pd
    # Scrape weekly prices once, cache them to disk, reload, then rank
    # the ETFs by each of the three risk measures.
    AllData = ReadData('W')
    AllData.to_pickle('ETFDataFrame_W')
    AllData = pd.read_pickle('ETFDataFrame_W')
    ComputeOmega()
    ComputeRiskiness()
    ComputeGeneralizedSharpe()
|
994,731 | b5f9f90112e810284d65a849a38f9d6f77f4b4ab | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.core.urlresolvers import reverse
# Create your models here.
class User(models.Model):
    """Minimal hand-rolled account record for the snap app.

    NOTE(review): the password is stored as a plain CharField with no
    hashing -- consider django.contrib.auth's user model instead.
    """
    username = models.CharField(max_length=50)
    password = models.CharField(max_length=50)  # plaintext -- see class note
    email = models.CharField(max_length=100)

    def __str__(self):
        """Display users by username in admin/shell."""
        return self.username
class Post(models.Model):
    """An image post made by a User, with a like count and a caption."""
    image = models.CharField(max_length=500)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    likes = models.IntegerField(default=0)
    comment = models.CharField(max_length=100)

    def get_absolute_url(self):
        """Return the URL of this post's detail page.

        Bug fix: the original passed ``()`` as reverse()'s second
        positional argument, which is the ``urlconf`` parameter; an
        empty tuple is not a valid urlconf and breaks URL resolution.
        Only the kwargs are needed.
        """
        return reverse("snap:detail", kwargs={"pk": self.pk})

    def __str__(self):
        """Display posts by their caption in admin/shell."""
        return self.comment
|
994,732 | b1a610ce52f81fcd65840da4652ee38b613553b5 | import numpy as np
class HashTableLP(object):
    """Hash table with linear probing over keys exposing a ``word`` attribute.

    Empty slots hold the integer sentinel -1 and deleted slots hold -2.
    ``hashNum`` (1..6) selects one of six hash functions in h().

    Bug fixes vs the original:
      * ``h`` and ``recursiveForm`` are methods but were called as bare
        functions (``h(self, k)``), raising NameError at runtime; they
        are now invoked through ``self.``.
      * ``find`` dereferenced ``.word`` on deleted-slot sentinels (-2),
        raising AttributeError; deleted slots are now skipped.
      * ``n`` and ``resize`` hashed/re-inserted the integer sentinels,
        which have no ``.word``; sentinels are now skipped.
      * ``dtype=np.object`` was removed in NumPy 1.24; the builtin
        ``object`` is used instead.
    """

    def __init__(self, size, hashNum):
        # object-dtype array pre-filled with the empty sentinel (-1).
        self.item = np.zeros(size, dtype=object) - 1
        self.hashNum = hashNum

    def insert(self, k):
        """Insert k by linear probing; return its slot, or -1 if full.

        As in the original, deleted (-2) slots are not reused.
        """
        for i in range(len(self.item)):
            pos = (self.h(k) + i) % len(self.item)
            if self.item[pos] == -1:
                self.item[pos] = k
                return pos
        return -1

    def find(self, k):
        """Return the slot holding a key with the same word, or -1."""
        for i in range(len(self.item)):
            pos = (self.h(k) + i) % len(self.item)
            slot = self.item[pos]
            if isinstance(slot, int):
                if slot == -1:
                    return -1  # empty slot ends the probe: k is absent
                continue       # deleted slot (-2): keep probing
            if slot.word == k.word:
                return pos
        return -1

    def delete(self, k):
        """Mark k's slot deleted (-2); return the slot index, or -1 if absent."""
        f = self.find(k)
        if f >= 0:
            self.item[f] = -2
        return f

    def h(self, k):
        """Hash k.word to a slot index using the scheme in self.hashNum."""
        m = len(self.item)
        if self.hashNum == 1:
            return ((len(k.word) - 1)) % m
        elif self.hashNum == 2:
            return ord(k.word[0]) % m
        elif self.hashNum == 3:
            return (ord(k.word[0]) + ord(k.word[-1])) % m
        elif self.hashNum == 4:
            # Running sum of character codes, reduced as it grows.
            total = 0
            for x in range(len(k.word)):
                total = total + ord(k.word[x])
                if total > m:
                    total = total - m
            return (total % m)
        elif self.hashNum == 5:
            return self.recursiveForm(m, k.word)
        else:
            mx = 0
            for x in range(len(k.word)):
                mx += ord(k.word[x]) * 2302
            return mx % m

    def recursiveForm(self, n, s):
        """Base-255 polynomial string hash, computed recursively mod n."""
        if s == '':
            return 1
        return ((ord(s[0]) + 255 * self.recursiveForm(n, s[1:])) % n)

    def print_table(self):
        """Dump the raw slot array (sentinels included) to stdout."""
        print('Table contents:')
        print(self.item)

    def n(self, k):
        """Return the largest stored key sharing k's hash value, or -1."""
        temp = []
        target = self.h(k)
        for b in self.item:
            if isinstance(b, int):
                continue  # skip -1/-2 sentinels (original crashed on these)
            if self.h(b) == target:
                temp.append(b)
        if len(temp) < 1:
            return -1
        temp.sort()
        return temp[len(temp) - 1]

    def resize(H):
        """Return a new table of double size containing H's live keys."""
        new = HashTableLP(len(H.item) * 2, H.hashNum)
        for x in range(len(H.item)):
            if not isinstance(H.item[x], int):
                new.insert(H.item[x])
        return new
994,733 | fa57aa0fe58e853af9eb0be6e95da567517c8d48 | import os
import shutil
from cookiecutter import main
import pytest
# Template root: two levels up from this test file (the directory that
# holds cookiecutter.json).
CC_ROOT = os.path.abspath(
    os.path.join(
        __file__,
        os.pardir,
        os.pardir,
    )
)
@pytest.fixture(scope='function')
def default_baked_project(tmpdir):
    """Bake the template with all defaults into a temp dir, yield the
    generated project's path, and remove it after the test."""
    out_dir = str(tmpdir.mkdir('pki-project'))
    main.cookiecutter(
        CC_ROOT,
        no_input=True,
        # extra_content={},  # NOTE(review): the real kwarg is spelled extra_context
        output_dir=out_dir,
    )
    yield os.path.join(
        out_dir,
        'acme_corp-pki',
    )
    shutil.rmtree(out_dir)
def test_readme(default_baked_project):
    """The baked project ships a README.md with no template markers left."""
    readme = os.path.join(default_baked_project, 'README.md')
    assert os.path.exists(readme)
    assert no_curlies(readme)
def test_makefile(default_baked_project):
    """The baked project ships a Makefile with no template markers left."""
    makefile = os.path.join(default_baked_project, 'Makefile')
    assert os.path.exists(makefile)
    assert no_curlies(makefile)
def test_folders(default_baked_project):
    """Every expected directory exists in the baked project tree."""
    expected_dirs = [
    ]
    ignored_dirs = [
        default_baked_project,
    ]
    wanted = [os.path.join(default_baked_project, d) for d in expected_dirs]
    found_dirs, _, _ = list(zip(*os.walk(default_baked_project)))
    assert len(set(wanted + ignored_dirs) - set(found_dirs)) == 0
def no_curlies(filepath):
    """Return True when the file contains no Jinja template markers."""
    with open(filepath, 'r') as f:
        data = f.read()
    markers = (
        '{{',
        '}}',
        '{%',
        '%}',
    )
    return not any(marker in data for marker in markers)
|
994,734 | cea1dc029a3b3f1be40c33e073f9a79681f9c864 | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
class IntroductionClass:
    """Trivial wrapper that stores a single ``data`` payload."""

    def __init__(self, data):
        # Keep a reference to whatever the caller passed in.
        self.data = data
994,735 | 46b9f5e6baf2f0fadd150d0176abea18b0714cc5 | import yaml
import requests
from django.contrib import messages
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.urls import reverse
from django.http import HttpResponseRedirect
from django.db.models import Q
from django.http import JsonResponse
import datetime
from django.db.models import Count
from pprint import pprint
from helper.common import manager as helper_manager
from support import models as support_models
from django.shortcuts import redirect
from leave_manager.common import users
from leave_manager.common import leave_manager
from services import models as services_model
from employee import models as employee_models
from . import models as notification_models
from django.core.cache import cache
from django.conf import settings
from helper.common import redis
from django.core.cache.backends.base import DEFAULT_TIMEOUT
from django.views.decorators.cache import cache_page
# Cache TTLs from settings. NOTE(review): the getattr fallbacks default to
# the very same settings attributes, so they are effectively no-ops.
CACHE_TTL = getattr(settings, 'CACHE_TTL', settings.CACHE_TTL)
CACHE_MAX_TTL = getattr(settings, 'CACHE_MAX_TTL', settings.CACHE_MAX_TTL)

# Template version read from credentials.yaml at import time, 'v1' fallback.
# NOTE(review): the file handle from open() is never closed, and a missing
# credentials.yaml makes the whole module fail to import.
credential = yaml.load(open('credentials.yaml'), Loader=yaml.FullLoader)
template_version = ''
try:
    template_version = credential['template_version']
except Exception as e:
    template_version = 'v1'
def update_notification_settings(emp):
    """Clear the notification-icon flag on ``emp``'s settings row.

    Best-effort: callers never crash on this bookkeeping step.

    Fixes vs the original: the except clause contained a dead ``pass``
    before ``print(e)``, and listing ``Exception`` alongside
    ``DoesNotExist`` in one tuple made the specific type meaningless.
    A missing settings row is now silently ignored; anything else is
    still swallowed but surfaced on stdout.
    """
    try:
        notify = notification_models.NotificationSettings.objects.get(employee=emp)
        notify.show_notification_icon = False
        notify.save()
    except notification_models.NotificationSettings.DoesNotExist:
        # No settings row for this employee yet -- nothing to clear.
        pass
    except Exception as e:
        # Keep best-effort semantics but surface unexpected failures.
        print(e)
@login_required
def notifications(request):
    """Render the logged-in employee's notification feed.

    Collects two streams: personal notifications (leave/compensation,
    included while unread) and branch-wide ones (holidays, counted until
    this employee appears in ``seen_by``), then renders the versioned
    notifications template.  Any failure redirects to the CRM index.
    """
    # Warm the branch cache on first use.
    if 'crm_branch' not in cache:
        redis.set_crm_branch(request)
    try:
        current_branch = cache.get('crm_branch')
        context = {}
        context.update({"current_branch": current_branch})
        user_all_branch = helper_manager.get_current_user_branch(request.user)
        data = []
        count = 0
        employee = employee_models.Employee.objects.get(user=request.user)
        # Personal notifications, newest first.
        notifications = notification_models.Notifications.objects.filter(
            employee=employee).order_by('-id')
        # Branch-wide notifications (no specific employee), newest first.
        common_notifications = notification_models.Notifications.objects.filter(
            employee=None, for_branch__in=user_all_branch).order_by('-id').distinct()
        for i in notifications:
            symbol = "/static/crmManager/v1/assets/lms.png"
            count = count+1
            if i.leave:
                if not i.is_read:
                    data.append({
                        'id': i.id,
                        'text': i.text,
                        'leave_id': i.leave.id,
                        'leave_reason': i.leave.reason,
                        'from_date': i.leave.from_date,
                        'to_date': i.leave.to_date,
                        'date': i.date,
                        'time': str(i.time).split('.')[0],  # drop microseconds
                        'is_read': i.is_read,
                        'leave': True,
                        'tag': i.tag,
                        'reject_reason': i.leave.reject_reason,
                        "symbol": symbol,
                        'url': i.url
                    })
                if i.tag == "approved":
                    # NOTE(review): flag is flipped in memory but never
                    # .save()d -- confirm whether persisting was intended.
                    i.is_read = True
            if i.compensation:
                if not i.is_read:
                    data.append({
                        'id': i.id,
                        'text': i.text,
                        'compensation_id': i.compensation.id,
                        'leave_reason': i.compensation.reason,
                        'days': i.compensation.days,
                        'date': i.date,
                        'time': str(i.time).split('.')[0],
                        'is_read': i.is_read,
                        'leave': True,
                        'tag': i.tag,
                        "symbol": symbol
                    })
        for i in common_notifications:
            is_read_commmon_status = False
            if employee in i.seen_by.all():
                is_read_commmon_status = True
            else:
                # Unseen shared notification: include it in the badge count.
                count = count+1
                is_read_commmon_status = False
            symbol = "/static/crmManager/v1/assets/"
            if i.holiday:
                symbol += "holiday.png"
                data.append({
                    'id': i.id,
                    'text': 'Holiday Notification of ' + i.holiday.title,
                    'holiday_title': i.holiday.title,
                    'holiday_id': i.holiday.id,
                    'from_date': i.holiday.from_date,
                    'to_date': i.holiday.to_date,
                    'date': i.date,
                    'time': str(i.time).split('.')[0],
                    'is_read': is_read_commmon_status,
                    'holiday': True,
                    'branch': i.for_branch.all(),
                    'tag': i.tag,
                    "symbol": symbol
                })
                # Record that this employee has now seen the shared notification.
                i.seen_by.add(employee)
        context.update({"data": data})
        context.update({"count": count})
        update_notification_settings(employee)
        return render(request, "notifications/"+template_version+"/notifications.html", context)
    except (Exception, employee_models.Employee.DoesNotExist) as e:
        print("Notification API Exception", e)
        return HttpResponseRedirect(reverse('crm_index'))
def notification_count(request):
    """Return how many notifications the current user should be badged for.

    Every personal notification counts; shared (employee=None) ones
    count only while this employee is absent from ``seen_by``.  Returns
    0 on any failure (e.g. the user has no Employee record).

    Cleanup vs the original: the unused ``employee = ...none()`` and
    ``data = []`` locals are removed, and the counting loop over the
    personal queryset is replaced by a single ``.count()`` query.
    """
    count = 0
    try:
        employee = employee_models.Employee.objects.get(user=request.user)
        # Every personal notification contributes to the badge.
        count = notification_models.Notifications.objects.filter(employee=employee).count()
        user_all_branch = helper_manager.get_current_user_branch(request.user)
        common_notifications = notification_models.Notifications.objects.filter(
            employee=None, for_branch__in=user_all_branch).order_by('-id').distinct()
        # Shared notifications count only while unseen by this employee.
        for notification in common_notifications:
            if employee not in notification.seen_by.all():
                count = count+1
        return count
    except (Exception, employee_models.Employee.DoesNotExist) as e:
        print("Context 1 ",e)
        return count
def create_notification(request, data, for_employee=""):
    """Persist a Notifications row for the given event payload.

    ``data["type"]`` selects the notification flavour.  Returns True on
    creation, False otherwise; unknown types fall through and return
    None, exactly as before.
    """
    kind = data["type"]
    if kind == "leave-approved":
        created = notification_models.Notifications.objects.create(
            leave=data['data'], text=data['text'], employee=for_employee, tag="approved")
        return bool(created)
    if kind == "apply-leave":
        created = notification_models.Notifications.objects.create(
            employee=for_employee, leave=data["data"], text=data["text"],
            url=data['url'], tag=data["data"].id)
        return bool(created)
    if kind == "leave-rejected":
        created = notification_models.Notifications.objects.create(
            leave=data['data'], text=data['text'], employee=for_employee, tag="rejected")
        return bool(created)
994,736 | e1de0385768bc990a8b86325e1a0bdd2be0e6552 | import os
Import('buildenv')
zookeeper_include = Glob('zookeeper/include/*.h')
if buildenv._isLinux:
if buildenv._is64:
Install('../lib','lib64/libscew.a')
Install('../lib','lib64/libexpat.a')
Install('../lib','lib64/libreadline.a')
Install('../lib','lib64/libncurses.a')
Install('../lib','zookeeper/lib64/libzookeeper_st.a')
Install('../lib','zookeeper/lib64/libzookeeper_mt.a')
else:
Install('../lib','lib/libscew.a')
Install('../lib','lib/libexpat.a')
Install('../lib','lib/libreadline.a')
Install('../lib','lib/libncurses.a')
Install('../lib','zookeeper/lib/libzookeeper_st.a')
Install('../lib','zookeeper/lib64/libzookeeper_mt.a')
Install('../include/thirdparty/zookeeper/',zookeeper_include)
else:
libevent_include0 = Glob('libevent-2.1.8-stable/win64/include/*.h')
libevent_include1 = Glob('libevent-2.1.8-stable/win64/include/event2/*.h')
Install('../include/thirdparty/libevent-2.1.8-stable/', libevent_include0)
Install('../include/thirdparty/libevent-2.1.8-stable/event2/', libevent_include1)
Install(buildenv.libInstallPath, Glob('libevent-2.1.8-stable/win64/lib/*.lib')) |
994,737 | 72d58bb940ed1c4c0cae7b96d1a08022774e7f63 | from django.contrib.auth.models import AbstractUser
from django.db import models
from django.utils.translation import gettext_lazy as _
from users.managers import UserManager
class AskRegistration(models.Model):
    """Temporary registration confirmation codes.

    Parent class: models.Model.

    Fields
    ------
    email : EmailField
        Address the confirmation code was sent to.
    confirmation_code : TextField
        The generated confirmation code.
    ask_date : DateTimeField
        When the request was made (auto-set on creation; the original
        docstring called this field ``pub_date``, which does not exist).
    """
    email = models.EmailField(
        verbose_name='email',
        help_text='Введите email'
    )
    confirmation_code = models.TextField(
        verbose_name='Код подтвержения',
        help_text='Введите код подтверждения'
    )
    ask_date = models.DateTimeField(
        verbose_name='Дата запроса',
        auto_now_add=True,
        help_text='Укажите дату и время запроса'
    )

    class Meta:
        verbose_name_plural = 'Запросы на регистрацию'
        verbose_name = 'Запрос на регистрацию'
        ordering = ['-ask_date']

    def __str__(self):
        """Return the email as the string representation."""
        return self.email
class User(AbstractUser):
    """Custom user model authenticated by email instead of username.

    Parent class: AbstractUser; referenced by Review and Comment.
    Adds ``bio``, a constrained ``role`` (user/moderator/admin) and an
    ``updated_at`` timestamp.

    NOTE(review): the original docstring also listed confirmation_code
    and created_at fields that do not exist on this model.
    """
    class Role(models.TextChoices):
        """Closed set of permitted user roles."""
        USER = 'user', _('Пользователь')
        MODERATOR = 'moderator', _('Модератор')
        ADMIN = 'admin', _('Администратор')

    # Optional free-form "about me" text.
    bio = models.CharField(
        max_length=1000,
        null=True,
        blank=True
    )
    # Unique, indexed: used as the login identifier (see USERNAME_FIELD).
    email = models.EmailField(
        db_index=True,
        unique=True
    )
    role = models.CharField(
        max_length=10,
        choices=Role.choices,
        default=Role.USER,
    )
    # Updated automatically on every save.
    updated_at = models.DateTimeField(
        auto_now=True
    )

    # Authenticate with email; username is still required for createsuperuser.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['username']

    objects = UserManager()

    class Meta:
        ordering = ('id',)

    def __str__(self):
        """String representation shown in the console/admin."""
        return self.email
|
994,738 | 0d36e0f7bf2c9d56530d167b5413aa097f8b2c24 | from flask import Flask, render_template, request, url_for, flash, redirect, Response, session, send_from_directory
from flask_bootstrap import Bootstrap
import os
from app.classes.MarioExtravaganza import MarioExtravaganza
app = Flask(__name__)
Bootstrap(app)
# Session-signing key must be provided via the SECRET environment
# variable; startup fails fast (KeyError) when it is missing.
app.secret_key = os.environ["SECRET"]
## MAIN PAGE
@app.route("/", methods=["GET", "POST"])
def main():
    """Render the upload form; on POST, return the race-lineups CSV."""
    if request.method == "POST":
        try:
            # Build the domain object and produce the CSV payload
            # to return to the user as a file download.
            mario = MarioExtravaganza()
            csv_data = mario.process(request)
            response = Response(csv_data, mimetype="text/csv")
            response.headers.set("Content-Disposition", "attachment", filename="racelineups.csv")
        except Exception as e:
            # Surface the failure on the error page instead of a 500.
            flash(str(e))
            return redirect(url_for("error"))
        return response
    return render_template("main.html", title="Mario Kart Extravaganza")
##ERROR PAGE
@app.route("/error", methods=["GET", "POST"])
def error():
    """Show the error page; a POST sends the user back to the main page."""
    if request.method == 'POST':
        return redirect(url_for('main'))
    return render_template('error.html')
@app.route('/favicon.ico')
def favicon():
    """Serve the site favicon from the static directory."""
    static_dir = os.path.join(app.root_path, 'static')
    return send_from_directory(static_dir,
                               'favicon.ico', mimetype='image/vnd.microsoft.icon')
|
994,739 | ee351a423e4d883b38b190cc5c01e2d8b7a5eb92 | #
# Autogenerated by Thrift Compiler (0.9.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
    # Optional C-accelerated (de)serializer; the structs below fall back
    # to the pure-Python protocol when it is unavailable.
    from thrift.protocol import fastbinary
except:
    fastbinary = None
class TColumnValue:
"""
Attributes:
- bool_val
- byte_val
- short_val
- int_val
- long_val
- double_val
- string_val
- binary_val
- timestamp_val
"""
thrift_spec = (
None, # 0
(1, TType.BOOL, 'bool_val', None, None, ), # 1
(2, TType.I32, 'int_val', None, None, ), # 2
(3, TType.I64, 'long_val', None, None, ), # 3
(4, TType.DOUBLE, 'double_val', None, None, ), # 4
(5, TType.STRING, 'string_val', None, None, ), # 5
(6, TType.BYTE, 'byte_val', None, None, ), # 6
(7, TType.I16, 'short_val', None, None, ), # 7
(8, TType.STRING, 'binary_val', None, None, ), # 8
(9, TType.STRING, 'timestamp_val', None, None, ), # 9
)
def __init__(self, bool_val=None, byte_val=None, short_val=None, int_val=None, long_val=None, double_val=None, string_val=None, binary_val=None, timestamp_val=None,):
self.bool_val = bool_val
self.byte_val = byte_val
self.short_val = short_val
self.int_val = int_val
self.long_val = long_val
self.double_val = double_val
self.string_val = string_val
self.binary_val = binary_val
self.timestamp_val = timestamp_val
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.BOOL:
self.bool_val = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.BYTE:
self.byte_val = iprot.readByte();
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.I16:
self.short_val = iprot.readI16();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.int_val = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.long_val = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.DOUBLE:
self.double_val = iprot.readDouble();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.string_val = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.STRING:
self.binary_val = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.STRING:
self.timestamp_val = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TColumnValue')
if self.bool_val is not None:
oprot.writeFieldBegin('bool_val', TType.BOOL, 1)
oprot.writeBool(self.bool_val)
oprot.writeFieldEnd()
if self.int_val is not None:
oprot.writeFieldBegin('int_val', TType.I32, 2)
oprot.writeI32(self.int_val)
oprot.writeFieldEnd()
if self.long_val is not None:
oprot.writeFieldBegin('long_val', TType.I64, 3)
oprot.writeI64(self.long_val)
oprot.writeFieldEnd()
if self.double_val is not None:
oprot.writeFieldBegin('double_val', TType.DOUBLE, 4)
oprot.writeDouble(self.double_val)
oprot.writeFieldEnd()
if self.string_val is not None:
oprot.writeFieldBegin('string_val', TType.STRING, 5)
oprot.writeString(self.string_val)
oprot.writeFieldEnd()
if self.byte_val is not None:
oprot.writeFieldBegin('byte_val', TType.BYTE, 6)
oprot.writeByte(self.byte_val)
oprot.writeFieldEnd()
if self.short_val is not None:
oprot.writeFieldBegin('short_val', TType.I16, 7)
oprot.writeI16(self.short_val)
oprot.writeFieldEnd()
if self.binary_val is not None:
oprot.writeFieldBegin('binary_val', TType.STRING, 8)
oprot.writeString(self.binary_val)
oprot.writeFieldEnd()
if self.timestamp_val is not None:
oprot.writeFieldBegin('timestamp_val', TType.STRING, 9)
oprot.writeString(self.timestamp_val)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
  def validate(self):
    # No required fields on this struct; validation always succeeds.
    return
  def __repr__(self):
    # Debug representation listing every attribute. Uses the Python 2 dict
    # API (iteritems) -- this generated module targets Python 2.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    # Structural equality: same class and identical attribute dicts.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    # Negation of __eq__ (required explicitly on Python 2).
    return not (self == other)
class TResultRow:
  """A single result row: a list of TColumnValue cells.

  Thrift-generated code; do not hand-edit field ids.

  Attributes:
   - colVals
  """

  thrift_spec = (
    None, # 0
    (1, TType.LIST, 'colVals', (TType.STRUCT,(TColumnValue, TColumnValue.thrift_spec)), None, ), # 1
  )

  def __init__(self, colVals=None,):
    self.colVals = colVals

  def read(self, iprot):
    """Deserialize this struct from *iprot*."""
    # Fast path: C-accelerated decoder when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.LIST:
          self.colVals = []
          (_etype3, _size0) = iprot.readListBegin()
          for _i4 in xrange(_size0):
            _elem5 = TColumnValue()
            _elem5.read(iprot)
            self.colVals.append(_elem5)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip for forward compatibility.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TResultRow')
    if self.colVals is not None:
      oprot.writeFieldBegin('colVals', TType.LIST, 1)
      oprot.writeListBegin(TType.STRUCT, len(self.colVals))
      for iter6 in self.colVals:
        iter6.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields; nothing to check.
    return

  def __repr__(self):
    # Debug representation (Python 2 dict API).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class TColumnData:
  """Columnar result data: parallel per-type value lists plus a null mask.

  Thrift-generated code; field ids must stay in sync with thrift_spec.
  ``is_null`` is the only required field; exactly one of the *_vals lists is
  expected to carry the column's values.

  Attributes:
   - is_null
   - bool_vals
   - byte_vals
   - short_vals
   - int_vals
   - long_vals
   - double_vals
   - string_vals
   - binary_vals
  """

  thrift_spec = (
    None, # 0
    (1, TType.LIST, 'is_null', (TType.BOOL,None), None, ), # 1
    (2, TType.LIST, 'bool_vals', (TType.BOOL,None), None, ), # 2
    (3, TType.LIST, 'byte_vals', (TType.BYTE,None), None, ), # 3
    (4, TType.LIST, 'short_vals', (TType.I16,None), None, ), # 4
    (5, TType.LIST, 'int_vals', (TType.I32,None), None, ), # 5
    (6, TType.LIST, 'long_vals', (TType.I64,None), None, ), # 6
    (7, TType.LIST, 'double_vals', (TType.DOUBLE,None), None, ), # 7
    (8, TType.LIST, 'string_vals', (TType.STRING,None), None, ), # 8
    (9, TType.LIST, 'binary_vals', (TType.STRING,None), None, ), # 9
  )

  def __init__(self, is_null=None, bool_vals=None, byte_vals=None, short_vals=None, int_vals=None, long_vals=None, double_vals=None, string_vals=None, binary_vals=None,):
    self.is_null = is_null
    self.bool_vals = bool_vals
    self.byte_vals = byte_vals
    self.short_vals = short_vals
    self.int_vals = int_vals
    self.long_vals = long_vals
    self.double_vals = double_vals
    self.string_vals = string_vals
    self.binary_vals = binary_vals

  def read(self, iprot):
    """Deserialize this struct from *iprot*, one typed list per field id."""
    # Fast path: C-accelerated decoder when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.LIST:
          self.is_null = []
          (_etype10, _size7) = iprot.readListBegin()
          for _i11 in xrange(_size7):
            _elem12 = iprot.readBool();
            self.is_null.append(_elem12)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.LIST:
          self.bool_vals = []
          (_etype16, _size13) = iprot.readListBegin()
          for _i17 in xrange(_size13):
            _elem18 = iprot.readBool();
            self.bool_vals.append(_elem18)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.LIST:
          self.byte_vals = []
          (_etype22, _size19) = iprot.readListBegin()
          for _i23 in xrange(_size19):
            _elem24 = iprot.readByte();
            self.byte_vals.append(_elem24)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.LIST:
          self.short_vals = []
          (_etype28, _size25) = iprot.readListBegin()
          for _i29 in xrange(_size25):
            _elem30 = iprot.readI16();
            self.short_vals.append(_elem30)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 5:
        if ftype == TType.LIST:
          self.int_vals = []
          (_etype34, _size31) = iprot.readListBegin()
          for _i35 in xrange(_size31):
            _elem36 = iprot.readI32();
            self.int_vals.append(_elem36)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 6:
        if ftype == TType.LIST:
          self.long_vals = []
          (_etype40, _size37) = iprot.readListBegin()
          for _i41 in xrange(_size37):
            _elem42 = iprot.readI64();
            self.long_vals.append(_elem42)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 7:
        if ftype == TType.LIST:
          self.double_vals = []
          (_etype46, _size43) = iprot.readListBegin()
          for _i47 in xrange(_size43):
            _elem48 = iprot.readDouble();
            self.double_vals.append(_elem48)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 8:
        if ftype == TType.LIST:
          self.string_vals = []
          (_etype52, _size49) = iprot.readListBegin()
          for _i53 in xrange(_size49):
            _elem54 = iprot.readString();
            self.string_vals.append(_elem54)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 9:
        if ftype == TType.LIST:
          self.binary_vals = []
          (_etype58, _size55) = iprot.readListBegin()
          for _i59 in xrange(_size55):
            _elem60 = iprot.readString();
            self.binary_vals.append(_elem60)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip for forward compatibility.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*; only non-None lists are written."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TColumnData')
    if self.is_null is not None:
      oprot.writeFieldBegin('is_null', TType.LIST, 1)
      oprot.writeListBegin(TType.BOOL, len(self.is_null))
      for iter61 in self.is_null:
        oprot.writeBool(iter61)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.bool_vals is not None:
      oprot.writeFieldBegin('bool_vals', TType.LIST, 2)
      oprot.writeListBegin(TType.BOOL, len(self.bool_vals))
      for iter62 in self.bool_vals:
        oprot.writeBool(iter62)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.byte_vals is not None:
      oprot.writeFieldBegin('byte_vals', TType.LIST, 3)
      oprot.writeListBegin(TType.BYTE, len(self.byte_vals))
      for iter63 in self.byte_vals:
        oprot.writeByte(iter63)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.short_vals is not None:
      oprot.writeFieldBegin('short_vals', TType.LIST, 4)
      oprot.writeListBegin(TType.I16, len(self.short_vals))
      for iter64 in self.short_vals:
        oprot.writeI16(iter64)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.int_vals is not None:
      oprot.writeFieldBegin('int_vals', TType.LIST, 5)
      oprot.writeListBegin(TType.I32, len(self.int_vals))
      for iter65 in self.int_vals:
        oprot.writeI32(iter65)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.long_vals is not None:
      oprot.writeFieldBegin('long_vals', TType.LIST, 6)
      oprot.writeListBegin(TType.I64, len(self.long_vals))
      for iter66 in self.long_vals:
        oprot.writeI64(iter66)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.double_vals is not None:
      oprot.writeFieldBegin('double_vals', TType.LIST, 7)
      oprot.writeListBegin(TType.DOUBLE, len(self.double_vals))
      for iter67 in self.double_vals:
        oprot.writeDouble(iter67)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.string_vals is not None:
      oprot.writeFieldBegin('string_vals', TType.LIST, 8)
      oprot.writeListBegin(TType.STRING, len(self.string_vals))
      for iter68 in self.string_vals:
        oprot.writeString(iter68)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.binary_vals is not None:
      oprot.writeFieldBegin('binary_vals', TType.LIST, 9)
      oprot.writeListBegin(TType.STRING, len(self.binary_vals))
      for iter69 in self.binary_vals:
        oprot.writeString(iter69)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # is_null is the only required field.
    if self.is_null is None:
      raise TProtocol.TProtocolException(message='Required field is_null is unset!')
    return

  def __repr__(self):
    # Debug representation (Python 2 dict API).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
|
994,740 | bf6105e89fda11c3f817da77e220852772b23b5f | import bs4
from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup
#Opens webpages from gurps wiki and creates a csv with info about advantages, disadvantages, and skills
#Open, Save, and Parce Webpage
#Save info as csv
def openWebpage(url):
    """Fetch *url* over HTTP and return its body parsed as a BeautifulSoup tree.

    *url* should be a string.
    """
    connection = uReq(url)
    raw_html = connection.read()
    return soup(raw_html, "html.parser")
def searchSoup(soup):
    """Return every table element styled as "wikitable" in the parsed page."""
    wikitable_selector = {"class": "wikitable"}
    return soup.findAll("table", wikitable_selector)
def transformToList(tables):
    """Flatten *tables* into a list of rows, one list of cell strings per <tr>.

    The final character of each cell's text is dropped (a trailing newline in
    the wiki markup -- presumably; verify before reusing on other sources).
    Rows with no <td> cells (e.g. header rows) are skipped, and all tables
    are joined into a single list.
    """
    contents = []
    for table in tables:
        for row in table.find_all('tr'):
            cells = [str(cell.text)[0:-1] for cell in row.find_all('td')]
            if cells:
                contents.append(cells)
    return contents
#takes the list created in "transformToList" and writes a csv file
def createCSV(list, fileName):
    """Write *list* (rows of cell strings) to ``fileName + ".csv"``.

    Every cell is followed by a comma (including the last one, preserving the
    original output layout) and every row ends with a newline.

    Note: the parameter names are kept for backward compatibility even though
    ``list`` shadows the builtin.
    """
    # BUG FIX: the original rebound the fileName parameter to the open file
    # handle and never closed it on error; ``with`` guarantees cleanup.
    with open(fileName + ".csv", "w+") as out:
        for row in list:
            for cell in row:
                out.write(cell + ',')
            out.write('\n')
#Main program here:
def main():
    """Scrape the GURPS wiki advantage/disadvantage/skill tables into CSVs.

    Requires network access: each page is fetched, its "wikitable" tables are
    parsed, and the rows are written to <name>.csv in the working directory.
    """
    #Advantages
    advantagesWebpage = openWebpage("https://gurps.fandom.com/wiki/List_of_Advantages")
    advantagesTables = searchSoup(advantagesWebpage)
    advantages = transformToList(advantagesTables)
    createCSV(advantages,"advantages")
    #Disadvantages
    disadvantagesWebpage = openWebpage("https://gurps.fandom.com/wiki/List_of_Disadvantages")
    disadvantagesTables = searchSoup(disadvantagesWebpage)
    disadvantages = transformToList(disadvantagesTables)
    createCSV(disadvantages,"disadvantages")
    #Skills
    skillsWebpage = openWebpage("https://gurps.fandom.com/wiki/List_of_Skills")
    skillsTables = searchSoup(skillsWebpage)
    skills = transformToList(skillsTables)
    createCSV(skills,"skills")

if __name__ == "__main__":
    main()
|
994,741 | 4c9a1e744958dd7e90b3780d7bcbaa0b9ec026cd | from numpy import *
import scipy.linalg as ln
import matplotlib
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import matplotlib.colors as col
from matplotlib.colors import LogNorm
from matplotlib.ticker import LogLocator,LogFormatter
from matplotlib.backends.backend_pdf import PdfPages
def Eddington1D(k,a,g,T,dz):
    """Solve a 1-D two-stream (Eddington) radiative-transfer system.

    Parameters:
      k, a, g -- per-level extinction, single-scattering albedo and asymmetry
                 factor profiles (equal-length 1-D arrays).
      T       -- per-level temperature profile.
      dz      -- layer thickness (km).

    Returns (Tb, A, B, I01p): brightness temperature along a 53-degree slant
    path, the assembled linear system A, its right-hand side B, and the
    solution vector.

    NOTE: uses the module-level global ``eps`` as the surface emissivity.
    """
    n=k.shape[0]-1
    A=zeros((2*n+3,2*n+3),float)
    B=zeros((2*n+3),float)
    # BUG FIX: the original re-assigned dz=0.25 here, silently discarding the
    # caller-supplied layer thickness; the parameter is now honored.
    for i in range(0,n+1):
        # Thermal source/absorption rows.
        A[2*i+1,2*i+1]=+3*k[i]*(1-a[i])*dz
        A[2*i+1,2*i+2]=1
        A[2*i+1,2*i]=-1
        B[2*i+1]=3*k[i]*(1-a[i])*T[i]*dz
        if(2*i+3<2*n+3):
            # Flux rows use layer-interface (midpoint) optical properties.
            km=0.5*(k[i]+k[i+1])
            am=0.5*(a[i]+a[i+1])
            gm=0.5*(g[i]+g[i+1])
            A[2*i+2,2*i+2]=km*(1-am*gm)*dz
            A[2*i+2,2*i+3]=1
            A[2*i+2,2*i+1]=-1
            B[2*i+2]=0.
    # Top-of-atmosphere boundary condition (2.7 K cosmic background).
    A[2*n+2,2*n+1]=1.
    A[2*n+2,2*n+2]=-1./3
    A[2*n+2,2*n]=-1./3
    B[2*n+2]=2.7
    # Surface boundary condition with emissivity eps and a 292 K skin.
    A[0,0]=(2-eps)/3./eps
    A[0,1]=1.
    A[0,2]=(2-eps)/3./eps
    B[0]=292
    I01p=dot(linalg.pinv(A),B)
    #I01p=ln.solve_banded((2,2), A, B)
    # Brightness temperature seen at 53 degrees incidence.
    Tb=(I01p[2*n]+I01p[2*n+2])/2*cos(53./180.*pi)+I01p[2*n+1]
    return Tb,A,B, I01p
# --- Script configuration -------------------------------------------------
eps=0.9    # surface emissivity (also read as a global by Eddington1D)
dz=0.25    # vertical grid spacing (km)
#k=dat[:,0]
#a=dat[:,1]
#g=dat[:,2]
#T=dat[:,3]
f=19.      # NOTE(review): appears unused below -- confirm before removing
ts=300     # NOTE(review): appears unused below -- confirm before removing
#from radtran import *
import radtran as rt
umu=cos(53/180.*pi)  # cosine of the 53-degree viewing angle
import pickle
# Precomputed scattering fields for typhoon Neoguri; pickled under Python 2,
# hence encoding='bytes'.
kext3d,salb3d,asym3d,t2,height=pickle.load(open('scattFields.neoguri.pickle','rb'), encoding='bytes')
n1=kext3d.shape[2]
# Extinction-coefficient cross-section plot.
levs=[0.1,0.2,0.4,0.8,1.6,3.2,6.4,15]
plt.figure()
cs=plt.contourf(arange(n1),height[:-1,5,0],kext3d[:,5,:,4],levels=levs,norm=LogNorm())
plt.xlim((0,n1-1))
plt.colorbar(cs)
plt.xlabel('Grid Cell')
plt.ylabel('Height')
plt.title('Extinction Coeff (km-1)')
#plt.savefig(pp,orientation='portrait',papertype='landscape',format='pdf')
plt.savefig('extinct.tiff')
# Single-scattering-albedo cross-section plot.
plt.figure()
levs=arange(13)*0.0675+0.2
levs[-1]=1.
cs=plt.contourf(arange(n1),height[:-1,5,0],salb3d[:,5,:,4],levels=levs)
plt.xlim((0,n1-1))
plt.colorbar(cs)
plt.xlabel('Grid Cell')
plt.ylabel('Height')
plt.title('Scattering albedo')
plt.savefig('scatt.tiff')
#plt.savefig(pp,orientation='portrait',papertype='landscape',format='pdf')
# Asymmetry-factor cross-section plot.
plt.figure()
levs=arange(11)*0.07+0.2
cs=plt.contourf(arange(n1),height[:-1,5,0],asym3d[:,5,:,4],levels=levs)
plt.xlim((0,n1-1))
plt.colorbar(cs)
plt.xlabel('Grid Cell')
plt.ylabel('Height')
plt.title('Asymmetry factor')
plt.savefig('asymmetry.tiff')
dx=0.25
dz=0.25
# NOTE(review): dx is immediately overwritten -- 2 km horizontal spacing wins.
dx=2.
def getSlantProp(a,hm,ny,nx,dx):
    """Extract a slant-path profile of field ``a`` along a 53-degree view.

    For each model level the horizontal grid offset is chosen so the sampled
    points fall on the slanted line of sight anchored at (ny, nx); the raw
    profile is then interpolated onto a uniform 80-level, 0.25 km grid.

    Returns (interpolated profile, raw slant profile).
    """
    slope = tan(53. / 180 * pi)
    samples = []
    for lev in range(a.shape[0]):
        shift = int((20 - hm[lev]) * slope / dx)
        samples.append(a[lev, ny, nx + shift, 4])
    raw = array(samples)
    fine_levels = arange(80) * 0.25 + .125
    return interp(fine_levels, hm, raw), raw
t1L=[]   # brightness temperatures from the reference radtran solver
t2L=[]   # brightness temperatures from the Eddington linear system
kb2d=zeros((100,80),float)     # adjoint-derived albedo Jacobian of Tb
kb2d_fd=zeros((100,80),float)  # finite-difference albedo Jacobian of Tb
# NOTE(review): nesting below was reconstructed from variable usage (the
# finite-difference loop indexes kb2d_fd[nx-25,...], so it must sit inside
# the nx loop); confirm against the original file's indentation.
for nx in range(25,125):
    ny=4
    # Mid-layer heights for this column.
    hm=0.5*height[:-1,ny,nx]+0.5*height[1:,ny,nx]
    # Slant-path profiles of extinction, albedo, asymmetry and temperature.
    k,k2=getSlantProp(kext3d,hm,ny,nx,dx)
    a,a2=getSlantProp(salb3d,hm,ny,nx,dx)
    g,g2=getSlantProp(asym3d,hm,ny,nx,dx)
    T=interp(arange(80)*0.25+.125,hm,t2[:,ny,nx])
    #stop
    eps=0.9
    emis=0.9
    ebar=0.9
    umu=cos(53/180.*pi)
    nlyr=k.shape[0]-1
    lyrhgt=0.25*arange(nlyr+1)
    fisot=2.7
    Ts=T[0]
    # Delta-Eddington similarity scaling.
    # NOTE(review): gp/kp/ap are never passed to the solvers below -- confirm
    # whether the scaled properties were meant to be used.
    gp=g/(1+g)
    kp=(1-a*g**2)*k
    ap=(1-g**2)*a/(1-a*g**2)
    # Reference Tb from the Fortran radtran solver.
    tb = rt.radtran(umu,nlyr,Ts,T,lyrhgt,k[:-1],a[:-1],g[:-1],fisot,emis,ebar)
    # Tb from the Eddington linear system.
    abig,b = rt.seteddington1d(k[:],a[:],g[:],T,eps,dz,Ts)
    incang=53.
    i01p=linalg.solve(abig,b)
    tb2 = rt.tbf90(i01p,T,incang,k[:],a[:],g[:],eps,dz)
    # Reverse-mode (adjoint) sensitivities of tb2.
    tbb=1.0
    lam2=i01p
    i01pb,kb,ab,gb = rt.tbf90_b(tb2,tbb,i01p,T,incang,k,a,g,eps,dz)
    print(i01pb)
    y=0.
    yb=1.
    lam1=i01pb
    lam11=dot(i01pb,linalg.inv(abig))
    kb1,ab1,gb1 = rt.suml1al2_b(y,yb,lam11,lam2,k,a,g,T,eps,dz)
    y=0.
    yb=1
    kb2,ab2 = rt.suml1b_b(y,yb,lam11,k,a,g,T,eps,dz)
    print(tb,tb2)
    t1L.append(tb)
    t2L.append(tb2)
    # Combined adjoint Jacobian of Tb with respect to albedo.
    kb2d[nx-25,:]=ab-ab1+ab2
    # Finite-difference check: perturb each layer's albedo by 0.01.
    n=80
    for i in range(n):
        ap=a.copy()
        ap[i]=ap[i]+0.01
        abig,b = rt.seteddington1d(k[:],ap[:],g[:],T,eps,dz,Ts)
        incang=53.
        i01p=linalg.solve(abig,b)
        tb21 = rt.tbf90(i01p,T,incang,k[:],ap[:],g[:],eps,dz)
        kb2d_fd[nx-25,i]=(tb21-tb2)/0.01
# Compare the two solvers and the two Jacobians.
plt.figure()
plt.plot(t1L)
plt.plot(t2L)
plt.figure()
plt.subplot(211)
plt.pcolormesh(kb2d[:,::-1].T,cmap='RdBu')
plt.subplot(212)
plt.pcolormesh(kb2d_fd[:,::-1].T,cmap='RdBu')
|
994,742 | b8e7145ef91bb420d33d43d6bbde0cb582bdd551 | from __future__ import unicode_literals
from django.utils.translation import gettext_lazy
from django.views.generic import ListView
from django.http import HttpResponseRedirect
from cradmin_legacy.registry import cradmin_instance_registry
class RoleSelectView(ListView):
    """List the roles the current user may administer and let them pick one.

    If the user has exactly one role (and autoredirect is enabled) they are
    sent straight to that role's front page instead of seeing the chooser.
    """
    paginate_by = 30
    template_name = 'cradmin_legacy/roleselect.django.html'
    context_object_name = 'roles'
    pagetitle = gettext_lazy('What would you like to edit?')
    # Set to False to always show the chooser, even for single-role users.
    autoredirect_if_single_role = True
    list_cssclass = 'cradmin-legacy-roleselect-list cradmin-legacy-roleselect-list-flat'

    def get_queryset(self):
        # Roles come from the cradmin instance registered for this request.
        cradmin_instance = cradmin_instance_registry.get_current_instance(self.request)
        return cradmin_instance.get_rolequeryset()

    def get(self, *args, **kwargs):
        cradmin_instance = cradmin_instance_registry.get_current_instance(self.request)
        # NOTE(review): get_queryset() is evaluated twice on this path
        # (count + first), costing an extra query.
        if self.get_autoredirect_if_single_role() and self.get_queryset().count() == 1:
            only_role = self.get_queryset().first()
            return HttpResponseRedirect(str(cradmin_instance.rolefrontpage_url(
                cradmin_instance.get_roleid(only_role))))
        else:
            # Add cradmin_instance to request just like cradmin_legacy.decorators.cradminview.
            # Convenient when overriding standalone-base.django.html and using the current
            # CrInstance to distinguish multiple crinstances.
            self.request.cradmin_instance = cradmin_instance
            return super(RoleSelectView, self).get(*args, **kwargs)

    def get_autoredirect_if_single_role(self):
        # Hook for subclasses to decide redirect behavior dynamically.
        return self.autoredirect_if_single_role

    def get_pagetitle(self):
        # Hook for subclasses to override the page title.
        return self.pagetitle

    def get_list_cssclass(self):
        # Hook for subclasses to override the list CSS class.
        return self.list_cssclass

    def get_context_data(self, **kwargs):
        context = super(RoleSelectView, self).get_context_data(**kwargs)
        context['pagetitle'] = self.get_pagetitle()
        context['list_cssclass'] = self.get_list_cssclass()
        return context
|
994,743 | 9fc768241e6b971cffee0a6f9831f0b2b1d575ec | import urllib.request
import re
def gethref(url):
    """Download *url* and return all href attribute values found in the page.

    NOTE(review): the page is decoded as GBK, which only suits certain
    Chinese sites; other encodings will raise UnicodeDecodeError.
    """
    html_content = urllib.request.urlopen(url).read()
    r = re.compile('href="(.*?)"')
    result = r.findall(html_content.decode('GBK'))
    return result
def find_links(website):
    """Return all href values on *website* (same contract as gethref).

    The body previously duplicated gethref line-for-line; it now delegates so
    the extraction logic lives in one place.
    """
    return gethref(website)
if __name__ == '__main__':
    # Smoke test: fetch two pages and dump their links (requires network).
    print(find_links('http://www.sina.com/'))
    print(gethref('http://www.taobao.com'))
994,744 | 0e13133348e7f75e8610697c84f902833394a50a | # -*- coding: utf-8 -*-
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import Q
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
class Timetable(models.Model):
    """A named timetable owned by a school; the slug is derived from the name."""
    school = models.ForeignKey("schools.School")
    name = models.CharField(max_length=100, unique=True)
    slug = models.CharField(max_length=100, unique=True)

    class Meta:
        verbose_name = _('timetable')
        verbose_name_plural = _('timetables')

    def __unicode__(self):
        return unicode(self.name)

    def save(self, *args, **kwargs):
        # The slug is regenerated from the (unique) name on every save.
        self.slug = slugify(unicode(self))
        super(Timetable, self).save(*args, **kwargs)
class Time(models.Model):
    """A start/end period belonging to a Timetable."""
    timetable = models.ForeignKey("timetables.Timetable",
                                  verbose_name=_('timetable'))
    start = models.TimeField()
    end = models.TimeField()

    class Meta:
        ordering = ['start', ]

    def __unicode__(self):
        return u'{0} - {1}'.format(unicode(self.start), unicode(self.end))

    def clean(self):
        """Validate that start < end and that the period does not overlap any
        other Time on the same timetable.

        Raises ValidationError on either failure.
        """
        if self.end <= self.start:
            error_msg = _('The start must be before the end')
            raise ValidationError(error_msg)
        # Any Time that contains our start, or starts inside our interval.
        intersection = Time.objects.filter(Q(
            start__lte=self.start, end__gte=self.start
        ) | Q(
            start__gte=self.start, start__lt=self.end
        ), timetable__name=self.timetable.name)
        if self.pk:
            # BUG FIX: QuerySet.exclude returns a new queryset; the original
            # discarded it, so editing an existing Time always reported an
            # overlap with itself.
            intersection = intersection.exclude(pk=self.pk)
        if intersection.exists():
            error_msg = _('Schedule not valid')
            raise ValidationError(error_msg)
class ClassTimetable(models.Model):
    """One-to-one link between a class and the timetable it follows."""
    classroom = models.ForeignKey('classes.Class', verbose_name=_('classroom'),
                                  unique=True)
    timetable = models.ForeignKey('timetables.Timetable',
                                  verbose_name=_('timetable'))
class ClassSubjectTime(models.Model):
    """A weekly slot (weekday + Time) assigned to a class subject."""
    # weekday
    WEEKDAY_CHOICES = (
        ('mon', _('Monday')),
        ('tue', _('Tuesday')),
        ('wed', _('Wednesday')),
        ('thu', _('Thursday')),
        ('fri', _('Friday')),
        ('sat', _('Saturday')),
        ('sun', _('Sunday')),
    )
    weekday = models.CharField(_('weekday'), max_length=3,
                               choices=WEEKDAY_CHOICES)
    class_subject = models.ForeignKey('classes.ClassSubject',
                                      verbose_name=_('class subject'))
    time = models.ForeignKey("timetables.Time", verbose_name=_('time'))

    @classmethod
    def get_classes_given(cls, class_subject, subperiod):
        """Count the lessons of *class_subject* that fall inside *subperiod*.

        ``subperiod.get_days()`` is expected to yield per-weekday day counts
        indexed Monday=0 .. Sunday=6.
        """
        weekdays = {'mon': 0, 'tue': 1, 'wed': 2, 'thu': 3, 'fri': 4,
                    'sat': 5, 'sun': 6}
        days = subperiod.get_days()
        classes_given = 0
        for cst in cls.objects.filter(class_subject=class_subject):
            classes_given += days[weekdays[cst.weekday]]
        return classes_given

    def __unicode__(self):
        return u'{0} - {1}'.format(self.time, self.class_subject)

    def clean(self):
        """Reject a slot that duplicates another one for the same classroom,
        weekday and time."""
        class_subject_time = ClassSubjectTime.objects.filter(
            weekday=self.weekday,
            class_subject__classroom=self.class_subject.classroom,
            time=self.time
        )
        if self.pk:
            class_subject_time = class_subject_time.exclude(pk=self.pk)
        if class_subject_time.exists():
            # BUG FIX: the original called ugettext(), which is never imported
            # in this module (only ugettext_lazy as _), so this path raised
            # NameError instead of the intended ValidationError.
            raise ValidationError(_(
                "A class subject time already exist"))

    class Meta:
        verbose_name = _('class subject time')
        verbose_name_plural = _('class subject times')
        unique_together = ('class_subject', 'weekday', 'time')
|
994,745 | 4c1f31c0e9250f63b6b0525494a4756e8c779bb8 | # -*- coding: utf-8 -*-
# wakeup.py
import json
import simplejson
import subprocess
from run import csrf
from lib.files import *
from lib.config import *
from flask import request
from flask import redirect, url_for
from lib.upload_file import uploadfile
from views.login import login_required
from werkzeug.utils import secure_filename
from flask import Blueprint, render_template
wakeup = Blueprint('wakeup', __name__,
url_prefix = '/wakeup',
template_folder='templates',
static_folder='static')
@csrf.exempt
@wakeup.route('/wakeup', methods=["GET", "POST"])
@login_required
# wakeup test
def wakeup_index():
    """Run the wakeup-word test script with user-supplied parameters and
    render its (tab-expanded) output lines."""
    if request.method == 'POST':
        threshold = request.form['threshold']
        radio_wakeup = request.form['radio_value']
        if threshold and radio_wakeup:
            info_list = []
            file_name = DATA_AREA + g.name + 'wakeup'
            result_path = DATA_AREA + g.name + RESULT_PATH
            # NOTE(review): shell strings are built from request data --
            # command-injection risk; validate the parameters or invoke the
            # script with an argument list and shell=False.
            os.system("rm -rf {}".format(result_path))
            shell_script = "{0} {1} {2}".format(file_name, threshold, radio_wakeup)
            shell_info = subprocess.Popen(['./wakeup_test/wakeup_test_check1.sh ' + shell_script], stdout = subprocess.PIPE, shell = True).stdout.read()
            shell_info = str(shell_info).split(r"\n")
            for i in shell_info:
                if i == "":
                    continue
                # BUG FIX: the original called info_list.resultend(), which is
                # not a list method and raised AttributeError; append was meant.
                info_list.append(i.replace(r"\t", " "))
            return render_template('wakeup/wakeup_test.html', info = info_list)
        return render_template('wakeup/wakeup_test.html', info = ["Hi, 缺少参数!"])
    return render_template('wakeup/wakeup_test.html')
@csrf.exempt
@wakeup.route("/wakeup_upload", methods=['GET', 'POST'])
# wakeup file upload
def WakeupUpload():
    """Accept .pcm/config uploads for the wakeup test (POST) or redirect (GET).

    NOTE(review): a POST without a file falls through both branches and
    returns None, which Flask treats as an error -- confirm intended.
    """
    if request.method == 'POST':
        files = request.files['file']
        if files:
            filename = secure_filename(files.filename) #check file name
            filename = SetupFileName(filename)
            file_type = files.content_type
            if not CheckFileType(files.filename):
                result = uploadfile(name=filename, type=file_type, size=0, not_allowed_msg="文件类型不允许")
            else:
                # Audio goes to the user's pcm directory, everything else to
                # the config directory.
                if filename.endswith(".pcm"):
                    dir_name = g.name + PCM_SRC
                    dir_name = os.path.join(DATA_AREA, dir_name)
                else:
                    dir_name = g.name + CONFIG_SRC
                    dir_name = os.path.join(DATA_AREA, dir_name)
                if not os.path.exists(dir_name):
                    os.makedirs(dir_name)
                uploaded_file_path = os.path.join(dir_name, filename)
                files.save(uploaded_file_path)
                size = os.path.getsize(uploaded_file_path)
                result = uploadfile(name=filename, type=file_type, size=size)
            return simplejson.dumps({"files": [result.get_file()]})
    if request.method == 'GET':
        files = ''
        if not os.path.exists(WAKEUP_FOLDER):
            return redirect(url_for('index'))
        for f in os.listdir(WAKEUP_FOLDER):
            if os.path.isfile(os.path.join(WAKEUP_FOLDER, f)):
                files = f
        # NOTE(review): "wakeupindex" matches no view here (the handler is
        # wakeup_index on the 'wakeup' blueprint) -- this likely raises
        # BuildError at runtime; confirm the endpoint name.
        return redirect(url_for("wakeupindex"))
# del wakeup file
@csrf.exempt
@wakeup.route("/DelFile/<data>", methods=['GET', 'POST'])
def DelFile(data):
    """Delete uploaded wakeup files.

    *data* is a JSON object with exactly one key:
      - "del":    delete exactly the listed files;
      - "pcm":    keep the listed pcm files, delete every other pcm file;
      - "result": keep the listed result files, delete every other one.
    Entries have the form "<subdir>*<filename>".

    NOTE(review): filenames arrive straight from the URL and are joined into
    filesystem paths -- path-traversal risk; they should be sanitized.
    """
    dir_head = DATA_AREA + request.cookies.get("remember_token").split("|")[0] + "_wakeup/"
    pcm_head = DATA_AREA + g.name + PCM_SRC
    result_head = DATA_AREA + g.name + RESULT_PATH
    data1 = json.loads(data)
    # Unpack the single action key (raises ValueError if not exactly one).
    key, = data1
    if key == "del":
        if len(data1[key]) == 0:
            return "亲,没有文件可以删除!"
        for name in data1[key]:
            file_name = name.split("*")
            abs_file = os.path.join((dir_head + file_name[0]), file_name[-1])
            if os.path.exists(abs_file):
                os.remove(abs_file)
        return "删除成功!!!"
    if key == "pcm":
        # Keep the named files; delete the rest of the pcm directory.
        file_set = set(data1[key])
        pcm_list = []
        if os.path.exists(pcm_head):
            pcm_list = os.listdir(pcm_head)
        for name in file_set:
            file_name = name.split("*")
            if file_name[-1] in pcm_list:
                pcm_list.remove(file_name[-1])
        for i in pcm_list:
            pcm_file = os.path.join(pcm_head, i.strip())
            if os.path.exists(pcm_file):
                os.remove(pcm_file)
        return "删除成功!!!"
    if key == "result":
        # Keep the named files; delete the rest of the result directory.
        file_set = set(data1[key])
        result_list = []
        if os.path.exists(result_head):
            result_list = os.listdir(result_head)
        for name in file_set:
            file_name = name.split("*")
            if file_name[-1] in result_list:
                result_list.remove(file_name[-1])
        for i in result_list:
            result_file = os.path.join(result_head, i.strip())
            if os.path.exists(result_file):
                os.remove(result_file)
        return "删除成功!!!"
    return "删除失败,请重试!"
# clear wakeup file
@csrf.exempt
@wakeup.route("/clear_wakeup", methods=['GET'])
def clear_wakeup():
    """Remove every file under the current user's wakeup directory (unix only)."""
    dir_head = DATA_AREA + g.name + WAKEUP_SRC
    # NOTE(review): the path is interpolated into a shell command; if g.name
    # can contain shell metacharacters this is command injection -- prefer
    # shutil or subprocess with a list argument.
    os.system("rm -rf {}/*".format(dir_head)) #unix
    return render_template('result/wakeup_file_upload.html')
994,746 | faee622d03dfafaf7cad75b6701577ac04bbf2dc | from summpackage.summtest import g
print(g) |
994,747 | 4f11c0a2e0e62c0a52ceb21fe107aed81a010632 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import matplotlib
matplotlib.use('Agg')
import json, urllib, numpy as np, matplotlib.pylab as plt, matplotlib.ticker as mtick, sys
import sunpy.map
from astropy.io import fits
from sunpy.cm import color_tables as ct
import sunpy.wcs as wcs
import datetime
import matplotlib.dates as mdates
import matplotlib.colors as mcol
import matplotlib.patches as ptc
from matplotlib.dates import *
import math
import scipy.ndimage.interpolation as interpolation
import subprocess
import random
import chainer
from chainer import datasets
from chainer import serializers
from chainer import links as L
from chainer import functions as F
from chainer import Variable, optimizers
import chainer.cuda as xp
# --- Run configuration ----------------------------------------------------
image_wavelengths = [211]   # AIA channels of interest
# NOTE(review): the three optimizers and start_dcgan_at_epoch are assigned
# here but never used below -- presumably leftovers from a training script.
optimizer_p = chainer.optimizers.SMORMS3()
optimizer_d = chainer.optimizers.SMORMS3()
optimizer_g = chainer.optimizers.SMORMS3()
start_dcgan_at_epoch=1000
image_size = 1023           # working image width/height in pixels
dt_hours = 4
gpuid=-1                    # < 0 means run on CPU
if gpuid >= 0:
    chainer.cuda.get_device(gpuid).use()
# NOTE(review): duplicate of the assignment a few lines above.
image_size = 1023
def get_sun_image(time, wavelength):
    """Fetch the SDO/AIA level-1 image closest to *time* for *wavelength*.

    Queries the JSOC metadata service, downloads the FITS file, and returns
    the exposure-normalized image rescaled to image_size pixels, or None on
    any failure (bad exposure, bad quality flag, network/parse error).
    """
    try:
        time_str = time.strftime("%Y.%m.%d_%H:%M:%S")
        url = "http://jsoc.stanford.edu/cgi-bin/ajax/jsoc_info?ds=aia.lev1[{}_TAI/12s][?WAVELNTH={}?]&op=rs_list&key=T_REC,CROTA2,CDELT1,CDELT2,CRPIX1,CRPIX2,CRVAL1,CRVAL2&seg=image_lev1".format(time_str, wavelength)
        # NOTE(review): only "import urllib" appears at the top of this file;
        # urllib.request may only be bound via a side effect of another
        # import -- confirm, otherwise this raises AttributeError on Python 3.
        response = urllib.request.urlopen(url)
        data = json.loads(response.read().decode())
        filename = data['segments'][0]['values'][0]
        url = "http://jsoc.stanford.edu"+filename
        aia_image = fits.open(url, cached=True) # download the data
        aia_image.verify("fix")
        exptime = aia_image[1].header['EXPTIME']
        if exptime <= 0:
            # Unusable exposure.
            return None
        quality = aia_image[1].header['QUALITY']
        if quality !=0:
            print(time, "bad quality",file=sys.stderr)
            return None
        original_width = aia_image[1].data.shape[0]
        # Rescale to the common working resolution and normalize by exposure.
        return interpolation.zoom(aia_image[1].data, image_size / float(original_width)) / exptime
    except Exception as e:
        print(e)
        return None
"""
Returns the brightness-normalized image of the Sun
depending on the wavelength.
"""
def get_normalized_image_variable(time, wavelength):
img = get_sun_image(time, wavelength)
if img is None:
return None
img = img[np.newaxis, np.newaxis, :, :]
img = img.astype(np.float32)
x = Variable(img)
if gpuid >= 0:
x.to_gpu()
if wavelength == 211:
ret = F.sigmoid(x / 100)
elif wavelength == 193:
ret = F.sigmoid(x / 300)
elif wavelength == 94:
ret = F.sigmoid(x / 30)
else:
ret = F.log(F.max(1,x))
return ret
"""
Plot the image of the sun using the
SDO-AIA map.
"""
def plot_sun_image(img, filename, wavelength, title = '', vmin=0.5, vmax = 1.0):
if gpuid >= 0:
img = img.get()
cmap = plt.get_cmap('sdoaia{}'.format(wavelength))
plt.title(title)
plt.imshow(img,cmap=cmap,origin='lower',vmin=vmin, vmax=vmax)
plt.savefig(filename)
plt.close("all")
# Render six consecutive 12-second frames starting 2015-01-01.
t = datetime.datetime(2015,1,1)
for ctr in range(6):
    t1 = t + ctr * datetime.timedelta(seconds = 12)
    img = get_normalized_image_variable(t1, 211)
    if img is None:
        print("image missing: ", t1)
        # BUG FIX: the original fell through and dereferenced img.data on a
        # missing image, raising AttributeError; skip to the next frame.
        continue
    plt.rcParams['figure.figsize'] = (16.0,16.0)
    plot_sun_image(img.data[0,0], "slowmotion-{}.png".format(ctr), 211, str(t1))
|
994,748 | 8421f655c229895cc780a658431d98319973bd0f | import time
from time import sleep
import pytest
from application.snake import Snake
#if tests fail, may have to increase sleepTime to give game instance more time to execute.
sleepTime = 3  # seconds to wait for the game instance to finish

def test_start():
    """Run a game to completion and verify it reaches the finished state.

    NOTE(review): timing-based -- may be flaky on slow machines (see
    sleepTime above).
    """
    #create snake game instance, let it run until game over, then test if game over happened.
    snake = Snake()
    snake.snakeStart()
    time.sleep( sleepTime )
    assert snake.gameState == "finished"
def test_direction():
    """Change direction mid-game and verify the game still finishes.

    NOTE(review): timing-based -- may be flaky on slow machines.
    """
    #create snake game instance, change direction to left, wait till game over, test if game over happened after delay.
    snake = Snake()
    snake.snakeStart()
    snake.changeDirection(2)
    time.sleep( sleepTime )
    assert snake.gameState == "finished"
def test_score():
    """Place a fruit in the snake's path and verify the score increases.

    NOTE(review): timing-based -- may be flaky on slow machines.
    """
    #create snake game instance, create fruit at specific location, then test if snake grabbed fruit by seeing if score increased.
    snake = Snake()
    snake.snakeStart()
    snake.gridLayout[1][8] = snake.fruitSymbol
    time.sleep( sleepTime )
    assert snake.score[0] > 0
|
994,749 | 4a3d96517baf56f926fe96478c1d25b147991fb1 | #! /usr/bin/env python3
import numpy as np
from PIL import Image
def read_image(image_path):
    """Load the image at *image_path* into a writable numpy array."""
    pil_image = Image.open(image_path)
    return np.asarray(pil_image).copy()
if __name__ == '__main__':
    import argparse
    from steganography_decoder import Decoder
    from steganography_encoder import Encoder

    # Bit masks 0b10000000 .. 0b00000001, most-significant bit first.
    values = []
    for i in range(7, -1, -1):
        res = b'\x01'[0] << i
        values.append(res)

    parser = argparse.ArgumentParser()
    parser.add_argument('--action', required=True)
    parser.add_argument('--path', required=True)
    parser.add_argument('--output')
    parser.add_argument('--message')
    args = parser.parse_args()
    action = args.action.lower()
    output_path = args.output
    message = args.message
    file_path = args.path
    if action != 'encode' and action != 'decode':
        parser.error('action is not valid')
    image_array = read_image(file_path)
    if action == 'decode':
        # Recover the hidden message from the image's pixel bits.
        decoder = Decoder(values, image_array)
        res = decoder.get_values()
        print('Result: ' + res)
    if action == 'encode':
        if not output_path or not message:
            parser.error('Missing output path and message')
        # Embed the message and write the modified image to output_path.
        encoder = Encoder(values, image_array)
        encoded_image = encoder.write_msg(image_array, message)
        new_image = Image.fromarray(encoded_image)
        new_image.save(output_path)
994,750 | 595182e5f3c2e5829e7bbef3d7bf7d3bc4576c6f | from typing import List
from logging import Logger
from telegram.ext import Dispatcher, CommandHandler, Filters
# Commands
from .admin import admin
from .registrasi import registrasi
from .link import link
from .formulir import formulir
from .donasi import donasi
from .elearning import elearning
from .courses import courses
from .eula import eula
from .about import about
from .cancel import cancel
from .inline_help import inline_help
from .reset import reset
from .reset_token import reset_token
from .start_elearning import start_elearning
from .start import start
# Restrict every command to private chats.
private_filter = Filters.private

class CommandMixin(object):
    """Mixin that wires the bot's CommandHandlers into a Dispatcher."""
    # Logger supplied by the composing class.
    logger: Logger = None
    COMMANDS_GROUP: int = 0
    # Order matters: the token and inline-help /start variants must precede
    # the generic /start handler so their regex filters get a chance to match.
    COMMANDS: List[CommandHandler] = [
        CommandHandler("admin", admin, private_filter),
        CommandHandler("link", link, private_filter),
        CommandHandler("formulir", formulir, private_filter),
        CommandHandler("registrasi", registrasi, private_filter),
        CommandHandler("about", about, private_filter),
        CommandHandler("elearning", elearning, private_filter),
        CommandHandler("kursus", courses, private_filter),
        CommandHandler("eula", eula, private_filter),
        CommandHandler("reset", reset, private_filter),
        CommandHandler("reset_token", reset_token, private_filter),
        CommandHandler("donasi", donasi, private_filter),
        CommandHandler("cancel", cancel, private_filter),
        CommandHandler(
            "start",
            start_elearning,
            filters=Filters.private & Filters.regex(r"^\/start TOKEN-[a-z0-9]{32}$"),
        ),
        CommandHandler("start", inline_help, Filters.regex(r"^/start inline-help$")),
        CommandHandler("start", start, private_filter),
    ]

    def register_commands(self, dispatcher: Dispatcher):
        """Register every handler in COMMANDS on *dispatcher*.

        Returns True on success, False on error.
        NOTE(review): if COMMANDS were empty this would fall through the
        ``if`` and implicitly return None rather than True/False.
        """
        try:
            if self.COMMANDS:
                for conversation in self.COMMANDS:
                    dispatcher.add_handler(conversation, group=self.COMMANDS_GROUP)
                self.logger.info("Commands added!")
                return True
        except Exception as e:
            self.logger.exception(e)
            return False
|
994,751 | ee758f56035a5162f6f9b0b107468c36580bc104 | import os,subprocess,psutil,re,shutil,datetime,sys,glob
import urllib,urllib2,urllib3
from bioservices.kegg import KEGG
from socket import error as SocketError
import errno
from Bio import SeqIO
import xmltodict
from xml.dom import minidom
from xml.parsers.expat import ExpatError
import random, time
from goatools import obo_parser
import csv
import json
import pandas as pd
import requests
from collections import Counter
from itertools import combinations
from xml.etree import cElementTree as ET
import pickle
import cPickle
import operator
from compileSkylineDataToFit import compileSkylineFile
import numpy as np
from statistics import mean
from presencePepSeqInHuman import presencePepSeqHuman
from ncbiGeneAPI import ncbiGeneExp
from addModCol import addModCol
from addSelCol import addSelCol
from maketotalassaypep import totalAssayPep
from preloadData import preLoadJsonData
from uploadDataElasticSearch import uploadData
import ctypes
from generate_pre_downloadable_file import preDownloadFile
def _join_unique(chunks):
    """Split each entry on '|', dedupe the trimmed non-empty parts, re-join with '|'.

    Returns 'NA' when `chunks` itself is empty, mirroring the per-field
    default used throughout the merge (an all-empty but non-empty list still
    yields '' exactly as the original inline code did).
    """
    if len(chunks) == 0:
        return 'NA'
    return '|'.join(list(set([l.strip() for k in chunks for l in k.split('|') if len(l.strip()) > 0])))


def makemergedict(listOfResourceFile, colname, humanuniprotdic, humankdeggic):
    """Build the peptide and human-annotation lookup dicts from the resource files.

    listOfResourceFile: paths of tab-delimited peptide report files.
    colname: column names to pull from each row; index 0 is the accession key,
        index 4 the peptide sequence, and the last entry the comma-separated
        human UniProtKB accessions.
    humanuniprotdic / humankdeggic: paths of pickled dicts holding human
        UniProt functional data and human KEGG pathway data respectively.

    Returns (mergedictdata, humanmousemergedic):
        mergedictdata: accession -> list of peptide sequences;
        humanmousemergedic: accession -> 15 pipe-joined human annotation fields.
    """
    mergedictdata = {}
    humanmousemergedic = {}
    humanfuncdict = cPickle.load(open(humanuniprotdic, 'rb'))
    humanKEGGdict = cPickle.load(open(humankdeggic, 'rb'))
    for fitem in listOfResourceFile:
        with open(fitem, 'r') as pepfile:
            reader = csv.DictReader(pepfile, delimiter='\t')
            for row in reader:
                info = []
                for i in colname:
                    info.append(str(row[i]).strip())
                # setdefault replaces the deprecated dict.has_key() branch
                # (has_key was removed in Python 3; `in` / setdefault work in both).
                mergedictdata.setdefault(info[0].strip(), []).append(info[4])
                temphumanunilist = info[-1].split(',')
                hPNlist = []
                hGNlist = []
                hdislist = []
                hunidislist = []
                hunidisURLlist = []
                hdisgenlist = []
                hdisgenURLlist = []
                hDruglist = []
                hGoIDList = []
                hGoNamList = []
                hGoTermList = []
                hGoList = []
                hKeggList = []
                hKeggdetailsList = []
                # Collect the annotation fields of every mapped human accession.
                for h in temphumanunilist:
                    if h in humanfuncdict:
                        hPNlist.append(humanfuncdict[h][0])
                        hGNlist.append(humanfuncdict[h][1])
                        hdislist.append(humanfuncdict[h][4])
                        hunidislist.append(humanfuncdict[h][5])
                        hunidisURLlist.append(humanfuncdict[h][6])
                        hdisgenlist.append(humanfuncdict[h][7])
                        hdisgenURLlist.append(humanfuncdict[h][8])
                        hDruglist.append(humanfuncdict[h][9])
                        hGoIDList.append(humanfuncdict[h][10])
                        hGoNamList.append(humanfuncdict[h][11])
                        hGoTermList.append(humanfuncdict[h][12])
                        hGoList.append(humanfuncdict[h][-1])
                    if h in humanKEGGdict:
                        hKeggList.append(humanKEGGdict[h][0])
                        hKeggdetailsList.append(humanKEGGdict[h][1])
                hPN = _join_unique(hPNlist)
                hGN = _join_unique(hGNlist)
                hdis = _join_unique(hdislist)
                hunidis = _join_unique(hunidislist)
                hunidisURL = _join_unique(hunidisURLlist)
                hdisgen = _join_unique(hdisgenlist)
                hdisgenURL = _join_unique(hdisgenURLlist)
                hDrug = _join_unique(hDruglist)
                hGoiddata = _join_unique(hGoIDList)
                hGonamedata = _join_unique(hGoNamList)
                hGotermdata = _join_unique(hGoTermList)
                hGodata = _join_unique(hGoList)
                # KEGG pathway names arrive as lists (not pipe-delimited
                # strings) and keep empty entries, so they are flattened
                # differently from the other fields.
                hKeggdata = 'NA'
                if len(hKeggList) > 0:
                    hKeggdata = '|'.join(list(set([str(l).strip() for k in hKeggList for l in k])))
                hKeggdetails = _join_unique(hKeggdetailsList)
                humanmousemergedic[info[0].strip()] = [str(hPN), str(hGN), str(hdis), str(hunidis), str(hunidisURL),
                    str(hdisgen), str(hdisgenURL), str(hDrug), str(info[-1]), str(hGoiddata), str(hGonamedata), str(hGotermdata),
                    str(hGodata), str(hKeggdata), str(hKeggdetails)]
        print (str(fitem), "data dictionary job done", str(datetime.datetime.now()))
    return mergedictdata, humanmousemergedic
def runprog():
    """Report whether the Skyline compilation step produced new data.

    The compileSkylineFile() call is currently disabled, so this always
    reports success (1).
    """
    # NOTE(review): the original pipeline ran compileSkylineFile() here and
    # returned its status (0 meant "no new data"); re-enable when needed.
    return 1
if __name__ == '__main__':
    # NOTE(review): leading indentation was lost in this copy of the file; the
    # nesting below is reconstructed from control-flow keywords and variable
    # scoping -- verify against version control before relying on it.
    colname=['UniProtKB Accession','Protein','Gene','Organism','Peptide Sequence','Summary Concentration Range Data','All Concentration Range Data','All Concentration Range Data-Sample LLOQ Based','Peptide ID',\
    'Special Residues','Molecular Weight','GRAVY Score','Transitions','Retention Time','Analytical inofrmation',\
    'Gradients','AAA Concentration','CZE Purity','Panel','Knockout','LLOQ','ULOQ','Sample LLOQ','Protocol','Trypsin','QC. Conc. Data','Human UniProtKB Accession']
    print (datetime.datetime.now())
    print ("Update mother file job starts now")
    #increase the field size of CSV
    csv.field_size_limit(int(ctypes.c_ulong(-1).value // 2))
    runcomplete=runprog()
    if runcomplete==1:
        #get home directory path
        homedir = os.path.normpath(os.getcwd() + os.sep + os.pardir)
        outfilefilename ='outfilefile.csv'
        filename='ReportBook_mother_file.csv'
        kdicfile='keggdic.obj'
        filepath = os.path.join(homedir, 'src/qmpkbmotherfile', filename)
        # #copy mother file from its source to working directory
        if os.path.exists(filepath):
            movefilepath=os.path.join(homedir, 'updatefile', filename)
            if os.path.exists(movefilepath):
                os.remove(movefilepath)
            if not os.path.exists(movefilepath):
                shutil.copy2(filepath, movefilepath)
            #create backup folder before update and then move that folder with old version mother file for backup
            mydate = datetime.datetime.now()
            folder_name="version_"+mydate.strftime("%B_%d_%Y_%H_%M_%S")
            if not os.path.exists('./backup/'+folder_name):
                os.makedirs('./backup/'+folder_name)
            if not os.path.exists('./backup/'+folder_name+'/ReportBook_mother_file.csv'):
                shutil.copy2(movefilepath, './backup/'+folder_name+'/ReportBook_mother_file.csv')
            listOfResourceFile=['mouse_report_peptrack_data.csv']
            humanuniprotdic='humanUniprotfuncinfodic.obj'
            humankdeggic='humankeggdic.obj'
            mergedictdata,humanmousemergedic=makemergedict(listOfResourceFile,colname,humanuniprotdic,humankdeggic)
            if len(mergedictdata)>0:
                print ("Data formatting and checking pep seq present in uniprot specified seq, job starts",str(datetime.datetime.now()))
                uniidlist=[]
                dicmrm={}
                orgidDic={}
                unqisocheckdic={}
                unifuncdic={}
                countProt=0
                countPep=0
                RETRY_TIME = 20.0
                curr_dir = os.getcwd()
                below_curr_dir=os.path.normpath(curr_dir + os.sep + os.pardir)
                totalUniId=list(set(mergedictdata.keys()))
                tempcanonicalUnId=[]
                canonisounidic={}
                # Group isoform accessions (e.g. P12345-2) under the canonical accession.
                for tuid in totalUniId:
                    tuempcode=(str(tuid).split('-'))[0]
                    if canonisounidic.has_key(tuempcode):
                        canonisounidic[tuempcode].append(tuid)
                    else:
                        canonisounidic[tuempcode]=[tuid]
                canonisounidic={a:list(set(b)) for a, b in canonisounidic.items()}
                unqcanonicalUnId=list(set(canonisounidic.keys()))
                print ("Total Unique protein in this file: ",len(unqcanonicalUnId))
                countProt=0
                print ("Extracting Protein Name, Gene, Organism Name,GO,Sub cellular data, drug bank data ,disease data and checking pep seq present in uniprot specified seq, job starts",str(datetime.datetime.now()))
                tempgotermdic={}
                tempsubcdic={}
                protgnogscgofilename='uniprotfuncdata.csv'
                protgnogscgofile=open(protgnogscgofilename,'w')
                protgnogscgoHeader=['ActualUniID','UpdatedUniID','PepSeq','ProteinName','Gene','Organism',\
                'OrganismID','Subcellular','Mouse GOID','Mouse GOName','Mouse GoTerm','Mouse Go',\
                'Human DrugBank','Human DiseaseData','Human UniProt DiseaseData','Human UniProt DiseaseData URL',\
                'Human DisGen DiseaseData','Human DisGen DiseaseData URL','PresentInSeq','Human UniProtKB Accession',\
                'Human ProteinName','Human Gene','Human Kegg Pathway Name',\
                'Human Kegg Pathway','Human Go ID','Human Go Name','Human Go Term','Human Go']
                protgnogscgofile.write('\t'.join(protgnogscgoHeader)+'\n')
                # Per canonical accession: scrape the UniProt XML record for
                # name/gene/organism/GO/subcellular data (best-effort; every
                # lookup is wrapped so a bad record is simply skipped).
                for subcgcode in unqcanonicalUnId:
                    time.sleep(2)
                    ScAllLocList=[]
                    GoIDList=[]
                    GoNamList=[]
                    GoTermList=[]
                    GOinfo=[]
                    PN='NA'
                    GN='NA'
                    OG='NA'
                    OGID='NA'
                    try:
                        countProt+=1
                        if countProt%1000 ==0:
                            print (str(countProt), "th protein Protein Name, Gene, Organism Name, GO, sub cellular and checking pep seq present in uniprot specified seq job starts",str(datetime.datetime.now()))
                        SGrequestURL="https://www.uniprot.org/uniprot/"+str(subcgcode)+".xml"
                        SGunifile=urllib.urlopen(SGrequestURL)
                        SGunidata= SGunifile.read()
                        SGunifile.close()
                        try:
                            SGunidata=minidom.parseString(SGunidata)
                            try:
                                subcelldata=(SGunidata.getElementsByTagName('subcellularLocation'))
                                for subcItem in subcelldata:
                                    try:
                                        subloc=(subcItem.getElementsByTagName('location')[0]).firstChild.nodeValue
                                        if len(str(subloc).strip()) >0:
                                            ScAllLocList.append(str(subloc).strip())
                                    except:
                                        pass
                            except IndexError:
                                pass
                            try:
                                godata=(SGunidata.getElementsByTagName('dbReference'))
                                for gItem in godata:
                                    if (gItem.attributes['type'].value).upper() == 'GO':
                                        try:
                                            gonamedetails=(str(gItem.getElementsByTagName('property')[0].attributes['value'].value).strip()).split(':')[1]
                                            gotermdetails=(str(gItem.getElementsByTagName('property')[0].attributes['value'].value).strip()).split(':')[0]
                                            GoNamList.append(gonamedetails)
                                            goid=str(gItem.attributes['id'].value).strip()
                                            GoIDList.append(goid)
                                            tempGoTerm=None
                                            # UniProt GO values are prefixed P:/F:/C: for the three ontologies.
                                            if gotermdetails.lower()=='p':
                                                tempGoTerm='Biological Process'
                                            if gotermdetails.lower()=='f':
                                                tempGoTerm='Molecular Function'
                                            if gotermdetails.lower()=='c':
                                                tempGoTerm='Cellular Component'
                                            GoTermList.append(tempGoTerm)
                                            tempGOData=gonamedetails+';'+goid+';'+tempGoTerm
                                            GOinfo.append(tempGOData)
                                        except:
                                            pass
                                    if (gItem.attributes['type'].value).strip() == 'NCBI Taxonomy':
                                        try:
                                            OGID=str(gItem.attributes['id'].value).strip()
                                        except:
                                            pass
                            except IndexError:
                                pass
                            try:
                                try:
                                    PN=(((SGunidata.getElementsByTagName('protein')[0]).getElementsByTagName('recommendedName')[0]).getElementsByTagName('fullName')[0]).firstChild.nodeValue
                                except:
                                    PN=(((SGunidata.getElementsByTagName('protein')[0]).getElementsByTagName('submittedName')[0]).getElementsByTagName('fullName')[0]).firstChild.nodeValue
                            except IndexError:
                                pass
                            try:
                                try:
                                    GN=((SGunidata.getElementsByTagName('gene')[0]).getElementsByTagName('name')[0]).firstChild.nodeValue
                                except:
                                    GN='NA'
                            except IndexError:
                                pass
                            try:
                                try:
                                    OG=((SGunidata.getElementsByTagName('organism')[0]).getElementsByTagName('name')[0]).firstChild.nodeValue
                                except:
                                    OG='NA'
                            except IndexError:
                                pass
                        except ExpatError:
                            pass
                    except IOError:
                        pass
                    subcelldata='NA'
                    goiddata='NA'
                    gonamedata='NA'
                    gotermdata='NA'
                    goData='NA'
                    if len(ScAllLocList)>0:
                        subcelldata='|'.join(list(set(ScAllLocList)))
                    if len(GoIDList)>0:
                        goiddata='|'.join(list(set(GoIDList)))
                    if len(GoNamList)>0:
                        gonamedata='|'.join(list(set(GoNamList)))
                    if len(GoTermList)>0:
                        gotermdata='|'.join(list(set(GoTermList)))
                    if len(GOinfo)>0:
                        goData='|'.join(list(set(GOinfo)))
                    # For every isoform of this accession, fetch the FASTA and
                    # record whether each peptide occurs in the sequence.
                    if subcgcode in canonisounidic:
                        for canisoitem in canonisounidic[subcgcode]:
                            time.sleep(1)
                            try:
                                tempfastaseq=''
                                unifastaurl="https://www.uniprot.org/uniprot/"+str(canisoitem)+".fasta"
                                fr = requests.get(unifastaurl)
                                fAC=(str(fr.url).split('/')[-1].strip()).split('.')[0].strip()
                                fastaresponse = urllib.urlopen(unifastaurl)
                                for seq in SeqIO.parse(fastaresponse, "fasta"):
                                    tempfastaseq=(seq.seq).strip()
                                if len(tempfastaseq.strip()) >0:
                                    if canisoitem in mergedictdata:
                                        for temppgopepseq in mergedictdata[canisoitem]:
                                            pepinfastapresent='No'
                                            if temppgopepseq in tempfastaseq:
                                                pepinfastapresent='Yes'
                                            protFileDataList=['NA']*28
                                            # Isoform accessions (with '-') do not carry
                                            # subcellular/GO columns; canonical ones do.
                                            if '-' in fAC:
                                                if canisoitem in humanmousemergedic:
                                                    protFileDataList[0]=str(canisoitem)
                                                    protFileDataList[1]=str(fAC)
                                                    protFileDataList[2]=str(temppgopepseq)
                                                    protFileDataList[3]=str(PN)
                                                    protFileDataList[4]=str(GN)
                                                    protFileDataList[5]=str(OG)
                                                    protFileDataList[6]=str(OGID)
                                                    protFileDataList[12]=str(humanmousemergedic[canisoitem][7])
                                                    protFileDataList[13]=str(humanmousemergedic[canisoitem][2])
                                                    protFileDataList[14]=str(humanmousemergedic[canisoitem][3])
                                                    protFileDataList[15]=str(humanmousemergedic[canisoitem][4])
                                                    protFileDataList[16]=str(humanmousemergedic[canisoitem][5])
                                                    protFileDataList[17]=str(humanmousemergedic[canisoitem][6])
                                                    protFileDataList[18]=str(pepinfastapresent)
                                                    protFileDataList[19]=str(humanmousemergedic[canisoitem][8])
                                                    protFileDataList[20]=str(humanmousemergedic[canisoitem][0])
                                                    protFileDataList[21]=str(humanmousemergedic[canisoitem][1])
                                                    protFileDataList[22]=str(humanmousemergedic[canisoitem][-2])
                                                    protFileDataList[23]=str(humanmousemergedic[canisoitem][-1])
                                                    protFileDataList[24]=str(humanmousemergedic[canisoitem][9])
                                                    protFileDataList[25]=str(humanmousemergedic[canisoitem][10])
                                                    protFileDataList[26]=str(humanmousemergedic[canisoitem][11])
                                                    protFileDataList[27]=str(humanmousemergedic[canisoitem][12])
                                                else:
                                                    protFileDataList[0]=str(canisoitem)
                                                    protFileDataList[1]=str(fAC)
                                                    protFileDataList[2]=str(temppgopepseq)
                                                    protFileDataList[3]=str(PN)
                                                    protFileDataList[4]=str(GN)
                                                    protFileDataList[5]=str(OG)
                                                    protFileDataList[6]=str(OGID)
                                                    protFileDataList[18]=str(pepinfastapresent)
                                            else:
                                                if canisoitem in humanmousemergedic:
                                                    protFileDataList[0]=str(canisoitem)
                                                    protFileDataList[1]=str(fAC)
                                                    protFileDataList[2]=str(temppgopepseq)
                                                    protFileDataList[3]=str(PN)
                                                    protFileDataList[4]=str(GN)
                                                    protFileDataList[5]=str(OG)
                                                    protFileDataList[6]=str(OGID)
                                                    protFileDataList[7]=str(subcelldata)
                                                    protFileDataList[8]=str(goiddata)
                                                    protFileDataList[9]=str(gonamedata)
                                                    protFileDataList[10]=str(gotermdata)
                                                    protFileDataList[11]=str(goData)
                                                    protFileDataList[12]=str(humanmousemergedic[canisoitem][7])
                                                    protFileDataList[13]=str(humanmousemergedic[canisoitem][2])
                                                    protFileDataList[14]=str(humanmousemergedic[canisoitem][3])
                                                    protFileDataList[15]=str(humanmousemergedic[canisoitem][4])
                                                    protFileDataList[16]=str(humanmousemergedic[canisoitem][5])
                                                    protFileDataList[17]=str(humanmousemergedic[canisoitem][6])
                                                    protFileDataList[18]=str(pepinfastapresent)
                                                    protFileDataList[19]=str(humanmousemergedic[canisoitem][8])
                                                    protFileDataList[20]=str(humanmousemergedic[canisoitem][0])
                                                    protFileDataList[21]=str(humanmousemergedic[canisoitem][1])
                                                    protFileDataList[22]=str(humanmousemergedic[canisoitem][-2])
                                                    protFileDataList[23]=str(humanmousemergedic[canisoitem][-1])
                                                    protFileDataList[24]=str(humanmousemergedic[canisoitem][9])
                                                    protFileDataList[25]=str(humanmousemergedic[canisoitem][10])
                                                    protFileDataList[26]=str(humanmousemergedic[canisoitem][11])
                                                    protFileDataList[27]=str(humanmousemergedic[canisoitem][12])
                                                else:
                                                    protFileDataList[0]=str(canisoitem)
                                                    protFileDataList[1]=str(fAC)
                                                    protFileDataList[2]=str(temppgopepseq)
                                                    protFileDataList[3]=str(PN)
                                                    protFileDataList[4]=str(GN)
                                                    protFileDataList[5]=str(OG)
                                                    protFileDataList[6]=str(OGID)
                                                    protFileDataList[7]=str(subcelldata)
                                                    protFileDataList[8]=str(goiddata)
                                                    protFileDataList[9]=str(gonamedata)
                                                    protFileDataList[10]=str(gotermdata)
                                                    protFileDataList[11]=str(goData)
                                                    protFileDataList[18]=str(pepinfastapresent)
                                            protgnogscgofile.write('\t'.join(protFileDataList)+'\n')
                            except IOError:
                                pass
                protgnogscgofile.close()
                mergedictdata.clear()
                countProt=0
                print ("Extracting Protein Name, Gene, Organism Name,GO,Sub cellular data, and checking pep seq present in uniprot specified seq, job done",str(datetime.datetime.now()))
                countProt=0
                countPep=0
                tempunifuncdic={}
                # Re-read the functional-data CSV just written and index it by
                # accession_peptide for the later merge steps.
                with open(protgnogscgofilename) as pgosgfile:
                    preader = csv.DictReader(pgosgfile, delimiter='\t')
                    for prow in preader:
                        tempCol=['ActualUniID','ProteinName','Gene','Organism',\
                        'OrganismID','Subcellular','PepSeq','Human DiseaseData',\
                        'Human UniProt DiseaseData','Human UniProt DiseaseData URL',\
                        'Human DisGen DiseaseData','Human DisGen DiseaseData URL',\
                        'Mouse GOID','Mouse GOName','Mouse GoTerm','Mouse Go',\
                        'Human DrugBank','Human UniProtKB Accession','Human ProteinName','Human Gene',\
                        'Human Kegg Pathway Name','Human Kegg Pathway',\
                        'Human Go ID','Human Go Name','Human Go Term','Human Go','PresentInSeq']
                        templist=[]
                        for tc in tempCol:
                            templist.append(str(prow[tc]).strip())
                        tempfuncid=str(prow['UpdatedUniID']).strip()+'_'+str(prow['PepSeq']).strip()
                        tempunifuncdic[tempfuncid]=templist
                        uniidlist.append((((prow['UpdatedUniID']).split('-'))[0]).strip())
                        if str(prow['PresentInSeq']).strip() =='Yes':
                            tempid=str(prow['UpdatedUniID']).strip()+'_'+str(prow['OrganismID']).strip()
                            if unqisocheckdic.has_key(tempid):
                                unqisocheckdic[tempid].append(str(prow['PepSeq']).strip())
                            else:
                                unqisocheckdic[tempid]=[str(prow['PepSeq']).strip()]
                unquniidlist=list(set(uniidlist))
                print ("Extracting KEGG pathway name, job starts",str(datetime.datetime.now()))
                # Map accessions to KEGG pathways in batches of 2000 via the
                # UniProt upload service; retries forever on HTTP errors.
                keggdictfile={}
                uniproturl = 'https://www.uniprot.org/uploadlists/'
                k = KEGG()
                for kx in range(0,len(unquniidlist),2000):
                    countProt+=kx+2000
                    if countProt%2000 ==0:
                        print (str(countProt), "th protein kegg job starts",str(datetime.datetime.now()))
                    uniprotcodes=' '.join(unquniidlist[kx:kx+2000])
                    uniprotparams = {
                    'from':'ACC',
                    'to':'KEGG_ID',
                    'format':'tab',
                    'query':uniprotcodes
                    }
                    while True:
                        try:
                            kuniprotdata = urllib.urlencode(uniprotparams)
                            kuniprotrequest = urllib2.Request(uniproturl, kuniprotdata)
                            kuniprotresponse = urllib2.urlopen(kuniprotrequest)
                            for kuniprotline in kuniprotresponse:
                                kudata=kuniprotline.strip()
                                if not kudata.startswith("From"):
                                    kuinfo=kudata.split("\t")
                                    if len(kuinfo[1].strip()):
                                        kegg=k.get(kuinfo[1].strip())
                                        kudict_data = k.parse(kegg)
                                        try:
                                            try:
                                                if len(str(kuinfo[0]).strip()) >5:
                                                    tempkeggData='|'.join('{};{}'.format(key, value) for key, value in kudict_data['PATHWAY'].items())
                                                    keggdictfile[kuinfo[0].strip()]=[kudict_data['PATHWAY'].values(),tempkeggData]
                                            except TypeError:
                                                pass
                                        except KeyError:
                                            pass
                            break
                        except urllib2.HTTPError:
                            time.sleep(RETRY_TIME)
                            print ('Hey, I am trying again until succeeds to get data from KEGG!',str(datetime.datetime.now()))
                            pass
                kdicf = open(kdicfile, 'wb')
                pickle.dump(keggdictfile, kdicf , pickle.HIGHEST_PROTOCOL)
                kdicf.close()
                diseasefilepath = os.path.join(below_curr_dir, 'src/UniDiseaseInfo/' 'humsavar.txt')
                print ("Extracting disease data, job starts",str(datetime.datetime.now()))
                try:
                    urllib.urlretrieve('ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/variants/humsavar.txt',diseasefilepath)
                    urllib.urlcleanup()
                except:
                    print ("Can't able to download humsavar.txt file!!")
                print ("Extracting Human disease data, job done",str(datetime.datetime.now()))
                print ("Checking uniqueness of peptide sequence and presence in isoforms, job starts",str(datetime.datetime.now()))
                countProt=0
                countPep=0
                outfilefileUnqIsoname='UnqIsoresult.csv'
                outfilefileUnqIso = open(outfilefileUnqIsoname,'w')
                outfilefileUnqIso.write('UniProtKB Accession'+'\t'+'Peptide Sequence'+'\t'+'Unique in protein'+'\t'+'Present in isoforms'+'\n')
                # For every accession_organism key, ask the PIR Peptide Match
                # service (batches of 90 peptides) whether each peptide is
                # unique to the protein and/or present in isoforms.
                for mkey in unqisocheckdic.keys():
                    pepunid=mkey.split('_')[0]
                    unqtemppepseqList=list(set(unqisocheckdic[mkey]))
                    pepUnqDic={}
                    pepIsodic={}
                    nonprotuniqstatDic={}
                    peppresentUniFastaDic={}
                    canopepunid=''
                    pepunidver=''
                    if '-' in pepunid:
                        pepunidinfo=pepunid.split('-')
                        canopepunid=pepunidinfo[0]
                        pepunidver=pepunidinfo[-1]
                    else:
                        canopepunid=pepunid
                    pirUqorgid=mkey.split('_')[1]
                    countProt+=1
                    if countProt%1000 ==0:
                        print (str(countProt), "th protein peptide uniqueness job starts",str(datetime.datetime.now()))
                        time.sleep(10)
                    for mx in range(0,len(unqtemppepseqList),90):
                        countPep+=mx+90
                        if countPep%4000 ==0:
                            print (str(countPep), "th peptide seq uniqueness check job starts",str(datetime.datetime.now()))
                        unqtemppepseq=','.join(unqtemppepseqList[mx:mx+90])
                        while True:
                            try:
                                PIRpepMatrequestURLUnq ="https://research.bioinformatics.udel.edu/peptidematchapi2/match_get?peptides="+str(unqtemppepseq)+"&taxonids="+str(pirUqorgid)+"&swissprot=true&isoform=true&uniref100=false&leqi=false&offset=0&size=-1"
                                urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
                                PIRPepMatrUnq = requests.get(PIRpepMatrequestURLUnq, headers={ "Accept" : "application/json"},verify=False)
                                if not PIRPepMatrUnq.ok:
                                    PIRPepMatrUnq.raise_for_status()
                                    sys.exit()
                                PIRPepMatresponseBodyUnq = PIRPepMatrUnq.json()
                                if len(PIRPepMatresponseBodyUnq['results'][0]['proteins'])>0:
                                    for piritemUnq in PIRPepMatresponseBodyUnq['results'][0]['proteins']:
                                        uniID=piritemUnq['ac'].strip()
                                        pirRevStatUnq=piritemUnq['reviewStatus'].strip()
                                        if pepunid.lower() == (str(uniID).lower()).strip():
                                            for sxmatchpep in piritemUnq["matchingPeptide"]:
                                                matchpeptide=sxmatchpep["peptide"]
                                                if str(matchpeptide).strip() in unqtemppepseqList[mx:mx+90]:
                                                    peppresentUniFastaDic[str(matchpeptide).strip()]=True
                                        # Only reviewed (Swiss-Prot, 'sp') hits count
                                        # toward uniqueness / isoform presence.
                                        if 'sp' == (str(pirRevStatUnq).lower()).strip():
                                            canouniID=''
                                            uniIDver=''
                                            if '-' in uniID:
                                                uniIDinfo=uniID.split('-')
                                                canouniID=uniIDinfo[0]
                                                uniIDver=uniIDinfo[-1]
                                            else:
                                                canouniID=uniID
                                            for mxmatchpep in piritemUnq["matchingPeptide"]:
                                                uimatchpeptide=mxmatchpep["peptide"]
                                                if str(uimatchpeptide).strip() in unqtemppepseqList[mx:mx+90]:
                                                    if (canouniID.strip()).lower() == (canopepunid.strip()).lower():
                                                        if len(uniIDver.strip()) ==0:
                                                            pepUnqDic[str(uimatchpeptide).strip()]=True
                                                        if len(uniIDver.strip()) !=0:
                                                            if pepIsodic.has_key(str(uimatchpeptide).strip()):
                                                                pepIsodic[str(uimatchpeptide).strip()].append(uniID)
                                                            else:
                                                                pepIsodic[str(uimatchpeptide).strip()]=[uniID]
                                                    if canouniID.strip() !=canopepunid.strip():
                                                        nonprotuniqstatDic[str(uimatchpeptide).strip()]=True
                                break
                            except requests.exceptions.ConnectionError:
                                time.sleep(RETRY_TIME)
                                print ('Hey, I am trying again until succeeds to get data from Peptide Match Server!',str(datetime.datetime.now()))
                                pass
                            except requests.exceptions.ChunkedEncodingError:
                                time.sleep(RETRY_TIME)
                                print ('chunked_encoding_error happened',str(datetime.datetime.now()))
                                pass
                    for peptideseq in unqtemppepseqList:
                        peptideunique='NA'
                        pepisodata='No'
                        if peptideseq not in nonprotuniqstatDic:
                            if peptideseq in pepUnqDic:
                                if pepUnqDic[peptideseq]:
                                    peptideunique='Yes'
                                else:
                                    peptideunique='Not unique'
                            else:
                                peptideunique='NA'
                        if peptideseq in pepIsodic:
                            pepisodata=','.join(list(set(pepIsodic[peptideseq])))
                        outfilefileUnqIso.write(str(pepunid)+'\t'+str(peptideseq)+'\t'+str(peptideunique)+'\t'+str(pepisodata)+'\n')
                outfilefileUnqIso.close()
                print ("Checking uniqueness of peptide sequence and presence in isoforms, job done",str(datetime.datetime.now()))
                tempunqisodic={}
                with open(outfilefileUnqIsoname) as unqisofile:
                    uireader = csv.DictReader(unqisofile, delimiter='\t')
                    for uirow in uireader:
                        tempunqisodic[str(uirow['UniProtKB Accession']).strip()+'_'+str(uirow['Peptide Sequence']).strip()]=[str(uirow['Unique in protein']).strip(),str(uirow['Present in isoforms']).strip()]
                keggdict = cPickle.load(open(kdicfile, 'rb'))
                tempunikeggunqisofuncdic={}
                # Splice uniqueness/isoform and KEGG columns into each
                # functional record at fixed positions 7..10.
                for tukey in tempunifuncdic:
                    tempkeggdata='NA'
                    tempkeggdetails='NA'
                    tempunqdata='NA'
                    tempisodata='NA'
                    tuniID=tukey.split('_')[0]
                    if tuniID in keggdict:
                        tempkeggdata='|'.join(list(set(keggdict[tuniID][0])))
                        tempkeggdetails=keggdict[tuniID][1]
                    if tukey in tempunqisodic:
                        tempunqdata=tempunqisodic[tukey][0]
                        tempisodata=tempunqisodic[tukey][1]
                    tuitem=tempunifuncdic[tukey]
                    tuitem.insert(7,tempunqdata)
                    tuitem.insert(8,tempisodata)
                    tuitem.insert(9,tempkeggdata)
                    tuitem.insert(10,tempkeggdetails)
                    tempolduniid=tuitem[0]
                    tuitem[0]=tuniID
                    modtukey=tempolduniid+'_'+tukey.split('_')[1]
                    tempunikeggunqisofuncdic[modtukey]=tuitem
                print ("Functional data dictionay job done",str(datetime.datetime.now()))
                keggdict.clear()
                tempunifuncdic.clear()
                tempunqisodic.clear()
                temptransdic={}
                for fitem in listOfResourceFile:
                    with open(fitem,'r') as pepfile:
                        reader = csv.DictReader(pepfile, delimiter='\t')
                        for row in reader:
                            resinfo=[]
                            for i in colname:
                                resinfo.append(str(row[i]).strip())
                            restempid=resinfo[0].strip()+'_'+resinfo[4].strip()
                            if temptransdic.has_key(restempid):
                                temptransdic[restempid].append(resinfo[5:])
                            else:
                                temptransdic[restempid]=[resinfo[5:]]
                    print (str(fitem),"transition data dictionay job done",str(datetime.datetime.now()))
                outFileColName=['UniProtKB Accession','Protein','Gene','Organism','Organism ID','SubCellular',\
                'Peptide Sequence','Summary Concentration Range Data','All Concentration Range Data',\
                'All Concentration Range Data-Sample LLOQ Based','Peptide ID','Special Residues','Molecular Weight',\
                'GRAVY Score','Transitions','Retention Time','Analytical inofrmation','Gradients','AAA Concentration',\
                'CZE Purity','Panel','Knockout','LLOQ','ULOQ','Sample LLOQ','Protocol','Trypsin','QC. Conc. Data',\
                'Unique in protein','Present in isoforms','Mouse Kegg Pathway Name','Mouse Kegg Pathway',\
                'Human Disease Name','Human UniProt DiseaseData','Human UniProt DiseaseData URL',\
                'Human DisGen DiseaseData','Human DisGen DiseaseData URL','Mouse Go ID',\
                'Mouse Go Name','Mouse Go Term','Mouse Go','Human Drug Bank',\
                'Human UniProtKB Accession','Human ProteinName','Human Gene',\
                'Human Kegg Pathway Name','Human Kegg Pathway','Human Go ID',\
                'Human Go Name','Human Go Term','Human Go','UniprotKb entry status']
                outFileColNameData='\t'.join(outFileColName)
                outfilefile = open(outfilefilename,'w')
                outfilefile.write(outFileColNameData+'\n')
                # Assemble each 52-column output row from the transition data
                # (positions 7..27) and the merged functional data.
                for key in temptransdic:
                    for subtempitem in temptransdic[key]:
                        temprow=['NA']*52
                        peptracktemprowpos=[7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27]
                        for i,j in zip(subtempitem,peptracktemprowpos):
                            temprow[j]=str(i)
                        functemprowpos=[0,1,2,3,4,5,6,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51]
                        if key in tempunikeggunqisofuncdic:
                            for x,y in zip(tempunikeggunqisofuncdic[key],functemprowpos):
                                temprow[y]=str(x)
                        if (temprow[0].strip()).upper() != 'NA' and (temprow[6].strip()).upper() != 'NA':
                            finalreportdata='\t'.join(temprow)
                            outfilefile.write(finalreportdata+'\n')
                        temprow=[]
                outfilefile.close()
                print ("Initial report file creation, job done",str(datetime.datetime.now()))
                temptransdic.clear()
                tempunikeggunqisofuncdic.clear()
                os.rename(outfilefilename,filename)
                shutil.move(movefilepath,filepath)
                print ("Initial report file transfer, job done",str(datetime.datetime.now()))
                # Downstream post-processing and publication steps.
                addModCol()
                keggcmd='python statKEGGcoverage.py'
                subprocess.Popen(keggcmd, shell=True).wait()
                statjobcmd='python generateSummaryReport.py'
                subprocess.Popen(statjobcmd, shell=True).wait()
                totalAssayPep()
                addSelCol()
                presencePepSeqHuman()
                ncbiGeneExp()
                preLoadJsonData(homedir,filepath)
                uploadData()
                print ("Extracting Prepare download, job starts",str(datetime.datetime.now()))
                preDownloadFile()
                print ("Extracting Prepare download, job starts",str(datetime.datetime.now()))
|
994,752 | 5465d8a0fc3c979d5b6331e1f6128458d7c86186 | # Data Preprocessing Template
# Importing the libraries
import pandas as pd
import matplotlib.pyplot as plt
import hebb as hb
import perceptron as per
import form as fm
# Importing the dataset
def set_dataset(pass_data):
    """Load a CSV source into a pandas DataFrame.

    pass_data: path or buffer handed straight to pandas.read_csv.
    """
    return pd.read_csv(pass_data)
"""
x = dataset.iloc[:,:-1].values
y = dataset.iloc[:,7:8].values
from sklearn.preprocessing import *
s=StandardScaler()
x=s.fit_transform(x)
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, random_state = 0)
y_hat_hebbian=hb.hebb(1,1,[0,0,0,0,0,0,0])(x_train,y_train,x_test,y_test)
y_hat_perceptron=per.perceptron(1,1,[0,0,0,0,0,0,0])(x_train,y_train,x_test,y_test)
"""
|
994,753 | 200d83dad2ab78c89011134081cbf0ac643a7d05 | # Generated by Django 2.1 on 2021-07-27 09:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (2.1, 2021-07-27).

    Adds the Bid model, relabels the Art price fields as "(Ksh)" integers,
    and links each Bid to an Art and a buyer Profile.  Do not edit by hand;
    generate a new migration for further schema changes.
    """

    dependencies = [
        ('auction', '0004_auto_20210726_2220'),
    ]

    operations = [
        migrations.CreateModel(
            name='Bid',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('bidprice', models.IntegerField(default=0)),
            ],
        ),
        migrations.AlterField(
            model_name='art',
            name='buyingPrice',
            field=models.IntegerField(default=0, verbose_name='Buying Price (Ksh)'),
        ),
        migrations.AlterField(
            model_name='art',
            name='reservedPrice',
            field=models.IntegerField(default=0, verbose_name='Reserved Price (Ksh)'),
        ),
        migrations.AddField(
            model_name='bid',
            name='art',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='auction.Art'),
        ),
        migrations.AddField(
            model_name='bid',
            name='buyer',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='auction.Profile'),
        ),
    ]
|
def read_file():
    """Prompt for a file path and print that file's entire contents."""
    path = input('Enter path of file:')
    with open(path) as handle:
        print(handle.read())
def write_to_file():
    """Prompt for a path and a line of content, then overwrite the file with it."""
    path = input('Enter path of file: ')
    text = input('Enter content: ')
    with open(path, 'w') as handle:
        handle.write(text)
if __name__ == '__main__':
    # Interactive demo entry point; the write path is deliberately disabled.
    read_file()
    #write_to_file()
994,755 | 9b9f4acec3e2ff1fc78a2fc4b583f192eaffcc2d | #-*-coding:utf-8-*-
# Number of input features per training example (the target is stored after them).
dimension = 384
# Number of loaded training examples; set by load_from_file().
caseNum = 0
# Model parameters, length dimension+1 (last entry is the bias weight).
theta = list()
# Training data: each item holds `dimension` features followed by the target.
items = list()
# Per-feature (max - min); filled by feature_scaling_init().
featureRange = list()
# Per-feature mean; filled by feature_scaling_init().
featureMean = list()
def load_from_file(filename):
    """Populate the global training set and the initial parameter vector.

    Reads `filename` as comma-separated lines, skipping the header row; every
    value after the first column becomes a float in `items`.  The starting
    theta is read from 'origin-theta-for-step2.txt', which contains a
    bracketed, comma-separated list.
    """
    global caseNum, theta, items
    with open(filename, 'r') as f:
        for line in f.readlines()[1:]:
            data = line.strip().split(',')
            # First column is an identifier; the rest are numeric features/target.
            # float(num) replaces the original C-style (float)(num) casts.
            items.append([float(num) for num in data[1:]])
    caseNum = len(items)
    # load theta: collapse all whitespace, then drop the surrounding brackets.
    with open('origin-theta-for-step2.txt', 'r') as fin:
        content = ''.join(fin.read().split())[1:-1]
        theta = [float(feature) for feature in content.split(',')]
def caculate_hypothesis(features):
    """Return the dot product of `features` (length dimension+1, bias input last) with theta."""
    return sum((features[idx] * theta[idx] for idx in range(dimension + 1)), 0.0)
def feature_scaling_init():
    """Compute per-feature range and mean over the loaded training set.

    Fills the module-level featureRange and featureMean lists used by
    feature_scaling(); must run after load_from_file().
    """
    global featureRange, featureMean
    # Slicing copies, so min and max start as independent copies of the
    # first example's feature values.
    featureMin = items[0][:-1]
    featureMax = items[0][:-1]
    featureSum = [0 for i in range(dimension)]
    for item in items:
        item = item[:-1]  # drop the target value
        for i in range(len(item)):
            temp = item[i]
            if temp > featureMax[i]: featureMax[i] = temp
            if temp < featureMin[i]: featureMin[i] = temp
            featureSum[i] += temp
    featureRange = [featureMax[i] - featureMin[i] for i in range(dimension)]
    featureMean = [featureSum[i] / caseNum for i in range(dimension)]
def division(divisor, divident):
    """Return divisor / divident, treating a zero denominator as 0.0."""
    return divisor / divident if divident != 0 else 0.0
def feature_scaling(features):
    """Mean-normalize `features` using the precomputed featureMean/featureRange."""
    scaled = []
    for idx in range(dimension):
        scaled.append(division(features[idx] - featureMean[idx], featureRange[idx]))
    return scaled
def cost_function():
    """Return the mean squared error J(theta) over the training set (with the 1/2 factor)."""
    total = 0.0
    for sample in items:
        scaled = feature_scaling(sample[:dimension])
        scaled.append(1.0)  # bias input
        total += (caculate_hypothesis(scaled) - sample[dimension]) ** 2
    return total / (2.0 * caseNum)
def descent(alpha):
    """Perform one gradient-descent sweep over all parameters.

    alpha: learning rate.
    NOTE(review): theta[j] is updated in place inside the j-loop, so later
    parameters see the already-updated earlier ones (coordinate-wise rather
    than the simultaneous batch update) -- confirm this is intentional.
    """
    global theta
    for j in range(len(theta)):
        acc = 0.0
        for i in range(caseNum):
            item = items[i]
            features = feature_scaling(item[:dimension])
            features.append(1.0)
            acc += (caculate_hypothesis(features) - item[dimension]) * features[j]
        theta[j] = theta[j] - (alpha / caseNum) * acc
def gradient_descent(alpha):
    """Run descent() until the cost improvement stays below 0.003 three times in a row."""
    lastCost = 0.0
    unchangeCount = 0
    while unchangeCount < 3:
        # alpha: learning rate
        descent(alpha)
        cost = cost_function()
        print str(theta), cost
        print '\n'
        diff = lastCost - cost
        # Count consecutive iterations whose positive improvement is small;
        # any larger improvement resets the streak.
        if diff > 0 and diff < 0.003:
            unchangeCount += 1
        if diff > 0.003:
            unchangeCount = 0
        lastCost = cost
def gradient_descent_for_test(alpha):
    """Debug variant: descend forever, printing the cost after each sweep."""
    while True:
        # alpha: learning rate
        descent(alpha)
        print cost_function()
def main():
    """Train on train_temp.csv and append the learned theta to result-theta.txt."""
    alpha = 1.0
    # train
    print 'Loading train set...'
    load_from_file('train_temp.csv')
    print 'Prepare for feature scaling...'
    feature_scaling_init()
    print 'Executing gradient descent...'
    gradient_descent(alpha)
    fout = open('result-theta.txt', 'a')
    fout.write(str(theta) + '\n')
    fout.close()
if __name__ == "__main__":
main() |
994,756 | ec54b510f29fabcad3f5c1869ad6536f2f2d28ee | from django.shortcuts import render
from django.core.mail import EmailMessage
from django.http import HttpResponse
import json
# Create your views here.
def home(request):
return render(request, 'home.html')
def about(request):
return render(request, 'about.html')
def services(request):
return render(request, 'services.html')
def services_details(request):
return render(request, 'services_details.html')
def copyright(request):
    """Render the copyright notice page. (Shadows the builtin `copyright`.)"""
    return render(request, 'copyright.html')
def privacypolicy(request):
    """Render the privacy policy page."""
    return render(request, 'privacy_policy.html')
def terms(request):
    """Render the terms & conditions page."""
    return render(request, 'terms&conditions.html')
def cookies(request):
    """Render the cookies policy page."""
    return render(request, 'cookies.html')
def contact_form(request):
    """Forward a contact-form submission by email; reply with JSON status.

    Fixes over the original: a non-POST request (or a POST with an empty
    body) no longer raises NameError on the undefined `message` -- it now
    gets the failure reply; the bare `except:` is narrowed to Exception so
    KeyboardInterrupt/SystemExit still propagate.
    """
    # Default to the failure reply; overwritten only on a successful send.
    message = {'message': "Something went wrong, but we're working on it!"}
    if request.POST:
        print('this')
        email = EmailMessage(
            request.POST.get('subject'),
            # TODO(review): the body repeats 'message' twice -- the first
            # field was probably meant to be the sender's name; confirm
            # against the form template before changing.
            "{}, {}".format(request.POST.get('message'), request.POST.get('message')),
            'contact@cmdlb.com',
            ['marcobaldanza@hotmail.co.uk'],
            # ['johnnyb@jthelectrics.co.uk'],
            reply_to=[request.POST['email']],
        )
        try:
            email.send(fail_silently=False)
            message = {'message': "Thank you for your email, we'll be in touch soon!"}
        except Exception:
            # Keep the best-effort behavior: any send failure falls through
            # to the generic failure reply already set above.
            pass
    return HttpResponse(json.dumps(message), content_type='application/json')
994,757 | 27412f1bd33d84dd9b567e15b000ca72c0fb55b8 | from math import sin, pi
import numpy as np
class Heaviside:
    """Heaviside step function H(x), optionally smoothed over [-eps, eps].

    With eps=None the exact step is used (H(x)=1 for x>=0, else 0).
    With eps set, the sine-smoothed transition
        0.5 + x/(2*eps) + sin(pi*x/eps)/(2*pi)
    is used on [-eps, eps].

    Fixes over the original: input is converted to a float array up front,
    so plain lists and integer arrays work, and the smoothed value 0.5 is
    no longer truncated to 0 by an integer output dtype inherited from
    np.zeros_like; `== None` replaced by `is None` (PEP 8).
    """

    def __init__(self, eps=None):
        # eps: half-width of the smoothing interval; None -> exact step.
        self.eps = eps

    def __call__(self, x):
        eps = self.eps
        # Float copy: accepts lists/int arrays and keeps 0.5 exact.
        x = np.asarray(x, dtype=float)
        r = np.zeros_like(x)
        if eps is None:
            r[x >= 0] = 1.0
        else:
            inside = np.logical_and(-eps <= x, x <= eps)
            above = x > eps
            r[inside] = (0.5 + x[inside] / (2 * eps)
                         + np.sin(np.pi * x[inside] / eps) / (2 * np.pi))
            r[above] = 1.0
        return r
class Indicator:
    """Indicator (characteristic) function of [a, b], optionally smoothed.

    Built as H(x-a) * H(b-x) from two Heaviside factors; eps controls the
    smoothing half-width of each factor (None -> exact step).

    Fix over the original: the `eps == None` branch (PEP 8 E711) was
    redundant -- Heaviside(None) already yields the exact step, so the
    eps value is forwarded unconditionally.
    """

    def __init__(self, a, b, eps=None):
        self.a, self.b, self.eps = a, b, eps

    def __call__(self, x):
        a, b = self.a, self.b
        H = Heaviside(self.eps)
        return H(x - a) * H(b - x)
#a,b=0,2
#I = Indicator(a, b)
## indicator function on [a,b]
#print (I(a-1.0), I(b+0.1), I((a+b)/2.0))
#x=np.linspace(-0.5,2.5,16)
#print(I(x))
#I = Indicator(a, b, eps=1.0)
## smoothed indicator function on [0,2]
#print (I(0), I(1), I(1.9))
#print(I(x))
|
994,758 | 403b7b2cb60222ff752c2637725ffc135249e4d9 | #
# One Convergence, Inc. CONFIDENTIAL
# Copyright (c) 2012-2014, One Convergence, Inc., USA
# All Rights Reserved.
#
# All information contained herein is, and remains the property of
# One Convergence, Inc. and its suppliers, if any. The intellectual and
# technical concepts contained herein are proprietary to One Convergence,
# Inc. and its suppliers.
#
# Dissemination of this information or reproduction of this material is
# strictly forbidden unless prior written permission is obtained from
# One Convergence, Inc., USA
#
"""
This is a core library for creating Openstack resources.
"""
import time
import json
# import sys
# sys.path.append("../../")
import atf.config.common_config as config
from atf.lib.request import OCRequest
import atf.lib.nvp_atf_logging as log
LOG_OBJ = log.get_atf_logger()
class OpenStackLibrary(OCRequest):
"""
This is a core library that provides APIs to use the services of Neutron,
Nova, Glance, Keystone. This library basically contains core functions
for CRUD operation and also some additional functions to have advanced
operation.
"""
cloud_admin_info = {"project_name": "",
"project_id": "",
"token_domain": "",
"token_project": ""}
    def __init__(self, os_pub_ip="127.0.0.1"):
        """
        os_pub_ip: Public IP of the OpenStack node.
        """
        OCRequest.__init__(self)
        self.host_ip = os_pub_ip
        # Per-instance info of the currently "active" project; to be filled
        # by create_tenant/create_project/set_tenant_info or from outside.
        self.project_info = {"project_name": "",
                             "project_id": "",
                             "token_domain": "",
                             "token_project": ""}
        # Update the shared cloud admin info only once (first instance).
        if not self.cloud_admin_info["project_name"]:
            self.set_cloud_admin_info()
    def set_cloud_admin_info(self, only_token=False):
        """Fill the class-level cloud_admin_info cache from config.

        Fetches domain- and project-scoped tokens (keystone v3) or a single
        v2 token used for both, plus the admin project id unless
        only_token=True (used to refresh expired tokens cheaply).
        Returns True on success, False on any failure.
        """
        try:
            self.cloud_admin_info["project_name"] = config.cloud_admin_project
            if config.keystone_api_version == 'v3':
                token_domain = self.get_keystone_v3_token(
                    config.cloud_admin_project,
                    config.cloud_admin_domain,
                    config.cloud_admin_user,
                    config.cloud_admin_passwd,
                    scope="domain"
                )
                token_project = self.get_keystone_v3_token(
                    config.cloud_admin_project,
                    config.cloud_admin_domain,
                    config.cloud_admin_user,
                    config.cloud_admin_passwd,
                    scope="project")
                self.cloud_admin_info["token_domain"] = token_domain
                self.cloud_admin_info["token_project"] = token_project
                if not only_token:
                    project_id = self.get_keystone_v3_project_id(
                        config.cloud_admin_project)
                    # Helpers return a unicode id on success, anything else
                    # (None/int status) on failure.
                    if not isinstance(project_id, unicode):
                        return False
                    self.cloud_admin_info["project_id"] = project_id
            else:
                token_project = self.get_token(
                    config.cloud_admin_project,
                    config.cloud_admin_user,
                    config.cloud_admin_passwd)
                # Keystone v2 has no domain scope; reuse the project token.
                token_domain = token_project
                self.cloud_admin_info["token_domain"] = token_domain
                self.cloud_admin_info["token_project"] = token_project
                if not only_token:
                    project_id = self.get_tenant_id(config.cloud_admin_project)
                    if not isinstance(project_id, unicode):
                        return False
                    self.cloud_admin_info["project_id"] = project_id
            return True
        except Exception as err:
            LOG_OBJ.exception(err)
            return False
def set_tenant_info(self, project_name, token_domain,
token_project, project_id=None):
"""
It sets the project info into the object and gives back the old project
info like project id and tokens (domain specific & project specific).
params:
project_name: project name of new project.
token_domain: domain specific token.
token_project: project specific token.
Return: Tuple containing the old project's info such as project id,
domain specific token & project specific token.
"""
old_project_info = (self.project_info["project_name"],
self.project_info["token_domain"],
self.project_info["token_project"],
self.project_info['project_id'])
# Set the new project info.
self.project_info['project_name'] = project_name
self.project_info["token_domain"] = token_domain
self.project_info["token_project"] = token_project
if not project_id:
if config.keystone_api_version == "v3":
project_id = self.get_keystone_v3_project_id(project_name)
else:
project_id = self.get_tenant_id(project_name)
self.project_info["project_id"] = project_id
LOG_OBJ.debug("Successfully set the project info for project: %s" %
project_name)
return old_project_info
    def create_tenant_user_wrapper(self, tenant_name, user_name, password,
                                   domain_name=None, roles=[]):
        """Create a tenant/project plus user via the configured keystone API.

        Dispatches to the v3 or v2 helper depending on
        config.keystone_api_version.
        :param string tenant_name: tenant (or project) name.
        :param string user_name: user name
        :param string password: tenant password.
        :param string domain_name: domain name. Required in case
            keystone v3 (optional; defaults to config value)
        :param list roles: user roles. (optional)
            NOTE(review): mutable default argument -- safe only because the
            list is never mutated here; confirm before refactoring.
        :Returns: On success returns project id (unicode).
            On Failure returns string containing error message.
        """
        try:
            project_info = {"project_name": tenant_name,
                            "user_name": user_name,
                            "password": password,
                            "roles": roles}
            if config.keystone_api_version == 'v3':
                # check if domain is created or not.
                if not domain_name:
                    domain_name = config.keystonev3_domain_name
                domain_id = self.get_keystone_v3_domain_id(domain_name)
                # Helpers return unicode on success, anything else on error.
                if not isinstance(domain_id, unicode):
                    err_msg = ("Get domain id failed with reason"
                               " %s" % domain_id)
                    LOG_OBJ.error(err_msg)
                    return err_msg
                # Create project and users
                domain_role = config.domain_member_role_name
                project_id = self.create_keystone_v3_project_user(
                    domain_name, domain_role, project_info)
                if not isinstance(project_id, unicode):
                    err_msg = "Failed to create project using keystone v3 api."
                    LOG_OBJ.error(err_msg)
                    return err_msg
                return project_id
            else:
                # v2 path: fall back to the default roles from config when
                # the caller supplied none.
                if not project_info["roles"]:
                    project_info["roles"] = config.\
                        remote_project_info[0]["roles"]
                tenant_id = self.create_tenant(project_info)
                if not tenant_id:
                    err_msg = "Failed to create tenant using keystone v2 api."
                    LOG_OBJ.error(err_msg)
                    return err_msg
                return tenant_id
        except Exception as err:
            LOG_OBJ.exception(err)
            return "Some problem occurred while creating "\
                "keystone resources."
def get_image_id(self, image_name):
"""
Get the image ID based on the image name.
param: image_name: Name of the image.
Return: ID (Unicode) of the image, on success.
"""
_url = "http://" + self.host_ip + ":8774/v2/" +\
self.cloud_admin_info["project_id"] + "/images/detail"
_headers = {'Content-type': 'application/json',
'x-auth-token': self.cloud_admin_info["token_project"]}
_body = None
_result = self.request("GET", _url, _headers, _body)
if _result is None:
LOG_OBJ.error("No response from server while getting images.")
return
if _result.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Get image ID Failed with status %s " %
_result.status)
return _result.status
_output = json.loads(_result.data)
for _images in _output['images']:
if _images['name'].lower() == image_name.lower():
LOG_OBJ.info("Image Name: %s, Image ID : %s " %
(image_name, _images['id']))
return _images['id']
LOG_OBJ.error("The image: %s is NOT found" % image_name)
def get_flavor_id(self, flavor_name):
"""
Gets the image flavor.
param: Get the images's flavor ID.
Return: ID (Unicode) of the flavor, on success.
"""
_url = "http://" + self.host_ip + ":8774/v2/" +\
self.cloud_admin_info["project_id"] + \
"/flavors/detail"
_headers = {'x-auth-token': self.cloud_admin_info["token_project"]}
_body = None
response = self.request("GET", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from server while getting flavors.")
return
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Get flavor ID Failed with status %s " %
response.status)
return response.status
output = json.loads(response.data)
for flavors in output['flavors']:
if flavors['name'].lower() == flavor_name.lower():
LOG_OBJ.debug("Flavor Name: %s, ID: %s" % (flavor_name,
flavors['id']))
return flavors['id']
LOG_OBJ.error("Flavor:%s is NOT found" % flavor_name)
    def create_server(self, image_name, flavor_name, net_name,
                      server_name, **kwargs):
        """
        It launches the vm.
        NOTE:
        It allows to create the vm
            (a) by passing net_name,
            (b) by passing net_id,
            (c) in multiple networks [by passing network ids],
            (d) using port(s) in network(s).
                [by passing port id(s) and network id(s)]
        params:
            image_name: Name of the image using which the vm will be booted up.
            flavor_name: Name of the flavor.
            net_name: Network name on which the vm will be launched.
            server_name: Name of the server.
        Optional params:
            host_name: Name of the compute host.
            port_ids: (list) Ids of the port using which the vm will
                    be launched.
            poll_on_status: Whether to wait on the Active/Error status of vm.
                    Default is True
            net_ids: (List) Id(s) of network(s)
            return_details: Whether to return the details of the server
                    or simply send it's ID. Default is False
        Note: When net_id is passed, we can give any dummy name or empty string
            for netName.
            This is made as generic to work with the existing ATF as well as
            new ATF which requires to create the vm with multiple interfaces.
        returns:
            server id unicode (if return_details=False)
            dict containing the details of the server if return_details=True
            None / HTTP status code / poll-error string on failure.
        """
        LOG_OBJ.debug("Launching server...")
        net_ids = kwargs.get("net_ids", [])
        if not net_ids:
            # Fall back to resolving the single network by its name.
            net_id = self.get_net_id(net_name)
            if not isinstance(net_id, unicode):
                LOG_OBJ.error("Problem while getting net_id corresponding"
                              " to net:%s" % net_name)
                return
            net_ids.append(net_id)
        if not isinstance(net_ids, list):
            net_ids = [net_ids]
        LOG_OBJ.debug("Net Name: %s or NetID: %s" % (net_name, net_ids))
        host = kwargs.get('host_name', "")
        if host != "":
            # Force the scheduler onto a specific compute host.
            host = "nova:" + host
        port_ids = kwargs.get('port_ids', [])
        if not port_ids:
            # No ports given: create one port per network for this server.
            for net_id in net_ids:
                port_name = server_name + "_" + str(net_id)[:5] + "_port"
                port_id = self.create_port(net_name, port_name,
                                           net_id=net_id)
                LOG_OBJ.debug("portId is %s" % port_id)
                if not isinstance(port_id, unicode):
                    return
                port_ids.append(port_id)
        if not isinstance(port_ids, list):
            port_ids = [port_ids]
        # Pair each port with its network for the boot request.
        boot_nic = []
        for port_id, net_id in zip(port_ids, net_ids):
            boot_nic.append({"uuid": net_id, "port": port_id})
        _url = "http://" + self.host_ip + ":8774/v2/" + \
            self.project_info["project_id"] + "/servers"
        _headers = {'x-auth-token': self.project_info["token_project"],
                    'content-type': 'application/json'}
        # Get the image id.
        image_id = self.get_image_id(image_name)
        if not isinstance(image_id, unicode):
            LOG_OBJ.error("Problem while getting image_id corresponding"
                          " to imageName:%s" % image_name)
            return
        # Get the flavor id
        flavor_id = self.get_flavor_id(flavor_name)
        if not isinstance(flavor_id, unicode):
            LOG_OBJ.error("Problem while getting flavor_id corresponding"
                          " to flavorName:%s" % flavor_name)
            return
        _server_info = {"server": {
            "name": server_name,
            "imageRef": image_id,
            "flavorRef": flavor_id,
            "max_count": 1,
            # "availability_zone": host,
            "min_count": 1,
            "networks": boot_nic
        }}
        if host:
            _server_info['server']['availability_zone'] = host
        _body = json.dumps(_server_info)
        response = self.request("POST", _url, _headers, _body)
        if response is None:
            LOG_OBJ.error(
                "Unable to get the response from server while creating VM")
            return
        if response.status not in [200, 201, 202, 203, 204]:
            LOG_OBJ.error("Create Server Failed with status %s " %
                          response.status)
            return response.status
        output = json.loads(response.data)
        LOG_OBJ.info("Server details : %s " % output)
        server_id = output['server']['id']
        LOG_OBJ.debug("Server Details: %s" % output['server'])
        # Default is poll on the server status.
        if kwargs.get('poll_on_status', True):
            out = self.poll_on_server_boot_up(server_id)
            LOG_OBJ.info("-> Out: %s, type= %s" % (out, type(out)))
            # Non-unicode means an error string from the poll helper.
            if not isinstance(out, unicode):
                return out
        # Default is "do not return the details"
        if kwargs.get('return_details', False):
            return output['server']
        return server_id
def list_servers(self, all_tenants=False):
"""
This lists the server in a tenant.
params:
-
Optional params:
all_tenants: To enable searching the vm in all the tenants.
Return:
Dict containing the list of the servers, on success.
"""
_url = "http://" + self.host_ip + ":8774/v2/" + \
self.project_info["project_id"] + "/servers/detail"
if all_tenants:
_url = "http://" + self.host_ip + ":8774/v2/" + self.project_info[
"project_id"] + "/servers/detail?all_tenants=1"
_headers = {'x-auth-token': self.project_info["token_project"],
'content-type': 'application/json'}
_body = None
response = self.request("GET", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from server while listing servers.")
return
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("List servers Failed with status %s " %
response.status)
return response.status
output = json.loads(response.data)
LOG_OBJ.info("Servers List :%s " % output)
return output["servers"]
def show_server(self, server_id):
"""
It gives the details of the server.
params:
server_id: ID of the server.
Return:
Dict containing the details of the server, on success.
"""
_url = "http://" + self.host_ip + ":8774/v2/" + \
self.project_info["project_id"] + "/servers/" + server_id
_headers = {'Content-type': 'application/json',
'x-auth-token': self.project_info["token_project"]
}
_body = None
response = self.request("GET", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from server while showing the vms")
return
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Show server failed with status %s " %
response.status)
return response.status
output = json.loads(response.data)
LOG_OBJ.info("Instance Detail : %s " % output)
return output["server"]
def delete_server(self, server_id):
"""
It Deletes server.
Arguments:
server_id: uuid of the server
Returns: True on successful deletion of server.
"""
LOG_OBJ.info("Deleting server : %s" % server_id)
_url = "http://" + self.host_ip + ":8774/v2/" + \
self.project_info["project_id"] + "/servers/" + server_id
_headers = {'Content-type': 'application/json',
'x-auth-token': self.project_info["token_project"]}
_body = None
response = self.request("DELETE", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from server while deleting vm.")
return
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Get instance Failed with status %s " %
response.status)
return response.status
LOG_OBJ.info("Deleted server : %s " % server_id)
return True
def poll_for_active_status(self, server_id, req_status="ACTIVE"):
"""
It polls on the Active/given optional status of the server.
Note:
Polling finishes when the server is Active/required state or goes
to error state.
params:
server_id: Id of the server.
optional params:
req_status: Status of server, need to be polled.
Return: Status (String)
"""
status = "BUILDING"
iteration = 30
while status.upper() != req_status.upper() \
or status.upper() != "ERROR":
server_info = self.show_server(server_id)
if not isinstance(server_info, dict):
return
status = server_info['status']
LOG_OBJ.debug("Server status : %s" % status)
if status.upper() in [req_status.upper(), 'ERROR']:
break
LOG_OBJ.debug("Waiting till server goes to %s state..."
% req_status)
time.sleep(20)
iteration -= 1
if not iteration:
err_msg = "The server:%s is NOT in %s state" \
"within 10 minutes" % (server_id, status)
LOG_OBJ.error(err_msg)
return "POLL_TIME_EXCEEDED"
LOG_OBJ.debug("Server becomes %s" % status)
return status
    def list_server_interfaces(self, server_id):
        """Returns server (instance) interfaces list.

        :param string server_id: instance uuid.
        :Returns: list of interface attachment dicts on success; None on
            no response or on a non-2xx reply (NOTE: unlike the sibling
            methods, this one does NOT return the HTTP status code).
        """
        _url = "http://" + self.host_ip + ":8774/v2/" + \
            self.project_info["project_id"] + "/servers/" +\
            server_id + "/os-interface"
        _headers = {'x-auth-token': self.project_info["token_project"],
                    'content-type': 'application/json'}
        _body = None
        response = self.request("GET", _url, _headers, _body)
        if response is None:
            LOG_OBJ.error("No response from server. Nova interface"
                          " list failed.")
            return
        if response.status not in [200, 201, 202, 203, 204]:
            LOG_OBJ.error("Nova interfaces list failed with status %s " %
                          response.status)
            return None
        output = json.loads(response.data)
        LOG_OBJ.debug("Nova Interfaces List: %s" % output)
        return output['interfaceAttachments']
def get_server_ip_mac(self, server_id):
"""
It gets the server's port info like IP and MAC.
Note: This corresponds to neutron port corresponding to the server.
server_id: ID of the server.
"""
port_list = self.list_port()
if not isinstance(port_list, list):
return
interface_list = []
for port in port_list:
if port["device_id"] == server_id:
port_info = {}
port_info['mac'] = port['mac_address']
port_info['ip_address'] = port['fixed_ips'][0]['ip_address']
interface_list.append(port_info)
LOG_OBJ.info("VM Interface Info : %s " % interface_list)
return interface_list
def get_server_ip(self, server_id):
"""
It gets the server ip based on the server ID.
params:
server_id: ID of the server.
Return:
Server's IP(s) (list), on success.
"""
interface_list = self.get_server_ip_mac(server_id)
if not isinstance(interface_list, list):
return
LOG_OBJ.debug("interface_list:%s" % interface_list)
ip_addresses = []
for interface in interface_list:
ip_addresses.append(interface['ip_address'])
LOG_OBJ.debug("ip_addresses for server %s is %s" % (server_id,
ip_addresses))
return ip_addresses
def get_server_details_by_name(self, server_name, all_tenants=False,
server_alternate_names=None):
"""
This returns the server details based on the name of the server.
params:
server_name: Name of the server.
tenant_id: Tenant ID
Optional params:
all_tenants: To enable searching the vm in all the tenants.
server_alternate_names: A list of alternate names.
Return:
Dict containing the details of the server, on success.
"""
servers = self.list_servers(all_tenants)
if not isinstance(servers, list):
return
if not server_alternate_names:
server_alternate_names = [server_name]
else:
server_alternate_names.append(server_name)
for server in servers:
if server['name'] in server_alternate_names:
LOG_OBJ.debug("Server details: %s" % server)
return server
LOG_OBJ.error("There is NO server with name: %s in tenant: %s" %
(server_name, self.project_info["project_id"]))
    def get_server_console_log(self, server_id, length=1):
        """
        It returns the console log of the server. The length tells how many
        lines we want to TAIL the console log.
        params:
            server_id: ID of the server.
            length: Length of the log that to be tailed.
        Return:
            String (unicode) with the tailed log on success; None on no
            response; the HTTP status code on a non-2xx reply.
        """
        _url = "http://" + self.host_ip + ":8774/v2/" + \
            self.project_info["project_id"] + "/servers/" + \
            server_id + "/action"
        _headers = {'Content-type': 'application/json',
                    'x-auth-token': self.project_info["token_project"]
                    }
        # nova "os-getConsoleOutput" action tails the console log.
        console_output = {'os-getConsoleOutput': {'length': length}}
        _body = json.dumps(console_output)
        response = self.request("POST", _url, _headers, _body)
        if response is None:
            LOG_OBJ.error("No response from server while"
                          " getting the console log of the server.")
            return
        if response.status not in [200, 201, 202, 203, 204]:
            LOG_OBJ.error("Error while getting the console log of the "
                          "server: %s. Response status= %s" %
                          (server_id, response.status))
            return response.status
        output = json.loads(response.data)
        LOG_OBJ.info("Server's console log tailed with length: %d is %s"
                     % (length, output['output']))
        return output['output']
    def poll_on_server_boot_up(self, server_id, server_ip="",
                               monitor_duration_s=600):
        """
        It polls on the server to check whether it booted up completely or not.
        Using this we can also know whether the vm got the ip_addr or not.
        Boot completion is detected by scanning the console log for a
        "login" prompt with either a "localhost" or "host-<ip>" hostname.
        Arguments:
            server_id: The server ID
        Optional params:
            server_ip: the ip_addr of the vm [Optional]
            monitor_duration_s: how long it polls on the server to boot up.
        Return: On Success.
            IP in Unicode form: On successful boot up and the
                vm gets the ip_addr
        On Failure:
            String: message containing the respective reason.
        """
        host = "host-"
        try:
            vm_status = self.poll_for_active_status(server_id)
            if not vm_status:
                err_msg = "Error while doing show server: %s" % str(server_id)
                LOG_OBJ.error(err_msg)
                return err_msg
            if vm_status.lower() == "error":
                err_msg = "VM: %s LAUNCHED WITH ERROR STATE" % str(server_id)
                LOG_OBJ.error(err_msg)
                return err_msg
            start_time = time.time()
            print "Poll on the server started at: %s" % time.ctime()
            LOG_OBJ.info("Poll on the server started at: %s" %
                         time.ctime())
            if server_ip != "":
                # Cloud images name the host "host-a-b-c-d" after the IP.
                host += server_ip.replace(".", "-")
            while True:
                # Get the server's console output.
                console_output = self.get_server_console_log(server_id)
                if not isinstance(console_output, unicode):
                    LOG_OBJ.error("Problem while getting vm console.")
                    return "Problem while getting vm console."
                LOG_OBJ.info("Output of the console log: %s" % console_output)
                if ("localhost" in console_output or
                        host in console_output) and "login" in console_output:
                    print "The Server; %s booted up successfully." % server_id
                    LOG_OBJ.info("The Server; %s booted up successfully."
                                 % server_id)
                    if "localhost" in console_output:
                        # "localhost" hostname means no IP was assigned.
                        # msg = "The server %s could not get the ip address" \
                        #    % str(server_id)
                        # print 70 * "*" + "\n" + msg + "\n" + 70 * "*"
                        # LOG_OBJ.info(
                        #    70 * "*" + "\n" + msg + "\n" + 70 * "*")
                        # TODO: Made local fix for CISCO
                        # return msg
                        return unicode("dummy ip")
                    else:
                        ip_addr = None
                        try:
                            # Recover the IP from the "host-a-b-c-d" token.
                            ip_addr = console_output.split()[0][5:].replace(
                                "-", ".")
                            msg = "The server: %s got the ip_addr: %s" % \
                                (str(server_id), str(ip_addr))
                            print msg
                            LOG_OBJ.info(msg)
                        except Exception as err:
                            LOG_OBJ.exception(err)
                            return "problem while getting ip from vm console."
                        return unicode(ip_addr)
                print "Waiting for 20 secs for the server to come up .."
                LOG_OBJ.info("Waiting for 20 secs for the server to come up..")
                time.sleep(20)
                now_time = time.time()
                if (now_time - start_time) > monitor_duration_s:
                    msg = "The server couldn't boot up within %s seconds." % \
                        monitor_duration_s
                    print msg
                    LOG_OBJ.info(msg)
                    return msg
        except Exception as err:
            LOG_OBJ.exception(err)
            return "Problem while polling on the server to boot up"
def list_security_groups(self):
""" List security groups. """
_url = "http://" + self.host_ip + ":8774/v2/" + \
self.project_info["project_id"] + "/os-security-groups"
_headers = {'x-auth-token': self.project_info["token_project"]}
_body = None
# parent_group_id = None
response = self.request("GET", _url, _headers, _body)
if response is None:
LOG_OBJ.error(
"No response from Server while getting security"
" groups for tenant: %s" %
self.project_info["project_id"])
return
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Get Group Info Failed with status %s " %
response.status)
return
output = json.loads(response.data)
return output["security_groups"]
    def create_security_group_rule(self, sg_id, protocol='', cidr='0.0.0.0/0',
                                   from_port='', to_port='',
                                   direction="ingress"):
        """Adds Security Group Rule To Security Group (via neutron).

        :param string sg_id: Security Group uuid.
        :param string protocol: protocol name (tcp/udp/...); omitted from
            the request when empty.
        :param string cidr: remote IP prefix the rule applies to.
        :param from_port: lower bound of the port range (used only when
            both from_port and to_port are given).
        :param to_port: upper bound of the port range.
        :param string direction: "ingress" or "egress".
        :Returns: True on success; None on no response or non-2xx reply.
        """
        _url = "http://" + self.host_ip + ":9696/v2.0/security"\
            "-group-rules.json"
        _headers = {'Content-type': 'application/json',
                    'x-auth-token': self.project_info["token_project"]}
        _tenant_sec_data = {"security_group_rule":
                            {"security_group_id": sg_id,
                             "remote_ip_prefix": cidr,
                             "direction": direction
                             }
                            }
        if protocol:
            _tenant_sec_data["security_group_rule"]['protocol'] = protocol
        if from_port and to_port:
            _tenant_sec_data["security_group_rule"][
                "port_range_min"] = from_port
            _tenant_sec_data["security_group_rule"]["port_range_max"] = to_port
        _body = json.dumps(_tenant_sec_data)
        response = self.request("POST", _url, _headers, _body)
        if response is None:
            LOG_OBJ.error("No response from Server while creating"
                          "security groups for tenant: %s"
                          % self.project_info["project_id"])
            return
        if response.status not in [200, 201, 202, 203, 204]:
            LOG_OBJ.error("Adding Security Group Rule failed"
                          " with status %s " % response.status)
            return
        LOG_OBJ.debug("Created Security Group Rule.")
        return True
def add_security_group_rules(self, protocol, cidr='0.0.0.0/0',
from_port='', to_port=''):
"""
It adds the security group rule(s) in the default security group
created for the tenant.
params:
from_port: Ingress port
to_port: Egress Port
protocol: Name of the protocol. Ex: tcp/udp/ ...
cidr: Subnet CIDR.
Reurn:
True, on success.
"""
sg_list = self.list_security_groups()
if not sg_list:
return "Failed to list security groups."
parent_group_id = None
for rules in sg_list:
if rules['name'] == "default":
parent_group_id = rules['id']
status = self.create_security_group_rule(parent_group_id, protocol,
cidr, from_port, to_port)
if not status:
return
LOG_OBJ.debug("Security group rules added to Default group.")
return True
def get_token(self, tenant_name, user_name, password):
"""
It gets the token of the tenant..
params:
tenant_name: Name of the tenant.
user_name: Name of the user.
password: Password of the user.
Return: Token (Unicode), on success.
"""
_url = "http://" + self.host_ip + ":5000/v2.0/tokens"
_headers = {"content-type": "application/json"}
_token_info = {"auth": {"tenantName": tenant_name,
"passwordCredentials":
{"username": user_name,
"password": password}}
}
_body = json.dumps(_token_info)
response = self.request("POST", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while getting token for"
" tenant: %s" % tenant_name)
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Request of token for %s tenant Failed with"
" status %s " % (tenant_name, response.status))
return response.status
output = json.loads(response.data)
token_id = output['access']['token']['id']
LOG_OBJ.debug("Token ID for tenant %s is %s" % (tenant_name, token_id))
return token_id
def get_tenant_id(self, tenant_name):
"""
It returns the tenant id of a tenant.
params: tenant_name: Name of the tenant.
Return:
tenant_id (unicode) on success.
"""
_url = "http://" + self.host_ip + ":35357/v2.0/tenants"
_headers = {'x-auth-token': self.cloud_admin_info['token_project']}
_body = None
response = self.request("GET", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while getting tenants")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Tenant list Failed with status %s " %
response.status)
return response.status
output = json.loads(response.data)
for tenant in output['tenants']:
if tenant['name'] == tenant_name:
LOG_OBJ.debug("Tenant Details : %s " % tenant)
return tenant['id']
LOG_OBJ.error("There is NO tenant with name: %s" % tenant_name)
return None
def get_user_id(self, tenant_id, user_name):
"""
This function is to get user id when user name is provided.
Arguments:
tenant_id: id of the tenant.
userName: name of the tenant user.
Return:
user id (unicode)
"""
_url = "http://" + self.host_ip + ":35357/v2.0/users"
_body = None
_headers = {'Content-type': 'application/json',
'x-auth-token': self.cloud_admin_info['token_project']}
response = self.request("GET", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while getting user.")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Get userID for %s tenant Failed with status %s " %
(self.tenant_name, response.status))
return response.status
output = json.loads(response.data)
_user_id = None
LOG_OBJ.debug("User list: %s" % output)
LOG_OBJ.debug("tenant ID: %s" % tenant_id)
for value in output['users']:
if value is not None and "tenantId" in value.keys():
if value['tenantId'] == tenant_id and value[
'name'].lower() == user_name.lower():
_user_id = value['id']
return _user_id
LOG_OBJ.error("User with name '%s' Not Found" % user_name)
return _user_id
def get_role_id(self, role_name):
"""
It gets the role id corresponding to a user role.
params:
role_name: Role name of the user.
Return:
Role ID (unicode), on success.
"""
_url = "http://" + self.host_ip + ":35357/v2.0/OS-KSADM/roles"
_body = None
_headers = {'Content-type': 'application/json',
'x-auth-token': self.cloud_admin_info['token_project']}
response = self.request("GET", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while getting roles.")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Get role id for %s Failed with status %s " %
response.status)
return response.status
output = json.loads(response.data)
for value in output['roles']:
if value['name'].lower() == role_name.lower():
LOG_OBJ.debug("The role id for role: %s is %s" % (role_name,
value['id']))
return value['id']
LOG_OBJ.error("There is NO Role with name: %s" % role_name)
return None
    def create_user(self, _user_data):
        """
        It creates the user in a tenant.
        params:
            _user_data: The dict contains user info.
                {"user": {"email":,
                          "password":
                          "enabled": True,
                          "name":,
                          "tenantId": }}
        Return:
            User ID (Unicode) on success; None on no response; the HTTP
            status code on a non-2xx reply.
        """
        _url = "http://" + self.host_ip + ":35357/v2.0/users"
        _headers = {'Content-type': 'application/json',
                    'x-auth-token': self.cloud_admin_info['token_project']}
        _body = json.dumps(_user_data)
        response = self.request("POST", _url, _headers, _body)
        if response is None:
            LOG_OBJ.error("No response from Server while creating user: %s" %
                          _user_data['user']['name'])
            return response
        if response.status not in [200, 201, 202, 203, 204]:
            LOG_OBJ.error("Create user Failed with status %s " %
                          response.status)
            return response.status
        output = json.loads(response.data)
        LOG_OBJ.info("User created successfully. Details:%s" % output)
        return output['user']['id']
def list_users(self):
"""
Returns list of tenants info.
"""
_url = "http://" + self.host_ip + ":35357/v2.0/users"
_body = None
_headers = {'Content-type': 'application/json',
'x-auth-token': self.cloud_admin_info['token_project']}
response = self.request("GET", _url, _headers, _body)
if response is None:
LOG_OBJ.error(" no response from Server")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error(
"get user list Failed with status %s " %
response.status)
return response.status
output = json.loads(response.data)
LOG_OBJ.info("users List : %s")
return output["users"]
    def delete_user(self, tenant_id, user_name):
        """Delete a tenant user, resolving the name to an id first.

        Arguments:
            tenant_id: tenant id
            user_name: user name
        Return: True on successful deletion; None when the user id lookup
            fails or the server does not respond; the HTTP status code on
            a non-2xx reply.
        """
        # get user id
        _user_id = self.get_user_id(tenant_id, user_name)
        if not isinstance(_user_id, unicode):
            return None
        _url = "http://" + self.host_ip + ":35357/v2.0/users/" + str(_user_id)
        _headers = {'x-auth-token': self.cloud_admin_info['token_project']}
        _body = None
        LOG_OBJ.debug("Deleting user %s" % _user_id)
        response = self.request("DELETE", _url, _headers, _body)
        if response is None:
            LOG_OBJ.error("No response from Server")
            return response
        if response.status not in [200, 201, 202, 203, 204]:
            LOG_OBJ.error(
                "Failed to delete user with status %s " %
                response.status)
            return response.status
        LOG_OBJ.debug("Deleted User %s successfully" % _user_id)
        return True
def add_user_role(self, tenant_id, user_id, role_id):
"""
It adds the user role to the user.
params:
tenant_id: Id of the tenant
user_id: Id of the user.
role_id: Id of the role.
Return:
True, on success.
"""
_url = "http://" + self.host_ip + ":35357/v2.0/tenants/" + \
tenant_id + "/users/" + user_id + "/roles/OS-KSADM/" + role_id
_headers = {'x-auth-token': self.cloud_admin_info['token_project']}
_body = None
response = self.request("PUT", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while adding role")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Add user role Failed with status %s " %
response.status)
return response.status
LOG_OBJ.info("Role: %s is added to user:%s successfully."
% (role_id, user_id))
return True
def set_quota(self, tenant_id):
"""
It sets the tenant quota like cores, floating_ips, instances, ram
param:
tenant_id: Name of the tenant whose quota has to be modified.
Return:
True, on success.
"""
# Get the admin tenant's id.
_url = "http://" + self.host_ip + ":8774/v2/" + \
self.cloud_admin_info['project_id'] + "/os-quota-sets/" + tenant_id
_headers = {'Content-type': 'application/json',
'x-auth-token': self.cloud_admin_info['token_project']}
_body = {"quota_set": {
"cores": 80,
"floating_ips": 40,
"instances": 100,
"ram": 512000}}
response = self.request("PUT", _url, _headers, json.dumps(_body))
if response is None:
LOG_OBJ.error("No response from server while setting the quota"
" for tenant: %s" % tenant_id)
return
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Modifying quota Failed with status %s " %
response.status)
return response.status
output = json.loads(response.data)
LOG_OBJ.info("Tenant Quota Modified. Details : %s " % output)
return True
def quota_update(self, tenant_id, fields):
"""
It updates the tenant quota.
params:
fields: Dict which contains quota fields(key) and values(value)
eg: {networks: 100, subnet: 100, ports: 50, ...}
Return: Dict containing the quota of tenant, on success.
"""
_url = "http://" + self.host_ip + ":9696/v2.0/quotas/" + \
tenant_id + ".json"
_headers = {'Content-type': 'application/json',
'x-auth-token': self.cloud_admin_info['token_project']}
_body = {"quota": fields}
response = self.request("PUT", _url, _headers, json.dumps(_body))
if response is None:
LOG_OBJ.error("No response from server while updating the quota"
" for tenant: %s" % tenant_id)
return
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Updating quota Failed with status %s "
% response.status)
return response.status
output = json.loads(response.data)
LOG_OBJ.info("Tenant Quota Details : %s " % output)
return output
    def create_tenant(self, tenant_info):
        """
        It creates the tenant whose basic info is given in tenant_info,
        creates (or reuses) a user in it, grants the requested roles,
        obtains a token for the new user and switches the helper's
        context (via set_tenant_info) to the new tenant.

        params: tenant_info: A dict
                {project_name: Name of the tenant.
                 user_name: User name in the tenant.[New user to be created]
                           Defaults to "<project_name>_user".
                 user_id: ID of the user (Optional). Required, only if an
                         existing user needs to be added in the tenant.
                 password: Password of the user.
                           Defaults to "<project_name>_pass".
                 roles: role name list (list)
                }
                On success tenant_info['userID'] is set to the user's id.
        Return: Tenant ID (Unicode), on success. None on any failure
                (or an HTTP status code when the tenant-create call fails).
        """
        LOG_OBJ.debug("Creating Tenant:%s" % tenant_info['project_name'])
        _tenant_name = tenant_info['project_name']
        # Fall back to conventional names when caller did not supply them.
        _user_name = tenant_info.get('user_name', _tenant_name + "_user")
        _password = tenant_info.get('password', _tenant_name + "_pass")
        _url = "http://" + self.host_ip + ":35357/v2.0/tenants"
        _headers = {'Content-type': 'application/json',
                    'x-auth-token': self.cloud_admin_info['token_project']}
        _tenant_data = {"tenant": {"enabled": True, "name": _tenant_name,
                                   "description": "Testing API 3"}}
        _body = json.dumps(_tenant_data)
        response = self.request("POST", _url, _headers, _body)
        if response is None:
            LOG_OBJ.error("No response from Server while creating tenant: %s"
                          % _tenant_name)
            return response
        if response.status not in [200, 201, 202, 203, 204]:
            LOG_OBJ.error("Create tenant Failed with status %s " %
                          response.status)
            return response.status
        output = json.loads(response.data)
        LOG_OBJ.info("Created tenant: %s successfully." % _tenant_name)
        _tenant_id = output['tenant']['id']
        # If user id is passed then, directly add that user to the tenant.
        # otherwise Create a new user.
        _user_id = tenant_info.get('user_id', None)
        if not _user_id:
            _user_data = {"user": {"email": None,
                                   "password": _password,
                                   "enabled": True,
                                   "name": _user_name,
                                   "tenantId": _tenant_id}}
            _user_id = self.create_user(_user_data)
            # create_user returns a unicode id on success; anything else
            # (None or an int status) means failure.
            if not isinstance(_user_id, unicode):
                return
        tenant_info['userID'] = _user_id
        # Add the user roles.
        for role_name in tenant_info['roles']:
            role_id = self.get_role_id(role_name)
            if not isinstance(role_id, unicode):
                return
            # Add user role.
            if not self.add_user_role(_tenant_id, _user_id, role_id):
                return
        # Get the token.
        token_id = self.get_token(_tenant_name, _user_name, _password)
        if not isinstance(token_id, unicode):
            return
        # Set the new context. note: This is v2 token, so only project scope.
        self.set_tenant_info(_tenant_name, token_id, token_id, _tenant_id)
        # Adding Security Group Rules
        # Add the ICMP rule.
        # if not isinstance(self.add_security_group_rules("icmp"), bool):
        #    return
        # Add the rule for ssh
        # if not isinstance(self.add_security_group_rules(
        #                "tcp", from_port='22', to_port='22'), bool):
        #    return
        # Add the rule for all udp
        # if not isinstance(self.add_security_group_rules(
        #                "udp", from_port='1', to_port='65535'), bool):
        #    return
        # Modify the tenant quota.
        # if not isinstance(self.set_quota(_tenant_id), bool):
        #    return
        # Update the quota
        # fields = {"network": 50, "subnet": 50, "port": 100, "floatingip": 50}
        # quotas = self.quota_update(_tenant_id, fields)
        # if not isinstance(quotas, dict):
        #    return
        # LOG_OBJ.info("Quota for tenant[%s] is:%s" % (_tenant_id,
        #                                             str(quotas)))
        return _tenant_id
def list_tenants(self):
"""
It will return list tenants info.
"""
_url = "http://" + self.host_ip + ":35357/v2.0/tenants"
_headers = {'x-auth-token': self.cloud_admin_info['token_project']}
_body = None
response = self.request("GET", _url, _headers, _body)
if response is None:
LOG_OBJ.error(" no response from Server")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error(
" tenant list Failed with status %s " %
response.status)
return response.status
output = json.loads(response.data)
LOG_OBJ.info("Tenant List : %s " % output)
return output["tenants"]
def delete_tenant(self, tenant_id): # not modified
"""
Arguments:
tenant_id: id of the tenant to be deleted.
Return: On successful deletion of tenant returns True.
"""
LOG_OBJ.debug("Deleting Tenant %s" % tenant_id)
_url = "http://" + self.host_ip + ":35357/v2.0/tenants/" + tenant_id
_headers = {'x-auth-token': self.cloud_admin_info['token_project']}
_body = None
response = self.request("DELETE", _url, _headers, _body)
if response is None:
LOG_OBJ.error(" no response from Server")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error(
"get tenant delete Failed with status %s " %
response.status)
return response.status
LOG_OBJ.debug("Deleted tenant %s successfully." % tenant_id)
return True
def get_net_id(self, net_name):
"""
It gets the network ID.
params:
net_name: Name of the network.
Return: network ID (Unicode), on success.
"""
_url = "http://" + self.host_ip + ":9696/v2.0/networks"
_headers = {'x-auth-token': self.project_info["token_project"]}
_body = None
result = self.request("GET", _url, _headers, _body)
if result is None:
LOG_OBJ.error(
"No response from Server while trying to"
" get networks of tenant: %s" %
self.project_info["project_id"])
return result
if result.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Get network Failed with status %s " % result.status)
return result.status
output = json.loads(result.data)
LOG_OBJ.debug("Networks: %s" % output['networks'])
for nets in output['networks']:
if nets['name'].lower() == net_name.lower() and \
net_name == config.extnet_name:
LOG_OBJ.debug("Net ID : %s " % nets['id'])
return nets['id']
if nets['name'].lower() == net_name.lower() and \
nets['tenant_id'] == self.project_info["project_id"]:
LOG_OBJ.debug("Net ID : %s " % nets['id'])
return nets['id']
LOG_OBJ.debug("Net:%s Not Found" % net_name)
return
def create_net(self, net_name, shared="false"):
"""
It creates the net.
params:
net_name: Name of the network.
shared: Whether the net is shared or not.
Return:
Net ID (Unicode), on success.
"""
_url = "http://" + self.host_ip + ":9696/v2.0/networks.json"
_headers = {'Content-type': 'application/json',
'x-auth-token': self.project_info["token_project"]}
_net_info = {"network":
{"name": net_name,
"shared": shared,
"admin_state_up": True}}
_body = json.dumps(_net_info)
response = self.request("POST", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while creating network.")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Creation of network Failed with status %s " %
response.status)
return response.status
output = json.loads(response.data)
LOG_OBJ.info("Network is created successfully. Details : %s " %
output['network'])
return output['network']['id']
def delete_net(self, net_id):
"""
It deletes the network specified by network id.
param:
net_id: Netwrok ID.
Return:
True on success.
"""
LOG_OBJ.debug("Deleting network %s" % net_id)
_url = "http://" + self.host_ip + ":9696/v2.0/networks/" + \
net_id + ".json"
_headers = {'Content-type': 'application/json',
'x-auth-token': self.project_info["token_project"]}
_body = None
response = self.request("DELETE", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while deleting net:%s" %
net_id)
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Deletion of Network Failed with status %s " %
response.status)
return response.status
LOG_OBJ.info("Deleted the network : %s " % net_id)
return True
def list_net(self):
"""
It lists the network under a tenant.
Return:
A List of networks.
"""
_url = "http://" + self.host_ip + ":9696/v2.0/networks"
_headers = {'Content-type': 'application/json',
'x-auth-token': self.project_info["token_project"]}
_body = None
response = self.request("GET", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while listing the networks")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Get network list Failed with status %s " %
response.status)
return response.status
output = json.loads(response.data)
LOG_OBJ.info("Network List : %s " % output)
return output['networks']
def get_net_details(self, net_name="dummy_net", net_id=None):
"""
It gets the network details either by network name or id.
params:
net_name: Name of the network.
Optional params:
net_id: ID of the network.
Note: When net id is given net_name can be anything.
Return:
Dict containing the network details, on success.
"""
_url = "http://" + self.host_ip + ":9696/v2.0/networks"
_headers = {'x-auth-token': self.project_info["token_project"]}
_body = None
result = self.request("GET", _url, _headers, _body)
if result is None:
LOG_OBJ.error("No response from Server while listing the nets")
return result.status
if result.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Get network Failed with status %s " % result.status)
return result.status
output = json.loads(result.data)
for nets in output['networks']:
if (net_id is not None and (nets['id'] == net_id)) or \
nets['name'].lower() == net_name.lower():
LOG_OBJ.debug("Net details : %s " % nets)
return nets
LOG_OBJ.debug("Network with name:%s or with ID:%s is Not Found" %
(net_name, net_id))
def create_external_network(self, extnet_info, ignore_privious=False):
"""
It creates the external network.
params:
1. extnet_info: Info of ext net.
{extnet_name: Name of the external network.
gateway: Gateway IP
cidr: External network's subnet cidr.
start_ip: Starting IP to be used in the subnet.
end_ip: End IP to be used in the subnet.
}
2. ignore_privious (True/False). default false.
Return:
Ext Network ID (Unicode), on success.
"""
LOG_OBJ.debug("Creating External Network : ")
_tenant_name = config.cloud_admin_project
_net_name = extnet_info['extnet_name']
_gateway = extnet_info['gateway']
_cidr = extnet_info['cidr']
_start_ip = extnet_info['start_ip']
_end_ip = extnet_info['end_ip']
if not ignore_privious:
_url = "http://" + self.host_ip + ":9696/v2.0/networks"
_headers = {'x-auth-token': self.cloud_admin_info["token_project"]}
_body = None
response = self.request("GET", _url, _headers, _body)
output = json.loads(response.data)
if output is None:
LOG_OBJ.error("No response from server while getting"
" networks.")
return
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Getting networks list Failed with status %s " %
response.status)
return response.status
for nets in output['networks']:
if nets['router:external']:
LOG_OBJ.info("External Network already created")
return
_url = "http://" + self.host_ip + ":9696/v2.0/networks.json"
_headers = {'Content-type': 'application/json',
'x-auth-token': self.cloud_admin_info["token_project"]}
_extnet_info = {"network": {
"tenant_id": self.cloud_admin_info["project_id"],
"name": _net_name,
"router:external": "True",
"admin_state_up": True}}
_body = json.dumps(_extnet_info)
response = self.request("POST", _url, _headers, _body)
output = json.loads(response.data)
if output is None:
LOG_OBJ.error("No response from server while creating ext net.")
return
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Create ext network Failed with status %s " %
response.status)
return response.status
_ext_net_id = output['network']['id']
LOG_OBJ.debug("External Network created successfully. ID:%s" %
_ext_net_id)
# Creating External Subnet
_url = "http://" + self.host_ip + ":9696/v2.0/subnets.json"
_headers = {'Content-type': 'application/json',
'x-auth-token': self.cloud_admin_info["token_project"]}
_ext_subnet_info = {"subnet": {
"ip_version": 4,
"allocation_pools": [{"start": _start_ip,
"end": _end_ip}],
"gateway_ip": _gateway,
"enable_dhcp": "False",
"network_id": _ext_net_id,
"tenant_id": self.cloud_admin_info["project_id"],
"cidr": _cidr,
"name": _net_name + "-sub"}}
_body = json.dumps(_ext_subnet_info)
output = self.request("POST", _url, _headers, _body)
if output is None:
LOG_OBJ.error("No response from server while creating ext-subet")
return
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Create subnet Failed with status %s " %
response.status)
return response.status
return _ext_net_id
def create_subnet(self, network_name, subnet_name, cidr):
"""
It creates the subnet in the network specified.
params:
network_name: Name of the network
subnet_name: Name of the subnet
cidr: CIDR of the subnet.
allocation_pool (optional)
Return:
Subnet id (unicode), on success.
"""
_net_id = self.get_net_id(network_name)
if not isinstance(_net_id, unicode):
return
_url = "http://" + self.host_ip + ":9696/v2.0/subnets.json"
_headers = {'Content-type': 'application/json',
'x-auth-token': self.project_info["token_project"]}
_subnet_info = {"subnet":
{"ip_version": 4,
"network_id": _net_id,
"cidr": cidr,
"name": subnet_name}}
_body = json.dumps(_subnet_info)
LOG_OBJ.debug("Creating subnet in network %s of tenant %s."
% (_net_id, self.project_info["project_id"]))
response = self.request("POST", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while creating subnet")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Creation of subnet Failed with status %s " %
response.status)
return response.status
output = json.loads(response.data)
LOG_OBJ.info("Subnet details : %s " % output['subnet'])
return output['subnet']['id']
def delete_subnet(self, subnet_id):
"""
It deletes the subnet based on the id.
param:
subnet_id: ID of the subnet.
Return: True(Bool), on success.
"""
LOG_OBJ.debug("Deleting subnet %s" % subnet_id)
_url = "http://" + self.host_ip + ":9696/v2.0/subnets/" + \
subnet_id + ".json"
_headers = {'Content-type': 'application/json',
'x-auth-token': self.project_info["token_project"]}
_body = None
response = self.request("DELETE", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while deleting subnet:%s" %
subnet_id)
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Deletion of subnet Failed with status %s " %
response.status)
return response.status
LOG_OBJ.info("Deleted the subnet : %s " % subnet_id)
return True
def list_subnet(self):
"""
It gets the subnets in a tenant.
Return:
A list containing subnets info.
"""
_url = "http://" + self.host_ip + ":9696/v2.0/subnets"
_headers = {'Content-type': 'application/json',
'x-auth-token': self.project_info["token_project"]}
_body = None
response = self.request("GET", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while listing subnet.")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Get subnet list Failed with status %s " %
response.status)
return response.status
output = json.loads(response.data)
LOG_OBJ.info("subnet List : %s " % output)
return output["subnets"]
def get_subnet_details(self, subnet_name="dummy_subnet", subnet_id=None):
"""
It gets the subnet details by subnet name or subnet id.
params:
subnet_name: Name of the subnet.
Optional params:
subnet_id: ID of the subnet.
Return:
Dict containing the subnet details.
"""
_url = "http://" + self.host_ip + ":9696/v2.0/subnets"
_headers = {'x-auth-token': self.project_info["token_project"]}
_body = None
result = self.request("GET", _url, _headers, _body)
if result is None:
LOG_OBJ.error("No response from Server while getting subnets")
return result
if result.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Get subnet details Failed with status %s " %
result.status)
return result.status
output = json.loads(result.data)
for subnets in output['subnets']:
if (subnet_id is not None and (subnets['id'] == subnet_id)) or\
subnets['name'].lower() == subnet_name.lower():
LOG_OBJ.debug("Subnet Details: %s" % subnets)
return subnets
LOG_OBJ.error("Subnet with name:%s or with id:%s is Not Found" %
(subnet_name, subnet_id))
def get_specific_port_by_server_id(self, net_id, server_id):
"""
This is basically used to identify a particular port info for a vm
that has multiple interfaces.
This will return the port of the vm created in the net: net_id.
Return: Dict containing port details, on success
"""
_url = "http://" + self.host_ip + ":9696/v2.0/ports.json"
_headers = {'Content-type': 'application/json',
'x-auth-token': self.project_info["token_project"]}
_body = None
response = self.request("GET", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while getting ports.")
return None
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Get port ID Failed with status %s " %
response.status)
return response.status
output = json.loads(response.data)
LOG_OBJ.debug("Port details: %s" % output)
for port in output['ports']:
if port["device_id"] == server_id and port['network_id'] == net_id:
LOG_OBJ.debug("Port ID:%s" % port['id'])
return port
LOG_OBJ.error("There is NO port corresponding to server ID: %s"
" in Network: %s" % (server_id, net_id))
def create_port(self, network_name, port_name="port",
security_groups=None, net_id=None, **kwargs):
"""
It creates the port on the specified network.
params:
network_name: Name of the network.
port_name: Name of the port.
Optional params:
security_groups: Id of the security group.
net_id: Network id.
port_security_enabled: True/False. Default is True
Whether the port security for the port to be enabled or NOT
Return:
Port ID (Unicode) on success.
"""
LOG_OBJ.debug("Creating Port : ")
if net_id is None:
net_id = self.get_net_id(network_name)
if not isinstance(net_id, unicode):
return
_url = "http://" + self.host_ip + ":9696/v2.0/ports.json"
_headers = {'x-auth-token': self.project_info["token_project"],
'content-type': 'application/json'}
_port_info = {"port": {"network_id": net_id,
"tenant_id": self.project_info["project_id"],
"name": port_name,
"admin_state_up": True,
"port_security_enabled": kwargs.get(
'port_security_enabled', True)
}}
if security_groups is not None:
_port_info["port"]["security_groups"] = security_groups
_body = json.dumps(_port_info)
response = self.request("POST", _url, _headers, _body)
if response is None:
LOG_OBJ.error(" no response from Server")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Create port Failed with status %s" %
response.status)
return response.status
output = json.loads(response.data)
LOG_OBJ.info("Port Details:%s" % output['port'])
return output['port']['id']
def list_port(self):
"""
Returns list of ports details.
"""
_url = "http://" + self.host_ip + ":9696/v2.0/ports.json"
_headers = {'Content-type': 'application/json',
'x-auth-token': self.project_info["token_project"]}
_body = None
response = self.request("GET", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server, while listing ports.")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Get port list Failed with status %s"
% response.status)
return response.status
output = json.loads(response.data)
LOG_OBJ.info("Port List : %s " % output)
return output["ports"]
def show_port(self, port_id):
"""
It gives the port info.
params: port_id: Id of the port.
Returns: dictionary containing port details.
"""
_url = "http://" + self.host_ip + ":9696/v2.0/ports/" + \
port_id + ".json"
_headers = {'Content-type': 'application/json',
'x-auth-token': self.project_info["token_project"]}
_body = None
response = self.request("GET", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from server, while accessing "
"details of %s port." % port_id)
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Get port details Failed with status %s"
% response.status)
return response.status
output = json.loads(response.data)
LOG_OBJ.info("Port Details : %s " % output)
return output["port"]
def delete_port(self, port_id):
"""
It deletes the port.
params: port_id: Id of the port
Returns: On successful deletion of port returns True.
"""
_url = "http://" + self.host_ip + ":9696/v2.0/ports/" +\
port_id + ".json"
_headers = {'Content-type': 'application/json',
'x-auth-token': self.project_info["token_project"]}
_body = None
response = self.request("DELETE", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from server, while deleting "
"%s port." % port_id)
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Delete port Failed with status %s"
% response.status)
return response.status
LOG_OBJ.debug("Deleted port: %s" % port_id)
return True
def list_router(self):
"""
It gets the routers info in a tenant.
Return:
A list of of routers, on success.
"""
_url = "http://" + self.host_ip + ":9696/v2.0/routers.json"
_headers = {'Content-type': 'application/json',
'x-auth-token': self.project_info["token_project"]}
_body = None
response = self.request("GET", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while listing routers.")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("List router Failed with status %s " %
response.status)
return response.status
output = json.loads(response.data)
LOG_OBJ.info("Router List : %s " % output)
return output["routers"]
def list_router_ports(self, router_id):
"""
It lists all router ports related to given router.
param: router_id: ID of the router.
Return: List (containing the router ports), on success.
"""
_url = "http://" + self.host_ip + ":9696/v2.0/ports.json?"\
"device_id=" + router_id
_headers = {'Content-type': 'application/json',
'x-auth-token': self.project_info["token_project"]}
_body = None
response = self.request("GET", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from server, while listing router "
"ports of %s router" % router_id)
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Failed to list router ports with status %s " %
response.status)
return response.status
output = json.loads(response.data)
LOG_OBJ.info("Router port list related to %s router: "
"%s " % (router_id, output))
return output["ports"]
def create_router(self, router_name="test_router"):
"""
This is used to create router.
params:
router_name: Name of the router.
Return: Router ID (unicode), on success
"""
LOG_OBJ.debug(
"Creating router in tenant %s" %
self.project_info["project_id"])
_url = "http://" + self.host_ip + ":9696/v2.0/routers.json"
_headers = {'Content-type': 'application/json',
'x-auth-token': self.project_info["token_project"]}
_router_info = {
"router": {
"tenant_id": self.project_info["project_id"],
"name": router_name,
"admin_state_up": True}}
_body = json.dumps(_router_info)
response = self.request("POST", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server")
return
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Create router Failed with status %s " %
response.status)
return response.status
output = json.loads(response.data)
LOG_OBJ.info("Router Details : %s " % output)
return output['router']['id']
def delete_router(self, router_id):
"""
It deletes the router.
params:
router_id: Id of the router.
Return: True, on success.
"""
_url = "http://" + self.host_ip + ":9696/v2.0/routers/" + \
router_id + ".json"
_headers = {'Content-type': 'application/json',
'x-auth-token': self.project_info["token_project"]}
_body = None
response = self.request("DELETE", _url, _headers, _body)
if response is None:
LOG_OBJ.error(" no response from Server")
return
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Router delete Failed with status %s " %
response.status)
return response.status
LOG_OBJ.info("Deleted router:%s " % router_id)
return True
def set_router_gateway(self, ext_net_name, router_id):
"""
It sets the router to the external gateway.
params:
ext_net_name: External network name.
router_id: Id of the router.
Return: True on success.
"""
_ext_net_id = self.get_net_id(ext_net_name)
if not isinstance(_ext_net_id, unicode):
return
LOG_OBJ.debug("Setting external gateway of %s router." % router_id)
_url = "http://" + self.host_ip + ":9696/v2.0/routers/" + \
router_id + ".json"
_headers = {'x-auth-token': self.project_info["token_project"],
'content-type': 'application/json'}
_gwdata = {"router": {"external_gateway_info":
{"network_id": _ext_net_id}}}
_body = json.dumps(_gwdata)
response = self.request("PUT", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while setting router:"
" %s to gateway: %s" % (router_id, _ext_net_id))
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Setting router gateway Failed with status %s " %
response.status)
return response.status
LOG_OBJ.info("Router Gateway set is done for %s router" % router_id)
return True
def clear_router_gateway(self, router_id):
"""
For clearing external gateway for a router.
Argu:
router_id: router id
Return: On clearing external gateway successfully returns True.
"""
_url = "http://" + self.host_ip + ":9696/v2.0/routers/" + \
router_id + ".json"
_headers = {'x-auth-token': self.project_info["token_project"],
'content-type': 'application/json'}
_gwdata = {"router":
{"external_gateway_info": {}}}
_body = json.dumps(_gwdata)
response = self.request("PUT", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from server. while clearing external "
"gateway of %s router." % router_id)
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Clearing router gateway Failed with "
"status %s " % response.status)
return response.status
LOG_OBJ.info("Cleared external gateway of %s router" % router_id)
return True
def add_router_interface(self, router_id, subnet_id=None, **kwargs):
"""
It attaches the subnet to the router.
:param router_id: Id of the router.
:param subnet_id: Id of the subnet to be attached to router.
:optional params:
port_id unicode: UUID of a new neutron port to be used as gateway.
Return:
True on success.
"""
port_id = kwargs.get("port_id")
if port_id is None and subnet_id is None:
LOG_OBJ.error("To attach subnet to router either provide "
"subnet id or a new port id")
return
_url = "http://" + self.host_ip + ":9696/v2.0/routers/" + \
router_id + "/add_router_interface.json"
_headers = {'Content-type': 'application/json',
'x-auth-token': self.project_info["token_project"]}
_router_interface_info = {"port_id": port_id} \
if port_id else {"subnet_id": subnet_id}
_body = json.dumps(_router_interface_info)
response = self.request("PUT", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while attaching subnet:%s"
" to router: %s" % (subnet_id, router_id))
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Adding interface to router Failed with status %s " %
response.status)
return response.status
LOG_OBJ.info("Added interface of subnet %s to %s router." %
(subnet_id, router_id))
return True
def remove_router_interface(self, router_id, subnet_id=None, **kwargs):
"""
Deletes router interfaces.
Argu:
router_id: router id
subnet_id: subnet id
Optional Argu:
port_id: port uuid.
Returns: On successful removal of router insterface True.
"""
_url = "http://" + self.host_ip + ":9696/v2.0/routers/" + \
router_id + "/remove_router_interface.json"
_headers = {'Content-type': 'application/json',
'x-auth-token': self.project_info["token_project"]}
if subnet_id:
_router_interface_info = {"subnet_id": subnet_id}
if kwargs.get("port_id"):
_router_interface_info = {"port_id": kwargs.get("port_id")}
_body = json.dumps(_router_interface_info)
response = self.request("PUT", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server, while removing "
"interface of %s router" % router_id)
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error(" Removing interface to router Failed with "
"status %s " % response.status)
return response.status
LOG_OBJ.info("Removed router interface to router: %s" % router_id)
return True
def create_floating_ip(self, extnet_name,
return_details=False):
"""
It creates the floating ip from external ip pool.
params:
extnet_name: External network name.
return_details: Tells whether to return the details of floating ip.
Return:
On success: Floating Ip (Unicode), if return_details=False
Dictionary, if return_details=True
"""
_external_net_id = self.get_net_id(extnet_name)
if not isinstance(_external_net_id, unicode):
return
LOG_OBJ.debug("Creating floating ip.")
_url = "http://" + self.host_ip + ":9696/v2.0/floatingips.json"
_headers = {'x-auth-token': self.project_info["token_project"],
'content-type': 'application/json'}
_floatingip_info = {"floatingip": {
"floating_network_id": _external_net_id}}
_body = json.dumps(_floatingip_info)
response = self.request("POST", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while creating floating ip")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error(" Creating floating ip Failed with status %s " %
response.status)
return response.status
output = json.loads(response.data)
LOG_OBJ.info("Floating IP details : %s " % output)
if return_details:
return output['floatingip']
return output['floatingip']['id']
def list_floating_ip(self):
"""
It lists the floating ip allocated for the tenant.
Return:
List of floating IP, on success.
"""
_url = "http://" + self.host_ip + ":9696/v2.0/floatingips.json"
_headers = {'x-auth-token': self.project_info["token_project"],
'content-type': 'application/json'}
_body = None
response = self.request("GET", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while listing the"
" floating ips")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Retriving floating ip list Failed with"
" status %s " % response.status)
return response.status
output = json.loads(response.data)
LOG_OBJ.debug("Floating ip list: %s" % output)
return output["floatingips"]
def get_floating_ip_by_port_id(self, port_id):
"""
It Returns the floating ip i.e associated with the port_id.
params:
port_id: neutron port_id
Return:
Floating IP(Unicode), on success.
"""
floatingips = self.list_floating_ip()
if not isinstance(floatingips, list):
return None
for floating_ip_info in floatingips:
if floating_ip_info['port_id'] == port_id:
floating_ip = floating_ip_info['floating_ip_address']
LOG_OBJ.debug("Floating ip for port id:%s is %s" %
(port_id, floating_ip))
return floating_ip
LOG_OBJ.debug("There is NO floating ip for port id: %s" % port_id)
return None
def associate_floating_ip(self, floatingip_id, port_id):
"""
It associates the floating ip to a port.
params:
floatingip_id: Id of the floating IP.
port_id: Id of the port to which floating ip will be associated.
Return: True, on success.
"""
_url = "http://" + self.host_ip + ":9696/v2.0/floatingips/" + \
floatingip_id + ".json"
_headers = {'x-auth-token': self.project_info["token_project"],
'content-type': 'application/json'}
_floatingip_info = {"floatingip": {"port_id": port_id}}
_body = json.dumps(_floatingip_info)
response = self.request("PUT", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while associating"
" the floating ip")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error(" Associating floating ip Failed with status %s " %
response.status)
return response.status
output = json.loads(response.data)
LOG_OBJ.info("Associated floating ip %s with VM ip : %s " %
(output['floatingip']['floating_ip_address'],
output['floatingip']['fixed_ip_address']))
return True
def disassociate_floating_ip(self, floating_id):
"""
Disassociates floating ip from vm port.
Arguments:
floating_id: floating ip id.
Return: True on successful disassociation of floating ip.
"""
LOG_OBJ.debug("Disassociate Floatingip with id %s" % floating_id)
_url = "http://" + self.host_ip + ":9696/v2.0/floatingips/" + \
floating_id + ".json"
_headers = {'x-auth-token': self.project_info["token_project"],
'content-type': 'application/json'}
_floatingip_info = {"floatingip": {"port_id": None}}
_body = json.dumps(_floatingip_info)
response = self.request("PUT", _url, _headers, _body)
if response is None:
LOG_OBJ.error(" no response from Server")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Disassociating floating ip Failed with status %s "
% response.status)
return response.status
output = json.loads(response.data)
LOG_OBJ.info("Dissociated floating ip %s "
% output['floatingip']['floating_ip_address'])
return True
def delete_floating_ip(self, floating_id):
"""
For Deleting floating ips.
Argu:
floating_id: floating ip id.
Return: On successful deletion of floating ip returns True,
"""
LOG_OBJ.debug("Deleting floating ip with id %s" % floating_id)
_url = "http://" + self.host_ip + ":9696/v2.0/floatingips/" + \
floating_id + ".json"
_headers = {'x-auth-token': self.project_info["token_project"],
'content-type': 'application/json'}
_body = None
response = self.request("DELETE", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from server while deleting flaoting "
"ip with id %s" % floating_id)
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Deleting floating ip Failed with status %s"
% response.status)
return response.status
LOG_OBJ.info("Deleted floating ip with id: %s " % floating_id)
return True
# ######################################
# ####### Keystone V3 API calls ########
# ######################################
def get_keystone_v3_token(self, tenant_name, domain_name,
user_name, password, scope="domain"):
"""
It gets the token of the tenant..
params:
tenant_name: Name of the tenant.
user_name: Name of the user.
password: Password of the user.
scope: token scope (domain/project)
Return: Token (Unicode), on success.
"""
_url = "http://" + self.host_ip + ":5000/v3/auth/tokens"
_headers = {"content-type": "application/json"}
_token_info = {"auth": {
"identity": {
"methods": ["password"],
"password": {
"user": {
"domain": {
"name": domain_name
},
"name": user_name,
"password": password
}
}
},
"scope": {}
}
}
if scope == "domain":
_token_info['auth']['scope'] = {"domain": {"name": domain_name}}
else:
_token_info['auth']['scope'] = \
{"project": {"domain": {"name": domain_name},
"name": tenant_name}}
_body = json.dumps(_token_info)
response = self.request("POST", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while getting token for"
" tenant: %s" % tenant_name)
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Request of token for %s tenant Failed with"
" status %s " % (tenant_name, response.status))
return response.status
token_id = response.headers['x-subject-token']
print ("Token ID for tenant %s is %s" % (tenant_name, token_id))
LOG_OBJ.debug("Token ID for tenant %s is %s" % (tenant_name, token_id))
return token_id
def create_keystone_v3_domain(self, **kwargs):
"""
It creates the domain.
params:
kwargs : dictionary contains
Compulsory argument :
name = ""
Optional arguments
description = ""
enable/disable = True/False ...etc
Return:
On success: Domain ID
"""
LOG_OBJ.debug("Creating the domain.")
print self.project_info
_url = "http://" + self.host_ip + ":35357/v3/domains"
_headers = {'x-auth-token': self.cloud_admin_info["token_domain"],
'content-type': 'application/json'}
_domain_info = {"domain": {}}
for argument in ["name", "description", "enabled", "disabled"]:
try:
_domain_info['domain'].update(
{argument: kwargs[argument]})
except KeyError:
pass
_body = json.dumps(_domain_info)
response = self.request("POST", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while creating domain")
print ("No response from Server while creating domain")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error(" Creating domain Failed with status %s "
"and error : %s" % (response.status, response.data))
print (" Creating domain Failed with status %s and error : %s " %
(response.status, response.data))
return response.status
output = json.loads(response.data)
LOG_OBJ.info("Domain details : %s " % output)
print ("Domain details : %s " % output)
return output['domain']['id']
def set_keystone_v3_domain(self, **kwargs):
"""
It set the domain status.
params:
kwargs : dictionary contains
Compulsory argument :
domain_id = domain ID
Optional arguments
name = name of domain
description = ""
enable/disable = True/False ...etc
domain_id: domain ID
enable: True/False (to enable or disable the domain)
Return:
On success: True
"""
LOG_OBJ.debug("Creating the domain.")
_url = "http://" + self.host_ip + ":35357/v3/domains/" + \
str(kwargs['domain_id'])
_headers = {'x-auth-token': self.cloud_admin_info["token_domain"],
'content-type': 'application/json'}
_domain_info = {"domain": {}}
for argument in ["name", "description", "enabled", "disabled"]:
try:
_domain_info['domain'].update(
{argument: kwargs[argument]})
except KeyError:
pass
_body = json.dumps(_domain_info)
response = self.request("PATCH", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while set the domain")
print ("No response from Server while set the domain")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Set domain Failed with status %s and error : %s" %
(response.status, response.data))
print ("Set domain Failed with status %s and error : %s" %
(response.status, response.data))
return response.status
return True
def delete_keystone_v3_domain(self, domain_id):
"""
It deletes the domain.
params:
domain_id: domain ID
Return:
On success: True
"""
LOG_OBJ.debug("Disable the domain.")
kwargs = {"domain_id": domain_id, "enabled": False}
self.set_keystone_v3_domain(**kwargs)
LOG_OBJ.debug("Deleting the domain.")
_url = "http://" + self.host_ip + ":35357/v3/domains/" + str(domain_id)
_headers = {'x-auth-token': self.cloud_admin_info["token_domain"],
'content-type': 'application/json'}
_body = None
response = self.request("DELETE", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while deleting the domain")
print ("No response from Server while deleting the domain")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error(" Deleting domain Failed with status %s "
"and error : %s" % (response.status, response.data))
print (" Deleting domain Failed with status %s and error : %s" %
(response.status, response.data))
return response.status
return True
def list_keystone_v3_domains(self):
"""
It gives list of all the domains.
Return:
On success: List of domains list
"""
LOG_OBJ.debug("List the domains.")
_url = "http://" + self.host_ip + ":35357/v3/domains"
_headers = {'x-auth-token': self.cloud_admin_info["token_domain"],
'content-type': 'application/json'}
_body = None
response = self.request("GET", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while creating domain")
print ("No response from Server while creating domain")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error(" Listing domains Failed with status %s "
"and error : %s" % response.status, response.data)
print (" Listing domains Failed with status %s and error : %s" %
response.status, response.data)
return response.status
output = json.loads(response.data)
LOG_OBJ.info("Domains list : %s " % output)
print ("Domains list : %s " % output)
return output['domains']
def show_keystone_v3_domain(self, domain_id):
"""
It gives the domain info.
params: domain_id: Id of the domain.
Returns: dictionary containing domain details.
"""
LOG_OBJ.debug("Details of a domain.")
_url = "http://" + self.host_ip + ":35357/v3/domains/" + str(domain_id)
_headers = {'x-auth-token': self.cloud_admin_info["token_domain"],
'content-type': 'application/json'}
_body = None
response = self.request("GET", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while getting the "
"details of domain")
print ("No response from Server while getting the "
"details of domain")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Show domain Failed with status %s and error : %s" %
(response.status, response.data))
print ("Show domain Failed with status %s and error : %s" %
(response.status, response.data))
return response.status
output = json.loads(response.data)
LOG_OBJ.info("Domains details : %s " % output)
print ("Domains details : %s " % output)
return output['domain']
def get_keystone_v3_domain_id(self, domain_name):
"""
It gives the domain ID.
params: domain_name: name of the domain.
Returns: domain ID.
"""
LOG_OBJ.debug("Get the domain ID.")
_url = "http://" + self.host_ip + ":35357/v3/domains?name=" + \
str(domain_name)
_headers = {'x-auth-token': self.cloud_admin_info["token_domain"],
'content-type': 'application/json'}
_body = None
response = self.request("GET", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while getting the "
"ID of domain")
print ("No response from Server while getting the "
"ID of domain")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Get domain ID Failed with status %s and error "
": %s" % (response.status, response.data))
print ("Get domain ID Failed with status %s and error : %s" %
(response.status, response.data))
return response.status
output = json.loads(response.data)
LOG_OBJ.info("Domain details : %s " % output)
if len(output['domains']) != 1:
LOG_OBJ.debug("No. of domains with name %s is %s"
% (domain_name, len(output['domains'])))
print("No. of domains with name %s is %s"
% (domain_name, len(output['domains'])))
return
return output['domains'][0]['id']
def create_keystone_v3_project(self, **kwargs):
"""
It creates the project.
params:
kwargs : dictionary contains
Compulsory argument :
name = project name
domain = domain ID
Optional arguments
description = ""
enable/disable = True/False ...etc
Return:
On success: Project ID
"""
LOG_OBJ.debug("Creating the project.")
print self.project_info
_url = "http://" + self.host_ip + ":35357/v3/projects"
_headers = {'x-auth-token': self.cloud_admin_info["token_domain"],
'content-type': 'application/json'}
_project_info = {"project": {}}
for argument in ["name", "description", "domain_id",
"enabled", "disabled"]:
try:
_project_info['project'].update(
{argument: kwargs[argument]})
except KeyError:
pass
_body = json.dumps(_project_info)
response = self.request("POST", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while creating project")
print ("No response from Server while creating project")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error(" Creating project Failed with status %s "
"and error : %s" % (response.status, response.data))
print (" Creating project Failed with status %s and error : %s" %
(response.status, response.data))
return response.status
output = json.loads(response.data)
LOG_OBJ.info("Project details : %s " % output)
print ("Project details : %s " % output)
return output['project']['id']
def delete_keystone_v3_project(self, project_id, domain_id):
"""
It deletes the project.
params:
domain_id: project ID
Return:
On success: True
"""
LOG_OBJ.debug("Disable the project.")
kwargs = {"project_id": project_id, "enabled": False}
self.set_keystone_v3_project(**kwargs)
LOG_OBJ.debug("Deleting the project.")
_url = "http://" + self.host_ip + ":35357/v3/projects/" + \
str(project_id)
_headers = {'x-auth-token': self.cloud_admin_info["token_domain"],
'content-type': 'application/json'}
_body = None
response = self.request("DELETE", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while deleting the project")
print ("No response from Server while deleting the project")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error(" Deleting project Failed with status %s "
"and error : %s" % (response.status, response.data))
print (" Deleting project Failed with status %s and error : %s" %
(response.status, response.data))
return response.status
return True
def list_keystone_v3_projects(self):
"""
It gives list of all the project.
Return:
On success: List of projects list
"""
LOG_OBJ.debug("List the projects.")
_url = "http://" + self.host_ip + ":35357/v3/projects"
_headers = {'x-auth-token': self.cloud_admin_info["token_domain"],
'content-type': 'application/json'}
_body = None
response = self.request("GET", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while creating project")
print ("No response from Server while creating project")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error(" Creating project Failed with status %s "
"and error : %s" % (response.status, response.data))
print (" Creating project Failed with status %s and error : %s" %
(response.status, response.data))
return response.status
output = json.loads(response.data)
LOG_OBJ.info("Projects list : %s " % output)
print ("Projects list : %s " % output)
return output['projects']
def set_keystone_v3_project(self, **kwargs):
"""
It set the project status.
params:
kwargs : dictionary contains
Compulsory argument :
project_id: project ID
Optional arguments
description = ""
name = project name
domain = domain ID
enable/disable = True/False ...etc
project_id: project ID
enable: True/False (to enable or disable the domain)
Return:
On success: True
"""
LOG_OBJ.debug("Creating the project.")
_url = "http://" + self.host_ip + ":35357/v3/projects/" + \
str(kwargs['project_id'])
_headers = {'x-auth-token': self.cloud_admin_info["token_domain"],
'content-type': 'application/json'}
_project_info = {"project": {}}
for argument in ["name", "description", "domain_id",
"enabled", "disabled"]:
try:
_project_info['project'].update(
{argument: kwargs[argument]})
except KeyError:
pass
_body = json.dumps(_project_info)
response = self.request("PATCH", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while set the project")
print ("No response from Server while set the project")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error(" Set project Failed with status %s and error : %s" %
(response.status, response.data))
print (" Set project Failed with status %s and error : %s" %
(response.status, response.data))
return response.status
return True
def show_keystone_v3_project(self, project_id):
"""
It gives the project info.
params: project_id: Id of the project.
Returns: dictionary containing project details.
"""
LOG_OBJ.debug("Details of a project.")
_url = "http://" + self.host_ip + ":35357/v3/projects/" + \
str(project_id)
_headers = {'x-auth-token': self.cloud_admin_info["token_domain"],
'content-type': 'application/json'}
_body = None
response = self.request("GET", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while getting the "
"details of project")
print ("No response from Server while getting the "
"details of project")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Show project Failed with status %s and error : %s" %
(response.status, response.data))
print ("Show project Failed with status %s and error : %s" %
(response.status, response.data))
return response.status
output = json.loads(response.data)
LOG_OBJ.info("Project details : %s " % output)
print ("Project details : %s " % output)
return output['project']
def get_keystone_v3_project_id(self, project_name):
"""
It gives the project ID.
params: project_name: name of the project.
Returns: project ID.
"""
LOG_OBJ.debug("Get the project ID.")
_url = "http://" + self.host_ip + ":35357/v3/projects?name=" + \
str(project_name)
_headers = {'x-auth-token': self.cloud_admin_info["token_domain"],
'content-type': 'application/json'}
_body = None
response = self.request("GET", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while getting the "
"ID of project")
print ("No response from Server while getting the "
"ID of project")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Get project ID Failed with status %s and error "
": %s" % (response.status, response.data))
print ("Get project ID Failed with status %s and error : %s" %
(response.status, response.data))
return response.status
output = json.loads(response.data)
LOG_OBJ.info("project details : %s " % output)
print ("project details : %s " % output)
if len(output['projects']) != 1:
LOG_OBJ.debug("No. of projects with name %s is %s"
% (project_name, len(output['projects'])))
print("No. of projects with name %s is %s"
% (project_name, len(output['projects'])))
return
return output['projects'][0]['id']
    def usage_keystone_v3_project(self):
        """
        Stub: intended to report usage for a project; not implemented.
        Calling it is a no-op that returns None.
        """
        pass
def create_keystone_v3_user(self, **kwargs):
"""
It creates the user.
params:
kwargs : dictionary contains
Compulsory argument :
name = user name
password = password for user
domain = domain ID
project = project ID
Optional arguments
description = ""
Return:
On success: Project ID
"""
LOG_OBJ.debug("Creating the user.")
print self.project_info
_url = "http://" + self.host_ip + ":35357/v3/users"
_headers = {'x-auth-token': self.cloud_admin_info["token_domain"],
'content-type': 'application/json'}
_project_info = {"user": {}}
for argument in ["name", "description", "domain_id",
"default_project_id", "password",
"enable", "disable"]:
try:
_project_info['user'].update(
{argument: kwargs[argument]})
except KeyError:
pass
_body = json.dumps(_project_info)
response = self.request("POST", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while creating user")
print ("No response from Server while creating user")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error(" Creating user Failed with status %s "
"and error : %s" % (response.status, response.data))
print (" Creating user Failed with status %s and error : %s" %
(response.status, response.data))
return response.status
output = json.loads(response.data)
LOG_OBJ.info("User details : %s " % output)
print ("User details : %s " % output)
return output['user']['id']
def delete_keystone_v3_user(self, user_id):
"""
It deletes the user.
params:
user_id: user ID
Return:
On success: True
"""
LOG_OBJ.debug("Disable the user.")
kwargs = {"user_id": user_id, "enabled": False}
self.set_keystone_v3_user(**kwargs)
LOG_OBJ.debug("Deleting the user.")
_url = "http://" + self.host_ip + ":35357/v3/users/" + str(user_id)
_headers = {'x-auth-token': self.cloud_admin_info["token_domain"],
'content-type': 'application/json'}
_body = None
response = self.request("DELETE", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while deleting the user")
print ("No response from Server while deleting the user")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error(" Deleting user Failed with status %s "
"and error : %s" % (response.status, response.data))
print (" Deleting user Failed with status %s and error : %s" %
(response.status, response.data))
return response.status
return True
def list_keystone_v3_users(self):
"""
It gives list of all the users.
Return:
On success: List of users list
"""
LOG_OBJ.debug("List the users.")
_url = "http://" + self.host_ip + ":35357/v3/users"
_headers = {'x-auth-token': self.cloud_admin_info["token_domain"],
'content-type': 'application/json'}
_body = None
response = self.request("GET", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while creating user")
print ("No response from Server while creating user")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error(" Creating user Failed with status %s "
"and error : %s" % (response.status, response.data))
print (" Creating user Failed with status %s " %
response.status)
return response.status
output = json.loads(response.data)
LOG_OBJ.info("Users list : %s " % output)
print ("Users list : %s " % output)
return output['users']
def set_keystone_v3_user(self, **kwargs):
"""
It set the user properties.
params:
kwargs : dictionary contains
Compulsory argument :
user_id = user ID
Optional arguments
description = ""
name = user name
password = password for user
domain = domain ID
project = project ID
Return:
On success: True
"""
LOG_OBJ.debug("Creating the project.")
_url = "http://" + self.host_ip + ":35357/v3/users/" + \
str(kwargs['user_id'])
_headers = {'x-auth-token': self.cloud_admin_info["token_domain"],
'content-type': 'application/json'}
_user_info = {"user": {}}
for argument in ["name", "description", "domain_id",
"default_project_id", "password",
"enabled", "disabled"]:
try:
_user_info['user'].update(
{argument: kwargs[argument]})
except KeyError:
pass
_body = json.dumps(_user_info)
response = self.request("PATCH", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while set the user")
print ("No response from Server while set the user")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error(" Set user Failed with status %s and error : %s" %
(response.status, response.data))
print (" Set user Failed with status %s and error : %s" %
(response.status, response.data))
return response.status
return True
def show_keystone_v3_user(self, user_id):
"""
It gives the user info.
params: user_id: Id of the project.
Returns: dictionary containing user details.
"""
LOG_OBJ.debug("Details of a user.")
_url = "http://" + self.host_ip + ":35357/v3/users/" + str(user_id)
_headers = {'x-auth-token': self.cloud_admin_info["token_domain"],
'content-type': 'application/json'}
_body = None
response = self.request("GET", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while getting the "
"details of user")
print ("No response from Server while getting the "
"details of user")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Show user Failed with status %s and error : %s" %
(response.status, response.data))
print ("Show user Failed with status %s and error : %s" %
(response.status, response.data))
return response.status
output = json.loads(response.data)
LOG_OBJ.info("User details : %s " % output)
print ("User details : %s " % output)
return output['user']
def get_keystone_v3_user_id(self, user_name):
"""
It gives the user ID.
params: user_name: name of the user.
Returns: user ID.
"""
LOG_OBJ.debug("Get the user ID.")
_url = "http://" + self.host_ip + ":35357/v3/users?name=" + \
str(user_name)
_headers = {'x-auth-token': self.cloud_admin_info["token_domain"],
'content-type': 'application/json'}
_body = None
response = self.request("GET", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while getting the "
"ID of user")
print ("No response from Server while getting the "
"ID of user")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Get user ID Failed with status %s and error "
": %s" % (response.status, response.data))
print ("Get user ID Failed with status %s and error : %s" %
(response.status, response.data))
return response.status
output = json.loads(response.data)
LOG_OBJ.info("user details : %s " % output)
print ("user details : %s " % output)
if len(output['users']) != 1:
LOG_OBJ.debug("No. of users with name %s is %s"
% (user_name, len(output['users'])))
print("No. of users with name %s is %s"
% (user_name, len(output['users'])))
return
return output['users'][0]['id']
    def password_change_keystone_v3_user(self):
        """
        Stub: intended to change a user's password; not implemented.
        Calling it is a no-op that returns None.
        """
        pass
def create_keystone_v3_role(self, role_name):
"""
It creates the role.
params:
role_name: role name.
Return:
On success: Role ID
"""
LOG_OBJ.debug("Creating the role.")
_url = "http://" + self.host_ip + ":35357/v3/roles"
_headers = {'x-auth-token': self.cloud_admin_info["token_domain"],
'content-type': 'application/json'}
_role_info = {"role": {
"name": role_name}}
_body = json.dumps(_role_info)
response = self.request("POST", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while creating role")
print ("No response from Server while creating role")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error(" Creating role Failed with status %s "
"and error : %s" % (response.status, response.data))
print (" Creating role Failed with status %s and error : %s" %
(response.status, response.data))
return response.status
output = json.loads(response.data)
LOG_OBJ.info("Role details : %s " % output)
print ("Role details : %s " % output)
return output['role']['id']
def delete_keystone_v3_role(self, role_id):
"""
It deletes the role.
params:
user_id: role ID
Return:
On success: True
"""
LOG_OBJ.debug("Deleting the role.")
_url = "http://" + self.host_ip + ":35357/v3/roles/" + str(role_id)
_headers = {'x-auth-token': self.cloud_admin_info["token_domain"],
'content-type': 'application/json'}
_body = None
response = self.request("DELETE", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while deleting the role")
print ("No response from Server while deleting the role")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error(" Deleting role Failed with status %s and error"
" : %s " % (response.status, response.data))
print (" Deleting role Failed with status %s and error : %s" %
(response.status, response.data))
return response.status
return True
def list_keystone_v3_roles(self):
"""
It gives list of all the roles.
Return:
On success: List of roles list
"""
LOG_OBJ.debug("List the roles.")
_url = "http://" + self.host_ip + ":35357/v3/roles"
_headers = {'x-auth-token': self.cloud_admin_info["token_domain"],
'content-type': 'application/json'}
_body = None
response = self.request("GET", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while listing role")
print ("No response from Server while listing role")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error(" List roles Failed with status %s and error : %s" %
(response.status, response.data))
print (" List roles Failed with status %s and error : %s" %
(response.status, response.data))
return response.status
output = json.loads(response.data)
LOG_OBJ.info("Roles list : %s " % output)
print ("Roles list : %s " % output)
return output['roles']
def set_keystone_v3_role(self, role_id, role_new_name):
"""
It set the role properties.
params:
user_id: role ID
role_new_name : new name for role
Return:
On success: True
"""
LOG_OBJ.debug("Creating the role.")
_url = "http://" + self.host_ip + ":35357/v3/roles/" + str(role_id)
_headers = {'x-auth-token': self.cloud_admin_info["token_domain"],
'content-type': 'application/json'}
_role_info = {"role": {
"name": role_new_name}}
_body = json.dumps(_role_info)
response = self.request("PATCH", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while set the role")
print ("No response from Server while set the role")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error(" Set role Failed with status %s and error : %s" %
(response.status, response.data))
print (" Set role Failed with status %s and error : %s" %
(response.status, response.data))
return response.status
return True
def show_keystone_v3_role(self, role_id):
"""
It gives the role info.
params: role_id: Id of the project.
Returns: dictionary containing role details.
"""
LOG_OBJ.debug("Details of a role.")
_url = "http://" + self.host_ip + ":35357/v3/roles/" + str(role_id)
_headers = {'x-auth-token': self.cloud_admin_info["token_domain"],
'content-type': 'application/json'}
_body = None
response = self.request("GET", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while getting the "
"details of role")
print ("No response from Server while getting the "
"details of role")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Show role Failed with status %s and error : %s" %
(response.status, response.data))
print ("Show role Failed with status %s and error : %s" %
(response.status, response.data))
return response.status
output = json.loads(response.data)
LOG_OBJ.info("Role details : %s " % output)
print ("Role details : %s " % output)
return output['role']
def remove_keystone_v3_role_from_user_or_group(self, user_id,
domain_id, role_id):
"""
It removes a role from a user or group.
params:
role_id: role ID,
domain_id: domain ID,
user_id: user ID
Return:
On success: True
"""
LOG_OBJ.debug("Removing the role.")
_url = "http://" + self.host_ip + ":35357/v3/domains/" + \
str(domain_id) + "/users/" + str(user_id) + "/roles/" + \
str(role_id)
_headers = {'x-auth-token': self.cloud_admin_info["token_domain"],
'content-type': 'application/json'}
_body = None
response = self.request("DELETE", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while removing role")
print ("No response from Server while removing role")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error("Removing role Failed with status %s "
"and error : %s" % (response.status, response.data))
print ("Removing role Failed with status %s and error : %s" %
(response.status, response.data))
return response.status
return True
    def add_keystone_v3_role_to_user_or_group(self, user_id, role_id,
                                              pro_dom_id, id_flag):
        """
        It adds a role to a user on either a domain or a project.
        params:
            user_id: user ID,
            role_id: role ID,
            pro_dom_id: project ID or domain ID (interpreted per id_flag),
            id_flag = "domain"/ "project" (used to build ".../domains/.."
                or ".../projects/.." in the URL)
        Return:
            On success: True
            On non-2xx response: the HTTP status code (int)
            On no response: None
        """
        LOG_OBJ.debug("Adding the role.")
        _url = "http://" + self.host_ip + ":35357/v3/" + id_flag + "s/" + \
            str(pro_dom_id) + "/users/" + str(user_id) + "/roles/" + \
            str(role_id)
        _headers = {'x-auth-token': self.cloud_admin_info["token_domain"],
                    'content-type': 'application/json'}
        _body = None
        # Keystone grants a role with an empty-bodied PUT.
        response = self.request("PUT", _url, _headers, _body)
        if response is None:
            LOG_OBJ.error("No response from Server while adding role")
            print ("No response from Server while adding role")
            return response
        if response.status not in [200, 201, 202, 203, 204]:
            LOG_OBJ.error("Adding role Failed with status %s "
                          "and error : %s" % (response.status, response.data))
            print ("Adding role Failed with status %s and error : %s" %
                   (response.status, response.data))
            return response.status
        return True
def list_assigned_keystone_v3_roles(self, **kwargs):
"""
It gives list of all the roles assigned to users/projects/domains.
paramenters :
kwargs : dictionary contains
Optional argument :
role = role ID
user = user ID
domain = domain ID (or) project = project ID
Return:
On success: List of roles assignment list
"""
LOG_OBJ.debug("List the roles.")
url_filter = ""
for argument in kwargs.keys():
if "id" in url_filter:
url_filter += "&"
if argument in ["role", "user"]:
url_filter += argument + ".id=" + kwargs[argument]
elif argument in ["domain", "project"]:
url_filter += "scope." + argument + ".id=" + kwargs[argument]
_url = "http://" + self.host_ip + ":35357/v3/role_assignments"
if url_filter:
_url += "?" + url_filter
_headers = {'x-auth-token': self.cloud_admin_info["token_domain"],
'content-type': 'application/json'}
_body = None
response = self.request("GET", _url, _headers, _body)
if response is None:
LOG_OBJ.error("No response from Server while listing "
"roles assignment")
print ("No response from Server while listing roles assignment")
return response
if response.status not in [200, 201, 202, 203, 204]:
LOG_OBJ.error(" List roles assignment is Failed with status %s "
"and error : %s" % (response.status, response.data))
print (" List roles asignment is Failed with status %s "
"and error : %s" % (response.status, response.data))
return response.status
output = json.loads(response.data)
LOG_OBJ.info("Roles assignment list : %s " % output)
print ("Roles assignment list : %s " % output)
return output['role_assignments']
    def get_keystone_v3_role_id(self, role_name):
        """
        It gives the role ID.
        params: role_name: name of the role.
        Returns:
            the role ID (unicode) when exactly one role matches,
            None when zero or multiple roles share the name,
            the HTTP status code (int) on a non-2xx response,
            None when the server did not respond.
        """
        LOG_OBJ.debug("Get the role ID.")
        # Filter server-side by name; the response still comes back as a
        # list under 'roles'.
        _url = "http://" + self.host_ip + ":35357/v3/roles?name=" + \
            str(role_name)
        _headers = {'x-auth-token': self.cloud_admin_info["token_domain"],
                    'content-type': 'application/json'}
        _body = None
        response = self.request("GET", _url, _headers, _body)
        if response is None:
            LOG_OBJ.error("No response from Server while getting the "
                          "ID of role")
            print ("No response from Server while getting the "
                   "ID of role")
            return response
        if response.status not in [200, 201, 202, 203, 204]:
            LOG_OBJ.error("Get role ID Failed with status %s and error "
                          ": %s" % (response.status, response.data))
            print ("Get role ID Failed with status %s and error : %s" %
                   (response.status, response.data))
            return response.status
        output = json.loads(response.data)
        LOG_OBJ.info("role details : %s " % output)
        print ("role details : %s " % output)
        # Require an unambiguous match; otherwise fall through to None.
        if len(output['roles']) != 1:
            LOG_OBJ.debug("No. of roles with name %s is %s"
                          % (role_name, len(output['roles'])))
            print("No. of roles with name %s is %s"
                  % (role_name, len(output['roles'])))
            return
        return output['roles'][0]['id']
    def poll_on_server_to_delete(self, service_id, monitor_time=200):
        """
        Polls for deletion of server
        params:
            service_id: ID of the server. (NOTE(review): the original doc
                called this ``server_id``; the signature uses service_id.)
            monitor_time: Number of seconds to poll on server.
        Return:
            True on successful deletion of server.
            error msg if server not got deleted.
        """
        # monitor_time = 300
        # Python 2 integer division: one poll attempt per 10 seconds.
        # NOTE(review): range(1, iteration) performs iteration-1 attempts,
        # so the effective timeout is ~10s shorter than monitor_time.
        iteration = monitor_time / 10
        try:
            # Temporarily switch to the cloud-admin tenant so that
            # list_servers() runs with admin credentials.
            old_project_info = self.set_tenant_info(
                self.cloud_admin_info["project_name"],
                self.cloud_admin_info["token_domain"],
                self.cloud_admin_info["token_project"],
                self.cloud_admin_info["project_id"])
            for attempt in range(1, iteration):
                services_list = self.list_servers()
                # assuming services_list is list of dict in true case.
                if not isinstance(services_list, list):
                    return services_list
                server_id_list = []
                for service_dict in services_list:
                    server_id_list.append(service_dict["id"])
                if service_id in server_id_list:
                    msg = "Server %s not yet deleted at attempt %s "\
                        "retrying once more...."\
                        % (service_id, attempt)
                    LOG_OBJ.warning(msg)
                    time.sleep(10)
                else:
                    msg = "Server %s deleted successfully" % (service_id)
                    LOG_OBJ.info(msg)
                    break
            else:
                # for-else: every attempt still saw the server listed.
                msg = "Server %s not deleted" % (service_id)
                LOG_OBJ.info(msg)
                return msg
            return True
        except Exception as err:
            err_msg = "Exception %s occurred in polling server to delete" % err
            LOG_OBJ.exception(err_msg)
            return err_msg
        finally:
            # NOTE(review): if set_tenant_info above raised before
            # old_project_info was bound, this line raises NameError.
            reset_project_info = self.set_tenant_info(
                *old_project_info)
            if not isinstance(reset_project_info, tuple):
                LOG_OBJ.warning("Not able to reset tenant info")
    def create_keystone_v3_user_and_add_roles(self, project_info, domain_id,
                                              domain_role, project_id):
        """
        This creates a user and adds the user to domain and project,
        with given roles
        params:
            project_info: project information dictionary (dict)
                {'project_name': '',
                 'user_name': '',
                 'password': '',
                 'roles': []
                 }
            domain_id: Id of domain.
            domain_role: Name of role to add user to domain
            project_id: Id of project
        Returns: user Id (unicode) on success, or an error-message string
            on failure (callers tell them apart via isinstance checks).
        """
        kwargs = {"name": project_info['user_name'],
                  "password": project_info['password']}
        if domain_id:
            kwargs.update({"domain_id": domain_id})
        if project_id:
            kwargs.update({"default_project_id": project_id})
        user_id = self.create_keystone_v3_user(**kwargs)
        # Python 2 convention in this module: a unicode value is the ID,
        # anything else is an error indicator.
        if not isinstance(user_id, unicode):
            err_msg = "Problem while creating user: %s." % kwargs['name']
            LOG_OBJ.error(err_msg)
            return err_msg
        # Grant the domain role on the domain, then the project roles on
        # the project, reusing the same loop body for both scopes.
        for pro_dom_id in [domain_id, project_id]:
            if pro_dom_id == domain_id:
                id_flag = "domain"
                roles = [domain_role]
            else:
                id_flag = "project"
                roles = project_info['roles']
            for role in roles:
                role_id = self.get_keystone_v3_role_id(role)
                if not isinstance(role_id, unicode):
                    err_msg = "Failed to get role: %s. " % (role)
                    LOG_OBJ.error(err_msg)
                    return err_msg
                output = self.add_keystone_v3_role_to_user_or_group(
                    user_id, role_id, pro_dom_id, id_flag)
                if output is not True:
                    err_msg = ("Adding role %s to user-id %s is failed"
                               % (role, user_id))
                    LOG_OBJ.error(err_msg)
                    return err_msg
        return user_id
    def create_keystone_v3_project_user(self, domain_name, domain_role,
                                        project_details, set_context=True):
        """
        Creates project in a domain, Creates user and adds the user
        to domain and project with given roles
        params:
            domain_name: name of the domain.
            domain_role: Name of role to add user to domain
            project_details: project information dictionary (dict)
                {'project_name': '',
                 'user_name': '',
                 'password': '',
                 'roles': []
                 }
            set_context: Whether to set the project context or NOT.
        Returns: project Id (unicode) on success, or an error-message
            string on failure.
        """
        domain_id = self.get_keystone_v3_domain_id(domain_name)
        if not isinstance(domain_id, unicode):
            err_msg = ("Get domain id is failed with reason %s" % domain_id)
            LOG_OBJ.error(err_msg)
            return err_msg
        # Creation of project
        kwargs = {"name": project_details['project_name'],
                  "domain_id": domain_id}
        project_id = self.create_keystone_v3_project(**kwargs)
        if not isinstance(project_id, unicode):
            err_msg = ("Project creation failed with reason %s" % project_id)
            LOG_OBJ.error(err_msg)
            return err_msg
        # creation of user with adding roles.
        user_id = self.create_keystone_v3_user_and_add_roles(
            project_details, domain_id, domain_role, project_id)
        if not isinstance(user_id, unicode):
            err_msg = ("Problem while creating user and assigning role."
                       "Reason %s" % user_id)
            LOG_OBJ.error(err_msg)
            return err_msg
        # Set the context to that of this new user of the tenant.
        if set_context:
            tokens = []
            # Fetch both a domain-scoped and a project-scoped token for
            # the newly created user.
            for token_scope in ["domain", "project"]:
                token = self.get_keystone_v3_token(
                    project_details['project_name'], domain_name,
                    project_details['user_name'], project_details['password'],
                    scope=token_scope)
                # NOTE: The token id is of type str not unicode, in v3 case.
                if not isinstance(token, str):
                    err_msg = ("Get v3 user token is failed with "
                               "reason %s" % token)
                    LOG_OBJ.error(err_msg)
                    return err_msg
                tokens.append(token)
            # Set the token
            self.set_tenant_info(project_details['project_name'], tokens[0],
                                 tokens[1], project_id)
        return project_id
    def reboot_server(self, server_id, action="reboot", action_type="soft"):
        """
        Reboots(hard/soft)/Starts/Stops the server.
        params: server_id : id of the server to reboot/start/stop.
            action: action can be either reboot or start or stop.
            action_type: takes either soft/hard values, applies only for
                reboot action.
        Return:
            True on success, the HTTP status code (int) on a non-2xx
            response, None when the server did not respond or on
            exception.
        """
        try:
            LOG_OBJ.info("server %s %sing..." % (server_id, action))
            # Nova v2 server action endpoint under the admin project.
            _url = "http://" + self.host_ip + \
                ":8774/v2/" + self.cloud_admin_info["project_id"] + \
                "/servers/" + server_id + "/action"
            _headers = {'x-auth-token': self.cloud_admin_info["token_project"],
                        'content-type': 'application/json'}
            # Select the action payload; unknown actions fall through to a
            # reboot (SOFT unless action_type says hard).
            if action.lower() == "start":
                _body = '{"os-start": null}'
            elif action.lower() == "stop":
                _body = '{"os-stop": null}'
            else:
                if action_type.lower() == "hard":
                    _body = '{"reboot": {"type": "HARD"}}'
                else:
                    _body = '{"reboot": {"type": "SOFT"}}'
            response = self.request("POST", _url, _headers, _body)
            if response is None:
                LOG_OBJ.error("No response from Server while performing %s" %
                              action)
                print "No response from Server while performing %s" % action
                return response
            if response.status not in [200, 201, 202, 203, 204]:
                LOG_OBJ.error(" %s action Failed with status %s and error %s" %
                              (action, response.status, response.data))
                print " %s action Failed with status %s and error : %s" % \
                    (action, response.status, response.data)
                return response.status
            return True
        except Exception as err:
            err_msg = "Exception %s occurred in %sing server to delete" % \
                (err, action)
            LOG_OBJ.exception(err_msg)
            return None
|
994,759 | cf665f5b1bfa3488696a06983e0c74cfeda0a075 | #Podemos fazer repetições em python através do laço while
# Let's build a counter from 0 to 10.
cont = 0
# while tests its condition and, while it is true, runs the body
# over and over until the condition becomes false.
# While cont is less than or equal to 10, print it and add 1.
while cont <= 10 :
    print(cont)
    cont += 1
# Once cont reaches 11 this block is no longer executed.
|
994,760 | 243a2964aeef7447fc007afb30af69ce80187301 | import random
# Number-guessing game: the player guesses a random number from 1 to 9
# and is told to go higher or lower until correct (or types "exit").
# FIX: removed the unused local `won = False` — it was never read or
# updated anywhere in the loop.
number = random.randint(1, 9)
guessCount = 0
while True:
    guess = input("Guess a number between 1 and 9 - or type exit to quit: ")
    if guess == "exit":
        break
    else:
        guess = int(guess)
        guessCount += 1
        if number > guess:
            print("Higher...")
        elif number < guess:
            print("Lower...")
        else:
            print(f"You got it in {guessCount} guesses!")
break |
994,761 | 77f3ada8b349dc8eb55751c3a7e21a5f835f173b | from TechnicalAnalysis import *
import time
class WilliamsIndicatorsTest(WilliamsIndicators):
    """Ad-hoc manual test driver for WilliamsIndicators.
    Each ``test_*`` method exercises one indicator and prints the result;
    no assertion framework is involved.
    NOTE(review): ``__init__`` does not call ``super().__init__()`` even
    though the class subclasses WilliamsIndicators, and several methods
    read ``self.all_candles`` / ``self.candles_all_time`` whose
    initialisation is commented out below — those tests raise
    AttributeError as written.
    """
    def __init__(self):
        self.WI_test = WilliamsIndicators()
        #self.all_candles = BinanceConnect.get_candles_list(self, self.currency_pair, self.time_interval)
        #self.candles_all_time = self.get_candles_list_all_time(self.currency_pair, self.time_interval)
    def test_check_bull_bar(self):
        # Print the bull-bar check for the two most recent candles.
        print(self.WI_test.check_bull_bar_dataframe(self.get_n_candles_as_dataframe(2)))
    def test_check_bear_bar(self):
        # Print the bear-bar check for the two most recent candles.
        print(self.WI_test.check_bear_bar_dataframe(self.get_n_candles_as_dataframe(2)))
    def test_aligator_plot(self):
        #measure script run time
        start_time = time.time()
        #get last 1000 candles list
        candles = self.WI_test.get_candles_list(self.currency_pair, self.time_interval)
        i = 999
        candles_list = candles[i-70:i]
        self.WI_test.alligator_plot(candles_list)
        #measure script run time
        print('running time', time.time() - start_time, 'sec.')
    def test_smma(self):
        # get last 1000 candles list
        candles = self.WI_test.get_candles_list(self.currency_pair, self.time_interval)
        #candles = pd.read_csv('/Volumes/Data/Dropbox/Dropbox/Coding/BinanceTrade/binance_candles.csv')
        i = 979
        candles_list = candles[i - 20:i]
        #candles_list = [1,2,3,1,2,3,1,2,3,1,2,3,1,2,3,1,2,3,1,2]
        print(self.WI_test.SMMA(candles_list, 5, 3))
        print(len(self.WI_test.SMMA(candles_list, 5, 3)))
    def test_alligator_distance_between_lines(self):
        # get last 1000 candles list and pass part of them to alligator
        candles = self.WI_test.get_candles_list(self.currency_pair, self.time_interval)
        i = 999
        candles_list = candles[i - 70:i]
        # NOTE(review): candles_list is built but never passed to the
        # call below — likely a bug in this manual test.
        print(self.WI_test.alligator_distance_between_lines())
    def test_distance_between_alligator_and_candles(self):
        two_candles_list = self.all_candles[998:]
        two_candles_index = 998
        print(self.WI_test.distance_between_alligator_and_candles(two_candles_list, two_candles_index))
    def test_check_bar_type(self):
        # Slide a two-candle window over candles 300..516 and print each
        # bar-type classification.
        for i in range(300, 515):
            two_candles_list = self.all_candles[i:i+2]
            print(self.WI_test.check_bar_type(two_candles_list))
    def test_trend_direction(self):
        for i in range(300, 515):
            two_candles_list = self.all_candles[i:i+2]
            print(self.WI_test.trend_direction(two_candles_list))
    def test_profitunity_windows(self):
        for i in range(300, 515):
            two_candles_list = self.all_candles[i:i+2]
            print(self.WI_test.profitunity_windows(two_candles_list))
    def test_newbee_strategy(self):
        # Combine the three indicator outputs per window, as the
        # "newbee" strategy would consume them.
        for i in range(300, 515):
            two_candles_list = self.all_candles[i:i+2]
            trend = self.WI_test.trend_direction(two_candles_list)
            bar_type = self.WI_test.check_bar_type(two_candles_list)
            window = self.WI_test.profitunity_windows(two_candles_list)
            print(trend, bar_type, window)
    def test_angularation(self):
        two_candles_index = len(self.candles_all_time) - 2
        print(self.WI_test.angularation(two_candles_index))
        #print(len(self.WI_test.angularation(two_candles_index)))
# measure script run time
start_time = time.time()
# currency_dict = {'LTC': 0, 'USDT': 1000}
# time_interval = Client.KLINE_INTERVAL_15MINUTE
# Manual test entry point: instantiate the driver and run one test.
# The remaining calls are kept commented out for quick switching.
x = WilliamsIndicatorsTest()
# x.test_check_bull_bar()
# x.test_check_bear_bar()
#x.test_smma()
#x.test_alligator_distance_between_lines()
# x.test_distance_between_alligator_and_candles()
# x.test_aligator_plot()
#x.test_check_bar_type()
#x.test_trend_direction()
#x.test_profitunity_windows()
#x.test_newbee_strategy()
x.test_angularation()
# measure script run time
print('Total running time', time.time() - start_time, 'sec.') |
994,762 | 85ef59830d6cd89109fcac38c110f5d107311788 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-12-19 17:35
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: rename the ``documents`` field to
    ``document`` on eight wetLab models. Do not hand-edit once applied."""
    dependencies = [
        ('wetLab', '0034_auto_20161219_1728'),
    ]
    operations = [
        migrations.RenameField(
            model_name='biosample',
            old_name='documents',
            new_name='document',
        ),
        migrations.RenameField(
            model_name='biosource',
            old_name='documents',
            new_name='document',
        ),
        migrations.RenameField(
            model_name='enzyme',
            old_name='documents',
            new_name='document',
        ),
        migrations.RenameField(
            model_name='individual',
            old_name='documents',
            new_name='document',
        ),
        migrations.RenameField(
            model_name='modification',
            old_name='documents',
            new_name='document',
        ),
        migrations.RenameField(
            model_name='othertreatment',
            old_name='documents',
            new_name='document',
        ),
        migrations.RenameField(
            model_name='treatmentchemical',
            old_name='documents',
            new_name='document',
        ),
        migrations.RenameField(
            model_name='treatmentrnai',
            old_name='documents',
            new_name='document',
        ),
    ]
|
994,763 | 1cf78e3b8fd299eb4c8e43cac5cd5ba90e8c6dd6 | from django.urls import path
from .views import UserSerializerListView, CustomAuthToken
# API routes: the root path lists users; 'token-auth/' returns an auth
# token for posted credentials.
urlpatterns = [
    path('', UserSerializerListView.as_view()),
    path('token-auth/', CustomAuthToken.as_view()),
] |
994,764 | 5310810f8672207b5e0830a2bff5c9ea31751965 | bicycles = ['trek', 'redline', 'connondale']
message = "I have a " + bicycles[0].title() + "!"
print(message)
# BUG FIX: the original line was `bicycles[0] = 'honada' //修改列表的值`.
# In Python `//` is floor division, not a comment marker, so that line
# floor-divided the string by an undefined identifier and raised a
# NameError at runtime; the trailing text belongs in a `#` comment.
bicycles[0] = 'honada'  # modify a list element in place
print(bicycles[0])
|
994,765 | 50ab0b8c5f7a69e1def44431666daeeae3563e48 | # Generated by Django 3.1.4 on 2020-12-10 22:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alter the ``name`` field of ``categoria``
    (CharField, max_length=58, verbose_name 'Categoria'). Do not
    hand-edit once applied."""
    dependencies = [
        ('proyecto', '0006_delete_aparece'),
    ]
    operations = [
        migrations.AlterField(
            model_name='categoria',
            name='name',
            field=models.CharField(max_length=58, verbose_name='Categoria'),
        ),
    ]
|
994,766 | 030a541fc8172a6678db1451db1a9b84c973d9eb | # Copyright (c) OpenMMLab. All rights reserved.
import copy
from typing import Optional, Sequence
import torch
import torch.nn.functional as F
from mmocr.utils.typing_utils import TextRecogDataSample
from torch import nn
from mmdeploy.core import FUNCTION_REWRITER, MODULE_REWRITER
@FUNCTION_REWRITER.register_rewriter(
    func_name='mmocr.models.textrecog.decoders.ParallelSARDecoder'
    '._2d_attention',
    backend='default')
def parallel_sar_decoder__2d_attention(
        self,
        decoder_input: torch.Tensor,
        feat: torch.Tensor,
        holistic_feat: torch.Tensor,
        valid_ratios: Optional[Sequence[float]] = None) -> torch.Tensor:
    """Rewrite `_2d_attention` of ParallelSARDecoder for default backend.
    Rewrite this function to:
    1. use torch.ceil to replace original math.ceil and if else in mmocr.
    2. use narrow to replace original [valid_width:] in mmocr
    Both replacements keep the mask computation expressed purely in torch
    ops, which matters when tracing/exporting the model.
    """
    y = self.rnn_decoder(decoder_input)[0]
    # y: bsz * (seq_len + 1) * hidden_size
    attn_query = self.conv1x1_1(y)  # bsz * (seq_len + 1) * attn_size
    bsz, seq_len, attn_size = attn_query.size()
    attn_query = attn_query.view(bsz, seq_len, attn_size, 1, 1)
    attn_key = self.conv3x3_1(feat)
    # bsz * attn_size * h * w
    attn_key = attn_key.unsqueeze(1)
    # bsz * 1 * attn_size * h * w
    attn_weight = torch.tanh(torch.add(attn_key, attn_query, alpha=1))
    # bsz * (seq_len + 1) * attn_size * h * w
    attn_weight = attn_weight.permute(0, 1, 3, 4, 2).contiguous()
    # bsz * (seq_len + 1) * h * w * attn_size
    attn_weight = self.conv1x1_2(attn_weight)
    # bsz * (seq_len + 1) * h * w * 1
    bsz, T, h, w, c = attn_weight.size()
    assert c == 1
    if valid_ratios is not None:
        # cal mask of attention weight
        # Mark the columns beyond each sample's valid width so they get
        # -inf (i.e. zero weight) before the softmax below.
        attn_mask = torch.zeros(bsz, T, h, w + 1, c).to(attn_weight.device)
        for i, valid_ratio in enumerate(valid_ratios):
            # use torch.ceil to replace original math.ceil and if else in mmocr
            valid_width = torch.tensor(w * valid_ratio).ceil().long()
            # use narrow to replace original [valid_width:] in mmocr
            # narrow() returns a view, so this assignment writes in place;
            # the extra w+1 column guarantees the narrow length is > 0.
            attn_mask[i].narrow(2, valid_width, w + 1 - valid_width)[:] = 1
        attn_mask = attn_mask[:, :, :, :w, :]
        attn_weight = attn_weight.masked_fill(attn_mask.bool(), float('-inf'))
    attn_weight = attn_weight.view(bsz, T, -1)
    attn_weight = F.softmax(attn_weight, dim=-1)
    attn_weight = attn_weight.view(bsz, T, h, w, c).permute(0, 1, 4, 2,
                                                            3).contiguous()
    # Weighted sum over the spatial dims -> one feature vector per step.
    attn_feat = torch.sum(
        torch.mul(feat.unsqueeze(1), attn_weight), (3, 4), keepdim=False)
    # bsz * (seq_len + 1) * C
    # linear transformation
    if self.pred_concat:
        hf_c = holistic_feat.size(-1)
        holistic_feat = holistic_feat.expand(bsz, seq_len, hf_c)
        y = self.prediction(torch.cat((y, attn_feat, holistic_feat), 2))
    else:
        y = self.prediction(attn_feat)
    # bsz * (seq_len + 1) * num_classes
    y = self.pred_dropout(y)
    return y
@FUNCTION_REWRITER.register_rewriter(
    func_name='mmocr.models.textrecog.decoders.SequentialSARDecoder'
    '._2d_attention',
    backend='default')
def sequential_sar_decoder__2d_attention(self,
                                         y_prev,
                                         feat,
                                         holistic_feat,
                                         hx1,
                                         cx1,
                                         hx2,
                                         cx2,
                                         valid_ratios=None):
    """Rewrite `_2d_attention` of SequentialSARDecoder for default backend.
    Rewrite this function to:
    1. use torch.ceil to replace original math.ceil and if else in mmocr.
    2. use narrow to replace original [valid_width:] in mmocr
    The rnn_decoder_layer1/2 modules are nn.LSTM here (swapped in by the
    SequentialSARDecoder module rewriter in this file), so the LSTM call
    convention (seq-first input, (h, c) state tuple) is used.
    """
    _, _, h_feat, w_feat = feat.size()
    if self.dec_gru:
        hx1 = cx1 = self.rnn_decoder_layer1(y_prev, hx1)
        hx2 = cx2 = self.rnn_decoder_layer2(hx1, hx2)
    else:
        # has replaced LSTMCell with LSTM, forward func need rewrite
        _, (hx1,
            cx1) = self.rnn_decoder_layer1(y_prev.unsqueeze(0), (hx1, cx1))
        _, (hx2, cx2) = self.rnn_decoder_layer2(hx1, (hx2, cx2))
    # Reshape the top-layer hidden state to bsz * hidden * 1 * 1 so the
    # 1x1 conv can broadcast it over the feature map.
    tile_hx2 = hx2.view(hx2.size(1), hx2.size(-1), 1, 1)
    attn_query = self.conv1x1_1(tile_hx2)  # bsz * attn_size * 1 * 1
    attn_query = attn_query.expand(-1, -1, h_feat, w_feat)
    attn_key = self.conv3x3_1(feat)
    attn_weight = torch.tanh(torch.add(attn_key, attn_query, alpha=1))
    attn_weight = self.conv1x1_2(attn_weight)
    bsz, c, h, w = attn_weight.size()
    assert c == 1
    if valid_ratios is not None:
        # cal mask of attention weight
        attn_mask = torch.zeros(bsz, c, h, w + 1).to(attn_weight.device)
        for i, valid_ratio in enumerate(valid_ratios):
            # use torch.ceil to replace original math.ceil and if else in mmocr
            valid_width = torch.tensor(w * valid_ratio).ceil().long()
            # use narrow to replace original [valid_width:] in mmocr
            attn_mask[i].narrow(2, valid_width, w + 1 - valid_width)[:] = 1
        attn_mask = attn_mask[:, :, :, :w]
        attn_weight = attn_weight.masked_fill(attn_mask.bool(), float('-inf'))
    attn_weight = F.softmax(attn_weight.view(bsz, -1), dim=-1)
    attn_weight = attn_weight.view(bsz, c, h, w)
    attn_feat = torch.sum(
        torch.mul(feat, attn_weight), (2, 3), keepdim=False)  # n * c
    # linear transformation
    if self.pred_concat:
        y = self.prediction(torch.cat((hx2[0], attn_feat, holistic_feat), 1))
    else:
        y = self.prediction(attn_feat)
    # NOTE(review): hx1/hx2 are returned in the cell-state slots instead
    # of cx1/cx2. The caller unpacks this as (y, hx1, cx1, hx2, cx2), so
    # confirm against upstream mmdeploy whether this is intentional.
    return y, hx1, hx1, hx2, hx2
@FUNCTION_REWRITER.register_rewriter(
    func_name='mmocr.models.textrecog.decoders.SequentialSARDecoder'
    '.forward_test',
    backend='default')
def sequential_sar_decoder__forward_test(
        self,
        feat: torch.Tensor,
        out_enc: torch.Tensor,
        data_samples: Optional[Sequence[TextRecogDataSample]] = None):
    """Rewrite `forward_test` of SequentialSARDecoder for default backend.
    Rewrite this function because LSTMCell has been replaced with LSTM. The two
    class have different forward functions. The `forward_test` need adapt to
    this change.
    Returns the stacked per-step class scores of shape
    (bsz, max_seq_len, num_classes) — assuming the usual SAR layout;
    confirm against the postprocessor.
    """
    valid_ratios = None
    if data_samples is not None:
        valid_ratios = [
            data_sample.get('valid_ratio', 1.0) for data_sample in data_samples
        ] if self.mask else None
    outputs = []
    # Every sequence starts from the embedded start token.
    start_token = torch.full((feat.size(0), ),
                             self.start_idx,
                             device=feat.device,
                             dtype=torch.long)
    start_token = self.embedding(start_token)
    # Iteration -1 only initializes the recurrent states from the
    # holistic encoder feature; later iterations decode greedily,
    # feeding back the argmax character embedding.
    for i in range(-1, self.max_seq_len):
        if i == -1:
            if self.dec_gru:
                hx1 = cx1 = self.rnn_decoder_layer1(out_enc)
                hx2 = cx2 = self.rnn_decoder_layer2(hx1)
            else:
                # has replaced LSTMCell with LSTM, forward func need rewrite
                _, (hx1, cx1) = self.rnn_decoder_layer1(out_enc.unsqueeze(0))
                _, (hx2, cx2) = self.rnn_decoder_layer2(hx1)
            y_prev = start_token
        else:
            y, hx1, cx1, hx2, cx2 = self._2d_attention(
                y_prev,
                feat,
                out_enc,
                hx1,
                cx1,
                hx2,
                cx2,
                valid_ratios=valid_ratios)
            _, max_idx = torch.max(y, dim=1, keepdim=False)
            char_embedding = self.embedding(max_idx)
            y_prev = char_embedding
            outputs.append(y)
    outputs = torch.stack(outputs, 1)
    return outputs
@MODULE_REWRITER.register_rewrite_module(
    'mmocr.models.textrecog.decoders.SequentialSARDecoder', backend='default')
class SequentialSARDecoder(nn.Module):
    """Rewrite Sequential Decoder module in `SAR.
    SequentialSARDecoder apply nn.LSTMCell inside, which brings obstacles to
    deployment. LSTMCell can be only exported to onnx in cpu device. To make it
    exportable to gpu device, LSTM is used to replace LSTMCell.
    <https://arxiv.org/abs/1811.00751>`_.
    """
    def __init__(self, module, deploy_cfg, **kwargs):
        super(SequentialSARDecoder, self).__init__()
        def lstmcell2lstm_params(lstm_mod, lstmcell_mod):
            # Copy the LSTMCell weights/biases into the single-layer LSTM
            # so the rewritten module is numerically identical.
            lstm_mod.weight_ih_l0 = nn.Parameter(lstmcell_mod.weight_ih)
            lstm_mod.weight_hh_l0 = nn.Parameter(lstmcell_mod.weight_hh)
            lstm_mod.bias_ih_l0 = nn.Parameter(lstmcell_mod.bias_ih)
            lstm_mod.bias_hh_l0 = nn.Parameter(lstmcell_mod.bias_hh)
        self._module = module
        self.deploy_cfg = deploy_cfg
        # Only LSTM-based decoders need the swap; GRU decoders are left
        # untouched.
        if not self._module.dec_gru:
            rnn_decoder_layer1 = copy.deepcopy(self._module.rnn_decoder_layer1)
            rnn_decoder_layer2 = copy.deepcopy(self._module.rnn_decoder_layer2)
            self._module.rnn_decoder_layer1 = nn.LSTM(
                rnn_decoder_layer1.input_size, rnn_decoder_layer1.hidden_size,
                1)
            self._module.rnn_decoder_layer2 = nn.LSTM(
                rnn_decoder_layer2.input_size, rnn_decoder_layer2.hidden_size,
                1)
            lstmcell2lstm_params(self._module.rnn_decoder_layer1,
                                 rnn_decoder_layer1)
            lstmcell2lstm_params(self._module.rnn_decoder_layer2,
                                 rnn_decoder_layer2)
        # Deployment only ever runs the inference path.
        self._module.train_mode = False
    def forward(self,
                feat: Optional[torch.Tensor] = None,
                out_enc: Optional[torch.Tensor] = None,
                data_samples: Optional[Sequence[TextRecogDataSample]] = None):
        # Inference only: delegate to the wrapped module's forward_test
        # (itself rewritten above for the LSTM call convention).
        return self._module.forward_test(feat, out_enc, data_samples)
    def predict(
        self,
        feat: Optional[torch.Tensor] = None,
        out_enc: Optional[torch.Tensor] = None,
        data_samples: Optional[Sequence[TextRecogDataSample]] = None
    ) -> Sequence[TextRecogDataSample]:
        """Perform forward propagation of the decoder and postprocessor.
        Args:
            feat (Tensor, optional): Features from the backbone. Defaults
                to None.
            out_enc (Tensor, optional): Features from the encoder. Defaults
                to None.
            data_samples (list[TextRecogDataSample]): A list of N datasamples,
                containing meta information and gold annotations for each of
                the images. Defaults to None.
        Returns:
            list[TextRecogDataSample]: A list of N datasamples of prediction
            results.  Results are stored in ``pred_text``.
        """
        out_dec = self(feat, out_enc, data_samples)
        return out_dec
|
994,767 | 8f6cf9f95b7bab520045985c6bd260af001fd172 | """
CP1404/CP5632 - Practical
Program gets a password from user and displays the number of characters as asterisks.
"""
MINIMUM_LENGTH = 3
def main():
    """Read a validated password and display it as a row of asterisks."""
    convert_to_asterisks(get_password(MINIMUM_LENGTH))
def get_password(MINIMUM_LENGTH):
    """Prompt until the user enters a password of at least MINIMUM_LENGTH
    characters, then return it.
    NOTE(review): the parameter shadows the module constant of the same
    name; it is kept for caller compatibility.
    """
    prompt = "Enter a password with at least {} characters: ".format(MINIMUM_LENGTH)
    while True:
        password = input(prompt)
        if len(password) >= MINIMUM_LENGTH:
            return password
        print("That password is too short, it must be at least {} characters.".format(MINIMUM_LENGTH))
        prompt = "Enter a password: "
def convert_to_asterisks(password):
    """Print one asterisk for each character of ``password``."""
    masked = "".join("*" for _ in password)
    print(masked)
main()
|
994,768 | c8a197232ff5fea3cc00c84bde66d670624c0f6e | from fractions import gcd
def int_input():
    """Read one whole line from stdin and return it as an int (Python 2)."""
    return int(raw_input())
def list_int_input():
    """Read one line and return its whitespace-separated fields as ints.
    Python 2: ``map`` returns a list, which get_years() mutates."""
    return map(int, raw_input().split())
def get_years():
    """Read one test case line and return the distinct years, newest first.
    The first number on the line is a count and is discarded.
    """
    years = list_int_input()
    # Drop the leading count field.
    del years[0]
    # De-duplicate, then sort in descending order.
    years = list(set(years))
    years.sort(reverse=True)
    return years
def get_diff(years):
    """Return the positive gaps between consecutive entries, sorted
    ascending. Non-positive gaps are skipped (there are none when the
    input is descending and de-duplicated)."""
    gaps = [earlier - later
            for earlier, later in zip(years, years[1:])
            if earlier - later > 0]
    return sorted(gaps)
def get_diff_gcd(diff):
    """Return the GCD of every value in ``diff``, or None when it is empty."""
    result = None
    for value in diff:
        result = value if result is None else gcd(result, value)
    return result
def dividable(years, diff_gcd):
    """Return True iff every year is an exact multiple of ``diff_gcd``."""
    return all(year % diff_gcd == 0 for year in years)
def main():
    """Solve each stdin test case and print Code Jam-style answers.
    The answer is 0 when the gap GCD is 1 or the years already divide it
    evenly; otherwise it is the amount needed to reach the next multiple.
    NOTE(review): when a case has a single distinct year, get_diff()
    returns [] and diff_gcd is None, so ``y % diff_gcd`` below would
    raise TypeError — confirm the input never contains such a case.
    """
    for c in range(int_input()):
        years = get_years()
        diff = get_diff(years)
        diff_gcd = get_diff_gcd(diff)
        if diff_gcd == 1:
            answer = 0
        elif dividable(years, diff_gcd):
            answer = 0
        else:
            answer = diff_gcd - (years[0] % diff_gcd)
        # Python 2 print statement; output format "Case #i: answer".
        print 'Case #%d: %d' % (c+1, answer)
main()
|
994,769 | 1502067b1e6a2d394baf7c6b223f13cf56fb4eec | """Intersect a single CSV file with multiple shapefiles."""
import argparse
from concurrent.futures import ProcessPoolExecutor
import os
from random import randint
from time import sleep
from bison.common import BisonFiller
from bison.common import get_logger, get_chunk_files, get_line_count
# .............................................................................
def intersect_csv_and_shapefiles(in_csv_filename, geodata1, geodata2,
                                 ancillary_path, out_csv_filename, from_gbif):
    """Intersect the records in the csv file with the two provided shapefiles.
    Args:
        in_csv_filename (str): Path to a CSV file of records.
        geodata1 (str): Path to the first shapefile to check for
            intersection.
        geodata2 (str): Path to the second shapefile to check for
            intersection.
        ancillary_path (str): Path to ancillary data passed through to
            BisonFiller.
        out_csv_filename (str): Path for output CSV records.
        from_gbif (bool): Whether the input records originate from GBIF.
    """
    pth, basefname = os.path.split(out_csv_filename)
    logbasename, _ = os.path.splitext(basefname)
    # One log file per output chunk, written next to the output CSV.
    logfname = os.path.join(pth, '{}.log'.format(logbasename))
    logger = get_logger(logbasename, logfname)
    bf = BisonFiller(log=logger)
    # Pass 4 of CSV transform, final step, point-in-polygon intersection
    bf.update_point_in_polygons(
        geodata1, geodata2, ancillary_path, in_csv_filename, out_csv_filename,
        from_gbif=from_gbif)
    # Do intersection here
    # NOTE(review): this random sleep looks like leftover scaffolding
    # from testing the process pool; the real work happens above.
    sleep(randint(0, 10))
    print(' - {}'.format(out_csv_filename))
# .............................................................................
def step_parallel(in_csv_filename, terrestrial_data, marine_data, ancillary_path,
                  out_csv_filename, from_gbif=True):
    """Main method for parallel execution of geo-referencing script.
    Splits the input CSV into chunks, intersects each chunk with the two
    shapefiles in its own process, then concatenates the chunk outputs
    (header written once) into ``out_csv_filename``.
    """
    csv_filename_pairs, header = get_chunk_files(
        in_csv_filename, out_csv_filename=out_csv_filename)
    # in_csv_fn, out_csv_fn = csv_filename_pairs[0]
    # intersect_csv_and_shapefiles(in_csv_fn, terrestrial_data,
    #                              marine_data, ancillary_path, out_csv_fn, False)
    # One worker per chunk; the with-block blocks until all finish.
    with ProcessPoolExecutor() as executor:
        for in_csv_fn, out_csv_fn in csv_filename_pairs:
            executor.submit(
                intersect_csv_and_shapefiles, in_csv_fn, terrestrial_data,
                marine_data, ancillary_path, out_csv_fn, from_gbif)
    # NOTE(review): if open() itself fails, ``outf`` is unbound and the
    # finally clause raises NameError.
    try:
        outf = open(out_csv_filename, 'w', encoding='utf-8')
        outf.write('{}'.format(header))
        smfile_linecount = 0
        for _, small_csv_fn in csv_filename_pairs:
            curr_linecount = get_line_count(small_csv_fn) - 1
            print('Appending {} records from {}'.format(
                curr_linecount, small_csv_fn))
            # Do not count header
            smfile_linecount += (curr_linecount)
            lineno = 0
            try:
                for line in open(small_csv_fn, 'r', encoding='utf-8'):
                    # Skip header in each file
                    if lineno == 0:
                        pass
                    else:
                        outf.write('{}'.format(line))
                    lineno += 1
            except Exception as inner_err:
                print('Failed to write {} to merged file; {}'.format(small_csv_fn, inner_err))
    except Exception as outer_err:
        print('Failed to write to {}; {}'.format(out_csv_filename, outer_err))
    finally:
        outf.close()
    # Sanity report: merged line count vs sum of chunk line counts.
    lgfile_linecount = get_line_count(out_csv_filename) - 1
    print('Total {} of {} records written to {}'.format(
        lgfile_linecount, smfile_linecount, out_csv_filename))
# .............................................................................
def main():
    """Parse command-line arguments and run the CSV/shapefile intersection.
    BUG FIX: the original call passed only 4 of the 6 required arguments
    to intersect_csv_and_shapefiles (``ancillary_path`` and ``from_gbif``
    were missing), so the script raised TypeError on every invocation.
    An ``ancillary_path`` positional argument is now accepted, and
    ``from_gbif`` is passed as True to match step_parallel's default —
    confirm that default against the intended data source.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'csv_filename', type=str, help='Input record CSV file path.')
    parser.add_argument(
        'terrestrial_shapefile_path', type=str,
        help='Terrestrial shapefile for intersection.')
    parser.add_argument(
        'marine_shapefile_path', type=str,
        help='Marine shapefile for intersection.')
    parser.add_argument(
        'ancillary_path', type=str,
        help='Path to ancillary data used during intersection.')
    parser.add_argument(
        'out_csv_path', type=str,
        help='File path for output records CSV file.')
    args = parser.parse_args()
    intersect_csv_and_shapefiles(
        args.csv_filename, args.terrestrial_shapefile_path,
        args.marine_shapefile_path, args.ancillary_path,
        args.out_csv_path, True)
# .............................................................................
if __name__ == '__main__':
main()
|
994,770 | 8a6334ebd06db59dc1da769b9c16ca0723b64c5a | # coding:utf-8
from . import app_seller
from saihan import db
from flask import render_template,request, url_for, current_app, jsonify
from saihan.models import Product, Profile,ProductMedia,Order
from flask_login import current_user, login_required
import os
import datetime
# from saihan.models import ...
@app_seller.route("/items")
@login_required
def seller_items():
    """Render the logged-in seller's item list page."""
    profile = Profile.query.filter_by(user_id=current_user.id).first()
    products = Product.query.filter_by(seller_id=current_user.id)
    # Product status code -> display label (Chinese UI text: "for sale" /
    # "sold"); the template looks labels up through this dict.
    prod_stat = {
        "SELLING":"出售中",
        "SELLED":"已售出"
    }
    # print(url_for("static", filename="images/products/"+products[0].attachments[0].filename))
    return render_template("seller_items.html",
                           profile=profile,
                           products=products,
                           prod_stat=prod_stat)
@app_seller.route("/order",methods=["GET","POST"])
def seller_order():
    """Render every order placed on the logged-in seller's products.
    Builds three index-aligned parallel lists (product, order, buyer
    profile per row) that the template iterates in lockstep via
    ``length``.
    NOTE(review): one query per product plus one per order/buyer (N+1
    pattern) — consider a join if order volume grows.
    """
    # name = Product.query.filter_by(seller_id=current_user.id).all()
    products = Product.query.filter_by(seller_id=current_user.id).all()
    # print(current_app.config["UPLOAD_FOLDER"])
    order_list = []
    product_list = []
    user_list = []
    for product in products:
        orders = Order.query.filter_by(product_id=product.id).all()
        for order in orders:
            user_profile = Profile.query.filter_by(user_id=order.buyer_id).first()
            user_list.append(user_profile)
            order_list.append(order)
            product_list.append(product)
    # Order status code -> display label (Chinese UI text).
    stat_dict = {
        "PENDING":"未付款",
        "PURCHASED":'买家已付款',
        "DELIVERED":'卖家已发货',
        "COMPLETED":'已完成',
        "CANCELLED":"已取消"
    }
    return render_template("seller_order.html",
                           products=product_list,
                           orders=order_list,
                           users=user_list,
                           length=len(order_list),
                           stat_dict=stat_dict)
@app_seller.route("/release", methods=["GET", "POST"])
def seller_release():
    """GET: render the product-release form. POST: create a product plus its image.

    Returns JSON {"resultCode": 200} on success and {"resultCode": 400} when
    the form data is incomplete or the price is not an integer (the original
    crashed with a 500 in those cases).
    """
    if request.method == "GET":
        return render_template("seller_release.html")
    # POST: the client submits a FormData object.
    name = request.form.get('name')
    description = request.form.get('description')
    raw_price = request.form.get('price')
    prod_img = request.files.get('picture')
    # Validate before touching the database so a bad request cannot leave
    # a half-created product behind.
    if not name or raw_price is None or prod_img is None:
        return jsonify({"resultCode": 400})
    try:
        price = int(raw_price)
    except (TypeError, ValueError):
        return jsonify({"resultCode": 400})
    product = Product(seller_id=current_user.id, name=name,
                      description=description, price=price)
    db.session.add(product)
    db.session.commit()
    # Store the image under saihan/static/images/products/..., renamed with a
    # microsecond timestamp to avoid collisions between uploads.
    upload_folder = current_app.config['UPLOAD_FOLDER']
    ftime = datetime.datetime.now().strftime("%Y%m%d%H%M%S%f")
    ext = prod_img.filename.split(".")[-1]  # file extension
    prod_filename = ftime + "." + ext
    prod_img.save(os.path.join(upload_folder, "products/" + prod_filename))
    picture = ProductMedia(product_id=product.id, filename=prod_filename)
    db.session.add(picture)
    db.session.commit()
    return jsonify({"resultCode": 200})
|
994,771 | b890a6e4832dc4203da3e7d49e7204d6f5c35474 | from flask import Blueprint, request # create a blueprint for the routes to be registered to, not necessary but ood for modularization of routes
from back_end.models import create_db_connection, Groups # calling our helper function to create a connection to the databse to execute a request
from botocore.exceptions import ClientError # for exception handling
import random, string, os, boto3, logging, random, string, uuid, io
# used to group a bunch of relted views together
# views in this case can count as code written to create various endpoints
bp = Blueprint('upload', __name__, url_prefix='/api')
@bp.route('/upload_image', methods=["POST"])
def upload():
    """Upload a posted image to S3 and return its public object URL.

    Responds 200 with {"message": <object-url>} on success; 400 when the
    database is unreachable, no file was sent, or the S3 upload fails.

    Fixes over the original: the connection is now closed on every path
    (the trailing close() was unreachable after `return`), and a missing
    file yields an explicit 400 instead of a 200 with the string "None".
    """
    db_connection = create_db_connection()
    if not db_connection:
        return {'error': 'Connection to Database Failed'}, 400
    try:
        # Move the raw request body into request.files so the upload parses.
        request.get_data(parse_form_data=True)
        data = request.files.get('file')
        if not data:
            return {'error': 'No file provided'}, 400
        # Random object key: identical client file names must not overwrite
        # each other in the bucket.
        object_key = str(uuid.uuid4())
        s3 = boto3.client(
            's3',
            aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID'),
            aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY'),
        )
        bucket = os.getenv('BUCKET')
        try:
            # https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-uploading-files.html
            s3.upload_fileobj(data, bucket, object_key,
                              ExtraArgs={"ContentType": "image/jpeg"})
        except ClientError as e:
            logging.error(e)
            return {'error': 'Image Upload Failed'}, 400
        object_url = f"https://{bucket}.s3.amazonaws.com/{object_key}"
        return {"message": f"{object_url}"}, 200
    finally:
        # Close the connection on success, failure, and early returns alike.
        db_connection.close()
994,772 | 755aa5af078f213238e87e1439c67411c9f30e57 | __package_name__ = 'gdal-utils'
# Version as a tuple for programmatic comparison; __version__ renders it
# as the conventional dotted string ("3.3.2.0").
gdal_utils_version = (3, 3, 2, 0)
__version__ = '.'.join(str(i) for i in gdal_utils_version)
# Package metadata consumed by setup tooling.
__author__ = "Frank Warmerdam"
__author_email__ = "warmerdam@pobox.com"
__maintainer__ = "Idan Miara"
__maintainer_email__ = "idan@miara.com"
__description__ = "gdal-utils: An extension library for GDAL - Geospatial Data Abstraction Library"
__license_type__ = "MIT"
__url__ = "http://www.gdal.org"
|
def sequencia(x):
    """Return the next value of the Collatz sequence for x.

    0 and 1 both map to 1 (terminating the chain); even numbers halve,
    odd numbers map to 3x + 1.
    """
    if x == 1 or x == 0:
        return 1
    elif x % 2 == 0:
        # Floor division keeps the chain in integers; the original "/"
        # produced floats under Python 3.
        return x // 2
    else:
        return 3 * x + 1
# Find the starting number below 1000 whose Collatz chain is longest.
longest = 0
for candidate in range(1, 1000):
    value = candidate
    steps = 0
    # Walk the chain until it reaches 1, counting the steps taken.
    while value != 1:
        value = sequencia(value)
        steps += 1
    if steps > longest:
        longest = steps
        best_start = candidate
print(best_start)
|
994,774 | 781235f0bc80e8eb4b0bfe952f70379c48f4ccb2 | import numpy as np
import collections
import cPickle as pickle
import os
import random
from gensim.models import word2vec
def bulid_vocab(dataset, vocab_size):
    """Build word<->id vocabularies from the train/valid splits and pickle
    them under data/<dataset>/.

    Id 0 is reserved for "<unk>"; the remaining ids cover the
    (vocab_size - 1) most frequent tokens. Target lines are wrapped with
    <sos>/<eos> before counting. Fixes the original's leaked file handles
    by using context managers throughout.
    """
    def read_words(path):
        # Whitespace-split tokens of an entire file, closing the handle.
        with open(path, "r") as fh:
            return fh.read().split()

    def read_target_words(path):
        # Target side: every line contributes <sos> ... <eos>.
        words = []
        with open(path, "r") as fh:
            for line in fh:
                words += ["<sos>"] + line.split() + ["<eos>"]
        return words

    data = read_words("dataset/%s/train_source.txt" % (dataset, ))
    data += read_words("dataset/%s/valid_source.txt" % (dataset, ))
    data += read_words("dataset/%s/train_source_2.txt" % (dataset, ))
    data += read_words("dataset/%s/valid_source_2.txt" % (dataset, ))
    data += read_target_words("dataset/%s/train_target.txt" % (dataset, ))
    data += read_target_words("dataset/%s/valid_target.txt" % (dataset, ))
    counter = collections.Counter(data)
    # Deterministic ordering: by descending count, ties broken alphabetically.
    count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
    words, _ = list(zip(*count_pairs))
    id_to_word = ["<unk>"] + list(words[:vocab_size - 1])
    word_to_id = dict(zip(id_to_word, range(len(id_to_word))))
    with open("data/%s/id2word.p" % (dataset, ), "wb") as fh:
        pickle.dump(id_to_word, fh)
    with open("data/%s/word2id.p" % (dataset, ), "wb") as fh:
        pickle.dump(word_to_id, fh)
def bulid_field_vocab(dataset):
    """Build field<->id maps over all three splits and pickle them.

    Unlike bulid_vocab there is no <unk> slot and no size cap: every
    field token gets an id. NOTE(review): file handles are never closed.
    """
    f1 = open("dataset/%s/train_field.txt" % (dataset, ), "r")
    f2 = open("dataset/%s/valid_field.txt" % (dataset, ), "r")
    f3 = open("dataset/%s/test_field.txt" % (dataset, ), "r")
    data = f1.read().split()
    data += f2.read().split()
    data += f3.read().split()
    counter = collections.Counter(data)
    # Descending frequency, ties alphabetical — deterministic ids.
    count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
    words, _ = list(zip(*count_pairs))
    words = list(words)
    id_to_word = words
    word_to_id = dict(zip(id_to_word, range(len(id_to_word))))
    pickle.dump( id_to_word, open("data/%s/id2field.p" % (dataset, ), "wb") )
    pickle.dump( word_to_id, open("data/%s/field2id.p" % (dataset, ), "wb") )
def bulid_pos_vocab(dataset):
    """Build a shared position<->id map over both pos streams of all splits
    and pickle it (same scheme as bulid_field_vocab)."""
    f1 = open("dataset/%s/train_pos1.txt" % (dataset, ), "r")
    f2 = open("dataset/%s/valid_pos1.txt" % (dataset, ), "r")
    f3 = open("dataset/%s/test_pos1.txt" % (dataset, ), "r")
    f4 = open("dataset/%s/train_pos2.txt" % (dataset, ), "r")
    f5 = open("dataset/%s/valid_pos2.txt" % (dataset, ), "r")
    f6 = open("dataset/%s/test_pos2.txt" % (dataset, ), "r")
    data = f1.read().split()
    data += f2.read().split()
    data += f3.read().split()
    data += f4.read().split()
    data += f5.read().split()
    data += f6.read().split()
    counter = collections.Counter(data)
    count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
    words, _ = list(zip(*count_pairs))
    words = list(words)
    id_to_word = words
    word_to_id = dict(zip(id_to_word, range(len(id_to_word))))
    pickle.dump( id_to_word, open("data/%s/id2pos.p" % (dataset, ), "wb") )
    pickle.dump( word_to_id, open("data/%s/pos2id.p" % (dataset, ), "wb") )
def get_embedding(dataset, vocab_size, embedding_size):
    """Return a (len(id2word), embedding_size) word2vec embedding matrix,
    training and caching it on first use.

    Row 0 (<unk>) keeps its random N(0, 1) initialization; all other rows
    are overwritten with trained vectors.
    NOTE(review): `model[word]` is the legacy gensim (<4.0) lookup API.
    """
    if os.path.exists("data/%s/embedding_%d_%d.p" % (dataset, vocab_size, embedding_size)) == False:
        f1 = open("dataset/%s/train_source.txt" % (dataset, ), "r")
        f1_2 = open("dataset/%s/train_source_2.txt" % (dataset, ), "r")
        f2 = open("dataset/%s/train_target.txt" % (dataset, ), "r")
        f3 = open("dataset/%s/valid_source.txt" % (dataset, ), "r")
        f3_2 = open("dataset/%s/valid_source_2.txt" % (dataset, ), "r")
        f4 = open("dataset/%s/valid_target.txt" % (dataset, ), "r")
        data = []
        data.extend([line.split() for line in f1.readlines()])
        data.extend([line.split() for line in f1_2.readlines()])
        # Target sentences are wrapped so <sos>/<eos> get vectors too.
        data.extend([["<sos>"] + line.split() + ["<eos>"] for line in f2.readlines()])
        data.extend([line.split() for line in f3.readlines()])
        data.extend([line.split() for line in f3_2.readlines()])
        data.extend([["<sos>"] + line.split() + ["<eos>"] for line in f4.readlines()])
        model = word2vec.Word2Vec(data, size=embedding_size, workers=16, min_count=1, window=8, iter=20)
        id2word = pickle.load( open("data/%s/id2word.p" % (dataset, ), "rb") )
        embedding = np.random.normal(0, 1, (len(id2word), embedding_size))
        # Start at 1: index 0 is <unk>, which has no trained vector.
        for i in range(1, len(id2word)):
            embedding[i] = model[id2word[i]]
        pickle.dump( embedding, open("data/%s/embedding_%d_%d.p" % (dataset, vocab_size, embedding_size), "wb") )
    else:
        embedding = pickle.load( open("data/%s/embedding_%d_%d.p" % (dataset, vocab_size, embedding_size), "rb") )
    return embedding
def get_train_and_valid(dataset):
    """Load (or build and cache) id-encoded train/valid source and target
    sequences.

    Target lines are wrapped with <sos>/<eos> before encoding; unknown
    words map to the <unk> id.
    NOTE: Python 2 code — map() returns a list there; under Python 3
    these map objects would be pickled instead of lists.
    """
    if os.path.exists("data/%s/train_X.p" % (dataset, )) == False:
        word2id = pickle.load( open("data/%s/word2id.p" % (dataset, ), "rb") )
        unk_id = word2id["<unk>"]
        f_train_X = open("dataset/%s/train_source.txt" % (dataset, ), "r")
        f_train_Y = open("dataset/%s/train_target.txt" % (dataset, ), "r")
        f_valid_X = open("dataset/%s/valid_source.txt" % (dataset, ), "r")
        f_valid_Y = open("dataset/%s/valid_target.txt" % (dataset, ), "r")
        train_lines_X = f_train_X.readlines()
        train_lines_Y = []
        for line in f_train_Y.readlines():
            line = line.strip()
            line = "<sos> " + line + " <eos>"
            train_lines_Y.append(line)
        valid_lines_X = f_valid_X.readlines()
        valid_lines_Y = []
        for line in f_valid_Y.readlines():
            line = line.strip()
            line = "<sos> " + line + " <eos>"
            valid_lines_Y.append(line)
        train_X = map(lambda x: map(lambda y: word2id.get(y, unk_id), x.split()), train_lines_X)
        train_Y = map(lambda x: map(lambda y: word2id.get(y, unk_id), x.split()), train_lines_Y)
        valid_X = map(lambda x: map(lambda y: word2id.get(y, unk_id), x.split()), valid_lines_X)
        valid_Y = map(lambda x: map(lambda y: word2id.get(y, unk_id), x.split()), valid_lines_Y)
        pickle.dump( train_X, open("data/%s/train_X.p" % (dataset, ), "wb") )
        pickle.dump( train_Y, open("data/%s/train_Y.p" % (dataset, ), "wb") )
        pickle.dump( valid_X, open("data/%s/valid_X.p" % (dataset, ), "wb") )
        pickle.dump( valid_Y, open("data/%s/valid_Y.p" % (dataset, ), "wb") )
    else:
        train_X = pickle.load( open("data/%s/train_X.p" % (dataset, ), "rb") )
        train_Y = pickle.load( open("data/%s/train_Y.p" % (dataset, ), "rb") )
        valid_X = pickle.load( open("data/%s/valid_X.p" % (dataset, ), "rb") )
        valid_Y = pickle.load( open("data/%s/valid_Y.p" % (dataset, ), "rb") )
    return train_X, train_Y, valid_X, valid_Y
def get_train_and_valid_2(dataset):
    """Load (or build and cache) id-encoded train/valid data for the second
    source stream (no targets).

    NOTE: Python 2 code — map() returns a list there (see
    get_train_and_valid); the same caveat applies to all loaders below.
    """
    if os.path.exists("data/%s/train_X_2.p" % (dataset, )) == False:
        word2id = pickle.load( open("data/%s/word2id.p" % (dataset, ), "rb") )
        unk_id = word2id["<unk>"]
        f_train_X = open("dataset/%s/train_source_2.txt" % (dataset, ), "r")
        f_valid_X = open("dataset/%s/valid_source_2.txt" % (dataset, ), "r")
        train_lines_X = f_train_X.readlines()
        valid_lines_X = f_valid_X.readlines()
        train_X = map(lambda x: map(lambda y: word2id.get(y, unk_id), x.split()), train_lines_X)
        valid_X = map(lambda x: map(lambda y: word2id.get(y, unk_id), x.split()), valid_lines_X)
        pickle.dump( train_X, open("data/%s/train_X_2.p" % (dataset, ), "wb") )
        pickle.dump( valid_X, open("data/%s/valid_X_2.p" % (dataset, ), "wb") )
    else:
        train_X = pickle.load( open("data/%s/train_X_2.p" % (dataset, ), "rb") )
        valid_X = pickle.load( open("data/%s/valid_X_2.p" % (dataset, ), "rb") )
    return train_X, valid_X
def get_field_train_and_valid(dataset):
    """Load (or build and cache) id-encoded train/valid field sequences.

    Uses field2id directly — a field absent from the vocabulary raises
    KeyError (no <unk> fallback here).
    """
    if os.path.exists("data/%s/train_X_f.p" % (dataset, )) == False:
        field2id = pickle.load( open("data/%s/field2id.p" % (dataset, ), "rb") )
        f_train_X_f = open("dataset/%s/train_field.txt" % (dataset, ), "r")
        f_valid_X_f = open("dataset/%s/valid_field.txt" % (dataset, ), "r")
        train_lines_X_f = f_train_X_f.readlines()
        valid_lines_X_f = f_valid_X_f.readlines()
        train_X_f = map(lambda x: map(lambda y: field2id[y], x.split()), train_lines_X_f)
        valid_X_f = map(lambda x: map(lambda y: field2id[y], x.split()), valid_lines_X_f)
        pickle.dump( train_X_f, open("data/%s/train_X_f.p" % (dataset, ), "wb") )
        pickle.dump( valid_X_f, open("data/%s/valid_X_f.p" % (dataset, ), "wb") )
    else:
        train_X_f = pickle.load( open("data/%s/train_X_f.p" % (dataset, ), "rb") )
        valid_X_f = pickle.load( open("data/%s/valid_X_f.p" % (dataset, ), "rb") )
    return train_X_f, valid_X_f
def get_pos_train_and_valid(dataset):
    """Load (or build and cache) id-encoded train/valid position sequences
    for both pos streams."""
    if os.path.exists("data/%s/train_X_pos1.p" % (dataset, )) == False:
        pos2id = pickle.load( open("data/%s/pos2id.p" % (dataset, ), "rb") )
        f_train_X_pos1 = open("dataset/%s/train_pos1.txt" % (dataset, ), "r")
        f_valid_X_pos1 = open("dataset/%s/valid_pos1.txt" % (dataset, ), "r")
        f_train_X_pos2 = open("dataset/%s/train_pos2.txt" % (dataset, ), "r")
        f_valid_X_pos2 = open("dataset/%s/valid_pos2.txt" % (dataset, ), "r")
        train_lines_X_pos1 = f_train_X_pos1.readlines()
        valid_lines_X_pos1 = f_valid_X_pos1.readlines()
        train_lines_X_pos2 = f_train_X_pos2.readlines()
        valid_lines_X_pos2 = f_valid_X_pos2.readlines()
        train_X_pos1 = map(lambda x: map(lambda y: pos2id[y], x.split()), train_lines_X_pos1)
        valid_X_pos1 = map(lambda x: map(lambda y: pos2id[y], x.split()), valid_lines_X_pos1)
        train_X_pos2 = map(lambda x: map(lambda y: pos2id[y], x.split()), train_lines_X_pos2)
        valid_X_pos2 = map(lambda x: map(lambda y: pos2id[y], x.split()), valid_lines_X_pos2)
        pickle.dump( train_X_pos1, open("data/%s/train_X_pos1.p" % (dataset, ), "wb") )
        pickle.dump( valid_X_pos1, open("data/%s/valid_X_pos1.p" % (dataset, ), "wb") )
        pickle.dump( train_X_pos2, open("data/%s/train_X_pos2.p" % (dataset, ), "wb") )
        pickle.dump( valid_X_pos2, open("data/%s/valid_X_pos2.p" % (dataset, ), "wb") )
    else:
        train_X_pos1 = pickle.load( open("data/%s/train_X_pos1.p" % (dataset, ), "rb") )
        valid_X_pos1 = pickle.load( open("data/%s/valid_X_pos1.p" % (dataset, ), "rb") )
        train_X_pos2 = pickle.load( open("data/%s/train_X_pos2.p" % (dataset, ), "rb") )
        valid_X_pos2 = pickle.load( open("data/%s/valid_X_pos2.p" % (dataset, ), "rb") )
    return train_X_pos1, valid_X_pos1, train_X_pos2, valid_X_pos2
def get_test(dataset):
    """Load (or build and cache) id-encoded test source and target
    sequences; targets are wrapped with <sos>/<eos>."""
    if os.path.exists("data/%s/test_X.p" % (dataset, )) == False:
        word2id = pickle.load( open("data/%s/word2id.p" % (dataset, ), "rb") )
        unk_id = word2id["<unk>"]
        f_test_X = open("dataset/%s/test_source.txt" % (dataset, ), "r")
        f_test_Y = open("dataset/%s/test_target.txt" % (dataset, ), "r")
        test_lines_X = f_test_X.readlines()
        test_lines_Y = []
        for line in f_test_Y.readlines():
            line = line.strip()
            line = "<sos> " + line + " <eos>"
            test_lines_Y.append(line)
        test_X = map(lambda x: map(lambda y: word2id.get(y, unk_id), x.split()), test_lines_X)
        test_Y = map(lambda x: map(lambda y: word2id.get(y, unk_id), x.split()), test_lines_Y)
        pickle.dump( test_X, open("data/%s/test_X.p" % (dataset, ), "wb") )
        pickle.dump( test_Y, open("data/%s/test_Y.p" % (dataset, ), "wb") )
    else:
        test_X = pickle.load( open("data/%s/test_X.p" % (dataset, ), "rb") )
        test_Y = pickle.load( open("data/%s/test_Y.p" % (dataset, ), "rb") )
    return test_X, test_Y
def get_test_2(dataset):
    """Load (or build and cache) id-encoded test data for the second
    source stream."""
    if os.path.exists("data/%s/test_X_2.p" % (dataset, )) == False:
        word2id = pickle.load( open("data/%s/word2id.p" % (dataset, ), "rb") )
        unk_id = word2id["<unk>"]
        f_test_X = open("dataset/%s/test_source_2.txt" % (dataset, ), "r")
        test_lines_X = f_test_X.readlines()
        test_X = map(lambda x: map(lambda y: word2id.get(y, unk_id), x.split()), test_lines_X)
        pickle.dump( test_X, open("data/%s/test_X_2.p" % (dataset, ), "wb") )
    else:
        test_X = pickle.load( open("data/%s/test_X_2.p" % (dataset, ), "rb") )
    return test_X
def get_field_test(dataset):
    """Load (or build and cache) id-encoded test field sequences."""
    if os.path.exists("data/%s/test_X_f.p" % (dataset, )) == False:
        field2id = pickle.load( open("data/%s/field2id.p" % (dataset, ), "rb") )
        f_test_X_f = open("dataset/%s/test_field.txt" % (dataset, ), "r")
        test_lines_X_f = f_test_X_f.readlines()
        test_X_f = map(lambda x: map(lambda y: field2id[y], x.split()), test_lines_X_f)
        pickle.dump( test_X_f, open("data/%s/test_X_f.p" % (dataset, ), "wb") )
    else:
        test_X_f = pickle.load( open("data/%s/test_X_f.p" % (dataset, ), "rb") )
    return test_X_f
def get_pos_test(dataset):
    """Load (or build and cache) id-encoded test position sequences for
    both pos streams."""
    if os.path.exists("data/%s/test_X_pos1.p" % (dataset, )) == False:
        pos2id = pickle.load( open("data/%s/pos2id.p" % (dataset, ), "rb") )
        f_test_X_pos1 = open("dataset/%s/test_pos1.txt" % (dataset, ), "r")
        f_test_X_pos2 = open("dataset/%s/test_pos2.txt" % (dataset, ), "r")
        test_lines_X_pos1 = f_test_X_pos1.readlines()
        test_lines_X_pos2 = f_test_X_pos2.readlines()
        test_X_pos1 = map(lambda x: map(lambda y: pos2id[y], x.split()), test_lines_X_pos1)
        test_X_pos2 = map(lambda x: map(lambda y: pos2id[y], x.split()), test_lines_X_pos2)
        pickle.dump( test_X_pos1, open("data/%s/test_X_pos1.p" % (dataset, ), "wb") )
        pickle.dump( test_X_pos2, open("data/%s/test_X_pos2.p" % (dataset, ), "wb") )
    else:
        test_X_pos1 = pickle.load( open("data/%s/test_X_pos1.p" % (dataset, ), "rb") )
        test_X_pos2 = pickle.load( open("data/%s/test_X_pos2.p" % (dataset, ), "rb") )
    return test_X_pos1, test_X_pos2
def shuffle_list(a, b, c, d, e, f):
    """
    shuffle a, b, c, d, e, f simultaneously (rows stay aligned)
    """
    z = list(zip(a, b, c, d, e, f))
    random.shuffle(z)
    a, b, c, d, e, f = zip(*z)
    return a, b, c, d, e, f
def padding(X):
    """Right-pad each int sequence in X with zeros to the longest length.

    Returns (int32 matrix of shape (len(X), max_len), per-row lengths,
    total token count).
    """
    # max() over lengths replaces the original manual scan; empty input
    # yields a (0, 0) matrix as before.
    max_len = max(map(len, X)) if X else 0
    # np.zeros replaces the original np.ones(...) * 0.
    padded_X = np.zeros((len(X), max_len), dtype=np.int32)
    len_ = 0
    X_len = []
    for i in range(len(X)):
        row = X[i]
        len_ += len(row)
        X_len.append(len(row))
        padded_X[i, :len(row)] = row
    return padded_X, X_len, len_
def data_iterator(X, X_2, X_f, X_pos1, X_pos2, Y, batch_size, shuffle=True):
    """Yield padded, time-major mini-batches over six parallel datasets.

    Y rows must carry <sos>/<eos>: decoder input drops the final token,
    decoder target drops the first. Each yielded tuple is
    (X, X_len, X_2, X_2_len, X_f, X_pos1, X_pos2, Y_ipt, Y_ipt_len,
    Y_tgt, total_target_tokens). A final partial batch is emitted when
    batch_size does not divide the data length.
    """
    if shuffle == True:
        X, X_2, X_f, X_pos1, X_pos2, Y = shuffle_list(X, X_2, X_f, X_pos1, X_pos2, Y)
    Y_ipt = [y[:-1] for y in Y]
    Y_tgt = [y[1:] for y in Y]
    def _emit(lo, hi):
        # Pad one slice of every stream and assemble the 11-element tuple
        # (this was duplicated verbatim for the remainder batch before).
        padded_X, X_len, _ = padding(X[lo:hi])
        padded_X_2, X_2_len, _ = padding(X_2[lo:hi])
        padded_X_f, _, _ = padding(X_f[lo:hi])
        padded_X_pos1, _, _ = padding(X_pos1[lo:hi])
        padded_X_pos2, _, _ = padding(X_pos2[lo:hi])
        padded_Y_ipt, Y_ipt_len, _ = padding(Y_ipt[lo:hi])
        padded_Y_tgt, _, total_len_Y_tgt = padding(Y_tgt[lo:hi])
        return (np.transpose(padded_X), np.array(X_len, dtype=np.int32),
                np.transpose(padded_X_2), np.array(X_2_len, dtype=np.int32),
                np.transpose(padded_X_f), np.transpose(padded_X_pos1),
                np.transpose(padded_X_pos2),
                np.transpose(padded_Y_ipt), np.array(Y_ipt_len, dtype=np.int32),
                np.transpose(padded_Y_tgt), total_len_Y_tgt)
    data_len = len(X)
    # Bug fix: the original used "/", which is float division under
    # Python 3 and breaks range(); "//" keeps the Python 2 semantics.
    batch_len = data_len // batch_size
    for i in range(batch_len):
        yield _emit(i * batch_size, (i + 1) * batch_size)
    if batch_len * batch_size != data_len:
        yield _emit(batch_len * batch_size, data_len)
994,775 | 9931cbef977d1dd7e725b1d2e9bb72a075c40692 | #!/usr/bin/env python
# encoding: utf-8
# for stderr
# from TreeBuilder import show_tree
# from english_parser import result, comment, condition, root
import sys
try:
import readline
except:
print('readline not available')
py2 = sys.version < '3'
py3 = sys.version >= '3'
import tokenize
import english_tokens
import re
import token as _token
import collections # py3
import context
import extensions
from exception import *
from extension_functions import is_string
# import nodes
from nodes import Argument, Variable, Compare, FunctionCall, FunctionDef
# from nodes import *
import context as the
from context import * # NOO! 2 different!
# Beware of decorator classes. They don't work on methods unless you manually reinvent the logic of instancemethod descriptors.
class Starttokens(object):
    """Decorator that registers a parse function under its start token(s)
    in the module-level the.token_map.

    The decorated function is returned unchanged; only the map is updated.
    """
    def __init__(self, starttokens):
        # Accept a single token or a list of tokens.
        if not isinstance(starttokens, list):
            starttokens = [starttokens]
        self.starttokens = starttokens
    def __call__(self, original_func):
        decorator_self = self
        # Once registration is finalized, later decorations are no-ops.
        if context.starttokens_done:
            return original_func
        for t in self.starttokens:
            # A second registration silently overwrites the first; only a
            # verbose trace records the clash.
            if t in the.token_map:
                verbose("ALREADY MAPPED \"%s\" to %s, now %s" % (t, the.token_map[t], original_func))
            the.token_map[t] = original_func
        return original_func
# def starttokens(keywords,fun):
# for t in keywords:
# token_map[t]=fun
# return fun
#
# class NotMatching(StandardError):
# pass
#
# class StandardError(Exception):
# pass
#
#
# class Error(Exception):
# pass
#
#
# class MethodMissingError(StandardError):
# pass
#
#
# class InternalError(StandardError):
# pass
#
#
# class NotMatching(StandardError):
# pass
#
#
# class UnknownCommandError(StandardError):
# pass
#
#
# class SecurityError(StandardError):
# pass
#
#
# # NotPassing = Class.new StandardError
# class NotPassing(StandardError):
# pass
#
#
# class NoResult(NotMatching):
# pass
#
#
# class EndOfDocument(StandardError):
# pass
#
#
# class EndOfLine(NotMatching):
# pass
#
#
# class EndOfStatement(EndOfLine):
# pass
#
#
# class MaxRecursionReached(StandardError):
# pass
#
#
# class EndOfBlock(NotMatching):
# pass
#
#
# class GivingUp(StandardError):
# pass
#
#
# class MustNotMatchKeyword(NotMatching):
# pass
#
#
# class KeywordNotExpected(NotMatching):
# pass
#
#
# class UndefinedRubyMethod(NotMatching):
# pass
#
#
# class WrongType(StandardError):
# pass
#
#
# class ImmutableVaribale(StandardError):
# pass
#
#
# class SystemStackError(StandardError):
# pass
def app_path():
    """Return the application's root directory."""
    return "./"
def dictionary_path():
    """Return the directory holding the word-list dictionaries.

    Bug fix: the original computed the path expression but never
    returned it, so every caller received None.
    """
    return app_path() + "word-lists/"
def isnumeric(start):
    """True when *start* is an int or a float (bools count as ints)."""
    return isinstance(start, (int, float))
# def current_context():
# context: tree / per node
# def javascript:
# maybe(script_block)
# __(current_context)=='javascript' ? 'script' : 'java script', 'javascript', 'js'
# no_rollback() 10
# javascript+=rest_of_line+';'
# newline22
# return javascript
# #if not javascript: block and done
#
# _try=maybe
def star(lamb, giveUp=False):
    """Kleene-star combinator: apply *lamb* repeatedly until it fails.

    Returns a single match, a list of matches, or the pre-call state
    when nothing matched; the token position is rolled back on failure.
    NOTE(review): depth/current_token/current_value/very_verbose are
    module-level parser state, and `max` below shadows the builtin.
    """
    if (depth > max_depth): raise SystemStackError("if(len(nodes)>max_depth)")
    good = []
    old = current_token
    old_state = current_value  # restored if no match at all
    try:
        while not checkEndOfLine():  # many statements, so ';' is ok! but: MULTILINE!?!
            match = lamb()  # the repeated production
            if not match: break
            old = current_token
            good.append(match)
            if (the.token == ')'): break
            # Safety valve against runaway repetition.
            max = 20  # no list of >100 ints !?! WOW exclude lists!! TODO OOO!
            if len(good) > max:
                raise Exception(" too many occurrences of " + to_source(lamb))
    except GivingUp as e:
        if giveUp:
            raise
        verbose("GivingUp ok in star")  # ok in star!
        set_token(old)
        return good
    except NotMatching as e:
        set_token(old)
        if very_verbose and not good:
            verbose("NotMatching star " + str(e))
        # if verbose: print_pointer()
    except EndOfDocument as e:
        verbose("EndOfDocument")  # ok in star!
    except IgnoreException as e:
        error(e)
        error("error in star " + to_source(lamb))
    if len(good) == 1: return good[0]
    if good: return good
    # else: restore!
    set_token(old)
    # invalidate_obsolete(old_nodes)
    return old_state
def ignore_rest_of_line():
    """Advance the token stream past the remainder of the current line."""
    while not checkEndOfLine():
        next_token()
def pointer_string():
    """Render the current line with a caret marker under the current token,
    plus a "File ..., line N" footer for real files.

    NOTE(review): when current_token is falsy the lineNo lookup below
    still dereferences it — likely crashes in that branch; confirm.
    """
    if not the.current_token:
        offset = len(the.current_line)
        l = 3
    else:
        offset = the.current_offset
        l = the.current_token[3][1] - offset
    lineNo = the.current_token[2][0]
    filep = ' File "' + the.current_file + '", line ' + str(lineNo) + "\n" if the.current_file != "(String)" else ""
    return the.current_line[offset:] + "\n" + the.current_line + "\n" + " " * (offset) + "^" * l + "\n" + filep
def print_pointer(force=False):
    """Print the token and caret diagram when verbose (or *force*)."""
    if the.current_token and (force or the._verbose):
        print(the.current_token) # , file=sys.stderr)
        print(pointer_string()) # , file=sys.stderr)
    # print(the.current_token, file=sys.stderr)
    # print(pointer_string(), file=sys.stderr)
    return OK
def error(e, force=False):
    """Report *e*: parser-control exceptions are re-raised immediately,
    strings are printed, other exceptions get a pointer diagram and are
    re-raised unless verbose mode swallows them."""
    if isinstance(e, GivingUp): raise e # hand through!
    if isinstance(e, NotMatching): raise e
    if is_string(e): print(e)
    if isinstance(e, Exception):
        # print(e.str(clazz )+" "+e.str(message))
        # print(clean_backtrace e.backtrace)
        # print(e.str( class )+" "+e.str(message))
        print_pointer()
        # if context.use_tree:
        #     import TreeBuilder
        #     TreeBuilder.show_tree()
        if not context._verbose:
            raise e
def warn(e):
    """Print a non-fatal warning."""
    print(e)
def caller():
    """Return the caller's frame info via inspect.

    NOTE(review): the try block always returns, so the fallback code
    after `except` is dead.
    """
    import inspect
    curframe = inspect.currentframe()
    try:
        return inspect.getouterframes(curframe, 2)
    except:
        pass
    calframe = curframe.f_back
    if calframe.f_back:
        calframe = curframe.f_back
    return calframe
def verbose(info):
    """Print *info* only when context._verbose is set."""
    if context._verbose:
        print(info)
def debug(info='debug'):
    """Print *info* only when context._debug is set."""
    if context._debug:
        print(info)
def info(info):
    """Print *info* only when the._verbose is set."""
    if the._verbose:
        print(info)
def to_source(block):
    """Best-effort textual representation of *block* (plain str())."""
    return "{0!s}".format(block)
def filter_backtrace(e):
    """Backtrace filtering is currently a no-op: hand *e* back unchanged."""
    unfiltered = e
    return unfiltered
def tokens(tokenz):
    """Consume the expected token(s) or raise NotMatching.

    Raises EndOfDocument first when the stream is exhausted (raiseEnd).
    Bug fix: the original raised NotMatching(result) where `result` was
    an undefined name, so every mismatch became a NameError instead of
    the intended NotMatching.
    """
    raiseEnd()
    ok = maybe_tokens(tokenz)
    if (ok): return ok
    raise NotMatching(tokenz)
# so much cheaper!!! -> copy to ruby
# TODO: COLLECT ALL
def maybe_tokens(tokens0):
    """Try to consume one of *tokens0* at the current position.

    Case-insensitive single-token match; entries containing spaces are
    matched as multi-token phrases (position restored if the phrase
    breaks off). Returns the matched entry or False without raising.
    """
    # tokens = flatten(tokens0)
    for t in tokens0:
        if t == the.token or t.lower() == the.token.lower():
            next_token()
            return t
        if " " in t: # EXPENSIVE: multi-word phrase match
            old = the.current_token
            for to in t.split(" "):
                if to != the.token:
                    t = None
                    break
                else:
                    next_token()
            if not t:
                # Phrase failed part-way: roll the stream back.
                set_token(old)
                continue
            return t
    return False
def __(x):
    """Shorthand for tokens(x): consume *x* or raise NotMatching."""
    return tokens(x)
# shortcut: method missing (and maybe(}?)
# def maybe_tokens(*x):
# # DANGER!! Obviously very different semantics from maybe(tokens}!!
# # remove_tokens x # shortcut
# return maybe(tokens, x)
# class Parser(object): # <MethodInterception:
# import
# attr_accessor :lines, :verbose, :original_string
# def __init__():
def next_token(check=True):
    """Advance to the next token and install it as current state.

    With check=True (default) the end of the stream raises EndOfDocument;
    with check=False an EndOfDocument *instance* is returned instead —
    callers must test the return value in that mode.
    """
    # if check: check_comment()
    the.token_number = the.token_number + 1
    if (the.token_number >= len(the.tokenstream)):
        if not check: return EndOfDocument()
        raise EndOfDocument()
    token = the.tokenstream[the.token_number]
    the.previous_word = the.token
    return set_token(token)
def set_token(token):
    """Unpack a 6-tuple (type, text, start, end, line, index) into both the
    module globals and the shared `the` context; returns the token text."""
    global current_token, current_type, current_word, current_line, token_number
    the.current_token = current_token = token
    the.current_type = current_type = token[0]
    the.token = the.current_word = current_word = token[1]
    the.line_number, the.current_offset = token[2]
    end_pointer = token[3]
    the.current_line = current_line = token[4]
    the.token_number = token_number = token[5]
    the.string = current_word # hack, kinda
    return token[1]
# TODO: we need a tokenizer which is agnostic to Python !
# SEE test_variable_scope
# end""") # IndentationError: unindent does not match any outer indentation level TOKENIZER WTF
def parse_tokens(s):
    """Tokenize source string *s* into the.tokenstream.

    Each entry is a 6-tuple (type, text, start, end, line, index). Lines
    starting with '#' or '//' and blank lines are dropped before
    tokenizing; an explicit NEWLINE token is appended when the tokenizer
    omits one at end-of-line. Handles both Python 2 and 3 tokenize APIs.
    """
    import tokenize
    from io import BytesIO
    the.tokenstream = []
    def token_eater(token_type, token_str, start_row_col, end_row_col, line):
        # Collect tokens, skipping the py3-only ENCODING pseudo-token.
        if py3 and token_type == tokenize.ENCODING:
            return
        # if token_type != tokenize.COMMENT \
        # and not line.startswith("#") and not line.startswith("//"):
        the.tokenstream.append((token_type, token_str, start_row_col, end_row_col, line, len(the.tokenstream)))
        if end_row_col[1]==len(line) and token_type!=_token.ENDMARKER and token_type!=_token.NEWLINE: # Hack, where did _token.NEWLINE go???
            the.tokenstream.append((_token.NEWLINE, '\n', end_row_col, end_row_col, line, len(the.tokenstream)))
    s = s.replace("⦠", "")
    global done
    if py2:
        _lines = s.decode('utf-8').split('\n')
    else:
        _lines = s.split('\n') # AH, LOST NEWLINE!
    global i
    i = -1
    def readlines():
        # Line feeder for tokenize: skips comment/blank lines eagerly.
        global i
        i += 1
        while i < len(_lines) and (_lines[i].startswith("#") or _lines[i].startswith("//") or not _lines[i]):
            i += 1 # remove comments early! BAD: /*////*/ !! DANGER: DEDENT?
        if i < len(_lines):
            if py2:
                return _lines[i]
            else:
                line = _lines[i]
                return str.encode(line) # py3 tokenize wants bytes
        else:
            return b''
    if py2:
        tokenize.tokenize(readlines, token_eater) # tokenize the string
    else:
        all=[]
        # for line in readlines(): all+=[token_eater(*t) for t in tokenize.tokenize(line)]
        all+=[token_eater(*t) for t in tokenize.tokenize(readlines)]
        # else: map(token_eater,tokenize.tokenize(readline))
    return the.tokenstream
def x_comment(token):
    """Remove a comment token from the.tokenstream (the keep-and-retag
    branch is dead: tuples do not support item assignment)."""
    drop = True # False # keep comments?
    if drop:
        the.tokenstream.remove(token)
    else:
        token[0] = tokenize.COMMENT # TypeError: 'tuple' object does not support item assignment
        # the.tokenstream[i]=(token[0],token[1],token[2],token[3],token[4],i) #renumber!!
# '#' DONE BY TOKENIZER! (54, '\n', (1, 20), (1, 21), '#!/usr/bin/env angle\n', 0)
# rest done here: // -- /*
def drop_comments():
    """Strip //-line and /*...*/ block comments from the.tokenstream and
    renumber the surviving tokens (index is field 5 of each tuple).

    NOTE(review): `str` shadows the builtin inside the loop.
    """
    in_comment_block = False
    in_comment_line = False
    i = 0
    prev = ""
    for token in extensions.xlist(the.tokenstream):
        is_beginning_of_line = token[2][1] == 0 # 1??
        # line = token[4]
        str = token[1]
        token_type = token[0]
        if str == "//" or str == "#":
            x_comment(token)
            in_comment_line = True
        elif prev == "*" and str.endswith("/"):
            # End of a /* ... */ block.
            x_comment(token)
            in_comment_block = False
        elif in_comment_block or in_comment_line:
            x_comment(token)
        elif prev == "/" and str.startswith("*"):
            # Start of a /* ... */ block: also retract the '/' token.
            i = i - 1 # drop prev_token too!!
            x_comment(prev_token) # '/' too ;)
            x_comment(token)
            in_comment_block = True
        else:
            if str == '\n': # newline kept: it terminates line comments
                in_comment_line = False
            # token[-1] =i #renumber!! 'tuple' object does not support item assignment
            the.tokenstream[i] = (token[0], token[1], token[2], token[3], token[4], i) # renumber!!
            i = i + 1
        prev = str
        prev_token = token
def init(strings):
    """Reset all parser state and tokenize *strings* (a source string or a
    list of lines), leaving the first token current.

    NOTE(review): `number` registered per numeral below is not defined in
    this part of the file — presumably a parse function defined elsewhere.
    """
    # global is ok within one file but do not use it across different files
    global no_rollback_depth, rollback_depths, line_number, original_string, root, lines, depth, left, right, comp
    if not the.moduleMethods:
        load_module_methods()
    the.no_rollback_depth = -1
    the.rollback_depths = []
    the.line_number = 0
    if isinstance(strings, list):
        the.lines = strings
        # Join without extra separators when lines already end in "\n".
        if (strings[0].endswith("\n")):
            parse_tokens("".join(strings))
        else:
            parse_tokens("\n".join(strings))
    if is_string(strings):
        the.lines = strings.split("\n")
        parse_tokens(strings)
    drop_comments()
    the.tokens_len = len(the.tokenstream)
    the.token_number = -1
    next_token()
    the.string = the.lines[0].strip() # Postpone angel.problem
    the.original_string = the.string
    the.root = None
    the.nodes = []
    the.depth = 0
    left = right = comp = None
    # Every numeral word dispatches to the number production.
    for nr in english_tokens.numbers:
        the.token_map[nr] = number
def error_position():
    """Placeholder: error position reporting is not implemented."""
    pass
def raiseEnd():
    """Raise EndOfDocument when the token stream is exhausted or the
    current token is the ENDMARKER."""
    if current_type == _token.ENDMARKER:
        raise EndOfDocument()
    if (the.token_number >= len(the.tokenstream)):
        raise EndOfDocument()
    # if not the.string or len(the.string)==0:
    # if line_number >= len(lines): raise EndOfDocument()
    # #the.string=lines[++line_number];
    # raise EndOfLine()
def remove_tokens(*tokenz):
    """Skip forward while the current token is one of *tokenz*."""
    while (the.token in tokenz):
        next_token()
    # for t in flatten(tokenz):
    #     the.string = the.string.replace(r' *%s *' % t, " ")
def must_contain(args, do_raise=True): # before ;\n
    """Scan ahead (without consuming) for any of *args* before the end of
    the statement; returns the hit or False / raises NotMatching.

    NOTE(review): the dict branch slices args[0:-2] — dropping two
    entries rather than just the trailing options dict (args[:-1]);
    looks like an off-by-one, confirm against callers.
    """
    if isinstance(args[-1], dict):
        return must_contain_before(args[0:-2], args[-1]['before']) # BAD style!!
    if is_string(args): args = [args]
    old = current_token
    pre = the.previous_word
    while not (checkEndOfLine()):
        for x in args:
            if current_word == x:
                # Found: restore position so nothing is consumed.
                set_token(old)
                return x
        next_token()
        if do_raise and (current_word == ';' or current_word == '\n'):
            break
    set_token(old)
    the.previous_word = pre
    if do_raise:
        raise NotMatching("must_contain " + str(args))
    return False
def must_contain_before(args, before): # ,before():None
    """Like must_contain, but give up as soon as a *before* token shows up
    first; position is always restored."""
    old = current_token
    good = None
    while not (checkEndOfLine() or current_word in before and not current_word in args):
        if current_word in args:
            good = current_word
            break
        next_token()
    set_token(old)
    if not good: raise NotMatching
    return good
def must_contain_before_old(before, *args): # ,before():None
    """Legacy regex-based variant operating on the.string instead of the
    token stream.

    NOTE(review): uses SRE_Match type-name checks and a .pre_match
    attribute (Ruby-ism) that re match objects do not have — this path
    looks bit-rotten; flatten/escape_token/OK come from elsewhere.
    """
    raiseEnd()
    good = False
    if before and is_string(before): before = [before]
    if before: before = flatten(before) + [';']
    args = flatten(args)
    for x in flatten(args):
        if re.search(r'^\s*\w+\s*$', x):
            good = good or re.search(r'[^\w]%s[^\w]' % x, the.string)
            if (type(good).__name__ == "SRE_Match"):
                good = good.start()
            if good and before and good.pre_match in before and before.index(good.pre_match):
                good = None
        else: # token
            good = good or re.search(escape_token(x), the.string)
            if (type(good).__name__ == "SRE_Match"):
                good = good.start()
            sub = the.string[0:good]
            if good and before and sub in before and before.index(sub):
                good = None
        if good: break
    if not good: raise NotMatching
    for nl in english_tokens.newline_tokens:
        if nl in str(good): raise NotMatching # ;while
    # if nl in str(good.pre_match): raise (NotMatching(x)) # ;while
    return OK
def starts_with_(param):
    """Non-raising wrapper: starts_with(*param*) attempted via maybe()."""
    return maybe(lambda: starts_with(param))
# ~ look_ahead 0
def starts_with(tokenz):
    """Test the current token without consuming it.

    With a string argument returns a bool; with a list returns the matching
    token itself (or False). Always False at end of line.
    """
    if checkEndOfLine(): return False
    if is_string(tokenz):
        return tokenz == the.token
    if the.token in tokenz:
        return the.token
    # for t in tokenz:
    #     if t == the.current_word:
    #         return t
    return False
# NOT starts_with!!! — this peeks at the FOLLOWING token(s), never the current one
def look_1_ahead(expect_next, doraise=False, must_not_be=False, offset=1):
    """Peek *offset* tokens ahead without consuming anything.

    Returns True when the peeked word matches *expect_next* (a word or a list
    of words). On a miss: returns OK when must_not_be is set, raises
    NotMatching when doraise is set, otherwise returns False.
    """
    if the.token == '': return False
    if the.token_number + offset >= the.tokens_len:
        # BUG fix: the bound check used a hard-coded +1, so any offset > 1
        # could index past the end of the token stream.
        return False
    token = the.tokenstream[the.token_number + offset]
    if expect_next == token[1]:
        return True
    elif isinstance(expect_next, list) and token[1] in expect_next:
        return True
    else:
        if must_not_be:
            return OK  # NOT FOUND, which is what the caller wanted
        if doraise:
            raise NotMatching(doraise)
        return False
def _(x):
    """Terse alias for token(): match and consume the literal token *x*."""
    matched = token(x)
    return matched
def lastmaybe(stack):
    """Return the first frame string in *stack* containing 'try', else None."""
    candidates = (frame for frame in stack if re.search("try", frame))
    return next(candidates, None)
def caller_name():
    """Name/identifier of the calling frame (delegates to caller())."""
    frame = caller()
    return frame
# remove the border, if above border
def adjust_interpret():
    """Restore interpret mode once the call depth rises above the saved border."""
    depth = caller_depth()
    if (context.interpret_border > depth - 2):
        context.interpret = context.did_interpret
        context.interpret_border = -1  # remove the border
        do_interpret()
def do_interpret():
    """Re-enable interpretation (no-op while building the AST tree)."""
    if context.use_tree: return
    if (context.did_interpret != context.interpret):
        context.did_interpret = context.interpret
        context.interpret = True
def dont_interpret():
    """Suspend interpretation, remembering the depth where it was switched off."""
    depth = caller_depth()
    if context.interpret_border < 0:
        context.interpret_border = depth
    context.did_interpret = context.interpret
    context.interpret = False
def interpreting():
    """True when the parser currently evaluates results (never in tree mode)."""
    if context.use_tree: return False
    return context.interpret
def check_rollback_allowed():
    """True when the current call depth lies outside the no-rollback window."""
    # BUG fix: 'c = caller_depth' bound the function object itself (no call),
    # so the comparisons below compared a function with an int — a TypeError
    # in Python 3. Unused locals 'throwing' and 'level' removed.
    c = caller_depth()
    return c < no_rollback_depth or c > no_rollback_depth + 2
def read_source(x):
    """Best-effort source text of callable *x* via its source_location (file, line).

    Reads lines from the file until one containing '}' or 'end' is seen.
    Returns the cached last_pattern when present or *x* is falsy.
    NOTE(review): assumes *x* exposes a Ruby-style source_location pair.
    """
    if last_pattern or not x: return last_pattern
    # BUG fix: the body was unported Ruby (.to_s, IO.readlines, String#match)
    # and indexed lines[i] before its bounds check (IndexError at EOF).
    res = x.source_location[0] + ":" + str(x.source_location[1]) + "\n"
    with open(x.source_location[0]) as f:
        lines = f.readlines()
    i = x.source_location[1] - 1
    while i < len(lines):
        res += lines[i]
        if "}" in lines[i] or "end" in lines[i]: break
        i = i + 1
    return res
def caller_depth():
    """Approximate recursion depth of the parser, used for rollback bookkeeping.

    Raises SystemStackError beyond max_depth.
    """
    # c= depth #if angel.use_tree doesn't speed up:
    # if angel.use_tree: c= depth
    try:
        c = caller().f_code.co_stacksize - 7
    except Exception:
        # BUG fix: the bare 'except:' also swallowed KeyboardInterrupt/SystemExit.
        c = len(caller())
    if c > max_depth:
        raise SystemStackError("depth overflow")
    return c
    # filter_stack(caller).count #-1
def no_rollback():
    """Mark the current depth as a rollback barrier: parsing is committed here."""
    depth = caller_depth() - 1
    the.no_rollback_depth = depth
    the.rollback_depths.append(depth)
def adjust_rollback(depth=-1):
    """Lift rollback barriers once control has returned above them."""
    try:
        if depth == -1: depth = caller_depth()
        if depth <= the.no_rollback_depth:
            allow_rollback(1)  # 1 extra depth for this method!
    except Exception as e:
        error(e)
    except Error as e:  # NOTE(review): unreachable if Error subclasses Exception
        error(e)
def allow_rollback(n=0):
    """Pop rollback barriers at or below the current depth (n<0 clears them all)."""
    if n < 0: the.rollback_depths = []
    depth = caller_depth() - 1 - n
    if len(the.rollback_depths) > 0:
        the.no_rollback_depth = the.rollback_depths[-1]
        while the.rollback_depths[-1] >= depth:
            the.no_rollback_depth = the.rollback_depths.pop()
            if len(the.rollback_depths) == 0:
                if the.no_rollback_depth >= depth:
                    the.no_rollback_depth = -1
                break
    else:
        the.no_rollback_depth = -1
# todo ? trial and error -> evidence based 'parsing' ?
def invalidate_obsolete(old_nodes):
    """Drop *old_nodes* from the global node list, then invalidate the rest."""
    # DANGER RETURNING false as VALUE!! use RAISE ONLY todo
    # (nodes - old_nodes).each(lambda n: n.invalid())
    for old in old_nodes:
        if old in nodes:
            nodes.remove(old)
    for n in nodes:
        n.invalid()
        n.destroy()
# start_block INCLUDED!! (optional !?!)
def beginning_of_line():
    """True when the current token starts a new line (INDENT, column 0, or wrap)."""
    # if previous_word
    if the.token_number > 1:
        previous_offset = the.tokenstream[the.token_number - 1][2][1]
        if previous_offset > the.current_offset:
            return True
    return the.current_type == _token.INDENT or the.current_offset == 0
def block(multiple=False):  # type):
    """Parse a block of statements up to an end-block marker.

    Returns the list of parsed statements (a single statement is unwrapped);
    while interpreting, returns the value of the last statement instead.
    """
    global last_result, original_string
    from english_parser import statement, end_of_statement, end_block
    maybe_newline() or not "=>" in the.current_line and maybe_tokens(
        english_tokens.start_block_words)  # NEWLINE ALONE / OPTIONAL!!!???
    start = pointer()
    # maybe(comment_block)
    statement0 = statement(False)
    statements = [statement0] if statement0 else []
    # content = pointer() - start
    end_of_block = maybe(end_block)  # ___ done_words
    while (multiple or not end_of_block) and not checkEndOfFile():
        end_of_statement()  # danger, might act as block end!
        no_rollback()  # if ...
        if multiple: maybe_newline()
        # star(end_of_statement)
        def lamb():
            # one iteration of the star loop: parse a statement or bail out
            try:
                # print_pointer(True)
                maybe_indent()
                s = statement()
                statements.append(s)
            except NotMatching as e:
                if starts_with(english_tokens.done_words) or checkNewline():
                    return False  # ALL GOOD
                print("Giving up block")
                print_pointer(True)
                raise Exception(str(e) + "\nGiving up block\n" + pointer_string())
            # content = pointer() - start
            return end_of_statement()
        star(lamb, giveUp=True)
        # maybe(end_of_statement)
        end_of_block = end_block()
        if not multiple: break
    the.last_result = the.result
    if interpreting(): return statements[-1]
    if len(statements) == 1: statements = statements[0]
    if context.use_tree:
        the.result = statements  #
    # if context.debug:print_pointer(True)
    return statements  # content
    # if angel.use_tree:
    #     p=parent_node()
    #     if p: p.content=content
    #     p
def maybe(expr):
    """Attempt *expr* (a parser callable or token spec), rolling back on failure.

    Returns the parse result on success, False/None on failure. Honors the
    no-rollback barrier: an EndOfLine below the barrier escalates to GivingUp.
    """
    global original_string, last_node, current_value, depth, current_node, last_token
    if not isinstance(expr, collections.abc.Callable):  # duck!
        return maybe_tokens(expr)
    the.current_expression = expr
    depth = depth + 1
    if (depth > context.max_depth): raise SystemStackError("len(nodes)>max_depth)")
    old = current_token
    try:
        result = expr()  # yield <<<<<<<<<<<<<<<<<<<<<<<<<<<<
        adjust_rollback()
        # NOTE(review): collections.Callable is the removed py2 spelling;
        # collections.abc.Callable is used above.
        if context._debug and (isinstance(result, collections.Callable)) and not isinstance(result, type):
            raise Exception("BUG!? returned CALLABLE " + str(result))
        if result or result == 0:  # and result!='False'
            verbose("GOT result " + str(expr) + " : " + str(result))
        else:
            verbose("No result " + str(expr))
            set_token(old)
            # the.string = old
        last_node = current_node
        return result
    except EndOfLine as e:
        if verbose: verbose("Tried %d %s %s, got %s" % (the.current_offset, the.token, expr, e))
        adjust_interpret()  # remove the border, if above border
        # if verbose: verbose(e)
        # if verbose: string_pointer()
        cc = caller_depth()
        rb = the.no_rollback_depth
        if cc >= rb:
            set_token(old)  # OK
            current_value = None
        if cc < rb:  # and not cc+2<rb # not check_rollback_allowed:
            error("NO ROLLBACK, GIVING UP!!!")
            # if context._verbose:
            #     print(last_token)
            #     print_pointer() # ALWAYS!
            # if context.use_tree:
            #     import TreeBuilder
            #     TreeBuilder.show_tree() # Not reached
            ex = GivingUp(str(e) + "\n" + to_source(expr) + "\n" + pointer_string())
            raise ex
            # error e #exit
            # raise SyntaxError(e)
    except EndOfDocument as e:
        set_token(old)
        verbose("EndOfDocument")
        # error(e)
        # raise e,None, sys.exc_info()[2]
        return False
        # return True
    # except GivingUp as e:
    #     the.string=old #to mark??
    # maybe => OK !?
    # error(e)
    # if not check_rollback_allowed:
    # if rollback[len(caller)-1]!="NO" #:
    except (NotMatching, EndOfLine) as e:
        # NOTE(review): EndOfLine is already handled above, so this clause
        # effectively only sees NotMatching.
        set_token(old)
    except IgnoreException as e:  # NoMethodError etc
        set_token(old)
        error(e)
        verbose(e)
    except Exception as e:
        error(e)
        raise  # reraise!!! with traceback backtrace !!!!
    except Error as e:  # NOTE(review): unreachable if Error subclasses Exception
        error(e)
        raise  # reraise!!! with traceback backtrace !!!!
    finally:
        depth = depth - 1
    # except Exception as e:
    #     error(block)
    #     import traceback
    #     traceback.print_stack() # backtrace
    #     error(e)
    #     error(block)
    #     print("-------------------------")
    #     quit()
    # finally:
    adjust_rollback()
    set_token(old)  # if rollback:
    return False
def one_or_more(expressions):
    """Match *expressions* once, then greedily as often as possible (regex '+')."""
    all = [expressions()]
    more = the.current_offset and star(expressions)
    if more:
        all.append(more)
    return all
def to_source(block):
    """Best-effort textual representation of *block* (plain str conversion)."""
    return "{}".format(block)
# def many(block): # see star
# global old_tree,result
# while True:
# try:
# maybe(comment)
# old_tree = list(nodes)#.clone
# result = block() # yield
# # puts "------------------"
# #puts nodes-old_tree
# if(not the.string or len(the.string)==0 ):break # TODO! loop criterion too week: break
# if not result or result == []:
# raise NotMatching(to_source(block) + "\n" + string_pointer_s())
# except IgnoreException as e:
# import traceback
# traceback.print_stack() # backtrace
# error(e)
# GETS messed up BY the.string.strip()! !!! ???
def pointer():
    """Source position (third element) of the current token, or '' when absent.

    BUG fix: the original condition 'not current_token or the.current_token'
    returned '' whenever the.current_token was set — the common case — so the
    position was almost never reported. NOTE(review): the original's fallback
    from current_token[2] to the.current_token[2] when the former is falsy is
    intentionally simplified here; confirm no caller relies on it.
    """
    tok = current_token or the.current_token
    if not tok:
        return ''
    return tok[2]
def isnumeric(start):
    """True when *start* is non-empty and consists solely of decimal digits."""
    return bool(start) and all(ch.isdigit() for ch in start)
def app_path():
    """Directory of this application — unported Ruby stub; always returns None."""
    pass
    # File.expand_path(File.dirname(__FILE__)).to_s
def clear():
    """Reset interpreter state: variables, types, values and context flags."""
    global variables, variableValues
    verbose("clear all variables, methods, ...")
    variables = {}
    variableValues = {}
    # the._verbose=True # False
    context.testing = True
    the.variables.clear()
    the.variableTypes.clear()
    the.variableValues.clear()
    context.in_hash = False
    context.in_list = False
    context.in_condition = False
    context.in_args = False
    context.in_params = False
    context.in_pipe = False
    if not context.use_tree:
        do_interpret()
import io
# Types accepted by parse() as already-open file handles; the optional
# 'extensions' file/xfile wrappers raise NameError on py3-only builds.
try:
    file_types = (extensions.file, extensions.xfile, io.IOBase)
except NameError:
    file_types = (io.IOBase,)  # py3 --
# noinspection PyTypeChecker
def parse(s, target_file=None):
    """Parse (and normally execute) source *s*.

    *s* may be raw source text, an open file object, or a path ending in
    .e/.an; *target_file* is the .pyc output path used in tree mode.
    Returns english_parser.interpretation().
    """
    global last_result, result
    if not s: return
    verbose("PARSING " + s)
    if (isinstance(s, file_types)):
        source_file = s.name
        s = s.readlines()
    elif s.endswith(".e") or s.endswith(".an"):
        target_file = target_file or s + ".pyc"
        source_file = s
        with open(s) as f:
            s = f.readlines()
    else:
        # inline source: persist a copy for error reporting, best effort
        source_file = 'out/inline'
        try:
            with open(source_file, 'wt') as f:
                f.write(s)
        except:
            debug("no out directory")
    if context._debug:
        print(" File \"%s\", line 1" % source_file)
    if (len(s) < 1000):
        verbose("--------PARSING:---------")
        verbose(s)
        verbose("-------------------------")
    try:
        import english_parser
        if isinstance(s, file_types):
            source_file = str(s)
            target_file = source_file + ".pyc"
            s = s.readlines()
        if not is_string(s) and not isinstance(s, list):
            the.result = s
            return english_parser.interpretation()  # result, hack
        allow_rollback()
        init(s)
        the.result = english_parser.rooty()
        if isinstance(the.result, FunctionCall):
            the.result = english_parser.do_execute_block(the.result)
        if the.result in ['True', 'true']: the.result = True
        if the.result in ['False', 'false']: the.result = False
        if isinstance(the.result, Variable): the.result = the.result.value
        import ast
        got_ast = isinstance(the.result, ast.AST)
        if isinstance(the.result, list) and len(the.result) > 0:
            got_ast = isinstance(the.result[0], ast.AST)
        if context.use_tree and got_ast:
            import pyc_emitter
            the.result = pyc_emitter.eval_ast(the.result, {}, source_file, target_file, run=True)
        else:
            if isinstance(the.result, ast.Num): the.result = the.result.n
            if isinstance(the.result, ast.Str): the.result = the.result.s
        the.last_result = the.result
    except Exception as e:
        error(target_file)
        print_pointer(True)
        raise  # blank reraises e with stacktrace
    # except NotMatching as e:
    #     import traceback
    #     traceback.print_stack() # backtrace
    #     the.last_result = the.result = None
    #     e=filter_backtrace(e)
    #     error(e)
    #     print_pointer(True)
    except IgnoreException as e:
        # NOTE(review): unreachable — 'except Exception' above already
        # catches (and re-raises) everything below BaseException.
        pass
    verbose("PARSED SUCCESSFULLY!")
    if context._debug:
        print(" File \"%s\", line 1" % source_file)
    # show_tree()
    # puts svg
    return english_parser.interpretation()  # # result
# def start_parser:
# a=ARGV[0] or app_path+"/../examples/test.e"
# if (File.exists? a):
# lines=IO.readlines(a)
# else:
# lines=a.split("\n")
#
# parse lines[0]
def token(t, expected=''):  # _new
    """Consume the literal token *t* or raise NotMatching.

    List arguments delegate to tokens(); *expected* prefixes the error text.
    """
    if isinstance(t, list):
        return tokens(t)
    raiseEnd()
    if current_word == t:
        next_token()
        return t
    else:
        # verbose('expected ' + str(result)) #
        # print_pointer()
        raise NotMatching(expected + " " + t + "\n" + pointer_string())
def tokens(tokenz):
    """Consume one of the tokens in *tokenz* or raise NotMatching."""
    raiseEnd()
    ok = maybe_tokens(tokenz)
    if (ok): return ok
    raise NotMatching(str(tokenz) + "\n" + pointer_string())
def escape_token(t):
    """Backslash-escape every non-word character of *t* for safe regex use."""
    return re.sub(r'(\W)', r'\\\1', t)
def raiseNewline():
    """Raise EndOfLine when the current position is at a line end."""
    if checkEndOfLine(): raise EndOfLine()
# see checkEndOfLine
def checkNewline():
    """Alias for checkEndOfLine()."""
    return checkEndOfLine()
    # if (current_type == _token.NEWLINE):
    #     return english_tokens.NEWLINE
    # return False
def checkEndOfLine():
    """True at a NEWLINE/ENDMARKER token, a literal newline/empty token, or stream end."""
    return current_type == _token.NEWLINE or \
           current_type == _token.ENDMARKER or \
           the.token == '\n' or \
           the.token == '' or \
           the.token_number >= len(the.tokenstream)
    # if the.string.blank? # no:try,try,try see raiseEnd: raise EndOfDocument.new
    # return not the.string or len(the.string)==0
def checkEndOfFile():
    """True at the ENDMARKER token or past the end of the token stream."""
    return current_type == _token.ENDMARKER or the.token_number >= len(the.tokenstream)
    # return line_number >= len(lines) and not the.string
def maybe_newline():
    """Consume a newline if present; also True at end of file."""
    return checkEndOfFile() or newline(doraise=False)
def newline(doraise=False):
    """Consume one newline (plus any following INDENTs); optionally raise on miss."""
    if checkNewline() == english_tokens.NEWLINE or the.token == ';' or the.token == '':
        next_token()
        if (the.current_type == 54):
            next_token()  # ??? \r\n ? or what is this, python?
        while (the.current_type == _token.INDENT):
            next_token()  # IGNORE FOR NOW!!!!
        return english_tokens.NEWLINE
    found = maybe_tokens(english_tokens.newline_tokens)
    if found: return found  # todo CLEANUP!!!
    if checkNewline() == english_tokens.NEWLINE:  # get new line: return NEWLINE
        next_token()
        return found
    if not found and doraise: raise_not_matching("no newline")
    return False
def newlines():
    """Consume zero or more consecutive newlines (Kleene star over newline())."""
    consumed = star(newline)
    return consumed
def NL():
    """Consume a single newline or carriage-return token."""
    # BUG fix: tokens() takes ONE iterable argument; tokens('\n', '\r') was a
    # TypeError (two positional arguments).
    return tokens(['\n', '\r'])
def NLs():
    """Alias of NL(): consume a single newline or carriage-return token."""
    return tokens(['\n', '\r'])
def rest_of_statement():
    """Legacy string-based scan: everything up to ';', newline or 'done'.

    NOTE(review): operates on the.string and assumes the regex matches —
    re.search(...)[1] raises TypeError when it does not.
    """
    current_value = re.search(r'(.*?)([\r\n;]|done)', the.string)[1].strip()
    the.string = the.string[len(current_value):-1]
    return current_value
# todo merge ^> :
def rest_of_line():
    """Collect the remaining words on the current line, space separated."""
    rest = ""
    while not checkEndOfLine() and not current_word == ';':
        rest += current_word + " "
        next_token(False)
    return rest.strip()
def comment_block():
    """Consume a C-style '/* ... */' comment; True once the closing '*/' is seen.

    NOTE(review): relies on next_token()/raiseEnd stopping at end of input;
    an unterminated comment would otherwise loop.
    """
    token('/')
    token('*')
    while True:
        if the.token == '*':
            next_token()
            if the.token == '/':
                return True
        next_token()
@Starttokens(['//', '#', '\'', '--'])  # , '/' regex!
def skip_comments():
    """Skip a line comment ('#', '//', '--') at the current position, if any."""
    if the.token is None: return
    l = len(the.token)
    if l == 0: return
    if the.current_type == tokenize.COMMENT:
        next_token()
    # if the.current_word[0]=="#": ^^ OK!
    # return rest_of_line()
    if l > 1:
        # if current_word[0]=="#": rest_of_line()
        if the.token[0:2] == "--": return rest_of_line()
        if the.token[0:2] == "//": return rest_of_line()
    # if current_word[0:2]=="' ": rest_of_line() and ...
    # the.string = the.string.replace(r' -- .*', '')
    # the.string = the.string.replace(r'\/\/.*', '') # todo
    # the.string = the.string.replace(r'#.*', '')
    # if not the.string: checkNewline()
def raise_not_matching(msg=None):
    """Unconditionally raise NotMatching (usable inside 'or' expression chains)."""
    raise NotMatching(msg)
_try = maybe  # alias: _try(expr) attempts expr with rollback, like maybe()
def number():
    """Parse any numeric literal: real, fraction, integer or a spelled number."""
    n = maybe(real) or maybe(fraction) or maybe(integer) or maybe(number_word) or raise_not_matching("number")
    return n
def number_word():
    """Parse a spelled-out number word into its numeric value."""
    n = tokens(english_tokens.numbers)
    return extensions.xstr(n).parse_number()  # except NotMatching.new "no number"
@Starttokens(u'\xbd')  # todo python2 wtf
def fraction():
    """Parse unicode vulgar fractions, optionally preceded by an integer (1½)."""
    f = maybe(integer) or 0
    m = starts_with(["¼", "½", "¾", "⅓", "⅔", "⅕", "⅖", "⅗", "⅘", "⅙", "⅚", "⅛", "⅜", "⅝", "⅞"])
    # m = m or starts_with(["\xc2\xbc", "\xc2\xbd", "\xc2\xbe", "\xe2\x85\x93", "\xe2\x85\x94", "\xe2\x85\x95", "\xe2\x85\x96", "\xe2\x85\x97", "\xe2\x85\x98", "\xe2\x85\x99", "\xe2\x85\x9a", "\xe2\x85\x9b", "\xe2\x85\x9c", "\xe2\x85\x9d", "\xe2\x85\x9e"])
    # m = m or starts_with(['\xc2'])
    if not m:
        # no fraction glyph: fall back to the plain integer, if any
        # if f==ZERO: return 0 NOT YET!
        if f != 0:
            return f
        raise NotMatching()
    else:
        next_token()
    # AttributeError: 'unicode' object has no attribute 'parse_number'
    from extensions import xstr
    m = xstr(m).parse_number()
    the.result = float(f) + m
    return the.result
# maybe(complex) or
ZERO = '0'  # truthy sentinel: integer() returns this for 0 so 'or' chains still match
def integer():
    """Parse an optionally negative decimal integer from the.string.

    Returns kast.Num in tree mode, otherwise a plain int (0 becomes the
    truthy string ZERO so callers using 'or' chains don't see a miss).
    """
    match = re.search(r'^\s*(-?\d+)', the.string)
    if match:
        current_value = int(match.groups()[0])
        next_token(False)  # Advancing by hand, its not a regular token
        # "E20": kast.Pow(10,20),
        # if not interpreting(): return ast.Num(current_value)
        if context.use_tree:
            from kast import kast
            return kast.Num(current_value)
        # if context.use_tree: return ast.Num(current_value)
        if current_value == 0:
            current_value = ZERO
        return current_value
    raise NotMatching("no integer")
    # plus{tokens('1','2','3','4','5','6','7','8','9','0'))
def real():
    """Parse an optionally negative decimal float (must contain a '.')."""
    ## global the.string
    match = re.search(r'^\s*(-?\d*\.\d+)', the.string)
    if match:
        current_value = float(match.groups()[0])
        next_token(False)
        return current_value
    # return false
    raise NotMatching("no real (unreal)")
def complex():
    """Parse a complex literal like '3i', '3.3i' or '3+3i' from the.string.

    This function shadows the builtin 'complex', so the builtin constructor
    is fetched explicitly. Returns the parsed value or False.
    """
    import builtins
    s = the.string.strip().replace("i", "j")  # python spells the imaginary unit 'j'
    match = re.search(r'^(\d+j)', s)  # 3i
    if not match: match = re.search(r'^(\d*\.\d+j)', s)  # 3.3i
    if not match: match = re.search(r'^(\d+\s*\+\s*\d+j)', s)  # 3+3i
    if not match: match = re.search(r'^(\d*\.\d+\s*\+\s*\d*\.\d+j)', s)  # 3.3+3.3i
    if match:
        # BUG fix: 'complex(match[0].groups())' recursed into this very
        # function and called .groups() on a string; use the builtin on the
        # matched text (spaces removed — complex('3 + 3j') is invalid).
        the.current_value = builtins.complex(match.group(1).replace(" ", ""))
        next_token(False)
        return the.current_value
    return False
def maybe_indent():
    """Skip any INDENT tokens (and stray space tokens) at the current position."""
    while the.current_type == _token.INDENT or the.token == ' ':
        next_token()
def method_allowed(meth):
    """Filter for importable method names: drop 1-char names, clashes and keywords."""
    if len(meth) < 2: return False
    if meth in ["print"]: return True
    if meth in ["evaluate", "eval", "int", "True", "False", "true", "false", "the", "Invert", "char"]: return False
    if meth in english_tokens.keywords: return False
    return True
def load_module_methods():
    """Populate the global method/constructor registries from pickled metadata.

    Loads the pre-built method↔module maps (created by module_method_map.py)
    from context.home/data and wires every allowed name to
    english_parser.method_call; also registers extension methods.
    """
    import warnings
    warnings.filterwarnings("ignore", category=UnicodeWarning)
    try:
        import pickle as pickle
    except:
        import pickle
    def deserialize(file):
        # unpickle one metadata file (trusted, shipped with the project)
        with open(file, 'rb') as f:
            return pickle.load(f)
    # static, load only once, create with module_method_map.py
    # context.home = ".." if context.home=='.' else '.'
    if context._debug:
        context.home="/me/dev/angles/angle/"
    the.methodToModulesMap = deserialize(context.home + "/data/method_modules.bin")
    the.moduleMethods = deserialize(context.home + "/data/module_methods.bin")
    the.moduleNames = deserialize(context.home + "/data/module_names.bin")
    the.moduleClasses = deserialize(context.home + "/data/module_classes.bin")
    import english_parser
    for mo, mes in list(the.moduleMethods.items()):
        if not method_allowed(mo): continue
        the.method_token_map[mo] = english_parser.method_call
        for meth in mes:
            if method_allowed(meth):
                the.method_token_map[meth] = english_parser.method_call
    for mo, cls in list(the.moduleClasses.items()):
        for meth in cls:  # class as CONSTRUCTOR
            if method_allowed(meth):
                the.method_token_map[meth] = english_parser.method_call
    # if not the.method_names: # todo pickle
    the.constructors = list(the.classes.keys()) + english_tokens.type_names
    the.method_names = list(the.methods.keys()) + c_methods + list(
        methods.keys()) + core_methods + builtin_methods + list(the.methodToModulesMap.keys())
    # for c in constructors:
    #     if not c in the.method_names: the.method_names.append(c)
    for x in dir(extensions):
        the.method_names.append(x)
    context.extensionMap = extensions.extensionMap
    for _type in context.extensionMap:
        ex = context.extensionMap[_type]
        for method in dir(ex):
            # if not method in the.method_names: #the.methods:
            #     the.methods[method]=getattr(ex,method)
            # else:
            #     pass # TODOOO!
            the.method_names.append(method)
    the.method_names = [meth for meth in the.method_names if method_allowed(meth)]
    # if method_allowed(method):
    #     the.token_map[method] = english_parser.method_call
    # try:
    #     the.methods[method]=getattr(ex,method).im_func #wow, as function!
    # except:
    #     print("wrapper_descriptor not a function %s"%method)
    # context.starttokens_done=True
def main():
    """Debug entry point: print the current caller depth."""
    print(caller_depth())
if __name__ == '__main__':
    main()  # debug
|
994,776 | 6cd1896916763ea918a1b10911cde6304cd4fa7a | #Programa de inventario
def menu():
    """Print the main-menu options for the inventory program."""
    opciones = (
        "---------------------------------",
        "Bienvenido Escoge la opcion:",
        "1 : Agregar Producto",
        "2 : Quitar Producto",
        "3 : Inventario",
        "0 : Salir",
    )
    for linea in opciones:
        print(linea)
def addProducto():
    """Interactively append products to the global lstProductos.

    Prompts for name, price and quantity; invalid numbers are reported and
    the menu loop continues. Any answer other than 'A' exits.
    """
    print("Agregar Producto")
    blMenuProducto = True
    while blMenuProducto:
        menuProducto = input("¿Que desea hacer? Agregar un producto : A / Salir : S -> ")
        if menuProducto == "A":
            try:
                strNombreProducto = input("Digita el nombre del Producto: ")
                flValorProducto = float(input(f"Digita el valor de {strNombreProducto}: "))
                intCantidadProducto = int(input(f"Digita la cantidad de {strNombreProducto}: "))
            except ValueError:
                # BUG fix: the bare 'except:' also swallowed KeyboardInterrupt
                # and EOFError; only bad numeric input should be retried.
                print("Hay un error: Digite bien")
                continue
            dicProducto = {
                "NombreProducto": strNombreProducto,
                "ValorProducto": flValorProducto,
                "CantidadProducto": intCantidadProducto,
            }
            print("Usted agregó este producto:")
            print(dicProducto)
            lstProductos.append(dicProducto)
            print("Ahora tiene estos productos:")
            print(lstProductos)
        else:
            print("Salió de Añadir Producto")
            blMenuProducto = False
def delProducto():
    """Interactively remove products (matched by name) from lstProductos."""
    print("Entró a Quitar Producto")
    while True:
        menuProducto = input("¿Que desea hacer? Quitar : Q / Salir : S -> ")
        if menuProducto == "Q":
            print("Busca en la lista el producto que deseas quitar")
            for p in lstProductos:
                for (key, value) in p.items():
                    print(key, " :: ", value)
            print("Escribe el nombre del Producto que quieres Eliminar")
            strNombreEliminar = input()
            # BUG fix: the original removed items from lstProductos while
            # iterating over it, which skips elements; iterate a copy.
            for p in list(lstProductos):
                for (key, value) in p.items():
                    if value == strNombreEliminar:
                        print(f"{value} eliminado")
                        lstProductos.remove(p)
                        break  # dict already removed; stop scanning its items
            print("Tiene estos productos aún:")
            print(lstProductos)
        else:
            print("Salió de Quitar Producto")
            break
def showInventario():
    """Interactive inventory report over the global lstProductos.

    Rebuilds the global display columns (listaProducto/Precio/Cantidad/Total),
    prints the table plus totals, and stores 'MontoTotal' on each product.
    """
    print("Entró al Inventario")
    while True:
        menuProducto = input("¿Que desea hacer? Ver Inventario : V / Salir : S -> ")
        if menuProducto == "V":
            # BUG fix: the display lists are module-level and were only ever
            # appended to, so viewing the inventory twice duplicated every
            # row; clear them before rebuilding.
            del listaProducto[:]
            del listaPrecio[:]
            del listaCantidad[:]
            del listaTotal[:]
            # Total unit count across all products
            cantproducto = sum(e["CantidadProducto"] for e in lstProductos)
            print(f"Usted tiene {cantproducto} productos")
            # Per-product valuation and display columns
            for Elemento in lstProductos:
                valortotal = Elemento['CantidadProducto'] * Elemento['ValorProducto']
                Elemento['MontoTotal'] = valortotal
                listaProducto.append(str(Elemento['NombreProducto']))
                listaPrecio.append(float(Elemento['ValorProducto']))
                listaCantidad.append(int(Elemento['CantidadProducto']))
                listaTotal.append(float(valortotal))
            # Render the table
            sep = '|{}|{}|{}|{}|'.format('-'*16, '-'*10, '-'*10, '-'*16)
            print('{0}\n| Producto | Precio | Cantidad | MontoTotal |\n{0}'.format(sep))
            for producto, precio, cantidad, total in zip(listaProducto, listaPrecio, listaCantidad, listaTotal):
                print('| {:>14.5} | {:>8} | {:>8} | {:>14.5} |\n{}'.format(producto, precio, cantidad, total, sep))
            # Grand total of the inventory
            s = sum(e["MontoTotal"] for e in lstProductos)
            print(f"El monto total es de {s} soles")
        else:
            print("Salió del Inventario")
            break
# ----- module state -----
strMenuPrincipal = "0"  # last main-menu choice
dicProducto = {}  # scratch dict, populated inside addProducto
lstProductos = []  # the inventory: a list of product dicts
# Display columns for showInventario (rebuilt on every view)
listaProducto = []
listaPrecio = []
listaCantidad = []
listaTotal = []
# ----- main menu loop -----
try:
    while True:
        menu()
        strMenuPrincipal = input()
        if strMenuPrincipal == "1":
            addProducto()
        elif strMenuPrincipal == "2":
            delProducto()
        elif strMenuPrincipal == "3":
            showInventario()
        elif strMenuPrincipal == "0":
            break
        else:
            opcionSalir = input("No escogio las opciones indicadas; desea salir s/n -> ")
            if opcionSalir == "s":
                break
except Exception as e:
    # BUG fix: the bare 'except:' also swallowed KeyboardInterrupt/SystemExit
    # and hid the actual problem behind a generic message.
    print("Error")
    print(e)
finally:
    print("Hasta su próxima visita")
994,777 | bc735711ff4ea77610706b2a95f849671deacb19 | # Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.docker.goals.tailor import rules as tailor_rules
from pants.backend.docker.rules import rules as docker_rules
from pants.backend.docker.target_types import DockerImageTarget
from pants.backend.docker.target_types import rules as target_types_rules
def rules():
    """Collect all rules for the Docker backend: core, tailor and target-type rules."""
    combined = []
    for rule_source in (docker_rules, tailor_rules, target_types_rules):
        combined.extend(rule_source())
    return tuple(combined)
def target_types():
    """Expose the target types registered by the Docker backend."""
    return tuple([DockerImageTarget])
994,778 | d752f357849c3808748a0cab021c7722f1c5b3b9 | import itertools
from classes_pieces import King, Queen, Rook, Knight, Pawn, Bishop, Pieces
class TwoPlayers:
    """
    Sets up and displays a two-player game on the doubled 16x12 board.
    ...
    Attributes
    ----------
    name_player1, name_player2 : str
        names of players
    start_positions : dict
        dict for start positions for pieces
    """
    def __init__(self, name_player1, name_player2):
        """
        Store the player names, define the start squares for every piece
        type of both armies, then build and render the board.
        """
        self.name_player1, self.name_player2 = name_player1, name_player2
        self.start_positions = {}
        # piece mark -> list of start squares (rows 1/2 and 11/12 mirror each other)
        pieces = {"Ki(king)": ['e1', 'm1', 'e12', 'm12'],
                  "Q(queen)": ['d1', 'l1', 'd12', 'l12'],
                  "R(rook)": ['a1', 'h1', 'i1', 'p1', 'a12', 'h12', 'i12', 'p12'],
                  "Kn(knight)": ['b1', 'g1', 'j1', 'o1', 'b12', 'g12', 'j12', 'o12'],
                  "B(bishop)": ['c1', 'f1', 'k1', 'n1', 'c12', 'f12', 'k12', 'n12'],
                  "P(pawn)": [chr(i+97)+'2' for i in range(0, 16)] + [chr(i+97)+'11' for i in range(0, 16)]
                  }
        board = Board("two_players_double_board", pieces)
        board.display()
class ThreePlayers:
    """
    Sets up and displays a three-player game on the hexagonal 22x16 board.
    ...
    Attributes
    ----------
    name_player1, name_player2, name_player3 : str
        names of players
    start_positions : dict
        dict for start positions for pieces
    """
    def __init__(self, name_player1, name_player2, name_player3):
        """
        Store the player names, define the start squares for all three
        armies, then build and render the board.
        """
        self.name_player1, self.name_player2, self.name_player3 = name_player1, name_player2, name_player3
        self.start_positions = {}
        # piece mark -> list of start squares (one entry per player per piece)
        pieces = {"Ki(king)": ['d12', 'r13', 'l1'],
                  "Q(queen)": ['e13', 's12', 'k1'],
                  "R(rook)": ['a9', 'h16', 'o16', 'v9', 'o1', 'h1'],
                  "Kn(knight)": ['b10', 'p15', 'u10', 'g15', 'i1', 'n1'],
                  "B(bishop)": ['c11', 'q14', 't11', 'f14', 'j1', 'm1'],
                  "P(pawn)": [chr(i+97)+str(i + 7) for i in range(1, 9)] + [chr(i+97)+str(28 - i) for i in range(13, 21)] +
                             [chr(i+97)+'2' for i in range(7, 15)]
                  }
        board = Board("three_players_board", pieces)
        board.display()
class FourPlayers:
    """
    Sets up and displays a four-player game on the cross-shaped 14x14 board.
    ...
    Attributes
    ----------
    name_player1, name_player2, name_player3, name_player4 : str
        names of players
    start_positions : dict
        dict for start positions for pieces
    """
    def __init__(self, name_player1, name_player2, name_player3, name_player4):
        """
        Store the player names, define the start squares for all four
        armies, then build and render the board.
        """
        self.name_player1, self.name_player2 = name_player1, name_player2
        self.name_player3, self.name_player4 = name_player3, name_player4
        self.start_positions = {}
        # piece mark -> list of start squares (top/bottom and left/right edges)
        pieces = {"Ki(king)": ['g1', 'h14', 'a7', 'n8'],
                  "Q(queen)": ['h1', 'g14', 'a8', 'n7'],
                  "R(rook)": ['d1', 'k1', 'd14', 'k14', 'a4', 'n4', 'a11', 'n11'],
                  "Kn(knight)": ['e1', 'j1', 'e14', 'j14', 'a5', 'n5', 'a10', 'n10'],
                  "B(bishop)": ['f1', 'i1', 'f14', 'i14', 'a6', 'n6', 'a9', 'n9'],
                  "P(pawn)": [chr(i+97)+'2' for i in range(3, 11)] + [chr(i+97)+'13' for i in range(3, 11)] +
                             ['b'+str(i) for i in range(4, 12)] + ['m'+str(i) for i in range(4, 12)]
                  }
        board = Board("four_players_board", pieces)
        board.display()
class Board(Pieces):
    """
    A class used to create and render a chessboard as ASCII text.
    ...
    Attributes
    ----------
    name : str
        board variant: 'two_players_double_board', 'three_players_board'
        or 'four_players_board'
    super().__init__(pieces) : dicts
        piece data inherited from the Pieces base class
    Methods
    -------
    display()
        Prints different types of boards based on board.name and start_positions
    set_piece_in_cell(board_dict)
        Places every piece's mark into its start cell of board_dict
    """
    def __init__(self, name, pieces):
        """
        Remember the board variant name and hand the piece layout to Pieces.
        """
        self.name = name
        super().__init__(pieces)
    def display(self):
        """Prints different types of boards based on board.name and start_positions"""
        board_dict = dict()
        if self.name == 'two_players_double_board':
            # fill all cells of board_dict with " " for empty cells
            letters = [chr(i+97) for i in range(0, 16)]
            for number in range(0, 12):
                for letter in letters:
                    position = letter + str(number + 1)
                    board_dict[position] = ' '
            board_dict = self.set_piece_in_cell(board_dict)
            # first line of board (column letters header)
            board_str = "  |"
            for i in range(0, 16):
                board_str += chr(i + 97).ljust(2, ' ') + " | "
            print(board_str)
            # print board row by row
            for number in range(0, 12):
                print("-" * 82)
                print(str(number + 1).rjust(2, ' '), end="|")
                for letter in letters:
                    position = letter + str(number + 1)
                    piece = board_dict[position]
                    print(str(piece).ljust(2, ' ') + ' |', end=" ")
                print()
            print("-" * 82)
            print("\n")
            print("END OF TWO_PLAYERS BOARD")
        elif self.name == "four_players_board":
            # fill all cells of board_dict with " " for empty cells
            letters = [chr(i+97) for i in range(0, 14)]
            for number in range(0, 14):
                for letter in letters:
                    position = letter + str(number + 1)
                    board_dict[position] = ' '
            board_dict = self.set_piece_in_cell(board_dict)
            # first line of board (column letters header)
            board_str = "  |"
            for i in range(0, 14):
                board_str += chr(i + 97).ljust(2, ' ') + " | "
            print(board_str)
            # the 3x3 corner regions of the cross-shaped board are unused
            empty_letters, empty_numbers = ['a', 'b', 'c', 'l', 'm', 'n'], ['1', '2', '3', '12', '13', '14']
            empty_cells_tuples = list(itertools.product(empty_letters, empty_numbers))
            empty_cells = []
            for tupl in empty_cells_tuples:
                empty_cells.append(tupl[0] + tupl[1])
            # print board row by row, skipping the corner cells
            for number in range(0, 14):
                print("-" * 76)
                print(str(number + 1).rjust(2, ' '), end="|")
                for letter in letters:
                    position = letter + str(number + 1)
                    piece = board_dict[position]
                    if position not in empty_cells:
                        print(str(piece).ljust(2, ' ') + ' |', end=" ")
                    else:
                        if position.startswith('c'):
                            print('  ', end='| ')
                        else:
                            print('  ', end='  ')
                print()
            print("-" * 76)
            print("\n")
            print("END OF FOUR_PLAYERS BOARD")
        elif self.name == "three_players_board":
            # fill all cells of board_dict with " " for empty cells
            letters = [chr(i + 97) for i in range(0, 22)]
            for number in range(0, 22):
                for letter in letters:
                    position = letter + str(number + 1)
                    board_dict[position] = ' '
            # first line of board (column letters header)
            board_str = "  |"
            for i in range(0, 22):
                board_str += chr(i + 97).ljust(2, ' ') + " | "
            print(board_str)
            # triangular corner regions outside the hexagonal playing area
            empty_cells = []
            for i in range(7):
                for j in range(1, 7 - i + 1):
                    position = letters[i] + str(j)
                    empty_cells.append(position)
                for j in range(1, i + 2):
                    position = letters[i + 15] + str(j)
                    empty_cells.append(position)
                for j in range(10 + i, 17):
                    position = letters[i] + str(j)
                    empty_cells.append(position)
                for j in range(16 - i, 17):
                    position = letters[i + 15] + str(j)
                    empty_cells.append(position)
            board_dict = self.set_piece_in_cell(board_dict)
            # print board row by row, skipping the corner cells
            for number in range(0, 16):
                print("-" * 106)
                print(str(number + 1).rjust(2, ' '), end="|")
                for letter in letters:
                    position = letter + str(number + 1)
                    piece = board_dict[position]
                    if position not in empty_cells:
                        print(str(piece).ljust(2, ' ') + ' |', end=" ")
                    else:
                        if position == 'g1' or position == 'g16':
                            print('  ', end='| ')
                        elif position.startswith('c'):
                            print('  ', end='  ')
                        else:
                            print('  ', end='  ')
                print()
            print("-" * 106)
            print("\n")
            print("END OF THREE_PLAYERS BOARD")
    def set_piece_in_cell(self, board_dict):
        """Write every piece's mark into its start square of *board_dict*."""
        pieces_data = self.collect_all_data_in_lst()
        for piece in pieces_data:
            for position in piece.positions:
                board_dict[position] = piece.mark
        return board_dict
if __name__ == '__main__':
    # Demo: render each board variant once, separated by divider lines.
    two_players_double_board = TwoPlayers("Denys", "Olexander")
    print("\n\n\n" + '-' * 120)
    three_players = ThreePlayers("Denys", "Olexander", "Bohdan")
    print("\n\n\n" + '-' * 120)
    four_players = FourPlayers("Denys", "Olexander", "Andriy", "Bohdan")
994,779 | be5fe4dcab58504acacf1977a402690bc42f86c8 | #!/usr/bin/env python3
""" Convert H5 matrix to NxN3p TSV file."""
import sys
import argparse
import numpy as np
from hicmatrix import HiCMatrix as hm
from utilities import setDefaults, createMainParent
__version__ = '1.0.0'
def H5_to_NxN3p(matrix):
    """Print an H5 HiC matrix as TSV: chrom, bin start, bin end, then row values."""
    hic = hm.hiCMatrix(matrix)
    chrom = hic.getChrNames()[0]
    binSize = hic.getBinSize()
    dense = hic.matrix.toarray()
    # Row-by-row iteration is slower than a pandas conversion but keeps
    # memory low for large matrices.
    for idx, values in enumerate(dense):
        binStart = idx * binSize
        binEnd = binStart + binSize
        joined = '\t'.join(values.astype(str))
        print(f'{chrom}\t{binStart}\t{binEnd}\t{joined}')
def parseArgs():
    """Build the command-line parser and return the processed defaults."""
    parser = argparse.ArgumentParser(
        description=__doc__,
        epilog='Stephen Richer, University of Bath, Bath, UK (sr467@bath.ac.uk)',
        parents=[createMainParent(verbose=False, version=__version__)])
    parser.set_defaults(function=H5_to_NxN3p)
    parser.add_argument('matrix', help='HiC matrix in H5 format.')
    return setDefaults(parser)
if __name__ == '__main__':
    # setDefaults yields (parsed args, selected function); exit with its status.
    args, function = parseArgs()
    sys.exit(function(**vars(args)))
|
994,780 | 34dde960bf0995b4cc0b9888475dd70df8803c7b |
# Testing of Generalized Feed Forward Neural Network
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import tensorflow as tf
import numpy as np
import gzip, cPickle, random
import fnn_class_noise as NN_class
# Getting the data
path_here = '/usr/local/home/krm9c/shwetaNew/data/'
datasets = ['arcene', 'cifar10', 'cifar100', 'gas', 'gisette', 'madelon',\
'mnist', 'notmnist', 'rolling', 'sensorless', 'SVHN']
datasetName = 'mnist'
def load_data(datasetName):
    # Load a gzipped cPickle dataset laid out as
    # [X_train, X_test, y_train, y_test]; flatten image-shaped inputs.
    # (Python 2 code: print statements, cPickle.)
    print datasetName
    f = gzip.open(path_here+datasetName+'.pkl.gz', 'rb')
    dataset = cPickle.load(f)
    X_train = dataset[0]
    X_test = dataset[1]
    y_train = dataset[2]
    y_test = dataset[3]
    print X_train.shape, y_train.shape, X_test.shape, y_test.shape
    try:
        # 4-D inputs (N, H, W, C) are reshaped to 2-D (N, H*W*C).
        if X_train.shape[2]:
            X_train = X_train.reshape((X_train.shape[0], (X_train.shape[1]*X_train.shape[2]*X_train.shape[3])))
            X_test = X_test.reshape((X_test.shape[0], (X_test.shape[1]*X_test.shape[2]*X_test.shape[3])))
    except IndexError:
        # Already flat (2-D) data -- nothing to do.
        pass
    print X_train.shape, y_train.shape, X_test.shape, y_test.shape
    return X_train, y_train, X_test, y_test
# Load the selected dataset once at import time.
X_train, y_train, X_test, y_test = load_data(datasetName)
# Give file name to get parameters
fileName = "para.txt"
# Getting the parameters
# Template of the keys expected in the parameter file; getParas() reads
# only lines containing one of these keys.
mydict = {'num_NNlayers': '',
          'num_features': '', 'hidden_nodes': '',
          'num_labels': '', 'learning_rate': '', 'steps': '',
          'display_step': '', 'batch_size': '', 'optimizer': '', 'activation': ''}
def getParas(fileName):
    """Parse ``key : value`` hyper-parameters from *fileName* into a dict.

    Only keys present in the module-level ``mydict`` template are read.
    ``learning_rate`` is cast to float; ``optimizer``/``activation`` stay
    strings; everything else is cast to int.  ``num_features`` and
    ``num_labels`` are overridden from the loaded data shapes.
    """
    dictNew = {}
    with open(fileName, 'r') as f:
        for line in f:
            for key in mydict.keys():
                if key in line:
                    dictNew[key] = line.split(':')[1].split('\n')[0].strip()
                    break
    # NOTE: the ``with`` block closes the file; the original's extra
    # f.close() was redundant and has been dropped.
    for key in dictNew.keys():
        # BUG FIX: the original compared strings with ``is``, which relies
        # on CPython string interning and is not guaranteed to be True even
        # for equal strings.  Use ``==``.
        if key == "learning_rate":
            dictNew[key] = float(dictNew[key])
        elif key == "optimizer" or key == "activation":
            pass
        else:
            dictNew[key] = int(dictNew[key])
    # Feature/label counts come from the data, not from the file.
    dictNew['num_features'] = X_train.shape[1]
    dictNew['num_labels'] = y_train.shape[1]
    return dictNew
# Parse the hyper-parameters once at import time.
paras = getParas(fileName)
#print paras
# Activation Function
def act_ftn(name):
    """Map an activation-function name to the corresponding tf op.

    Supported names: "tanh", "relu", "sigmoid".

    BUG FIX: the original printed "no activation" and implicitly returned
    None for unknown names, which only crashed later when the None was
    used as a callable.  Fail fast with a ValueError instead.
    """
    if name == "tanh":
        return tf.nn.tanh
    elif name == "relu":
        return tf.nn.relu
    elif name == "sigmoid":
        return tf.nn.sigmoid
    raise ValueError("no activation named %r" % (name,))
def perturbData(X, m, n):
    """Return X with additive Gaussian noise (mean 1, std 1) of shape (m, n)."""
    noise = np.random.normal(1, 1, size=[m, n])
    return X + noise
# Define Model parameters
depth = []
classes = paras['num_labels']
lr = paras['learning_rate']
depth.append(paras['num_features'])
# BUG FIX: the original tested ``if [paras['num_NNlayers'] > 2]:`` -- a
# one-element list, which is always truthy.  It was harmless only because
# range() of a non-positive count is empty, but the intent is a plain
# comparison.
if paras['num_NNlayers'] > 2:
    # One hidden layer entry per layer beyond input and output.
    for _ in range(paras['num_NNlayers'] - 2):
        depth.append(paras['hidden_nodes'])
batch_size = paras['batch_size']
op = paras['optimizer']
act = act_ftn(paras['activation'])
# Define model
model = NN_class.learners()
model = model.init_NN_custom(classes, lr, depth, act, batch_size, optimizer=op)
# Training: outer loop over "steps", each doing 100 random mini-batch
# updates; evaluation is printed every display_step steps.  (Python 2.)
for j in range(paras['steps']):
    for k in xrange(100):
        x_batch = []
        y_batch = []
        # Sample a mini-batch of indices without replacement.
        arr = random.sample(range(0, len(X_train)), paras['batch_size'])
        for idx in arr:
            x_batch.append(X_train[idx])
            y_batch.append(y_train[idx])
        x_batch = np.asarray(x_batch)
        y_batch = np.asarray(y_batch)
        # Gradient update on the plain objective.
        model.sess.run([model.Trainer["Grad_op"]],
                       feed_dict={model.Deep['FL_layer_10']: x_batch, model.classifier['Target']: y_batch,
                                  model.classifier['learning_rate']: lr})
        # model.sess.run([model.Trainer["Grad_No_Noise_op"]],\
        # feed_dict={model.Deep['FL_layer_10']: x_batch, model.classifier['Target']: y_batch,\
        # model.classifier['learning_rate']: lr})
        # Second update on the noise objective.
        model.sess.run([model.Trainer["Noise_op"]],
                       feed_dict={model.Deep['FL_layer_10']: x_batch, model.classifier['Target']: y_batch,
                                  model.classifier['learning_rate']: lr})
    if j%paras['display_step'] == 0:
        print "Step", j
        #X_test_perturbed = perturbData(X_test, X_test.shape[0], X_test.shape[1])
        # Accuracy on the full train/test sets; costs on the last mini-batch.
        acc_test = model.sess.run([model.Evaluation['accuracy']],
                                  feed_dict={model.Deep['FL_layer_10']: X_test, model.classifier['Target']:
                                             y_test, model.classifier["learning_rate"]: lr})
        acc_train = model.sess.run([model.Evaluation['accuracy']],
                                   feed_dict={model.Deep['FL_layer_10']: X_train, model.classifier['Target']:
                                              y_train, model.classifier["learning_rate"]: lr})
        cost_error_noise = model.sess.run([model.classifier["Overall_cost"]],
                                          feed_dict={model.Deep['FL_layer_10']: x_batch, model.classifier['Target']:
                                                     y_batch, model.classifier["learning_rate"]: lr})
        cost_error = model.sess.run([model.classifier["Overall_cost_Grad"]],
                                    feed_dict={model.Deep['FL_layer_10']: x_batch, model.classifier['Target']:
                                               y_batch, model.classifier["learning_rate"]: lr})
        # Print all the outputs
        print("Loss w/o Noise:", cost_error[0], "Loss with Noise:", cost_error_noise[0])
        print("Train Acc:", acc_train[0]*100, "Test Acc:", acc_test[0]*100)
''' print("Final Test Accuracy", model.sess.run([ model.Evaluation['accuracy']], \
feed_dict={model.Deep['FL_layer_10']: X_test, model.classifier['Target']: \
y_test, model.classifier["learning_rate"]:lr})[0]*100) '''
|
994,781 | 6c89a44ad65663330b609302600cb1b6e31175bb | from django import forms
from django.forms import ModelForm
from .models import Item, Patron, Author
class SearchModeForm(forms.Form):
    """Selector that toggles the catalog search between items and patrons."""
    SEARCH_MODES = (
        ('item', 'Item'),
        ('patron', 'Patron'),
    )
    mode = forms.ChoiceField(label='', choices=SEARCH_MODES)
class SearchForm(forms.Form):
    """Free-text search box; an empty query is allowed."""
    query = forms.CharField(label='', max_length=100, required=False)
class PatronForm(ModelForm):
    """Create/edit form for a Patron (name and email only)."""
    class Meta:
        model = Patron
        fields = ['patron_name', 'email']
class ItemForm(ModelForm):
    """Create/edit form for a catalog Item; date-tracking fields are managed elsewhere."""
    class Meta:
        model = Item
        fields = ['media_type', 'catalog_id', 'isbn', 'upc', 'condition', 'notes', 'title', 'authors', 'shelf_location', 'publication_date', 'lost', 'api_link']
        #, 'aquisition_date', 'last_modified_date',
994,782 | 4886ef3fbc3398886fe91305f0f37dbe1350fd85 | import argparse
import catalyst as ct
import pandas as pd
import torch
from pathlib import Path
from catalyst.dl import SupervisedRunner
from catalyst.utils import get_device
from src.callbacks import get_callbacks
from src.dataset import get_base_loader
from src.losses import get_loss
from src.models import get_model
from src.optimizers import get_optimizer
from src.schedulers import get_scheduler
from src.transforms import get_transforms
from src.utils import load_config
from src.validation import get_validation
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", required=True)
    parser.add_argument("--folds", nargs="*", type=int, required=True)
    parser.add_argument("--device", default="auto")
    parser.add_argument(
        "--debug", action="store_true", help="Whether to use debug mode")
    args = parser.parse_args()
    config = load_config(args.config)
    # Reproducibility: fixed seed + deterministic cuDNN kernels.
    ct.utils.set_global_seed(config.seed)
    ct.utils.prepare_cudnn(deterministic=True)
    if args.device == "auto":
        device = get_device()
    else:
        device = torch.device(args.device)
    # Output goes under output/<config-name>/fold<i>/.
    output_root_dir = Path("output")
    output_base_dir = output_root_dir / args.config.replace(".yml",
                                                            "").split("/")[-1]
    output_base_dir.mkdir(exist_ok=True, parents=True)
    train_images_path = Path(config.data.train_images_path)
    df = pd.read_csv(config.data.train_df_path)
    splits = get_validation(df, config)
    transforms_dict = {
        phase: get_transforms(config, phase)
        for phase in ["train", "valid"]
    }
    # Per-head class counts.  NOTE(review): computed but not referenced
    # below -- presumably consumed inside get_model via config elsewhere.
    cls_levels = {
        "grapheme": df.grapheme_root.nunique(),
        "vowel": df.vowel_diacritic.nunique(),
        "consonant": df.consonant_diacritic.nunique()
    }
    for i, (trn_idx, val_idx) in enumerate(splits):
        # Train only the folds requested on the command line.
        if i not in args.folds:
            continue
        print(f"Fold: {i}")
        output_dir = output_base_dir / f"fold{i}"
        output_dir.mkdir(exist_ok=True, parents=True)
        trn_df = df.loc[trn_idx, :].reset_index(drop=True)
        val_df = df.loc[val_idx, :].reset_index(drop=True)
        if args.debug:
            # Debug mode: truncate both splits for a fast dry run.
            trn_df = trn_df.loc[:1000, :].reset_index(drop=True)
            val_df = val_df.loc[:1000, :].reset_index(drop=True)
        data_loaders = {
            phase: get_base_loader(
                df,
                train_images_path,
                phase=phase,
                size=(config.img_size, config.img_size),
                batch_size=config.train.batch_size,
                num_workers=config.num_workers,
                transforms=transforms_dict[phase])
            for phase, df in zip(["train", "valid"], [trn_df, val_df])
        }
        model = get_model(config).to(device)
        criterion = get_loss(config).to(device)
        optimizer = get_optimizer(model, config)
        scheduler = get_scheduler(optimizer, config)
        callbacks = get_callbacks(config)
        runner = SupervisedRunner(
            device=device,
            input_key="images",
            input_target_key="targets",
            output_key="logits")
        runner.train(
            model=model,
            criterion=criterion,
            optimizer=optimizer,
            loaders=data_loaders,
            logdir=output_dir,
            scheduler=scheduler,
            num_epochs=config.train.num_epochs,
            callbacks=callbacks,
            main_metric=config.main_metric,
            minimize_metric=False,
            monitoring_params=None,
            verbose=False)
|
994,783 | 39a777aa07b67d7a6d9428106ba27f37b275cd29 | # How to validate Amazon access key and secret key is correct?
get_all_regions()
|
994,784 | ef29deea3e6f9a7cbf83db25b2120b249178070c | import sys
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/spreadsheets.readonly']
# The ID and range of a sample spreadsheet.
#SAMPLE_SPREADSHEET_ID = '1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms'
SAMPLE_SPREADSHEET_ID = '1TE4mjYDjDBeLbHbSlOEeU5k-H9lP2A9DEae7sktgGbo'
SAMPLE_RANGE_NAME = 'A2:H12'
class InvestmentPortfolio():
    """Track stock/option holdings stored in a Google Sheets spreadsheet."""

    def __init__(self):
        # Spreadsheet/worksheet identifiers of the portfolio sheet.
        self.google_spreadsheet_id = '1TE4mjYDjDBeLbHbSlOEeU5k-H9lP2A9DEae7sktgGbo'
        self.google_sheet_id = 0
        self.service = ''
        self.portfolio = dict()  # need to read csv file and then add to it

    def __validate_google_credentials(self):
        """validates user credentials

        user must give authorization of user data
        """
        creds = None
        # token.pickle caches the OAuth token between runs.
        if os.path.exists('token.pickle'):
            with open('token.pickle', 'rb') as token:
                creds = pickle.load(token)
        # If there are no (valid) credentials available, let the user log in.
        if not creds or not creds.valid:
            if creds and creds.expired and creds.refresh_token:
                creds.refresh(Request())
            else:
                flow = InstalledAppFlow.from_client_secrets_file('credentials.json', SCOPES)
                creds = flow.run_local_server()
            # Save the credentials for the next run
            with open('token.pickle', 'wb') as token:
                pickle.dump(creds, token)
        self.service = build('sheets', 'v4', credentials=creds)

    def _fetch_rows(self):
        """Authenticate and return the value rows of the configured range.

        Extracted from company_share_owned/print_portfolio, which previously
        duplicated this fetch verbatim.
        """
        self.__validate_google_credentials()
        sheet = self.service.spreadsheets()
        result = sheet.values().get(spreadsheetId=self.google_spreadsheet_id,
                                    range=SAMPLE_RANGE_NAME).execute()
        return result.get('values', [])

    def company_share_owned(self, ticker):
        """returns true if company share is owned"""
        for row in self._fetch_rows():
            if ticker == row[1]:
                return True
        return False

    def add_option_purchase(self, ticker):
        # get updated price
        # update excel sheet
        pass

    def add_share_purchase(self, ticker):
        pass

    def print_portfolio(self):
        """printing out values pulled from sheet"""
        values = self._fetch_rows()
        if not values:
            print('No data found.')
        else:
            print('pulled data:')
            print('----------------')
            print('ALL VALUES\n', '-----------------\n', values)
            print('ONLY PRICES\n', '----------------')
            print('{:25} {}'.format('name', 'price'))
            for row in values:
                # Only rows marked '[OWN]' with all 8 columns are printed.
                if len(row) < 8 or row[2] != '[OWN]':
                    continue
                else:
                    print('{:25} {}'.format(row[0], row[6]))

    def print_update_queue(self):
        # BUG FIX: originally defined without ``self``, so calling it on an
        # instance raised TypeError.  Still a placeholder.
        pass

    def update_portfolio(self):
        # BUG FIX: originally defined without ``self`` (see above).
        pass
if __name__ == "__main__":
    test = InvestmentPortfolio()
    # NOTE(review): no argument-count check -- running without a ticker
    # argument raises IndexError.
    ticker = sys.argv[1]
    if test.company_share_owned(ticker):
        test.print_portfolio()
    else:
        print('You do not own shares of this company yet!')
|
994,785 | dede18394c20f3a0e0edc6a6a44b4b7d77850eab | #Programme to find whether source and target word are transliterate word or not. If its a transliterate word then we get the shortest path that matched the target word.
#Written by Roja(13-07-17)
#RUN:: python get_transliterate_wrds.py eng hnd Sound-list.txt out > transliterate.txt
#OUTPUT: out file contains path if found or else NO PATH if not transliterate wrd
# transliterate.txt file contains all the words that are transliterate in src and tgt
#############################################################################################
import sys
import networkx as nx
# Module-level state shared by the helper functions below.
eng_lst = []
hnd_lst = []
sound_lst = []
sound_dic = {}
dic = {}          # (start, end, 1) edge -> matched target-spelling length
graph_dict = []   # weighted edge list fed to the networkx graph
index = 0
max_key_len = 0
pos = 0
hlen = 0
# eng sentences (or eng left over wrds); only the last line read is kept.
for line in open(sys.argv[1]):
    eng_lst = line.strip().split()
# hindi sentences (or hnd left over wrds); same: last line wins.
for line in open(sys.argv[2]):
    hnd_lst = line.strip().split()
# Sound dictionary: tab-separated roman key -> '/'-separated target spellings.
for line in open(sys.argv[3]):
    sound_lst = line.strip().split('\t')
    sound_dic[sound_lst[0]] = sound_lst[1]
    if len(sound_lst[0]) > max_key_len:
        max_key_len = len(sound_lst[0])
# Storing graph output
g_file = open(sys.argv[4], "w")
# func to get sound dic in tgt:
def check_sound_in_tgt(lst, hnd, wrd, tup):
    # Record edge `tup` if any candidate spelling in `lst` occurs in `hnd`.
    # Mutates module-level `graph_dict` (edge list) and `dic` (edge ->
    # matched-spelling length).  NOTE(review): when several candidates
    # match, the length of the *last* match wins.
    l = 0
    for each in lst:
        if each in hnd:
            l = len(each)
            if tup not in graph_dict:
                graph_dict.append(tup)
            dic[tup] = l
def check_sound_in_src(eng, index, pos, hnd):
    # Build the graph edge for source substring `eng` starting at character
    # offset `index`, and return the furthest character position reached.
    start_pos = index + 1
    end_pos = index + len(eng) + 1
    if end_pos > pos:  # Storing last index of a word
        pos = end_pos
    tup = (start_pos, end_pos, 1)
    # Candidate target spellings for this sound, '/'-separated in the dict.
    val = sound_dic[eng].split('/')
    check_sound_in_tgt(val, hnd, eng, tup)
    return pos
def check_transliterate(eng_wrd, hnd_wrd):
    # Scan `eng_wrd` for sound-dictionary keys of length max_key_len,
    # max_key_len-1 and max_key_len-2 at every offset, accumulating edges.
    # Returns the furthest matched position (the shortest-path sink node).
    pos = 0
    dic = {}  # NOTE(review): local and unused; shadows the module-level dict
    for i in range(0, len(eng_wrd)):
        if len(eng_wrd[i:i+max_key_len]) == max_key_len:
            if eng_wrd[i:i+max_key_len] in sound_dic.keys():
                wrd = eng_wrd[i:i+max_key_len]
                pos = check_sound_in_src(wrd, i, pos, hnd_wrd)
        if len(eng_wrd[i:i+max_key_len-1]) == max_key_len-1:
            if eng_wrd[i:i+max_key_len-1] in sound_dic.keys():
                wrd = eng_wrd[i:i+max_key_len-1]
                pos = check_sound_in_src(wrd, i, pos, hnd_wrd)
        if len(eng_wrd[i:i+max_key_len-2]) == max_key_len-2:
            if eng_wrd[i:i+max_key_len-2] in sound_dic.keys():
                wrd = eng_wrd[i:i+max_key_len-2]
                pos = check_sound_in_src(wrd, i, pos, hnd_wrd)
    return pos
#print sorted(graph_dict), pos
# For every (english, hindi) word pair, build a sound-match graph and test
# whether a shortest path 1..index covers the hindi word's length --
# if so, the pair is reported as transliterations.  (Python 2.)
for eng in eng_lst:
    for hin in hnd_lst:
        # Reset per-pair state.
        graph_dict = []
        hlen = 0
        dic = {}
        index = check_transliterate(eng.lower(), hin)
        # print eng, hin
        # Usage of Multigraph
        MG = nx.MultiGraph()
        MG.add_weighted_edges_from(graph_dict)
        # Converting Multigraph to normal graph, keeping the minimum-weight
        # edge between each node pair.
        GG = nx.Graph()
        for n, nbrs in MG.adjacency_iter():
            for nbr, edict in nbrs.items():
                minvalue = min([d['weight'] for d in edict.values()])
                GG.add_edge(n, nbr, weight=minvalue)
        try:
            output = nx.shortest_path(GG, 1, int(index))
            # Sum the matched target-spelling lengths along the path.
            for i in range(0, len(output)-1):
                t = (output[i], output[i+1], 1)
                hlen = hlen + dic[t]
            # print output, hlen, len(hin), dic
            if hlen == len(hin) or hlen-1 == len(hin):  # Ex: association asosIeSana, Lodha loDZA
                #if hlen == len(hin) : #Ex: association asosIeSana, Lodha loDZA
                g_file.write('%s\n' % output)
                # print 'Shortest path' , output
                print '(eng_word-tran_word\t' + eng + '\t' + hin + ' )'
            else:
                g_file.write("NO PATH\n")
        except:
            # No path between node 1 and `index` (or empty graph).
            g_file.write("NO PATH\n")
994,786 | 14db769b61a00ce17734df032d244d681452b5a2 | from django.forms import *
#from django.forms.extras.widgets import SelectDateWidget
from .models import *
from django.forms import CheckboxInput
from django.contrib.auth.forms import UserChangeForm
from django.contrib.auth.models import User
import datetime
class VolunteerForm(ModelForm):
    """Model form for Volunteer sign-up.

    Each skill checkbox enables/disables a companion detail field
    client-side via an inline onclick handler.
    """

    def __init__(self, *args, **kwargs):
        super(ModelForm, self).__init__(*args, **kwargs)
        # Only offer jobs that still have openings.
        self.fields["job"].queryset = Job.objects.filter(full=False).order_by('id')
        self.fields['name'].required = True
        self.fields['waiver'].required = True
        #self.fields['vancap'].widget.attrs['required'] = False
        #self.fields['vancap'].widget.attrs['disabled'] = True
        #self.fields['vancap'].widget.attrs['placeholder'] = "Capacity & Type"
        # checkbox field name -> (companion detail field, placeholder text).
        # BUG FIX: 'truck' appeared twice; duplicate dict keys silently
        # overwrite each other, so the redundant entry is removed.
        rel = {'doctor': ('dSpecialty', 'Specialty'),
               'nurse': ('nSpecialty', 'Specialty'),
               'satphone': ('satnum', 'Phone #'),
               'otherLang': ('lang', 'Language'),
               'dataEntry': ('software', 'Software'),
               'functional': ('fneeds', 'Description'),
               'van': ('vancap', 'Capacity & Type'),
               'boat': ('btype', 'Capacity & Type'),
               'rv': ('rvtype', 'Capacity & Type'),
               'cdl': ('cdlnum', 'Class and License'),
               'operate': ('eqtype', 'Types'),
               'truck': ('tdescription', 'Description'),
               }
        for field in rel:
            self.fields[field].widget.attrs['onclick'] = "document.getElementById('id_" + rel[field][0] + "').disabled = !this.checked"
            self.fields[rel[field][0]].widget.attrs['disabled'] = True
            self.fields[rel[field][0]].widget.attrs['placeholder'] = rel[field][1]

    # NOTE(review): the explicit SelectDateWidget import is commented out at
    # the top of this file; the name must come from ``from django.forms
    # import *`` -- confirm it resolves on the installed Django version.
    birthday = DateField(required=False, widget=SelectDateWidget(years=range(datetime.date.today().year, 1900, -1)))
    job = ModelChoiceField(queryset=Job.objects.none(), to_field_name="title", required=False)
    distance = IntegerField(widget=NumberInput(attrs={'type': 'range', 'step': '10', 'min': '5', 'max': '2000'}))

    class Meta:
        model = Volunteer
        fields = '__all__'
        exclude = ['picture']
        required = {'name': True}

    def disabled(_self):
        """Return this form with every field rendered read-only/disabled."""
        form = _self
        for field in form.fields:
            if form.fields[field].widget.__class__.__name__ == CheckboxInput().__class__.__name__:
                form.fields[field].widget.attrs['disabled'] = True
            else:
                form.fields[field].widget.attrs['readonly'] = True
                form.fields[field].widget.attrs['disabled'] = True
        return form
class OrganizationForm(ModelForm):
    """Model form for Organization; the `agency` field is excluded."""
    #agency = ModelChoiceField(queryset=Organization.objects.all().order_by('id'), to_field_name="name")

    class Meta:
        model = Organization
        #fields = '__all__'
        exclude = ['agency']

    def disabled(_self):
        """Return this form with every field rendered read-only/disabled."""
        form = _self
        # form.fields['agency'].widget.attrs['readonly'] = True
        for field in form.fields:
            # Checkboxes only honour 'disabled'; other widgets get both flags.
            if form.fields[field].widget.__class__.__name__ == CheckboxInput().__class__.__name__:
                form.fields[field].widget.attrs['disabled'] = True
            else:
                form.fields[field].widget.attrs['readonly'] = True
                form.fields[field].widget.attrs['disabled'] = True
        return form
class JobForm(ModelForm):
    """Model form for Job postings with start/end date pickers (+/- 2 years)."""
    sdate = DateField(required=False, initial=datetime.date.today, widget=SelectDateWidget(years=range(datetime.date.today().year+2, datetime.date.today().year-2, -1)))
    edate = DateField(required=False, initial=datetime.date.today, widget=SelectDateWidget(years=range(datetime.date.today().year+2, datetime.date.today().year-2, -1)))

    class Meta:
        model = Job
        #fields = '__all__'
        exclude = ['full']

    def disabled(_self):
        """Return this form with every field rendered read-only/disabled."""
        form = _self
        for field in form.fields:
            if form.fields[field].widget.__class__.__name__ == CheckboxInput().__class__.__name__:
                form.fields[field].widget.attrs['disabled'] = True
            else:
                form.fields[field].widget.attrs['readonly'] = True
                form.fields[field].widget.attrs['disabled'] = True
        return form
#class UserForm(UserChangeForm):
|
994,787 | db632f2ea08349cc9ca60131e5020f35936133c4 | import tkinter
import point
from game_logic import *
class board:
    """Tkinter view/controller for a GameState board.

    The playing area is kept square inside the canvas: left/right (or
    up/lower) fractional bounds inset the grid when the board is not
    square.  `init` tracks the phase: "B"/"W" = manual piece placement,
    anything else = game in progress.
    """

    def __init__(self, GameState):
        self.root_window = tkinter.Tk()
        self.GameState = GameState
        self.state = GameState.board
        self.row = GameState.height
        self.column = GameState.width
        self.turn = GameState.turn
        self.deter = GameState.deter
        # Fractional bounds of the playing area within the canvas.
        self.left_end, self.up_end = 0, 0
        self.right_end, self.lower_end = 1, 1
        self.init = "B"
        if self.column <= self.row:
            self.left_end = (self.row - self.column)/2/self.row
            self.right_end = (self.row + self.column)/2/self.row
        elif self.column > self.row:
            self.up_end = (self.column - self.row)/2/self.column
            self.lower_end = (self.column + self.row)/2/self.column
        self.swtchbtn = tkinter.Button(master=self.root_window, text="Switch to White", command=self.sbutton)
        self.swtchbtn.grid(
            row=2, column=0, sticky=tkinter.W)
        self.quitbtn = tkinter.Button(master=self.root_window, text="Quit", command=lambda: self.root_window.destroy())
        self.quitbtn.grid(
            row=2, column=0, sticky=tkinter.E)
        # Main board canvas.
        self.canvas = tkinter.Canvas(
            master=self.root_window, width=500, height=500,
            background="#93e2ff")
        self.canvas.grid(
            row=0, column=0,
            sticky=tkinter.N + tkinter.S + tkinter.E + tkinter.W)
        # Status canvas (scores / turn / winner).
        self.canvas2 = tkinter.Canvas(
            master=self.root_window, width=500, height=200,
            background="#66ccff")
        self.canvas_height = 500
        self.canvas_width = 500
        self.canvas2_height = 200
        self.canvas2_width = 500
        self.canvas2.grid(
            row=1, column=0,
            sticky=tkinter.N + tkinter.S + tkinter.E + tkinter.W)
        self.show_status()
        self.canvas.bind("<Configure>", self.resize)
        ## self.canvas.bind("<Motion>",self.track_mouse)
        self.canvas.bind("<Button-1>", self.move)
        self.canvas2.bind("<Configure>", self.resize_canvas2)
        self.root_window.rowconfigure(0, weight=2)
        self.root_window.rowconfigure(1, weight=1)
        self.root_window.rowconfigure(2, weight=1)
        self.root_window.columnconfigure(0, weight=2)

    def sbutton(self):
        # First click: switch setup colour B -> W; second click: finish
        # setup and start the game proper.
        if self.init == "B":
            self.init = "W"
            self.swtchbtn["text"] = "Start Game"
        elif self.init == "W":
            GameState.game_end(self.GameState)
            self.init = "SBSBSBSBSB"
            self.swtchbtn["text"] = "Game Started"
        self.show_status()
        pass

    ## def track_mouse(self,event):
    ##     print(event)

    def resize_canvas2(self, event):
        # Keep the status text centred when the status canvas is resized.
        self.canvas2_height = event.height
        self.canvas2_width = event.width
        self.show_status()

    def resize(self, event):
        # Redraw grid and pieces at the new canvas size.
        self.canvas_height = event.height
        self.canvas_width = event.width
        self.draw_grid(self.row, self.column)
        self.draw_pieces(self.state)

    def move(self, event):
        # Click handler: during setup, place a piece of the current setup
        # colour; during play, forward a valid move to the game logic.
        try:
            if self.init == "B":
                change_row, change_column = self._on_grid(event)
                self.GameState.board[change_row-1][change_column-1] = "B"
                self.draw_pieces(self.state)
                self.show_status()
                pass
            elif self.init == "W":
                change_row, change_column = self._on_grid(event)
                self.GameState.board[change_row-1][change_column-1] = "W"
                self.draw_pieces(self.state)
                self.show_status()
                pass
            else:
                if board._in_x_range(self, event) and board._in_y_range(self, event):
                    ## position_order_as_gamelogic = [(n-th_of_the_column, n-th_of_row)]
                    ## i.e. the n-th cell of the n-th row: count down first, then right
                    if board._on_grid(self, event) in GameState._valid_positions(self.GameState, self.GameState.turn):
                        GameState.move(self.GameState, board._on_grid(self, event), self.GameState.turn)
                        self.draw_pieces(self.state)
                        self.show_status()
        except IndexError:
            # Click landed outside the board array bounds -- ignore.
            pass

    def _on_grid(self, event):
        # Map a pixel click to 1-based (row, column) grid coordinates,
        # normalising by the larger board dimension.
        if self.column <= self.row:
            grid_position = (int(((event.y/self.canvas_height)-self.up_end)*self.row)+1,
                             int(((event.x/self.canvas_width)-self.left_end)*self.row)+1)
        elif self.column > self.row:
            grid_position = (int(((event.y/self.canvas_height)-self.up_end)*self.column)+1,
                             int(((event.x/self.canvas_width)-self.left_end)*self.column)+1)
        print(grid_position)
        ## grid_position=(down_how_many_row,right_how_many_column)
        return grid_position

    def _in_x_range(self, event):
        # True when the click's x lies inside the horizontal board bounds.
        if self.column <= self.row:
            if self.left_end < event.x/self.canvas_width < self.right_end:
                return True
            else:
                return False
        else:
            return True

    def _in_y_range(self, event):
        # True when the click's y lies inside the vertical board bounds.
        if self.column > self.row:
            if self.up_end < event.y/self.canvas_height < self.lower_end:
                return True
            else:
                return False
        else:
            return True

    def draw_grid(self, row, column):
        # Compute fractional endpoints of the grid lines (vertical lines
        # parallel to y, horizontal lines parallel to x) and draw them.
        major = self.row if self.row >= self.column else self.column
        y_parl_begin_frac = [point.Point(((major-column)/2+i)/major, (major-row)/2/major) for i in range(column+1)]
        y_parl_end_frac = [point.Point(((major-column)/2+i)/major, (major+row)/2/major) for i in range(column+1)]
        x_parl_begin_frac = [point.Point((major-column)/2/major, ((major-row)/2+i)/major) for i in range(row+1)]
        x_parl_end_frac = [point.Point((major+column)/2/major, ((major-row)/2+i)/major) for i in range(row+1)]
        wpix, hpix = self.canvas.winfo_width(), self.canvas.winfo_height()
        k = [y_parl_begin_frac, y_parl_end_frac, x_parl_begin_frac, x_parl_end_frac]  # NOTE(review): unused
        self.canvas.delete(tkinter.ALL)
        for i in range(len(y_parl_begin_frac)):
            self.canvas.create_line(y_parl_begin_frac[i].pixel(wpix, hpix),
                                    y_parl_end_frac[i].pixel(wpix, hpix),
                                    fill="white")
        for i in range(len(x_parl_begin_frac)):
            self.canvas.create_line(x_parl_begin_frac[i].pixel(wpix, hpix),
                                    x_parl_end_frac[i].pixel(wpix, hpix),
                                    fill="white")
        pass

    def show_status(self):
        # Redraw the status panel: piece counts, whose turn, and winner.
        self.canvas2.delete(tkinter.ALL)
        wpix, hpix = self.canvas2_width, self.canvas2_height
        status_middle = point.Point(1/2, 1/2).pixel(wpix, hpix)
        self.canvas2.create_text(status_middle,
                                 text="FULL VERSION\nBLACK: {}\nWHITE: {}\nTURN:{}\nWINNER:{}".format(self.GameState.count_pieces("B"),
                                                                                                      self.GameState.count_pieces("W"),
                                                                                                      self.GameState.turn,
                                                                                                      self.GameState.winner),
                                 font=("calibri", 20),
                                 fill="white")

    def draw_pieces(self, state):
        # Translate board-array coordinates into canvas grid cells (offset
        # so a non-square board stays centred), then draw each piece.
        major = self.row if self.row >= self.column else self.column
        grid_unit_height = 1/major*self.canvas_height
        grid_unit_width = 1/major*self.canvas_width
        white_list = []  ## (row,column) start from 1
        black_list = []  ## (row,column) start from 1
        if self.row >= self.column:
            n_column_to_right = int((self.row-self.column)/2)
            for i in range(len(state)):
                for j in range(len(state[i])):
                    if state[i][j] == "B":
                        black_list.append((i+1, j+1+n_column_to_right))
                    elif state[i][j] == "W":
                        white_list.append((i+1, j+1+n_column_to_right))
        elif self.row < self.column:
            n_row_lower = int((self.column-self.row)/2)
            for i in range(len(state)):
                for j in range(len(state[i])):
                    if state[i][j] == "B":
                        black_list.append((i+1+n_row_lower, j+1))
                    elif state[i][j] == "W":
                        white_list.append((i+1+n_row_lower, j+1))
        for white_piece in white_list:
            self.canvas.create_oval((white_piece[1]-1)*grid_unit_width,
                                    (white_piece[0]-1)*grid_unit_height,
                                    white_piece[1]*grid_unit_width,
                                    white_piece[0]*grid_unit_height,
                                    fill="white",
                                    outline="white")
        for black_piece in black_list:
            self.canvas.create_oval((black_piece[1]-1)*grid_unit_width,
                                    (black_piece[0]-1)*grid_unit_height,
                                    black_piece[1]*grid_unit_width,
                                    black_piece[0]*grid_unit_height,
                                    fill="black",
                                    outline="black")

    def run(self) -> None:
        """Enter the Tk main event loop."""
        self.root_window.mainloop()
|
994,788 | ea27f5d36f23c5fea5008d638bda8790684adc11 | from django.test import TestCase, Client
from django.urls import reverse
import json
class BookViewTest(TestCase):
    """Smoke tests for the book views.

    NOTE(review): neither test asserts anything about the response -- they
    only verify the views don't raise.  Consider asserting status codes.
    """

    def setUp(self):
        print("Test started")

    def testListView(self):
        client = Client()
        response = client.get(reverse("books:create"))

    def testCreateView(self):
        # Posts a JSON payload matching the Book fields.
        query = {
            "title": "title",
            "content": "content",
            "author_name": "author_name",
            "price": 10000
        }
        client = Client()
        response = client.post(reverse("books:create"), data=json.dumps(query), content_type="application/json")

    def tearDown(self):
        print("Test Ended")
|
994,789 | 4906486bf5d1acbcdb58b661c2451f41f2f7e8d4 | name = ""
while name != "Michael":
name = input("What is your name?")
|
994,790 | 5e4785cddbf5b4fd8670bee90c1e0894428fcf74 | '''
@Author: dong.zhili
@Date: 1970-01-01 08:00:00
@LastEditors : dong.zhili
@LastEditTime : 2020-01-17 18:48:52
@Description:
'''
import time
import requests
from dateutil.relativedelta import relativedelta
import datetime
import json
import pandas as pd
import requests
def get_hist(code, start, end, timeout=10,
             retry_count=3, pause=0.001):
    '''
    Fetch daily history for a stock from the ifeng "akdaily" API.

    code: stock code (6-digit codes get an 'sh'/'sz' exchange prefix)
    start/end: 'YYYY-MM-DD' bounds; either may be None for no bound.
    Retries up to retry_count times, pausing `pause` seconds each try;
    returns a DataFrame, or None when the API reports no data.
    '''
    def _code_to_symbol(code):
        '''
        Build the exchange-prefixed symbol from a bare 6-digit code.
        '''
        if len(code) != 6:
            return code
        else:
            return 'sh%s' % code if code[:1] in ['5', '6', '9'] or code[:2] in ['11', '13'] else 'sz%s' % code
    code = _code_to_symbol(code)
    url = "http://api.finance.ifeng.com/akdaily/?code=%s&type=last" % code
    for _ in range(retry_count):
        time.sleep(pause)
        try:
            text = requests.get(url, timeout=timeout).text
            if len(text) < 15:  # no data
                return None
        except Exception as e:
            # Network failure: print and retry.
            print(e)
        else:
            js = json.loads(text)
            cols = ['date', 'open', 'high', 'close', 'low', 'volume',
                    'price_change', 'p_change', 'ma5', 'ma10', 'ma20', 'v_ma5', 'v_ma10', 'v_ma20']
            df = pd.DataFrame(js['record'], columns=cols)
            # Strip thousands separators; blank cells become 0 before the
            # float conversion below.
            df = df.applymap(lambda x: x.replace(u',', u''))
            df[df == ''] = 0
            for col in cols[1:]:
                df[col] = df[col].astype(float)
            if start is not None:
                df = df[df.date >= start]
            if end is not None:
                df = df[df.date <= end]
            df = df.set_index('date')
            df = df.sort_index()
            df = df.reset_index(drop=False)
            return df
    raise IOError('获取失败 请检查网络')
def create_dataframe2(code):
    """Fetch ~2 months of daily bars plus today's live quote from Sina.

    Returns (stock_name, DataFrame) with Date/OHLC/Volume/'Adj Close'
    columns; today's live bar is appended only when the history does not
    already end on today's date.
    """
    today = datetime.datetime.now()  # .strftime("%Y%m%d")
    two_month_ago = today - relativedelta(months=2)
    df = get_hist(code, two_month_ago.strftime("%Y-%m-%d"), today.strftime("%Y-%m-%d"))
    df2 = pd.DataFrame({'Date': df['date'], 'Open': df['open'],
                        'High': df['high'], 'Low': df['low'],
                        'Close': df['close'], 'Volume': df['volume'],
                        'Adj Close': df['close']})
    # Live quote from the Sina HQ endpoint (gb2312-encoded, sz-prefixed).
    param = {"Referer": "https://finance.sina.com.cn"}
    resp = requests.get("https://hq.sinajs.cn/list=sz%s,s_sz%s" % (code, code), headers=param)
    data = resp.content.decode(encoding='gb2312')
    lines = data.splitlines()
    list1 = lines[0].split("\"")[1].split(',')
    list2 = lines[1].split("\"")[1].split(',')
    # print(list1)
    # print(list2)
    date = list1[30]
    open = list1[1]  # NOTE(review): shadows the builtin `open`
    high = list1[4]
    low = list1[5]
    close = list1[3]
    volume = list2[4]
    new = pd.DataFrame({'Date': date,
                        'Open': round(float(open), 2),
                        'High': round(float(high), 2),
                        'Low': round(float(low), 2),
                        'Close': round(float(close), 2),
                        'Volume': round(float(volume), 1),
                        'Adj Close': round(float(close), 2)}, index=[1])
    if new.iat[0, 0] != df2.iat[-1, 0]:
        df2 = pd.concat([df2, new], axis=0, ignore_index=True)
    return list1[0], df2
def strategy(code: str, name: str, df: pd.DataFrame, lastday=10):
    """Print SMA21/SMA23 buy/sell signals over the last `lastday` days.

    Buy: SMA23 rising and accelerating while close breaks the prior
    23-day high.  Sell: SMA21 falling and decelerating.  The most recent
    day's signal is prefixed with '-> '.  Output is print-only.
    """
    print(code, name)
    date_index = df['Date']
    sma_df = pd.DataFrame({'Date': df['Date'], 'sma21': df['Close'].rolling(21).mean(), 'sma23': df['Close'].rolling(23).mean(), 'close': df['Close'], 'max20': df['Close'].rolling(23).max()}).tail(lastday+2)
    sma_df['sma21diff'] = sma_df['sma21'].diff()
    sma_df['sma23diff'] = sma_df['sma23'].diff()
    # print(sma_df)
    buy_flag_count = 0
    sell_flag_count = 0
    cur_flag = ""
    for i in range(2, sma_df.shape[0]):
        # self.__sma23[-1] > self.__sma23[-2] > self.__sma23[-3] and self.__sma23[-1] - self.__sma23[-2] > self.__sma23[-2] - self.__sma23[-3] and bar.getClose() >= self.__maxN[-1]:
        date = sma_df.iloc[i, 0]
        cur_sma21 = sma_df.iloc[i, 1]
        pre_sma21 = sma_df.iloc[i-1, 1]
        pre_pre_sma21 = sma_df.iloc[i-2, 1]
        close = sma_df.iloc[i, 3]
        max20 = sma_df.iloc[i-1, 4]  # take the previous row's max20, i.e. the prior 23-day high
        cur_sma23 = sma_df.iloc[i, 2]
        pre_sma23 = sma_df.iloc[i-1, 2]
        pre_pre_sma23 = sma_df.iloc[i-2, 2]
        if i == sma_df.shape[0] - 1:
            cur_flag = "-> "
        if cur_sma23 > pre_sma23 > pre_pre_sma23 and cur_sma23 - pre_sma23 > pre_sma23 - pre_pre_sma23 and close > max20:
            buy_flag_count += 1
            print(cur_flag + "Buy at %s %s %d" % (date, close, buy_flag_count))
            continue
        if cur_sma21 < pre_sma21 < pre_pre_sma21 and pre_sma21 - cur_sma21 > pre_pre_sma21 - pre_sma21:
            sell_flag_count += 1
            print(cur_flag + "Sell at %s %s %d" % (date, close, sell_flag_count))
            continue
        # No signal today resets the consecutive-signal counters.
        buy_flag_count = 0
        sell_flag_count = 0
'''
def strategy2(code: str, name: str, df: pd.DataFrame, lastday=10):
print(code, name)
date_index = df['Date']
sma_df = pd.DataFrame({'Date':df['Date'], 'sma21':df['Close'].rolling(21).mean(), 'sma23':df['Close'].rolling(23).mean(), 'close': df['Close'], 'max20': df['Close'].rolling(23).max()}).tail(lastday+2)
sma_df['sma21diff'] = sma_df['sma21'].diff()
sma_df['sma23diff'] = sma_df['sma23'].diff()
# print(sma_df)
buy_flag_count = 0
sell_flag_count = 0
cur_flag = ""
for i in range(2, sma_df.shape[0]):
# self.__sma23[-1] > self.__sma23[-2] > self.__sma23[-3] and self.__sma23[-1] - self.__sma23[-2] > self.__sma23[-2] - self.__sma23[-3] and bar.getClose() >= self.__maxN[-1]:
date_list = sma_df['Date']
date = date_list[i-1]
close_list = sma_df['close']
close = close_list[i-1]
max20_list = sma_df['max20']
max20 = max20_list[i-1]
sma21_list = sma_df['sma21']
sma23_list = sma_df['sma23']
sma21diff_list = sma_df['sma21diff']
sma23diff_list = sma_df['sma23diff']
if i == sma_df.shape[0] - 1:
cur_flag = "-> "
if sma23_list[i] > pre_sma23 > pre_pre_sma23 and cur_sma23 - pre_sma23 > pre_sma23 - pre_pre_sma23 and close > max20:
buy_flag_count += 1
print(cur_flag + "Buy at %s %s %d" % (date, close, buy_flag_count))
continue
if cur_sma21 < pre_sma21 < pre_pre_sma21 and pre_sma21 - cur_sma21 > pre_pre_sma21 - pre_sma21:
sell_flag_count += 1
print(cur_flag + "Sell at %s %s %d" % (date, close, sell_flag_count))
continue
buy_flag_count = 0
sell_flag_count = 0
'''
if __name__ == "__main__":
    # Index/ETF codes to scan.
    arr_code = ["399006", "399300", "399363", "399997", "399933", "399417", "399935", "399989"]
    for code in arr_code:
        try:
            name, df = create_dataframe2(code)
            if name == None:
                continue
            strategy(code, name, df, 10)
        except Exception as e:
            # Best-effort: a failed fetch for one code shouldn't stop the scan.
            print("error happend", e)
|
994,791 | 6640588dbc59dfd23bc1346c0367b87fcc376df4 | # coding=utf-8
from __future__ import absolute_import, unicode_literals, division
from fcc.ast.expressions import (Expression, IntExpression, CharExpression,
FloatExpression)
from fcc.ast.variables import (VariableDefinition, IntVariableDefinition,
CharVariableDefinition, FloatVariableDefinition)
from fcc.ast.base import Block, GlobalBlock, Statement
# ---------
# functions
# ---------
class FunctionDefinition(Block):
    """A function definition is a block that stores the function's name, type
    and argument list.

    Registers itself in the enclosing global block's symbol table and
    allocates implicit locals (return address, and — in typed subclasses —
    a result slot) at negative stack offsets during validate().
    """

    # Return expression type; `Expression` itself means "void".
    return_type = Expression

    def __init__(self, name, parent):
        assert isinstance(parent, GlobalBlock), \
            "Functions must be defined globally"
        super(FunctionDefinition, self).__init__(parent)
        self.name = name
        self.arguments = []
        # Implicit local holding the caller's return address.
        self.return_address = IntVariableDefinition("__return_address__", self)
        # The variable definition registered itself as a child statement;
        # it is bookkeeping only, so remove it from the generated children.
        self.children.pop()
        self.parent.add_symbol(name, self)

    def validate(self):
        """Validate the body, then assign stack offsets to implicit locals
        and arguments (laid out below the frame, hence negative)."""
        super(FunctionDefinition, self).validate()
        # compute addresses
        sp = -self.return_address.expression_type.size
        self.return_address.sp = sp
        for arg in reversed(self.arguments):
            sp -= arg.expression_type.size
            arg.sp = sp
        if hasattr(self, "result"):
            sp -= self.return_type.size
            self.result.sp = sp

    def add_argument(self, name, expression_type):
        """Declare a formal parameter of the given VariableDefinition type."""
        assert issubclass(expression_type, VariableDefinition)
        self.arguments.append(expression_type(name, self))
        # Again drop the bookkeeping child added by the definition.
        self.children.pop()

    def assert_argument_list(self, arguments):
        """Assert that *arguments* matches the declared parameter types."""
        for index, argument in enumerate(self.arguments):
            type_ = argument.expression_type
            # BUG FIX: `+ index` concatenated str with int and raised
            # TypeError instead of producing the intended assert message.
            assert isinstance(arguments[index], type_), \
                "Expected " + type_.name() + " for argument " + str(index)

    @classmethod
    def name(cls):
        """Human-readable type name used in diagnostics."""
        return "void function"
class IntFunctionDefinition(FunctionDefinition):
    """Function definition returning an int; allocates an implicit
    ``__result__`` variable to receive the return value."""

    return_type = IntExpression

    def __init__(self, name, parent):
        super(IntFunctionDefinition, self).__init__(name, parent)
        self.result = IntVariableDefinition("__result__", self)
        # Drop the bookkeeping child the variable definition added.
        self.children.pop()

    @classmethod
    def name(self):
        # NOTE(review): the first parameter is conventionally `cls` here.
        return "int function"
class CharFunctionDefinition(FunctionDefinition):
    """Function definition returning a char; allocates an implicit
    ``__result__`` variable to receive the return value."""

    return_type = CharExpression

    def __init__(self, name, parent):
        super(CharFunctionDefinition, self).__init__(name, parent)
        self.result = CharVariableDefinition("__result__", self)
        # Drop the bookkeeping child the variable definition added.
        self.children.pop()

    @classmethod
    def name(self):
        # NOTE(review): the first parameter is conventionally `cls` here.
        return "char function"
class FloatFunctionDefinition(FunctionDefinition):
    """Function definition returning a float; allocates an implicit
    ``__result__`` variable to receive the return value."""

    return_type = FloatExpression

    def __init__(self, name, parent):
        super(FloatFunctionDefinition, self).__init__(name, parent)
        self.result = FloatVariableDefinition("__result__", self)
        # Drop the bookkeeping child the variable definition added.
        self.children.pop()

    @classmethod
    def name(self):
        # NOTE(review): the first parameter is conventionally `cls` here.
        return "float function"
class FunctionCall(Statement):
    """A call statement: allocates the result slot, pushes the arguments
    and the return address, then jumps to the callee and releases the
    argument space afterwards."""

    # Expected definition class; narrowed by the typed subclasses below.
    definition_type = FunctionDefinition

    def __init__(self, definition, parent):
        super(FunctionCall, self).__init__(parent)
        self.definition = definition

    def validate(self):
        super(FunctionCall, self).validate()
        # Children are the argument expressions; check them against the
        # callee's declared parameter types.
        self.definition.assert_argument_list(self.children)

    def generate(self, sp):
        """Emit call code; returns (instructions, new stack pointer).

        The stack grows by exactly the return type's size net of the call.
        """
        osp = sp
        # allocate space for result if any
        result, sp = self.definition_type.return_type.alloc(sp)
        # push arguments
        for child in self.children:
            code, sp = child.generate(sp)
            result.extend(code)
        # determine total stack offset that needs to be cleaned up
        size = sp - osp - self.definition_type.return_type.size
        # use the procedure's definition as address placeholder before linking
        temp_addr = self.definition.name
        result.extend([
            ("loadi", 2),        # push 2
            ("puship", ),        # push ip
            ("addi", ),          # store ip + 2 as return address
            ("jmp", temp_addr),  # unconditional jump to function address
            ("release", size)    # rewind to initial stack size
        ])
        return result, osp + self.definition_type.return_type.size
# Typed call node: a call statement that is also an int-valued expression.
class IntFunctionCall(FunctionCall, IntExpression):
    definition_type = IntFunctionDefinition
# Typed call node: a call statement that is also a char-valued expression.
class CharFunctionCall(FunctionCall, CharExpression):
    definition_type = CharFunctionDefinition
# Typed call node: a call statement that is also a float-valued expression.
class FloatFunctionCall(FunctionCall, FloatExpression):
    definition_type = FloatFunctionDefinition
class FunctionReturn(Statement):
    """A ``return`` statement.

    Finds its enclosing FunctionDefinition at construction time and, in
    generate(), stores the result (if any), unwinds the frame and jumps
    back to the caller's saved return address.
    """

    def __init__(self, parent):
        super(FunctionReturn, self).__init__(parent)
        # Walk up the block tree until the enclosing function is found.
        parent = self.parent
        while parent is not None:
            if isinstance(parent, FunctionDefinition):
                self.definition = parent
                return
            parent = parent.parent
        assert False, "Return statements must be inside a function."

    def validate(self):
        super(FunctionReturn, self).validate()
        if self.definition.return_type is Expression:
            # Void function: only a bare `return` is allowed.
            assert len(self.children) == 0, \
                "This function does not have a return value"
        else:
            # Exactly one expression of the declared return type.
            assert len(self.children) == 1, "Expression expected"
            assert isinstance(self.children[0], self.definition.return_type), \
                self.definition.return_type.name() + " expected"

    def generate(self, sp):
        """Emit return code; returns (instructions, unchanged stack pointer)."""
        # determine result
        osp = sp
        result = []
        # pop result to its address if not void
        if hasattr(self.definition, "result"):
            code, sp = self.children[0].generate(sp)
            assert sp - osp == self.definition.return_type.size, \
                "Code generator error"
            result.extend(code)
            code, sp = self.definition.return_type.pop(
                self.definition.result.addr(sp), sp)
            assert sp == osp, "Code generation error"
            result.extend(code)
        # jump back to caller after clearing the stack
        code, sp = self.definition.finalize(osp)
        result.extend(code)
        result.append(("popip", ))
        return result, osp
|
994,792 | 49903764b06c2eba8aed47afe17977bca2dc18cf | """Constants for the Wallbox integration."""
# Integration domain and config keys.
DOMAIN = "wallbox"
CONF_STATION = "station"
# Keys of the values exposed by the Wallbox charger data payload.
CONF_ADDED_ENERGY_KEY = "added_energy"
CONF_ADDED_RANGE_KEY = "added_range"
CONF_CHARGING_POWER_KEY = "charging_power"
CONF_CHARGING_SPEED_KEY = "charging_speed"
CONF_CHARGING_TIME_KEY = "charging_time"
CONF_COST_KEY = "cost"
CONF_CURRENT_MODE_KEY = "current_mode"
CONF_DATA_KEY = "config_data"
CONF_DEPOT_PRICE_KEY = "depot_price"
CONF_MAX_AVAILABLE_POWER_KEY = "max_available_power"
CONF_MAX_CHARGING_CURRENT_KEY = "max_charging_current"
CONF_STATE_OF_CHARGE_KEY = "state_of_charge"
CONF_STATUS_DESCRIPTION_KEY = "status_description"
# NOTE(review): presumably the key for per-entry connection objects — confirm.
CONF_CONNECTIONS = "connections"
|
994,793 | afe2cec0a22b6c586b89042f788d0ee877ab4ddd | import math
from keyloop.api.v1.exceptions import PermissionAlreadyExists, PermissionNotFound
from keyloop.utils import generate_uuid
class Page(object):
    """Lightweight pagination metadata holder.

    Given one page's worth of items plus the 1-based page number, the page
    size and the overall item count, derives previous/next page numbers and
    the total number of pages.
    """

    def __init__(self, items, page, page_size, total):
        self.items = items
        self.total = total
        # Pages are 1-based: anything past page 1 has a predecessor.
        self.has_previous = page > 1
        self.previous_page = page - 1 if self.has_previous else None
        # Items covered by all earlier pages plus this one; if that still
        # falls short of the total, another page must exist.
        already_listed = (page - 1) * page_size
        self.has_next = already_listed + len(items) < total
        self.next_page = page + 1 if self.has_next else None
        self.pages = int(math.ceil(total / float(page_size)))
class FakePermission:
    """In-memory stand-in for a permission model, used by tests.

    The class-level ``PERMISSIONS`` dict (uuid -> attribute dict) is shared
    by all instances; ``test_reset`` clears it between tests.
    """

    # uuid -> {'uuid': ..., 'name': ..., 'description': ..., 'document_meta': ...}
    PERMISSIONS = {}

    def __init__(self, name, description, uuid=None, document_meta=None):
        # A fresh uuid is generated when none is supplied.
        self.uuid = uuid if uuid else generate_uuid()
        self.name = name
        self.description = description
        self.document_meta = document_meta

    @classmethod
    def test_reset(cls):
        """Drop all stored permissions (test-isolation helper)."""
        cls.PERMISSIONS = {}

    @classmethod
    def _get_by_name(cls, name):
        """Return the permission named *name* or raise PermissionNotFound."""
        for perm in cls.PERMISSIONS.values():
            if perm['name'] == name:
                return cls(**perm)
        raise PermissionNotFound()

    @classmethod
    def get(cls, uuid=None, name=None):
        """Look a permission up by name (takes precedence) or by uuid.

        Raises PermissionNotFound for an unknown name/uuid; returns None
        when neither argument is given.
        """
        if name:
            return cls._get_by_name(name)
        elif uuid:
            params = cls.PERMISSIONS.get(uuid)
            if not params:
                raise PermissionNotFound()
            return cls(**params)

    @classmethod
    def create(cls, name, description):
        """Create and store a new permission; raise if the name exists."""
        try:
            cls.get(name=name)
        except PermissionNotFound:
            permission = cls(name, description)
            cls.PERMISSIONS.update({permission.uuid: permission.__dict__})
            return permission
        else:
            raise PermissionAlreadyExists

    @classmethod
    def list(cls, page, limit):
        """Return a Page of stored permissions.

        NOTE(review): only the FIRST stored permission is ever appended to
        ``params``, so any page beyond that single element comes back
        empty — this looks unintended; confirm against the real backend.
        (Also shadows the ``list`` builtin within this class body.)
        """
        params = []
        total = len(cls.PERMISSIONS) if limit > len(cls.PERMISSIONS) else limit
        current_page = 0 if page in (0, 1) else page
        if not cls.PERMISSIONS:
            return Page([], current_page, limit, total)
        params.append(list(cls.PERMISSIONS.items())[0][1])
        return Page(params[current_page:total], current_page, limit, total)
|
994,794 | 2347be4a07d9895d50b82213202af743b7c23eda | n, k = map(int, input().split()) # n : [1, 1000]
# Read n whitespace-separated integer rows from stdin, order them by
# columns 1..3 in descending priority, and print the resulting list.
infoList = [list(map(int, input().split())) for _ in range(n)]
infoList = sorted(infoList, key=lambda row: (-row[1], -row[2], -row[3]))
print(infoList)
|
994,795 | 03783a571cf917b55d4083b3ce71e757f11ea25b | # -*- coding: utf-8 -*-
__author__ = 'hyd'
from abc import ABCMeta,abstractmethod
from lib.exceptions import ArgumentTypeException
from uuid import uuid4
'''
schemas :
container : 标识一个容器
portId : 对应的交换机端口号
mac : container的mac地址
hostId : 容器对应的主机ID
dpId : 容器所连接到的ovs的dpid
netnsId : 容器所属的网络命名空间ID
id : 容器引擎赋予容器的ID
_id : persistent赋予容器的ID
servicePort : ''
create_time : 创建时间
private_ip : 每个容器所专有的私有IP,用于IP包的转发
netns :
ip : Namespace对应的IP地址
cidrMask : IP地址对应的CIDR掩码
containerPortMapping : 每个netns中开放的端口和服务所在容器的对应关系
flag : 唯一性标识,用来租户隔离,暂时使用vlanId
hosts : 存在该netns所属容器的主机
containers : 所属容器,存容器的_id字段
initHostId : 初始主机ID
_id :
Host :
_id : 容器ID
containers : 所包含的容器,记录_id
mac : 主机的mac地址
transIp : 发送请求的IP地址
switchIp : 交换机所属隧道对应的本地IP
dpid : 主机对应的datapath id
bridge : 主机对应网桥
targetPorts : 面向其他主机的隧道端口
{
hostId : port_to_the_host,
...
}
portNameList : 端口名列表
flag : 标识host的数字,用于创建gateway到主机的隧道时使用
toGatePort : 链接gate的端口
'''
class DataPersistent(object) :
    """Abstract interface for persistence backends: CRUD-style operations
    over named schemas (collections of record dicts)."""

    # NOTE(review): `__meta__` is not a special attribute; the author
    # presumably meant `__metaclass__` (Python 2) or `metaclass=ABCMeta`
    # (Python 3), so the @abstractmethod decorators below are NOT actually
    # enforced at instantiation time — confirm before relying on that.
    __meta__ = ABCMeta

    @abstractmethod
    def save(self,schema,data):
        # Persist `data` under `schema`; return the stored record.
        pass

    @abstractmethod
    def remove(self,schema,id):
        # Delete record(s) with the given id(s); return the removed count.
        pass

    @abstractmethod
    def update(self,schema,old,current):
        # Merge `current` into records matching `old`; return match count.
        pass

    @abstractmethod
    def query(self,schema,conditions):
        # Return a list of records matching `conditions`.
        pass

    @abstractmethod
    def findOne(self,schema,conditions):
        # Return one matching record, or None.
        pass
class TestPersistent(DataPersistent) :
    """In-memory DataPersistent implementation used for tests.

    Storage layout: ``{schema_name: {_id: record_dict}}``.  Records are
    plain dicts; a ``_id`` key (uuid4 string) is assigned on save when
    missing.
    """

    def __init__(self):
        # schema name -> {_id: record}
        self.persistent = {}

    def save(self, schema, data):
        """Insert *data* into *schema* and return it (with ``_id`` set).

        Raises ArgumentTypeException when *data* is not a dict.  A record
        whose ``_id`` already exists is left untouched.
        """
        self.persistent.setdefault(schema, {})
        if not isinstance(data, dict):
            raise ArgumentTypeException(data)
        if '_id' not in data:
            data['_id'] = str(uuid4())
        if data['_id'] not in self.persistent[schema]:
            self.persistent[schema][data['_id']] = data
        return data

    def remove(self, schema, id):
        """Delete the record(s) with the given id(s); return the count.

        *id* may be a single id or a list.  An unknown id raises KeyError
        (unchanged from the original behavior).
        """
        if schema not in self.persistent:
            return 0
        if not isinstance(id, list):
            id = [id]
        for item in id:
            del self.persistent[schema][item]
        return len(id)

    def update(self, schema, old, current):
        """Merge *current* into records matching *old*; return match count."""
        self.persistent.setdefault(schema, {})
        if '_id' in old:
            t = self.persistent[schema].get(old['_id'])
            if t is None:  # fixed: identity comparison with None, not ==
                return 0
            t.update(current)
            return 1
        c = 0
        for item in self.persistent[schema].values():
            if self._dict_partial_equals(old, item):
                item.update(current)
                c += 1
        return c

    def query(self, schema, conditions):
        """Return a list of records partially matching *conditions*.

        A ``_id`` condition short-circuits to a direct lookup; note this
        returns ``[None]`` for an unknown id (unchanged original behavior).
        """
        self.persistent.setdefault(schema, {})
        if '_id' in conditions:
            return [self.persistent[schema].get(conditions['_id'])]
        return [item for item in self.persistent[schema].values()
                if self._dict_partial_equals(conditions, item)]

    def findOne(self, schema, conditions):
        """Return the first record matching *conditions*, or None."""
        results = self.query(schema, conditions)
        if results:
            return results[0]
        return None

    def findAll(self, schema):
        """Return all records of *schema*, or None when it is unknown/empty."""
        sch = self.persistent.get(schema)
        if sch and isinstance(sch, dict):
            return sch.values()
        return None

    def _dict_partial_equals(self, dict1, dict2):
        # Compare on the smaller dict's keys: each key it holds must map to
        # an equal value in the larger dict.
        first, last = (dict2, dict1) if len(dict1.keys()) > len(dict2.keys()) else (dict1, dict2)
        for key in first:
            if first[key] != last.get(key):
                return False
        return True
994,796 | 3f7e698bfabcc1f5bf7f445d7c905f7908258f69 | import SCons.Action
# SConscript for the zceq solver: always builds the benchmark binary; when
# profile-guided optimization is active (PROFILE_RAW_FILE set) it drives the
# PGO data collection, otherwise it builds the shared solver library for the
# Python package.
import subprocess
Import('env')
Import('lib_env')
# Pre-assembled AVX BLAKE2b object files shipped with the sources.
asm_objs = env.Glob('blake2b-asm/zcblake2_avx*.o')
blake2_src = Glob('blake2/*.c')
benchmark_src = ['benchmark.cpp']
benchmark_objs = env.Object(benchmark_src + blake2_src + env['COMMON_SRC'])
benchmark = env.Program('zceq_benchmark', benchmark_objs + asm_objs)
if 'PROFILE_RAW_FILE' in env:
    # PGO phase 1: run the instrumented benchmark to produce raw profile data.
    profile_raw_file = env.Command('${PROFILE_RAW_FILE}', benchmark,
                                   action=SCons.Action.Action(
                                       'LD_LIBRARY_PATH=$VARIANT_DIR ' \
                                       './$SOURCE --profiling -i3 --no-warmup',
                                       """
**********************************************************
PROFILING run - please, do NOT stop the process.
This is SLOWER then normal run.
Alternatively, run the build as "scons --no-profiling"
**********************************************************"""))
    # Unfortunately, on some distributions, llvm-profdata alias
    # doesn't exist, there is only binary with a version hardcoded
    version_str = subprocess.check_output(['llvm-config', '--version'])
    maj_min_version = '.'.join(version_str.split('.')[:2])
    # PGO phase 2: merge the raw data into the format the compiler consumes.
    profile_data_file = env.Command('${PROFILE_DATA_FILE}', profile_raw_file[0],
                                    action=SCons.Action.Action(
                                        'llvm-profdata-{} merge --output=$TARGET $SOURCE'.format(maj_min_version),
                                        'Processing profiler raw data: $SOURCE into $TARGET'
                                    ))
    env['PROFILE_DATA_FILE'] = profile_data_file[0]
else:
    lib_src = ['lib_main.cpp']
    lib_objs = lib_env.SharedObject(lib_env['COMMON_SRC'] +
                                    lib_src)
    blake2_lib_objs = lib_env.SharedObject(blake2_src)
    shared_lib = lib_env.SharedLibrary('zceqsolver', lib_objs + blake2_lib_objs + asm_objs)
    # Extend dependencies if profiler data is needed
    if 'ADD_PROFILE_DATA_DEPS' in env:
        lib_env.Depends(benchmark_objs + lib_objs, env['PROFILE_DATA_FILE'])
    # install the shared library into the python package
    lib_env.Install('#pyzceqsolver', shared_lib)
    lib_env.Alias('pyinstall', '#pyzceqsolver')
|
994,797 | 0b7cb1addd04b8c0518d1cec1acdaea86ef96fed | from django.contrib import admin
from .models import Question, Source, Recommendation, Choice
# Register every model of this app with the default admin site.
for registered_model in (Question, Source, Recommendation, Choice):
    admin.site.register(registered_model)
994,798 | 0613de32318aeddb9cfcb810fe548c699bb508cb | # coding: utf8
# Single-source package version string; bump here on release.
__version__ = "0.4"
|
994,799 | a98d028306b87ae4f1fc429e548ee3a10743fa9e | import scrapy
from locations.items import Feature
from locations.spiders.mcdonalds import McDonaldsSpider
class McDonaldsZASpider(scrapy.Spider):
    """Spider for McDonald's South Africa restaurant listings."""

    name = "mcdonalds_za"
    item_attributes = McDonaldsSpider.item_attributes
    allowed_domains = ["www.mcdonalds.co", "www.mcdonalds.co.za"]
    start_urls = ("https://www.mcdonalds.co.za/restaurants",)

    def parse(self, response):
        """Yield one Feature per restaurant row on the listing page.

        ``ref`` is a running counter over the stores actually yielded;
        rows without a name (layout rows) are skipped and do not consume
        a ref value.
        """
        store_ref = 1
        for row in response.css(".row"):
            store_name = row.xpath('.//div[@class="a"]/p/strong/text()').extract_first()
            if not store_name:
                continue
            yield Feature(
                ref=store_ref,
                addr_full=row.xpath('.//div[@class="b"]/p[2]/text()').extract_first().strip(),
                phone=row.xpath('.//div[@class="c"]/p[2]/text()').extract_first().strip(),
                name=store_name,
            )
            store_ref = store_ref + 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.