import coverage
import simplest
import os
def run_tests():
simplest.foo(1, 2)
simplest.foo(1, 1)
simplest.foo(2, 1)
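# A hedged sketch (added; not part of the original script): the `coverage` import
# above is otherwise unused, so measurement could also be driven through the
# coverage API instead of shelling out to the CLI.
def run_tests_with_coverage(html_dir="/var/www/html"):
    cov = coverage.Coverage()
    cov.start()
    run_tests()
    cov.stop()
    cov.save()
    cov.html_report(directory=html_dir)  # equivalent of `coverage html -d ...`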
command = "sudo coverage html -d /var/www/html"
print "Executing {}".format(command)
os.system(command) |
from django.conf.urls import url
from views import *
urlpatterns = [
url(r'^home/$', home, name='home'),
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import traceback
import discord
from discord.ext import commands
from MultilineBot import MultilineBot
from LakshmiHelpCommand import LakshmiHelpCommand
import LakshmiErrors
from contents.LakshmiBrainStorage import LakshmiBrainStorage
bot = MultilineBot(command_prefix=';', help_command=LakshmiHelpCommand())
bot.storage = LakshmiBrainStorage(bot)
extensions = [
'cogs.DiceBotCog',
'cogs.GamesCog',
'cogs.GreetingCog',
'cogs.CallOfCthulhuCog',
'cogs.ShortcutCallOfCthulhuCog',
'cogs.DebugMenuCog',
]
for extension in extensions:
bot.load_extension(extension)
@bot.event
async def on_ready():
print("Lakshmi.on_ready()")
print(discord.__version__)
@bot.event
async def on_command_error(context: commands.Context, error):
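    # Map each known error type to a themed message from the bot's lexicon;
    # anything unrecognized falls through to the traceback report at the end.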
if isinstance(error, LakshmiErrors.ArgumentOutOfRangeException):
character_message = bot.storage.lexicon.get_character_message_for_argument_out_of_range_exception()
await context.send(f'{context.author.mention} {character_message}')
elif isinstance(error, LakshmiErrors.PermissionNotFoundException):
character_message = bot.storage.lexicon.get_character_message_for_permission_not_found_exception()
await context.send(f'{context.author.mention} {character_message}')
elif isinstance(error, commands.MissingRequiredArgument):
character_message = bot.storage.lexicon.get_character_message_for_missing_required_argument()
await context.send(f'{context.author.mention} {character_message}')
elif isinstance(error, LakshmiErrors.SubcommandNotFoundException):
character_message = bot.storage.lexicon.get_character_message_for_command_not_found()
await context.send(f'{context.author.mention} {character_message}')
elif isinstance(error, LakshmiErrors.UnsupportedSitesException):
character_message = bot.storage.lexicon.get_character_message_for_unsupported_sites()
await context.send(f'{context.author.mention} {character_message}')
elif isinstance(error, LakshmiErrors.NotCallOfCthulhuInvestigatorException):
character_message = bot.storage.lexicon.get_character_message_for_not_callofcthulhu_investigator()
await context.send(f'{context.author.mention} {character_message}')
elif isinstance(error, LakshmiErrors.CharacterNotFoundException):
character_message = bot.storage.lexicon.get_character_message_for_character_not_found()
await context.send(f'{context.author.mention} {character_message}')
elif isinstance(error, LakshmiErrors.ImageNotFoundException):
character_message = bot.storage.lexicon.get_character_message_for_image_not_found()
await context.send(f'{context.author.mention} {character_message}')
elif isinstance(error, commands.CommandNotFound):
character_message = bot.storage.lexicon.get_character_message_for_command_not_found()
await context.send(f'{context.author.mention} {character_message}')
else:
original_error = getattr(error, "original", error)
error_message = ''.join(traceback.TracebackException.from_exception(original_error).format())
error_message = "```py\n" + error_message + "\n```"
character_message = bot.storage.lexicon.get_character_message_to_ask_the_developer_for_help()
message = f'{context.author.mention}\n{character_message}\n{error_message}'
await context.send(message)
token = bot.storage.environment.get_discord_token()
bot.run(token)
|
rule call_variants:
input:
bam=get_sample_bams,
ref=config["ref"]["genome"],
int="targets_GATK.list"
output:
gvcf=protected("called/{sample}.g.vcf.gz")
log:
"logs/gatk/haplotypecaller/{sample}.log"
params:
gatk=config["modules"]["gatk"],
files=lambda wildcards, input: " -I ".join([s for s in input.bam if wildcards.sample in s]),
java_opts="-Xmx12G"
shell:
"""
{params.gatk} --java-options '{params.java_opts}' HaplotypeCaller -R {input.ref} -I {params.files} \
-O {output.gvcf} -ERC GVCF --heterozygosity 0.05 -L {input.int} > {log} 2>&1
"""
rule DBImport:
input:
gvcf="/home/bo4spe/Littorina_saxatilis/short_genetic_variants/sample_map.tsv"
# gvcf=expand("called/{sample}.g.vcf.gz", sample=samples.index)
# reg="targets_GATK.list"
output:
# directory("gatkDBI")
directory("gatkDBI/gatkDBI_{reg}")
params:
gatk=config["modules"]["gatk"]
# files=lambda wildcards, input: " -V ".join(input.gvcf)
shell:
"""
{params.gatk} --java-options '-Xmx28g -Xms28g' GenomicsDBImport --sample-name-map {input.gvcf} --genomicsdb-workspace-path {output} \
--intervals {wildcards.reg} --batch-size 60 --reader-threads 4
"""
rule genotype_variants:
input:
ref=config["ref"]["genome"],
dbi="gatkDBI/gatkDBI_{reg}"
# dbi=directory("gatkDBI")
output:
vcf="genotyped/{reg}_GATK.vcf.gz"
params:
gatk=config["modules"]["gatk"]
# dbis=lambda wildcards, input: " -V ".join(input.dbi)
shell:
"""
{params.gatk} --java-options '-Xmx28g -Xms28g' GenotypeGVCFs -R {input.ref} -V gendb://{input.dbi} -G StandardAnnotation \
-O {output.vcf}
"""
rule merge_variants:
input:
vcf=expand("genotyped/{reg}_GATK.vcf.gz", reg=ref_int)
output:
"all_GATK.vcf.gz"
log:
"logs/picard/merge-GATKgenotyped.log"
wrapper:
"0.36.0/bio/picard/mergevcfs"
|
from utils import convert_to_pdf, save_document
import io
import asyncio
from aiogram import Bot, Dispatcher, executor, types, filters
from aiogram.dispatcher import FSMContext
from aiogram.types import InlineKeyboardMarkup, InlineKeyboardButton
from aiogram.dispatcher.filters.state import State, StatesGroup
import logging
from aiogram.contrib.fsm_storage.memory import MemoryStorage
from aiogram.types import InputFile
import aiogram
from operator import itemgetter
from aiogram.contrib.middlewares.logging import LoggingMiddleware
import os
TOKEN = os.getenv("PDFING_TOKEN")
if TOKEN is None:
raise Exception("No bot token is provided!")
logging.basicConfig(level=logging.INFO)
bot = Bot(token=TOKEN)
dp = Dispatcher(bot, storage=MemoryStorage())
dp.middleware.setup(LoggingMiddleware())
class CreatePDF(StatesGroup):
getting_pictures = State()
getting_name = State()
@dp.message_handler(commands=['start', 'help'], state='*')
async def start(message: types.Message, state: FSMContext):
await state.finish()
keyboard = InlineKeyboardMarkup(row_width=2)
keyboard.add(InlineKeyboardButton("Создать", callback_data="create"))
await message.answer("Я помогу тебе сконвертировать твои картинки в pdf-файл. Нажми на кнопку ниже или отправь /create, дальше разберемя", reply_markup=keyboard)
@dp.message_handler(commands=['create'], state='*')
async def create(message: types.Message, state: FSMContext):
await state.finish()
await CreatePDF.getting_pictures.set()
keyboard = InlineKeyboardMarkup(row_width=2)
keyboard.add(InlineKeyboardButton("Отмена", callback_data="cancel"))
await message.answer("Присылай картинки", reply_markup=keyboard)
@dp.callback_query_handler(lambda c: c.data and c.data == "create", state="*")
async def create_button(callback_query: types.CallbackQuery, state: FSMContext):
await state.finish()
await bot.answer_callback_query(callback_query.id)
await CreatePDF.getting_pictures.set()
keyboard = InlineKeyboardMarkup(row_width=2)
keyboard.add(InlineKeyboardButton("Отмена", callback_data="cancel"))
await callback_query.message.answer("Присылай картинки", reply_markup=keyboard)
@dp.message_handler(content_types=["photo"], state=CreatePDF.getting_pictures)
async def get_images(message: types.Message, state: FSMContext):
photo = await message.photo[-1].download(destination=io.BytesIO())
try:
photos = (await state.get_data())["photos"]
except KeyError:
await state.update_data(photos=[(message.message_id, photo)])
photos = (await state.get_data())["photos"]
else:
photos.append((message.message_id, photo,))
await state.update_data(photos=photos)
n_photos = len(photos)
keyboard = InlineKeyboardMarkup(row_width=2)
keyboard.add(InlineKeyboardButton("Отмена", callback_data="cancel"))
keyboard.add(InlineKeyboardButton("Конвертировать", callback_data="convert"))
new_message_to_delete = await message.answer(f"Получил! У меня есть {n_photos} фотографий.", reply_markup=keyboard)
try:
messages_to_delete = (await state.get_data())["mtd"]
except KeyError:
await state.update_data(mtd=[])
messages_to_delete = (await state.get_data())["mtd"]
else:
for m in messages_to_delete:
try:
await m.delete()
except aiogram.utils.exceptions.MessageToDeleteNotFound:
pass
messages_to_delete.append(new_message_to_delete)
await state.update_data(mtd=messages_to_delete)
@dp.callback_query_handler(lambda c: c.data and c.data == "convert", state=CreatePDF.getting_pictures)
async def get_name(callback_query: types.CallbackQuery, state: FSMContext):
await bot.answer_callback_query(callback_query.id)
await CreatePDF.getting_name.set()
await callback_query.message.answer("Как назвать файл?")
@dp.message_handler(state=CreatePDF.getting_name)
async def create_file(message: types.Message, state: FSMContext):
photos = (await state.get_data())["photos"]
sorted_photos = [photo[1] for photo in sorted(photos, key=itemgetter(0))]
pdf = await convert_to_pdf(sorted_photos)
filename = message.text if message.text.endswith(".pdf") else message.text+".pdf"
asyncio.create_task(save_document(pdf, filename))
document = InputFile(pdf, filename=filename)
keyboard = InlineKeyboardMarkup(row_width=2)
keyboard.add(InlineKeyboardButton("Создать еще", callback_data="create"))
await state.finish()
await message.answer_document(document=document, reply_markup=keyboard)
@dp.callback_query_handler(lambda c: c.data and c.data == "cancel", state="*")
async def cancel_converting(callback_query: types.CallbackQuery, state: FSMContext):
await bot.answer_callback_query(callback_query.id)
await state.finish()
keyboard = InlineKeyboardMarkup(row_width=2)
keyboard.add(InlineKeyboardButton("Начать", callback_data="create"))
await callback_query.message.answer("Сбросил состояние! Отправь /create или нажми на кнопку чтобы начать заново", reply_markup=keyboard)
@dp.message_handler(content_types=types.ContentTypes.ANY, state="*")
async def default(message: types.Message):
keyboard = InlineKeyboardMarkup(row_width=2)
keyboard.add(InlineKeyboardButton("Создать", callback_data="create"))
await message.answer("Я тебя не понял((( Возможно, ты хотел нажать /create или на кнопку и прислать картинки?", reply_markup=keyboard)
if __name__ == "__main__":
executor.start_polling(dp, skip_updates=True)
|
from keras.engine import Model
from keras.models import Sequential
from keras.layers import Flatten, Dense, Input, Activation, Dropout, Conv2D, MaxPooling2D, Lambda
from keras_vggface.vggface import VGGFace
from keras_vggface import utils
from keras.optimizers import Adadelta, rmsprop
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
from keras import backend as K
from keras.utils import to_categorical
import numpy as np
import sys
from easydict import EasyDict as edict
import cv2
import face_model
class SiameseNetwork:
def __init__(self, shape, modelName, learningRate=1.0):
self.learningRate = learningRate
self.modelName = modelName
self.shape = shape
left_input = Input(shape)
right_input = Input(shape)
# Define Siamese Network using shared weights
L1_layer = Lambda(lambda tensors:K.abs(tensors[0] - tensors[1]))
L1_distance = L1_layer([left_input, right_input])
hidden = Dense(512, activation='relu')(L1_distance)
hidden2 = Dense(64, activation='relu')(hidden)
prediction = Dense(2)(hidden2)
prediction = Activation('softmax')(prediction)
self.siamese_net = Model(inputs=[left_input, right_input], outputs=prediction)
# Compile and prepare network
self.siamese_net.compile(loss="binary_crossentropy", optimizer=Adadelta(self.learningRate), metrics=['accuracy'])
def getDenseBarebones(self):
layers = []
layers.append(Dense(512, activation='relu'))
layers.append(Dense(64, activation='relu'))
layers.append(Dense(2))
return layers
def trainModel(self, trainDatagen, valGen, epochs, batch_size, verbose=1):
early_stop = EarlyStopping(monitor='val_loss', min_delta=0.1, patience=5, verbose=verbose)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=5, min_lr=0.01, verbose=verbose)
        self.siamese_net.fit_generator(trainDatagen
            ,steps_per_epoch=320000 // batch_size, epochs=epochs
            #,validation_data=valGen, validation_steps = 80000 // batch_size
            ,callbacks=[early_stop, reduce_lr])
def finetune(self, X, Y, epochs, batch_size, verbose=1):
early_stop = EarlyStopping(monitor='val_loss', min_delta=0.1, patience=5, verbose=1)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=5, min_lr=0.01, verbose=verbose)
# One-hot encoding
Y_encoded = to_categorical(Y, num_classes=2)
self.siamese_net.fit(self.preprocess(X), Y_encoded, batch_size=batch_size, epochs=epochs,
validation_split=0.2, verbose=verbose, callbacks=[early_stop, reduce_lr])
def testAccuracy(self, X, Y, batch_size=512):
n_correct, total = 0, 0
X_left, X_right, Y_send = [], [], []
for i, x in enumerate(X):
for j, y in enumerate(X):
X_left.append(x)
X_right.append(y)
Y_send.append(1*(Y[i] == Y[j]))
if len(X_left) == batch_size:
Y_send = np.stack(Y_send)
predictions = np.argmax(self.predict([np.stack(X_left), np.stack(X_right)]), axis=1)
n_correct += np.sum(predictions == Y_send)
total += len(X_left)
X_left, X_right, Y_send = [], [], []
if len(X_left) > 0:
Y_send = np.stack(Y_send)
predictions = np.argmax(self.predict([np.stack(X_left), np.stack(X_right)]), axis=1)
n_correct += np.sum(predictions == Y_send)
total += len(X_left)
return n_correct / float(total)
def customTrainModel(self, dataGen, epochs, batch_size, valRatio=0.2, n_steps=320000, preprocess=False):
steps_per_epoch = int(n_steps / batch_size)
for eno in range(epochs):
train_loss, val_loss = 0, 0
train_acc, val_acc = 0, 0
for i in range(steps_per_epoch):
x, y = dataGen.next()
if preprocess:
x = self.preprocess(x)
# Split into train and val
indices = np.random.permutation(len(y))
splitPoint = int(len(y) * valRatio)
x_train, y_train = [ pp[indices[splitPoint:]] for pp in x], y[indices[splitPoint:]]
x_test, y_test = [ pp[indices[:splitPoint]] for pp in x], y[indices[:splitPoint]]
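                # Inverse-frequency class weights, rescaled to sum to one, to offset
                # the imbalance between matching and non-matching pairs in this batch.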
class_1_weight = len(y_train) / np.sum(y_train == 1)
class_0_weight = len(y_train) / np.sum(y_train == 0)
scaling_factor = float(class_1_weight + class_0_weight)
class_weight = {0: class_0_weight / scaling_factor, 1:class_1_weight / scaling_factor}
# One-hot encoding
y_train = to_categorical(y_train, num_classes=2)
y_test = to_categorical(y_test, num_classes=2)
# Train on batch
train_metrics = self.siamese_net.train_on_batch(x_train, y_train, class_weight=class_weight)
train_loss += train_metrics[0]
train_acc += train_metrics[1]
# Test on batch
val_metrics = self.siamese_net.test_on_batch(x_test, y_test)
val_loss += val_metrics[0]
val_acc += val_metrics[1]
sys.stdout.write("Epoch %d : %d / %d : Tr loss: %.4f, Tr acc: %.4f, Vl loss: %.4f, Vl acc: %.4f \r" % (eno+1, i+1, steps_per_epoch, train_loss/(i+1), train_acc/(i+1), val_loss/(i+1), val_acc/(i+1)))
sys.stdout.flush()
print("\n")
def maybeLoadFromMemory(self):
try:
self.siamese_net.load_weights(self.modelName + ".h5")
return True
        except (IOError, OSError):  # weights file missing or unreadable
            return False
def save(self, customName=None):
if not customName:
self.siamese_net.save_weights(self.modelName + ".h5")
else:
self.siamese_net.save_weights(customName + ".h5")
def preprocess(self, X):
return X
def predict(self, X):
return self.siamese_net.predict(self.preprocess(X), batch_size=1024)
class SmallRes(SiameseNetwork, object):
def __init__(self, imageShape, featureShape, name, learningRate):
self.learningRate = learningRate
self.shape = imageShape
self.modelName = name
convnet = Sequential()
convnet.add(Conv2D(32, (3, 3), padding='same', input_shape=self.shape))
convnet.add(Activation('relu'))
convnet.add(Conv2D(32, (3, 3)))
convnet.add(Activation('relu'))
convnet.add(MaxPooling2D(pool_size=(2, 2)))
convnet.add(Dropout(0.25))
convnet.add(Conv2D(64, (3, 3), padding='same'))
convnet.add(Activation('relu'))
convnet.add(Conv2D(64, (3, 3)))
convnet.add(Activation('relu'))
convnet.add(MaxPooling2D(pool_size=(2, 2)))
convnet.add(Dropout(0.25))
convnet.add(Flatten())
convnet.add(Dense(featureShape[0]))
convnet.add(Activation('relu'))
left_input = Input(self.shape)
right_input = Input(self.shape)
encoded_l = convnet(left_input)
encoded_r = convnet(right_input)
L1_layer = Lambda(lambda tensors:K.abs(tensors[0] - tensors[1]))
L1_distance = L1_layer([encoded_l, encoded_r])
hidden = Dense(128, activation='relu')(L1_distance)
hidden2 = Dense(32, activation='relu')(hidden)
prediction = Dense(2)(hidden2)
prediction = Activation('softmax')(prediction)
self.siamese_net = Model(inputs=[left_input, right_input], outputs=prediction)
# Compile and prepare network
self.siamese_net.compile(loss="binary_crossentropy", optimizer=Adadelta(self.learningRate), metrics=['accuracy'])
def getDenseBarebones(self):
layers = []
layers.append(Dense(128, activation='relu'))
layers.append(Dense(32, activation='relu'))
layers.append(Dense(2))
return layers
def preprocess(self, X):
X_temp = [ (x - 128.) / 128. for x in X]
return X_temp
def predict(self, X):
return self.siamese_net.predict(self.preprocess(X), batch_size=1024)
class FaceVGG16:
def __init__(self, shape):
self.shape = shape + (3,)
vgg_model = VGGFace(model='vgg16', include_top=False, input_shape=self.shape)
last_layer = vgg_model.get_layer('pool5').output
out = Flatten(name='flatten')(last_layer)
self.model = Model(vgg_model.input, out)
def preprocess(self, X):
X_temp = np.copy(X)
return utils.preprocess_input(X_temp, version=1)
def process(self, X):
return self.model.predict(self.preprocess(X), batch_size=128)
class RESNET50:
def __init__(self, shape):
self.shape = shape + (3,)
vgg_model = VGGFace(model='resnet50', include_top=False, input_shape=self.shape)
last_layer = vgg_model.get_layer('avg_pool').output
out = Flatten(name='flatten')(last_layer)
self.model = Model(vgg_model.input, out)
def preprocess(self, X):
X_temp = np.copy(X)
return utils.preprocess_input(X_temp, version=2)
def process(self, X):
return self.model.predict(self.preprocess(X), batch_size=128)
class ArcFace:
def __init__(self, shape, model_path):
args = edict({
"image_size": "%d,%d" % (shape[0], shape[1]),
"model": model_path + ",0",
"gpu": 0, #1,
"threshold": 1.24,
})
self.model = face_model.FaceModel(args)
def preprocess(self, X):
return X
    def process(self, X):
        return np.array([self.model.get_feature(self.model.get_input(x)) for x in self.preprocess(X)])
|
from sqlalchemy import create_engine, Column, ForeignKey, Integer, String, Boolean, BLOB
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship
engine = create_engine('sqlite:///game.db')
Base = declarative_base()
Session = sessionmaker(bind = engine)
# class Player(Base):
# __tablename__ = 'player'
# id = Column(Integer, primary_key = True)
class Monster(Base):
__tablename__ = 'monster'
id = Column(Integer, primary_key = True)
name = Column(String(32), nullable = False, unique = True)
attack = Column(Integer, nullable = False)
defense = Column(Integer, nullable = False)
regen = Column(Integer, nullable = False)
health = Column(Integer, nullable = False)
gold = Column(Integer, nullable = False)
room = Column(Integer, nullable = False)
description = Column(String(256), nullable = False)
def __init__(self, name, attack, defense, regen, health, gold, room, description):
self.name = name
self.attack = attack
self.defense = defense
self.regen = regen
self.health = health
# self.agro = agro
self.gold = gold
self.room = room
self.description = description
class Character(Base):
__tablename__ = 'character'
id = Column(Integer, primary_key = True)
alive = Column(Boolean, nullable = False)
join = Column(Boolean, nullable = False)
monster = Column(Boolean, nullable = False)
started = Column(Boolean, nullable = False)
ready = Column(Boolean, nullable = False)
name = Column(String(32), nullable = False, unique = True)
attack = Column(Integer, nullable = False)
defense = Column(Integer, nullable = False)
regen = Column(Integer, nullable = False)
health = Column(Integer, nullable = False)
gold = Column(Integer, nullable = False)
room = Column(Integer, nullable = False)
description = Column(String(256), nullable = False)
def __init__(self, character_dict = None, table_object = None, monster = False):
self.monster = monster
if monster:
self.join = True
self.started = True
self.ready = True
else:
self.started = False
self.ready = False
self.alive = True
self.health = 100
self.gold = 0
self.room = 0
if character_dict: self.add_from_dict(character_dict)
elif table_object: self.add_from_table(table_object)
def add_from_dict(self, character_dict):
self.name = character_dict['name']
flags = character_dict['flags']
flags = f'{flags:08b}'
self.join = bool(int(flags[1]))
self.attack = character_dict['attack']
self.defense = character_dict['defense']
self.regen = character_dict['regen']
self.description = character_dict['text']
def add_from_table(self, table_object):
self.name = table_object.name
self.attack = table_object.attack
self.defense = table_object.defense
self.regen = table_object.regen
self.gold = table_object.gold
self.description = table_object.description
if self.monster:
self.agro = 0 #table_object.agro
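    # Flag byte layout used below: bit 7 = alive, bit 6 = join, bit 5 = monster,
    # bit 4 = started, bit 3 = ready; flags[1] of the zero-padded binary string is "join".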
def get_flags(self):
flags = 128 if self.alive else 0
flags += 64 if self.join else 0
flags += 32 if self.monster else 0
flags += 16 if self.started else 0
flags += 8 if self.ready else 0
return flags
def set_flags(self, flags):
flags = f'{flags:08b}'
self.join = bool(int(flags[1]))
def get_dict(self): # Returns a character dictionary ready to pass into LURKprot.encode()
return {
'name': self.name,
'flags': self.get_flags(),
'attack': self.attack,
'defense': self.defense,
'regen': self.regen,
'health': self.health,
'gold': self.gold,
'room': self.room,
'text': self.description
}
def get_fight_stats(self):
if self.join:
return {
'name': self.name,
'attack': self.attack,
'defense': self.defense,
'regen': self.regen,
'health': self.health
}
else: return None
class Room(Base):
__tablename__ = 'room'
id = Column(Integer, primary_key = True)
name = Column(String(32), nullable = False)
description = Column(String(256), nullable = False)
def __init__(self, name, description):
self.name = name
self.description = description
class Connection(Base):
__tablename__ = 'connection'
id = Column(Integer, primary_key = True)
room1 = Column(Integer, nullable = False)
room2 = Column(Integer, nullable = False)
condition = Column(String(256)) # monster_name: dead, capacity: > 10, capacity == 1
def __init__(self, room1, room2):
self.room1 = room1
self.room2 = room2
self.condition = ''
def __repr__(self):
return f'{self.room1} -> {self.room2}'
class Loot(Base):
__tablename__ = 'loot'
id = Column(Integer, primary_key = True)
room = Column(Integer, nullable = False)
name = Column(String(32), nullable = False)
value = Column(Integer, nullable = False)
rewards = Column(String(256))
message = Column(String(256))
condition = Column(String(256)) # monster_name: dead, capacity: > 10, capacity == 1
    def __init__(self, name, value, rewards, message, condition, room = 0):
        self.room = room  # room is NOT NULL, so it needs a value before insert
        self.name = name
        self.value = value
        self.rewards = rewards
        self.message = message
        self.condition = condition
Base.metadata.create_all(bind = engine) |
import json
from resources.user.service import UserService
from services.service_web import WebService
from flask_restful import Resource
from flask import request
class UserController(Resource):
url = "/user"
# Create
# TODO: Only Admin role can reach this endpoint
def post(self):
# Services
user_service = UserService()
web_service = WebService()
        view = request.get_json()
        # TODO: create the user from the posted body; endpoint not implemented yet
        pass
class UserIDController(Resource):
url = "/user/<id>"
# Get by ID
def get(self, id):
# Services
user_service = UserService()
web_service = WebService()
body = user_service.get_by_id(id)
return web_service.response(200, body)
# Update by ID
def put(self, id):
# Services
user_service = UserService()
web_service = WebService()
body = user_service.get_by_id(id)
return web_service.response(200, body)
def delete(self, id):
# Services
user_service = UserService()
web_service = WebService()
body = user_service.get_by_id(id)
return web_service.response(200, body)
def add_user_resource_table(api):
api.add_resource(UserController, UserController.url)
api.add_resource(UserIDController, UserIDController.url)
|
# Evaluate a Reverse Polish Notation expression
class Solution:
def evalRPN(self, tokens: list) -> int:
if not tokens:
return 0
length = len(tokens)
num_stack = []
i = 0
while i < length:
if tokens[i] not in '+-*/':
num_stack.append(int(tokens[i]))
else:
num1 = num_stack.pop()
num2 = num_stack.pop()
if tokens[i] == '+':
num_stack.append(num1 + num2)
elif tokens[i] == '-':
num_stack.append(num2 - num1)
elif tokens[i] == '*':
num_stack.append(num1 * num2)
elif tokens[i] == '/':
num_stack.append(int(num2 / num1))
i += 1
return num_stack.pop()
if __name__ == '__main__':
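    # Hypothetical usage (added): ((2 + 1) * 3) in RPN evaluates to 9.
    print(Solution().evalRPN(["2", "1", "+", "3", "*"]))
    # int() truncates toward zero, unlike floor division: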
print('6 // -132:', int(6 / -132)) |
import overloading
from overloading import *
from test_overloading import *
@overload
def f(*args):
return 'default'
@overload
def f(foo):
return ('any')
@overload
def f(foo, bar:int):
return ('any', 'int')
@overload
def f(foo, bar, baz):
return ('any', 'any', 'any')
@overload
def f(foo, bar:int, baz):
return ('any', 'int', 'any')
@overload
def f(foo:str, bar:int, baz):
return ('str', 'int', 'any')
@overload
def f(foo:str, bar:int, baz: X):
return ('str', 'int', 'X')
assert f.__name__ == 'f'
assert f.__doc__ == 'f(...)\n\n'
for _ in range(rounds):
assert f() == 'default'
assert f(a) == ('any')
assert f(a, b) == 'default'
assert f(a, b, c, d) == 'default'
assert f(a, 2) == ('any', 'int')
assert f(a, b, c) == ('any', 'any', 'any')
assert f(1, 2, c) == ('any', 'int', 'any')
assert f(a, 2, c) == ('str', 'int', 'any')
assert f(a, 2, x) == ('str', 'int', 'X')
assert f(a, 2, y) == ('str', 'int', 'X')
@overload
def g(foo):
return ('any')
@overload
def g(*args):
return 'default'
@overload
def g(foo, bar:int):
return ('any', 'int')
assert g.__name__ == 'g'
for _ in range(rounds):
assert g() == 'default'
assert g(a) == ('any')
assert g(a, 2) == ('any', 'int')
assert len(f.__cache) == 10
assert len(g.__cache) == 3
assert len(overloading.__registry) == 2
|
# This imports the plugins that configure py.test for astropy tests.
# By importing them here in conftest.py they are discoverable by py.test
# no matter how it is invoked within the source tree.
from __future__ import print_function, absolute_import, division
import os
from distutils.version import LooseVersion
import pytest
import numpy as np
from astropy.io import fits
from astropy import wcs
from astropy import units
from astropy.version import version as astropy_version
from radio_beam import Beam
from spectral_cube import Projection
from .utils import (generate_testing_data,
                    generate_test_cube,
                    generate_test_fits,
                    singledish_observe_image,
                    interferometrically_observe_image)
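# Use the astropy-bundled pytest plugins on astropy < 3.0, and the standalone
# pytest-astropy-header package on newer versions.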
if LooseVersion(astropy_version) < LooseVersion('3.0'):
from astropy.tests.pytest_plugins import *
del pytest_report_header
else:
from pytest_astropy_header.display import PYTEST_HEADER_MODULES, TESTED_VERSIONS
def pytest_configure(config):
config.option.astropy_header = True
PYTEST_HEADER_MODULES['Astropy'] = 'astropy'
@pytest.fixture
def fake_overlap_samples():
    # pytest treats fixture parameters as other fixtures, so the sample size
    # lives in the body rather than in the signature.
    size = 1000
    np.random.seed(67848923)
    lowres_pts = np.random.lognormal(size=size)
    highres_pts = np.abs(lowres_pts + np.random.normal(scale=0.05, size=size))
    return lowres_pts, highres_pts
@pytest.fixture
def plaw_test_data():
out = generate_testing_data(return_images=True,
powerlawindex=1.5,
largest_scale=56. * units.arcsec,
smallest_scale=3. * units.arcsec,
lowresfwhm=25. * units.arcsec,
pixel_scale=3 * units.arcsec,
imsize=512)
# angscales, ratios, lowres_pts, highres_pts = out
orig_hdu, lowres_hdu, highres_hdu = out
return orig_hdu, lowres_hdu, highres_hdu
def prepare_cube_data():
out = generate_test_cube(return_hdu=True,
powerlawindex=1.5,
largest_scale=56. * units.arcsec,
smallest_scale=3. * units.arcsec,
lowresfwhm=25. * units.arcsec,
pixel_scale=3 * units.arcsec,
imsize=512,
nchan=3)
orig_hdu, sd_hdu, interf_hdu = out
return orig_hdu, sd_hdu, interf_hdu
@pytest.fixture
def cube_data(tmp_path):
orig_hdu, sd_hdu, interf_hdu = prepare_cube_data()
orig_fname = tmp_path / "orig_cube.fits"
sd_fname = tmp_path / "sd_cube.fits"
interf_fname = tmp_path / "interf_cube.fits"
orig_hdu.writeto(orig_fname)
sd_hdu.writeto(sd_fname)
interf_hdu.writeto(interf_fname)
return orig_fname, sd_fname, interf_fname
@pytest.fixture
def image_sz512as_pl1p5_fwhm2as_scale1as(tmp_path):
pixel_scale = 1 * units.arcsec
restfreq = 100 * units.GHz
highres_major = 2 * units.arcsec
# Generate input image
input_hdu = generate_test_fits(imsize=512, powerlaw=1.5,
beamfwhm=highres_major,
pixel_scale=pixel_scale,
restfreq=restfreq,
brightness_unit=units.Jy / units.sr)
input_fn = tmp_path / "input_image_sz512as_pl1.5_fwhm2as_scale1as.fits"
input_hdu.writeto(input_fn, overwrite=True)
input_proj = Projection.from_hdu(input_hdu).to(units.Jy / units.beam)
# Make Interferometric image
intf_data = interferometrically_observe_image(image=input_hdu.data,
pixel_scale=pixel_scale,
largest_angular_scale=40*units.arcsec,
smallest_angular_scale=highres_major)[0].real
intf_hdu = fits.PrimaryHDU(data=intf_data.value if hasattr(intf_data, "value") else intf_data,
header=input_hdu.header)
intf_proj = Projection.from_hdu(intf_hdu).to(units.Jy / units.beam)
intf_fn = tmp_path / "input_image_sz512as_pl1.5_fwhm2as_scale1as_intf2to40as.fits"
intf_proj.write(intf_fn, overwrite=True)
# Make SD image
sd_header = input_hdu.header.copy()
major = 15*units.arcsec
# Eff SD diam (to compare with CASA in troubleshooting)
sd_beam = Beam(major=major)
sd_header.update(sd_beam.to_header_keywords())
sd_fn = tmp_path / "input_image_sz512as_pl1.5_fwhm2as_scale1as_sd15as.fits"
sd_data = singledish_observe_image(input_hdu.data,
pixel_scale=pixel_scale,
beam=sd_beam,
boundary='wrap')
sd_hdu = fits.PrimaryHDU(data=sd_data.value if hasattr(sd_data, "value") else sd_data,
header=sd_header)
sd_hdu.header.update(sd_beam.to_header_keywords())
sd_proj = Projection.from_hdu(sd_hdu).to(units.Jy / units.beam)
sd_proj.write(sd_fn, overwrite=True)
return tmp_path, input_fn, intf_fn, sd_fn
@pytest.fixture(params=[False, True])
def use_memmap(request):
# Fixture to run tests that use this fixture with and without memmap for
# feathering cubes
return request.param
@pytest.fixture(params=[False, True])
def use_dask(request):
    # Fixture to run tests that use this fixture with and without dask for
    # feathering cubes
return request.param
|
# This is the file you'll use to submit most of Lab 0.
# Certain problems may ask you to modify other files to accomplish a certain
# task. There are also various other files that make the problem set work, and
# generally you will _not_ be expected to modify or even understand this code.
# Don't get bogged down with unnecessary work.
# Section 1: Problem set logistics ___________________________________________
# This is a multiple choice question. You answer by replacing
# the symbol 'fill-me-in' with a number, corresponding to your answer.
# You get to check multiple choice answers using the tester before you
# submit them! So there's no reason to worry about getting them wrong.
# Often, multiple-choice questions will be intended to make sure you have the
# right ideas going into the problem set. Run the tester right after you
# answer them, so that you can make sure you have the right answers.
# What version of Python do we *recommend* (not "require") for this course?
# 1. Python v2.3
# 2. Python v2.5 or Python v2.6
# 3. Python v3.0
# Fill in your answer in the next line of code ("1", "2", or "3"):
ANSWER_1 = "2"
# Section 2: Programming warmup _____________________________________________
# Problem 2.1: Warm-Up Stretch
import math
def cube(x):
return x ** 3
# Cheat way
def factorial_import(x):
return math.factorial(x)
# Probably a more intended way
def factorial(x):
if x < 0:
raise ValueError("x must be non-negative")
out = 1
for i in range(1, x + 1):
out *= i
return out
def count_pattern(pattern, lst):
n_matches = 0
for i in range(len(lst)):
check_lst = lst[i : i + len(pattern)]
if len(pattern) != len(check_lst):
break
match = True
for p, l in zip(pattern, check_lst):
if p != l:
match = False
break
if match:
n_matches += 1
return n_matches
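# Example (added): the pattern ('a', 'b') occurs twice in the list below.
assert count_pattern(('a', 'b'), ('a', 'b', 'c', 'a', 'b')) == 2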
# Problem 2.2: Expression depth
def depth(expr, cur_depth=0):
    if isinstance(expr, (list, tuple)):
        cur_depth += 1
        # max() needs a default so an empty (sub)expression still counts its own level
        return max((depth(e, cur_depth) for e in expr), default=cur_depth)
    else:
        return cur_depth
# Problem 2.3: Tree indexing
def tree_ref(tree, index):
    # Plain indexing never mutates the tree, so no copy is needed
    # (and tuples have no .copy() method anyway).
    tree_subset = tree
    for i in index:
        tree_subset = tree_subset[i]
    return tree_subset
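# Illustrative checks (added, not part of the original handout):
assert depth(('+', ('expt', 'x', 2), ('expt', 'y', 2))) == 2
assert tree_ref((('a', 'b'), 'c'), (0, 1)) == 'b'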
# Section 3: Symbolic algebra
# Your solution to this problem doesn't go in this file.
# Instead, you need to modify 'algebra.py' to complete the distributor.
from algebra import Sum, Product, simplify_if_possible
from algebra_utils import distribution, encode_sumprod, decode_sumprod
|
import functions
import sklearnClassify
import pandas as pd
import random
def learnfunction(path, pathTweet, numberUsedAll, numberUnlabeled, algorithm):
    numberForTraining = int(0.8 * numberUsedAll)
    numberForTesting = int(0.2 * numberUsedAll)
input_list, input_score = functions.readTestComment(path, numberUsedAll)
tweets = functions.readManyStrings(pathTweet)
    randomSelect = random.sample(range(len(input_score)), numberUsedAll)
input_list = [input_list[i] for i in randomSelect]
input_score = [input_score[i] for i in randomSelect]
filtered, freq_words = functions.useFilter(input_list, True)
f_tweets = functions.useFilter(tweets, False)
f_tweets = f_tweets[0:numberUnlabeled]
raw = functions.formRawDict(filtered, input_score)
df = pd.DataFrame(raw)
wordList = list(df.itertuples(index = False, name = None))
wordList = functions.filterZeroScore(wordList)
accuracy = []
support = []
not_support = []
for i in range(1,5):
        print(i)
random.shuffle(wordList)
wordList = wordList[0:numberUsedAll]
trainingList = wordList[:numberForTraining]
testList = wordList[numberForTraining:]
if algorithm == 3:
accur, suppor, not_suppor = sklearnClassify.bayes(filtered, input_score, numberForTraining, 'BernoulliNB', f_tweets)
elif algorithm == 2:
accur, suppor, not_suppor = sklearnClassify.bayes(filtered, input_score, numberForTraining, 'MultinomialNB', f_tweets)
elif algorithm == 1:
accur, suppor, not_suppor = sklearnClassify.svm(filtered, input_score, numberForTraining, f_tweets)
accuracy.append(accur)
support.append(suppor)
not_support.append(not_suppor)
print ""
ac = str(sum(accuracy)/len(accuracy))
sp = str(sum(support)/len(support))
nsp = str(sum(not_support)/len(not_support))
print ac + " " + sp + " " + nsp
return ac, sp, nsp
|
from django.db.models import (
CharField,
IntegerField,
Model,
)
from gbetext import day_of_week
class EmailFrequency(Model):
email_type = CharField(max_length=128)
weekday = IntegerField(choices=day_of_week)
class Meta:
app_label = "gbe"
|
# -*- coding: utf-8 -*-
'''
===========================================================================
-- PROJECT NAME : Traxium
-- SOURCE FILE : lista_arb_fis.py
-- REVISION : 1.1
-- AUTHOR :
-- LAST UPDATE : Mayo 6, 2020
--===========================================================================
-- DESCRIPTION : Method that lists all existing physical trees
--===========================================================================
'''
import sys, os
import datetime, logging
sys.path.insert(0, "/home/sistema/clases")
from clsSession import Session
import json
from ARB_FISICO import ARB_FISICO
from ARB_LOGICO import ARB_LOGICO
import validations
rutalog="/home/sistema/log/Traxium"
def application(environ, start_response):
try:
coo = ""
jsdato = ""
status = "200 OK"
try:
dataIP = environ["HTTP_X_FORWARDED_FOR"].split(",")[-1].strip()
except KeyError:
dataIP = environ["REMOTE_ADDR"]
s = Session()
cookie = environ.get("HTTP_COOKIE", 0)
tk = s.getCookie(cookie, "token")
s.setToken(tk)
datosB = s.getCookie(cookie, "dato")
len_datosB = len(datosB)
datosC = json.loads(datosB[1:(len_datosB-1)])
if environ['REQUEST_METHOD'] != 'GET':
#status = "405 Method Not Allowed"
raise validations.HttpException(405)
if s.valToken(tk) and s.valIp(tk, str(dataIP)):
jsdato = s.get_Datos_Usu(str(tk))
            # Rename the backend-specific keys to the generic arb_* keys used by the client
            renames_fis = (("arb_id", "fis_id"), ("arb_id_padre", "fis_id_padre"),
                           ("arb_desc", "fis_desc"), ("arb_orden", "fis_orden"))
            renames_log = (("arb_id", "log_id"), ("arb_id_padre", "log_id_padre"),
                           ("arb_desc", "log_desc"), ("arb_orden", "log_orden"))
            diccionario = ARB_FISICO.consultar_lista()
            for elem in diccionario:
                for new_key, old_key in renames_fis:
                    elem[new_key] = elem.pop(old_key)
                elem.update({'tipo': 'fisico'})
            diccionario_logico = ARB_LOGICO.consultar_lista()
            for elem in diccionario_logico:
                for new_key, old_key in renames_log:
                    elem[new_key] = elem.pop(old_key)
                elem.update({'tipo': 'logico'})
            # Combine both trees in the single list that is returned to the client
            diccionario += diccionario_logico
            if 'error' in diccionario:
status = "400 Bad Request"
# else:
# usu_id = s.get_id_Usu(str(tk))
# filename = os.path.basename(__file__).split('.')[0]
# obj_log = LOG_ACCIONES_USUARIO(log_usu_id=usu_id,log_desc ='Se obtiene la lista de arb_fisico',log_acc_id = 460)
# resp_log = obj_log.guardar_dato()
# if resp_log[0] == 'error':
# mensaje = s.mensaje_error(datosC['idioma'],103)
# diccionario['result'] = "failed"
# diccionario['error'] = "Sucedio un error"
# diccionario['error_cod'] = 103
# status = "400 Bad Request"
# diccionario['val_errors'] = str(mensaje[1][0][0])
else:
            if s.valToken(tk):
                cod_error = 100
            else:
                cod_error = 101
mensaje = s.mensaje_error(datosC['idioma'],cod_error)
status = "401 Unauthorized"
diccionario = {}
diccionario["result"] = "failed"
diccionario["error"] = "Sucedio un error cookie"
diccionario["error_cod"] = cod_error
diccionario["val_errors"] = str(mensaje[1][0][0])
except validations.HttpException as e:
diccionario = {}
mensaje = s.mensaje_error(datosC['idioma'],51)
diccionario["result"] = "failed"
diccionario["error_cod"] = 51
diccionario["error"] = "Sucedio un error"
diccionario["val_errors"] = str(mensaje[1][0][0])
status = e.status_code
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
diccionario = {}
diccionario["result"] = "failed"
diccionario["error"] = "Sucedio un error"
diccionario["error_cod"] = 50
        try:
mensaje = s.mensaje_error(datosC['idioma'],50)
diccionario["val_errors"] = str(mensaje[1][0][0])
except:
diccionario["val_errors"] = 'error de python'
status = "500 Internal Server Error"
datoError = str(e)+' - '+str(exc_type)+' - '+str(fname)+' - '+str(exc_tb.tb_lineno)
now = datetime.datetime.now()
fecha= datetime.date.today()
current_time = now.strftime("%Y-%m-%d %H:%M:%S")
        logger = logging.getLogger(__name__)
logger.setLevel(logging.ERROR)
nombre_log= rutalog+'_'+str(fecha)+'.log'
fh = logging.FileHandler(nombre_log)
fh.setLevel(logging.ERROR)
logger.addHandler(fh)
logger.error("Error: "+str(current_time) + datoError)
preoutput = json.dumps(diccionario)
output = bytes(preoutput, "utf-8")
cook = 'dato="' + str(jsdato) + '" ;path=/'
headers = [
('Content-Type', 'application/json'),
("Access-Control-Allow-Origin", "http://localhost:4200"),
("Access-Control-Allow-Credentials", "true"),
("set-cookie", cook),
]
start_response(status, headers)
return [output]
|
def if_neutral_planet_available(state):
return any(state.not_my_planets())
def have_largest_fleet(state):
return sum(planet.num_ships for planet in state.my_planets()) \
+ sum(fleet.num_ships for fleet in state.my_fleets()) \
> sum(planet.num_ships for planet in state.enemy_planets()) \
+ sum(fleet.num_ships for fleet in state.enemy_fleets())
def have_smallest_fleet(state):
return sum(planet.num_ships for planet in state.my_planets()) \
+ sum(fleet.num_ships for fleet in state.my_fleets()) \
<= sum(planet.num_ships for planet in state.enemy_planets()) \
+ sum(fleet.num_ships for fleet in state.enemy_fleets())
def enemy_attacks(state):
for planet in state.my_planets():
for fleet in state.enemy_fleets():
if fleet.destination_planet == planet.ID:
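                # The planet keeps producing while the fleet is in transit, so compare
                # the garrison plus growth over the (approximate) travel time against
                # the incoming ship count.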
if planet.num_ships + state.distance(planet.ID, fleet.source_planet)*planet.growth_rate < fleet.num_ships:
return True
return False |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the artifact definitions validator."""
import glob
import os
import unittest
from artifacts import errors
from tools import validator
class ArtifactDefinitionsValidatorTest(unittest.TestCase):
"""Class to test the validator."""
def testArtifactDefinitionsValidator(self):
"""Runs the validator over all the YAML artifact definitions files."""
validator_object = validator.ArtifactDefinitionsValidator()
for definitions_file in glob.glob(os.path.join('definitions', '*.yaml')):
result = validator_object.CheckFile(definitions_file)
self.assertTrue(result, msg='in definitions file: {0:s}'.format(
definitions_file))
missing = (validator_object.artifact_name_references -
validator_object.defined_artifact_names)
if missing:
raise errors.MissingDependencyError(
"Meta artifacts reference undefined artifacts: %s" % missing)
# TODO: add tests that deliberately provide invalid definitions to see
# if the validator works correctly.
if __name__ == '__main__':
unittest.main()
|
# -*- encoding: utf-8 -*-
class GenericFilter:
def filter_queryset(self, request, queryset, view):
filter_fields = getattr(view, "filter_fields")
kw = {}
for field in filter_fields:
value = request.query_params.get(field)
if not value:
continue
kw[field] = value
return queryset.filter(**kw)
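# Hypothetical usage (an assumption, not in the original): as a DRF filter backend.
# class BookViewSet(viewsets.ModelViewSet):
#     filter_backends = [GenericFilter]
#     filter_fields = ["author", "year"]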
|
import os
import os.path
from datetime import datetime
from config_parse import Deleter_config
CONF_WAY = "./bc_deleter.conf"
DIR_WATCHERS = []
class ConfigDirError(Exception):
pass
class NonPositiveCountError(ValueError):
pass
class Dir_watcher:
def __init__(self, dir, num):
if not os.path.exists(dir):
raise ConfigDirError("Неверный параметр, папки %s не существует " % dir)
if num < 0:
raise NonPositiveCountError("Вы указали отрицательное количество файлов в папке %s" % dir)
self.__dir = dir
self.__num = num
def getfilelist(self):
lst_f = []
for f in os.listdir(self.__dir):
f = os.path.join(self.__dir,f)
if os.path.isfile(f):
lst_f.append((f,os.path.getctime(f)))
lst_f.sort(key=lambda x: x[1], reverse=True)
return lst_f
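    # getfilelist() sorts newest-first, so everything past the first
    # self.__num entries is old enough to delete.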
def files_for_delete(self):
return [f[0] for f in self.getfilelist()[self.__num:]]
class Deleter:
def __init__(self):
self.__DIR_WATCHERS = []
deleter_config = Deleter_config(CONF_WAY)
if deleter_config.isError():
raise ConfigDirError("Неверное значение параметра папки :" + \
deleter_config.get_err_description()[1] + " : " + deleter_config.get_err_description()[2])
deleter_config_dirs = deleter_config.get_dirs()
for d in deleter_config_dirs:
self.__DIR_WATCHERS.append(Dir_watcher(d,deleter_config_dirs[d]))
def get_list_dir_watches(self):
return self.__DIR_WATCHERS
def get_list_files_for_delete(self):
files = []
for dw in self.__DIR_WATCHERS:
files += dw.files_for_delete()
return files
def delete_old_files(self):
for f in self.get_list_files_for_delete():
os.remove(f)
if __name__ == '__main__':
try:
deleter = Deleter()
dlst = deleter.get_list_dir_watches()
for d in dlst:
d.getfilelist()
print(d.files_for_delete())
except ConfigDirError as cerr:
print(cerr)
#deleter.delete_old_files()
|
# # class Rectangle:
# # def __init__(self, b):
# # self.a =5
# # self.b = b
# # def perimeter(self):
# # return 2 * (self.a + self.b)
# # rec1 = Rectangle(2)
# # print(rec1.perimeter())
# class Pryamougolnik:
# def __init__(self, s1, s2):
# self.a = s1
# self.b = s2
# def perimeter(self):
# return 2 * (self.a + self.b)
# def area(self):
# return self.a * self.b
# rec1 = Pryamougolnik(5,4)
# print(rec1.perimeter())
# print(rec1.area())
# rec2 = Pryamougolnik(5,2)
# print(rec2.perimeter())
# print(rec2.area())
class Node:
def __init__(self, val, l = None, r = None):
self.val = val
self.left = l
self.right = r
# root = Node(2)
# root.left = Node(7)
# root.right = Node(5)
# print(root.val)
# print(root.left.val, root.right.val)
def insertToNode(root, val):
    if not root: return Node(val)
    elif root.val <= val:
        root.right = insertToNode(root.right, val)
    else:
        root.left = insertToNode(root.left, val)
    return root
values = [7,2,5]
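# Hypothetical usage (added, not in the original): build a small BST from `values`.
root = None
for v in values:
    root = insertToNode(root, v)
print(root.val, root.left.val, root.left.right.val)  # 7 2 5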
|
"""Given a list of tuples featuring names and grades on a test,
write a function normalize_grades to normalize the values of the grades to a linear scale between 0 and 1."""
import numpy as np
def normalize_grades(tuples):
low_value = min(x[1] for x in tuples)
high_value = max(x[1] for x in tuples)
return [(x[0], (x[1]-low_value)/(high_value-low_value)) for x in tuples]
def z_normalize_grades(tuples):
values = [x[1] for x in tuples]
mean = sum(values) / len(values)
differences = [(value - mean)**2 for value in values]
sum_of_differences = sum(differences)
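    # Sample standard deviation (ddof=1), matching the commented numpy call below.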
standard_deviation = (sum_of_differences / (len(values) - 1)) ** 0.5
# standard_deviation = np.std([values], ddof=1)
return [(x[0], (x[1]-mean)/standard_deviation) for x in tuples]
tuples = [
('Jason', 94),
('Tessa', 80),
('Carla', 38),
('Matt', 43),
('Jessica', 100)
]
print(normalize_grades(tuples))
print(z_normalize_grades(tuples))
|
# -*- coding:utf-8 -*-
import os
import time
import traceback
import unittest
from BeautifulReport import BeautifulReport
from selenium import webdriver
from zhangjinjin.render.case.common import *
class studentTest(unittest.TestCase):
def setUp(self):
global driver
options = webdriver.ChromeOptions()
options.add_argument("start-maximized")
options.add_argument("--no-sandbox")
options.add_argument('--disable-gpu')
options.add_argument('window-size=1920x3000')
driver = webdriver.Chrome(options=options)
def tearDown(self):
try:
driver.close()
driver.quit()
except:
traceback.print_exc()
print("close fail")
def save_img(self, img_name):
driver.get_screenshot_as_file('{}/{}.png'.format(getimg(), img_name))
@BeautifulReport.add_test_img('test_student')
def test_student(self):
"""student:酷培学生端登录"""
project = "student"
try:
new_tab_cn(driver, project)
is_element_exist(driver, '//*[@id="root"]/div/div/div/div[1]', project + '登录页')
obvious_wait_click(driver, '//*[@id="root"]/div/div/div/div[1]/div[2]/div/a', project + '密码登录')
mobil_xpath = '//*[@id="mobile"]'
code_xpath = '//*[@id="password"]'
btn_xpath = '//*[@class="ant-row ant-form-item"]/div/div/span/button'
branch = is_branch_exist(driver, '//*[@id="root"]/div/div/div/div[1]/div[2]/form ', project)
if branch == 'true':
obvious_wait_click(driver, '//*[@id="branch"]/div/span/i', '选择校区')
obvious_wait_click(driver, '/html/body/div[3]/div/div/div/ul/li[1]', '选择总校区')
btn_xpath = '//*[@class="ant-row ant-form-item"]/div/div/span/button'
else:
btn_xpath = '//*[@class="ant-form-item-children"]/button/span'
# driver.find_element_by_css_selector('div.ant-form-item-control>span>button').click()
# driver.find_element_by_xpath(mobil_xpath).send_keys('18300000001')
# driver.find_element_by_xpath(code_xpath).send_keys('000001')
# driver.find_element_by_css_selector('div.ant-form-item-control>span>button').click()
login(driver, mobil_xpath, code_xpath, btn_xpath, project, "18300000001", "000001")
            # Oddly, on the first login in incognito mode the login button has to be
            # clicked twice before the system is entered
driver.find_element_by_xpath(btn_xpath).click()
result = is_element_exist(driver, '//*[@id="root"]/section/header/img', "酷培ai学生端")
except:
traceback.print_exc()
finally:
self.assertEqual(result, 'true')
            if result == 'false':
                is_fail() |
# """
# This is Master's API interface.
# You should not implement it, or speculate about its implementation
# """
# class Master:
# def guess(self, word: str) -> int:
# TAGS heuristic, interactive
#
# First, note there is no way for this to always work in fewer than 10 attempts.
# Consider aaaaa bbbbb cccccc ddddddd eeeee... no words give us any information
#
#
# Structure is simple: guess a word, and reduce candidate list based on some
# heuristics
#
# At least, we can remove the guess word so this always converges.
#
# TRICK If w has k matches, then any possible solution has EXACTLY k matches with w.
#
# Then one can furthermore improve the heuristic. Among all candidates, choose one that
# has a big "family" (big number of words that share letters with it).
#
# See this nice analysis of the problem
# https://leetcode.com/problems/guess-the-word/discuss/133862/Random-Guess-and-Minimax-Guess-with-Comparison
#
import random
from typing import List
def match(word1, word2):
return sum(a == b for a, b in zip(word1, word2))
class Solution:
def findSecretWord(self, wordlist: List[str], master: 'Master') -> None:
candidates = wordlist
for _ in range(10):
# TODO we could do better here
# consider a guess where [w | match(guess, w) == 0] is small
# or a guess such that [w | match(guess, w) = i] is well balanced
# see different heuristics
guess = random.choice(candidates)
match_num = master.guess(guess)
print(guess, match_num)
if match_num == 6:
break
candidates = [w for w in candidates if match(w, guess) == match_num]
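# A sketch (added) of the "minimax" guess selection mentioned in the notes above:
# pick the candidate whose worst-case match bucket is smallest, so the candidate
# list shrinks as much as possible even in the unluckiest case.
def minimax_guess(candidates):
    best_guess, best_worst = candidates[0], len(candidates)
    for guess in candidates:
        buckets = [0] * 7  # match counts 0..6
        for w in candidates:
            buckets[match(guess, w)] += 1
        worst = max(buckets[:6])  # a 6-match means solved, so ignore that bucket
        if worst < best_worst:
            best_guess, best_worst = guess, worst
    return best_guess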
|
from bs4 import BeautifulSoup
import requests
import csv
import re
import os
sku_i_need = []
with open('./skus_lambda.csv', 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=",")
for row in reader:
sku_i_need.append(row[0])
file_name = './lambdatek/'
new_row = []
for i in range(1, 4):
result = requests.get(
'https://www.lambda-tek.com/shop/?region=GB&catid=0&searchString=Maintenance%20Kit&show=&itemsperpage=400&page=' + str(i))
content = result.content
status = result.status_code
soup = BeautifulSoup(content, 'html.parser')
for elem in soup.find_all(class_='valignl'):
sku = elem.get_text().split('Mfr Num: ')[1].split('\n')[0]
if sku in sku_i_need:
prod = {}
result = requests.get(elem.find_all(
'a')[0]['href']+'&viewSpec=y#product-view')
content = result.content
status = result.status_code
soup = BeautifulSoup(content, 'html.parser')
try:
prod['long_name'] = soup.find_all('h4', class_='prodsubtitle')[
0].get_text().strip()
except:
continue
try:
prod['img'] = 'img : ' + \
soup.find_all(
'img', class_='img-responsive product-imagePr')[0]['src']
except:
img = "img : unknown"
try:
for pdt in soup.find_all('ul', class_='prodDetails')[0].find_all('li'):
key = pdt.get_text().split(':')[0].strip()
val = pdt.get_text().split(':')[1].strip()
prod[key] = val
except:
pass
try:
for table in soup.find_all('table', class_='table-striped'):
for tr in table.find_all('tr'):
if len(tr.find_all('td')) == 2:
key = tr.find_all('td')[0].get_text().strip()
val = tr.find_all('td')[1].get_text().strip()
prod[key] = val
except:
pass
new_row.append(prod)
break
for row in new_row:
print(row)
print(row.keys())
print(row.values())
# with open('./lambda-mk.csv', 'a') as csvfile:
# writer = csv.DictWriter(csvfile, delimiter=",")
# writer.writerow(prod)
# pass
|
with open('Chapter 9.py',encoding='gb18030',errors='ignore') as file_object:
contents = file_object.read()
filename = 'Chapter 9.txt'
with open(filename,"w") as file_object:
file_object.write(contents)
words = contents.split()
words_number = len(words)
print("The text has " + str(words_number) + " words.") |
from rest_framework import serializers
from workouts.models import Workout, Exercise, Set
class SetSerializer(serializers.ModelSerializer):
id = serializers.IntegerField(required=False)
class Meta:
model = Set
fields = ('id', 'reps', 'weight')
class ExerciseSerializer(serializers.ModelSerializer):
sets = SetSerializer(many=True, read_only=True)
id = serializers.IntegerField(required=False)
class Meta:
model = Exercise
fields = ('id', 'name', 'sets')
class WorkoutSerializer(serializers.ModelSerializer):
exercises = ExerciseSerializer(many=True, required=False, read_only=True)
account = serializers.PrimaryKeyRelatedField(read_only=True)
class Meta:
model = Workout
fields = ('id', 'date', 'notes', 'account', 'exercises') |
from app.models import User,Student,Role, Bank_Account, His
import flask
from app import app
from flask import json, render_template, request, session, Response,jsonify,redirect,url_for,flash,make_response
from app.database import mysql_db
from werkzeug.security import generate_password_hash, check_password_hash
from datetime import date
@app.route('/requestpayment',methods=['GET','POST'])
def requestpayment():
if request.method == "POST":
totalcost = request.json.get("totalcost")
status = request.json.get("status")
orderDatetime = request.json.get("orderDatetime")
paymentUser = request.json.get("paymentUser")
password = request.json.get("password")
puser_email = User.User.query.filter(User.User.email == paymentUser)
puser = puser_email.first()
if puser is not None and check_password_hash(puser.password_hash, password):
            bauser = puser.balance
            new_balance = bauser - totalcost
            if new_balance < 0:
                return make_response(jsonify({"status":"fail","message":"Not enough balance"}))
            dateStr = date.today().strftime("%d/%m/%Y")
            payid = 'pm' + orderDatetime
            his = His.His(paymentUser, dateStr, paymentUser, payid, totalcost)
            mysql_db.session.add(his)
            puser_email.update(dict(balance=new_balance))
            mysql_db.session.commit()
return make_response(jsonify({"paymentUser": paymentUser , "orderDatetime" : orderDatetime, "status" : "paid", "totalcost" : totalcost, "message" : "payment success"}))
return make_response(jsonify({"status":"fail","message":"Username or Password invalid"}))
|
'''
Conceptual Aircraft Design Tool
(for PRJ-22 and AP-701 courses)
Cap. Eng. Ney Rafael Secco (ney@ita.br)
Aircraft Design Department
Aeronautics Institute of Technology
07-2021
The code uses several historical regressions from
aircraft design books to make a quick initial
sizing procedure.
Generally, the user should call only the 'analyze'
function from this module.
'''
# IMPORTS
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# CONSTANTS
ft2m = 0.3048
kt2ms = 0.514444
lb2N = 4.44822
nm2m = 1852.0
gravity = 9.81
#========================================
# MAIN FUNCTION
def analyze(airplane = None,
print_log = False, # Plot results on the terminal screen
plot = False, # Generate 3D plot of the aircraft
W0_guess = None, # Guess for MTOW [N]
T0_guess = None, # Guess for Takeoff total thrust [N]
):
'''
This is the main function that should be used for aircraft analysis.
'''
# Load standard airplane if none is provided
if airplane is None:
airplane = standard_airplane()
# Use an average wing loading for transports
    # to estimate W0_guess and T0_guess if none are provided
if W0_guess is None:
W0_guess = 5e3*airplane['S_w']
if T0_guess is None:
T0_guess = 0.3*W0_guess
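    # Illustrative numbers (added): S_w = 93 m^2 gives W0_guess = 465 kN
    # and T0_guess = 139.5 kN under these assumptions.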
### ADD CODE FROM SECTION 4.3 HERE ###
if print_log:
print('We', airplane['We']/gravity)
print('Wf', airplane['Wf']/gravity)
print('W0', airplane['W0']/gravity)
print('T0', airplane['T0']/gravity)
print('T0/W0', airplane['T0']/airplane['W0'])
print('W0/S', airplane['W0']/airplane['S_w'])
print('deltaS_wlan', airplane['deltaS_wlan'])
print('xcg_fwd', airplane['xcg_fwd'])
print('xcg_aft', airplane['xcg_aft'])
print('xnp', airplane['xnp'])
print('SM_fwd', airplane['SM_fwd'])
print('SM_aft', airplane['SM_aft'])
print('b_tank_b_w', airplane['b_tank_b_w'])
print('CLv', airplane['CLv'])
print('frac_nlg_fwd', airplane['frac_nlg_fwd'])
print('frac_nlg_aft', airplane['frac_nlg_aft'])
print('alpha_tipback [deg]', airplane['alpha_tipback']*180.0/np.pi)
print('alpha_tailstrike [deg]', airplane['alpha_tailstrike']*180.0/np.pi)
print('phi_overturn [deg]', airplane['phi_overturn']*180.0/np.pi)
if plot:
plot3d(airplane)
return airplane
#========================================
# DISCIPLINE MODULES
def geometry(airplane):
# Unpack dictionary
S_w = airplane['S_w']
AR_w = airplane['AR_w']
taper_w = airplane['taper_w']
sweep_w = airplane['sweep_w']
dihedral_w = airplane['dihedral_w']
xr_w = airplane['xr_w']
zr_w = airplane['zr_w']
Cht = airplane['Cht']
AR_h = airplane['AR_h']
taper_h = airplane['taper_h']
sweep_h = airplane['sweep_h']
dihedral_h = airplane['dihedral_h']
Lc_h = airplane['Lc_h']
zr_h = airplane['zr_h']
Cvt = airplane['Cvt']
AR_v = airplane['AR_v']
taper_v = airplane['taper_v']
sweep_v = airplane['sweep_v']
Lb_v = airplane['Lb_v']
zr_v = airplane['zr_v']
### ADD CODE FROM SECTION 3.1 HERE ###
# Update dictionary with new results
airplane['b_w'] = b_w
airplane['cr_w'] = cr_w
airplane['xt_w'] = xt_w
airplane['yt_w'] = yt_w
airplane['zt_w'] = zt_w
airplane['ct_w'] = ct_w
airplane['xm_w'] = xm_w
airplane['ym_w'] = ym_w
airplane['zm_w'] = zm_w
airplane['cm_w'] = cm_w
airplane['S_h'] = S_h
airplane['b_h'] = b_h
airplane['xr_h'] = xr_h
airplane['cr_h'] = cr_h
airplane['xt_h'] = xt_h
airplane['yt_h'] = yt_h
airplane['zt_h'] = zt_h
airplane['ct_h'] = ct_h
airplane['xm_h'] = xm_h
airplane['ym_h'] = ym_h
airplane['zm_h'] = zm_h
airplane['cm_h'] = cm_h
airplane['S_v'] = S_v
airplane['b_v'] = b_v
airplane['xr_v'] = xr_v
airplane['cr_v'] = cr_v
airplane['xt_v'] = xt_v
airplane['zt_v'] = zt_v
airplane['ct_v'] = ct_v
airplane['xm_v'] = xm_v
airplane['zm_v'] = zm_v
airplane['cm_v'] = cm_v
# All variables are stored in the dictionary.
# There is no need to return anything
return None
#----------------------------------------
def aerodynamics(Mach, altitude, n_engines_failed, flap_def, slat_def,
lg_down, h_ground, W0_guess, airplane, method=2):
'''
method: 1 or 2 -> Method 1 applies a single friction coefficient
to the entire wetted area of the aircraft (based on Howe).
Method 2 is more refined since it computes friction and
form factors for each component.
'''
# c_flap_c_wing: extended total chord/ retracted total chord
# Wetted areas from Torenbeek's Appendix B
# Unpacking dictionary
S_w = airplane['S_w']
AR_w = airplane['AR_w']
cr_w = airplane['cr_w']
ct_w = airplane['ct_w']
taper_w = airplane['taper_w']
sweep_w = airplane['sweep_w']
tcr_w = airplane['tcr_w']
tct_w = airplane['tct_w']
b_w = airplane['b_w']
cm_w = airplane['cm_w']
clmax_w = airplane['clmax_w']
S_h = airplane['S_h']
cr_h = airplane['cr_h']
ct_h = airplane['ct_h']
taper_h = airplane['taper_h']
sweep_h = airplane['sweep_h']
tcr_h = airplane['tcr_h']
tct_h = airplane['tct_h']
b_h = airplane['b_h']
cm_h = airplane['cm_h']
S_v = airplane['S_v']
cr_v = airplane['cr_v']
ct_v = airplane['ct_v']
taper_v = airplane['taper_v']
sweep_v = airplane['sweep_v']
tcr_v = airplane['tcr_v']
tct_v = airplane['tct_v']
b_v = airplane['b_v']
cm_v = airplane['cm_v']
L_f = airplane['L_f']
D_f = airplane['D_f']
L_n = airplane['L_n']
D_n = airplane['D_n']
n_engines = airplane['n_engines']
n_engines_under_wing = airplane['n_engines_under_wing']
max_flap_def = max(airplane['TO_flap_def'], airplane['LD_flap_def'])
flap_type = airplane['flap_type']
c_flap_c_wing = airplane['c_flap_c_wing']
b_flap_b_wing = airplane['b_flap_b_wing']
max_slat_def = max(airplane['TO_slat_def'], airplane['LD_slat_def'])
slat_type = airplane['slat_type']
c_slat_c_wing = airplane['c_slat_c_wing']
b_slat_b_wing = airplane['b_slat_b_wing']
k_exc_drag = airplane['k_exc_drag']
    # Default rugosity value (smooth paint, from Raymer Table 12.5)
rugosity = 0.634e-5
### WING
# Average t/c
tc_avg = 0.5*(tcr_w + tct_w)
#Exposed Area
Sexp = S_w - cr_w*D_f
#Wetted Area
tau = tcr_w/tct_w
Swet_w = 2*Sexp*(1 + 0.25*tcr_w*(1 + tau*taper_w)/(1 + taper_w))
# Friction coefficient
Cf_w = Cf_calc(Mach, altitude,
length = cm_w,
rugosity = rugosity,
k_lam = 0.05)
# Form factor
FF_w = FF_surface(Mach, tcr_w, tct_w, sweep_w, b_w, cr_w, ct_w, cm_w)
# Interference factor
Q_w = 1.0
# Drag coefficient
CD0_w = Cf_w*FF_w*Q_w*Swet_w/S_w
### HORIZONTAL TAIL
#Exposed Area
Sexp = S_h
#Wetted Area
tau = tcr_h/tct_h
Swet_h = 2*Sexp*(1 + 0.25*tcr_h*(1 + tau*taper_h)/(1 + taper_h))
# Friction coefficient
Cf_h = Cf_calc(Mach, altitude,
length = cm_h,
rugosity = rugosity,
k_lam = 0.05)
# Form factor
FF_h = FF_surface(Mach, tcr_h, tct_h, sweep_h, b_h, cr_h, ct_h, cm_h)
# Interference factor
Q_h = 1.0
# Drag coefficient
CD0_h = Cf_h*FF_h*Q_h*Swet_h/S_w
### VERTICAL TAIL
#Exposed Area
Sexp = S_v
#Wetted Area
tau = tcr_v/tct_v
Swet_v = 2*Sexp*(1 + 0.25*tcr_v*(1 + tau*taper_v)/(1 + taper_v))
# Friction coefficient
Cf_v = Cf_calc(Mach, altitude,
length = cm_v,
rugosity = rugosity,
k_lam = 0.05)
# Form factor
FF_v = FF_surface(Mach, tcr_v, tct_v, sweep_v, 2*b_v, cr_v, ct_v, cm_v)
# Interference factor
Q_v = 1.0
# Drag coefficient
CD0_v = Cf_v*FF_v*Q_v*Swet_v/S_w
### FUSELAGE
# Wetted area
lambda_fus = L_f/D_f
Swet_f = np.pi*D_f*L_f*(1 - 2/lambda_fus)**(2.0/3.0)*(1 + 1/lambda_fus**2)
# Friction coefficient
Cf_f = Cf_calc(Mach, altitude,
length = L_f,
rugosity = rugosity,
k_lam = 0.05)
# Form factor
FF_f = 1 + 60/lambda_fus**3 + lambda_fus/400
# Interference factor
Q_f = 1.0
# Drag coefficient
CD0_f = Cf_f*FF_f*Q_f*Swet_f/S_w
### NACELLE
# Wetted area (where we take the number of nacelles into account)
Swet_n = n_engines*np.pi*D_n*L_n
# Friction coefficient
Cf_n = Cf_calc(Mach, altitude,
length = L_n,
rugosity = rugosity,
k_lam = 0.05)
# Form factor
lambda_n = L_n/D_n
FF_n = 1 + 0.35/lambda_n
# Interference factor
Q_n = 1.2
# Drag coefficient
CD0_n = Cf_n*FF_n*Q_n*Swet_n/S_w
### VISCOUS DRAG
if method == 1:
# Total wetted area
Swet = Swet_w + Swet_h + Swet_v + Swet_f + Swet_n
# Wetted area ratio
Sr = Swet/S_w
# t/c correction
tau = (Sr-2)/Sr + 1.9/Sr*(1 + 0.526*(4*tc_avg)**3)
# Other parameters for jet aircraft
Af = 0.93
clam = 0.05
Tf = 1.1
# Friction coefficient (Howe Eq 6.13)
Cfe = 0.005*(1-2*clam/Sr)*tau*(1 - 0.2*Mach + 0.12*(Mach*np.sqrt(np.cos(sweep_w))/(Af - tc_avg))**20)*Tf*S_w**(-0.1)
# Viscous drag
CD0 = Cfe*Swet/S_w
elif method == 2:
# Add all drag coefficients
CD0 = CD0_w + CD0_h + CD0_v + CD0_f + CD0_n
### INDUCED
# Oswald Factor (Howe Eq 6.14)
f_taper = 0.005*(1 + 1.5*(taper_w - 0.6)**2)
e = 1/(1 + 0.12*Mach**6)/(1 + (0.142 + AR_w*(10*tc_avg)**0.33*f_taper)/np.cos(sweep_w)**2 + 0.1*(3*n_engines_under_wing + 1)/(4 + AR_w)**0.8)
# Induced drag term
K = 1/np.pi/AR_w/e
### GROUND EFFECT
if h_ground > 0:
aux = 33*(h_ground/b_w)**1.5
Kge = aux/(1+aux) # Raymer Eq. 12.61
K = K*Kge
### Clean wing CLmax (Raymer Eq. 5.7)
CLmax_clean = 0.9*clmax_w*np.cos(sweep_w)
### Flaps deflection
ct_w = cr_w*taper_w
if max_flap_def > 0.0:
CD0_flap = 0.0023*b_flap_b_wing*flap_def*180/np.pi # Raymer Eq 12.37
sweep_flap=geo_change_sweep(0.25, 2-c_flap_c_wing, sweep_w, b_w/2, cr_w, ct_w)
if flap_type == 'plain':
dclmax = 0.9
elif flap_type == 'slotted':
dclmax = 1.3
elif flap_type == 'fowler':
dclmax = 1.3*c_flap_c_wing
elif flap_type == 'double slotted':
dclmax = 1.6*c_flap_c_wing
elif flap_type == 'triple slotted':
dclmax = 1.9*c_flap_c_wing
deltaCLmax_flap = dclmax*b_flap_b_wing*np.cos(sweep_flap)*flap_def/max_flap_def # Raymer Eq 12.21
else:
CD0_flap = 0.0
deltaCLmax_flap = 0.0
### Slats deflection
if max_slat_def > 0.0:
CD0_slat = 0.0023*b_slat_b_wing*slat_def*180/np.pi # Raymer Eq 12.37
sweep_slat=geo_change_sweep(0.25, c_slat_c_wing-1, sweep_w, b_w/2, cr_w, ct_w)
if slat_type == 'fixed':
dclmax = 0.2
elif slat_type == 'flap':
dclmax = 0.3
elif slat_type == 'kruger':
dclmax = 0.3
elif slat_type == 'slat':
dclmax = 0.4*c_slat_c_wing
deltaCLmax_slat = dclmax*b_slat_b_wing*np.cos(sweep_slat)*slat_def/max_slat_def # Raymer Eq 12.21
else:
CD0_slat = 0.0
deltaCLmax_slat = 0.0
# Maximum lift
CLmax = CLmax_clean + deltaCLmax_flap + deltaCLmax_slat
### Landing gear (ESDU)
lg_factor = (0.57 - 0.26*flap_def/max_flap_def)*1e-3
CD0_lg = lg_down*lg_factor*(W0_guess/gravity)**0.785/S_w
### Windmill engine
#Vn_V = 0.42
#CDwdm = (0.0785*D_n**2 + 1/(1 + 0.16*Mach**2)*np.pi/2*D_n**2*Vn_V*(1-Vn_V))/S_w
#CD0_wdm = n_engines_failed*CDwdm
CD0_wdm = n_engines_failed*0.3*np.pi/4*D_n**2/S_w # Raymer Eq 12.41
# Add all drag values found so far
CD0 = CD0 + CD0_flap + CD0_slat + CD0_lg + CD0_wdm
### Excrescence
CD0_exc = CD0*k_exc_drag/(1-k_exc_drag)
CD0 = CD0 + CD0_exc
### WAVE DRAG (Korn Equation)
    # The CL may reach unreasonably high values at low Mach numbers,
    # which decreases Mdd and yields an unrealistic wave drag there.
    # So we add another condition to discard any wave drag below Mach 0.4.
if Mach > 0.4:
# Estimate flight CL
T,p,rho,mi = atmosphere(altitude)
a = np.sqrt(1.4*287*T)
V = a*Mach
CL = 2*W0_guess/rho/V**2/S_w
Mach_dd = 0.95/np.cos(sweep_w) - tc_avg/np.cos(sweep_w)**2 - CL/10/np.cos(sweep_w)**3
Mach_crit = Mach_dd - (0.1/80)**(1/3)
if (Mach > Mach_crit):
CDw = 20*(Mach - Mach_crit)**4
else:
CDw = 0.0
else:
CDw = 0.0
CD0 = CD0 + CDw
# Update dictionary
airplane['Swet_f'] = Swet_f
return CD0, K, CLmax
#----------------------------------------
def engineTSFC(Mach, altitude, airplane):
# Unpack dictionary
BPR = airplane['BPR']
Cbase = airplane['Cbase']
### ADD CODE FROM SECTION 3.3 HERE ###
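    # A hedged sketch using Howe's TSFC regression (Howe, Aircraft Conceptual
    # Design Synthesis); the fallback base value below is an assumption.
    T, p, rho, mi = atmosphere(altitude)
    sigma = rho/1.225
    if Cbase is None:
        Cbase = 0.85/3600.0   # assumed base TSFC [1/s] for a modern turbofan
    C = Cbase*(1 - 0.15*BPR**0.65)*(1 + 0.28*(1 + 0.063*BPR**2)*Mach)*sigma**0.08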
return C
#----------------------------------------
def empty_weight(W0_guess, T0_guess, airplane):
# Unpack dictionary
S_w = airplane['S_w']
AR_w = airplane['AR_w']
taper_w = airplane['taper_w']
sweep_w = airplane['sweep_w']
xm_w = airplane['xm_w']
cm_w = airplane['cm_w']
tcr_w = airplane['tcr_w']
S_h = airplane['S_h']
xm_h = airplane['xm_h']
cm_h = airplane['cm_h']
S_v = airplane['S_v']
xm_v = airplane['xm_v']
cm_v = airplane['cm_v']
L_f = airplane['L_f']
Swet_f = airplane['Swet_f']
n_engines = airplane['n_engines']
BPR = airplane['BPR']
x_n = airplane['x_n']
L_n = airplane['L_n']
x_nlg = airplane['x_nlg']
x_mlg = airplane['x_mlg']
### ADD CODE FROM SECTION 3.4 HERE ###
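    # Illustrative structure only (the actual regressions are in Section 3.4
    # and are not reproduced here): each component weight and CG position
    # comes from a historical correlation, and the empty weight and its CG
    # follow as a weighted sum, e.g.:
    # We    = W_w + W_h + W_v + W_f + W_nlg + W_mlg + W_eng_installed + W_allelse
    # xcg_e = (W_w*xcg_w + W_h*xcg_h + ... + W_allelse*xcg_allelse)/We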
# Update dictionary
airplane['W_w'] = W_w
airplane['W_h'] = W_h
airplane['W_v'] = W_v
airplane['W_f'] = W_f
airplane['W_nlg'] = W_nlg
airplane['W_mlg'] = W_mlg
airplane['W_eng'] = W_eng_installed
airplane['W_allelse'] = W_allelse
return We, xcg_e
#----------------------------------------
def fuel_weight(W0_guess, airplane):
# Unpacking dictionary
S_w = airplane['S_w']
altitude_cruise = airplane['altitude_cruise']
Mach_cruise = airplane['Mach_cruise']
range_cruise = airplane['range_cruise']
loiter_time = airplane['loiter_time']
altitude_altcruise = airplane['altitude_altcruise']
Mach_altcruise = airplane['Mach_altcruise']
range_altcruise = airplane['range_altcruise']
### ADD CODE FROM SECTION 3.5 HERE ###
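    # A hedged sketch of the fuel-fraction method (Raymer-style; the actual
    # Section 3.5 code may differ). The engine-start, climb, descent and
    # landing fractions below are assumed illustrative values.
    T_atm, p, rho, mi = atmosphere(altitude_cruise)
    V_cruise = Mach_cruise*np.sqrt(1.4*287*T_atm)
    CD0, K, _ = aerodynamics(Mach_cruise, altitude_cruise, 0, 0.0, 0.0,
                             0, 0.0, W0_guess, airplane)
    CL = 2*W0_guess/(rho*V_cruise**2*S_w)
    LD = CL/(CD0 + K*CL**2)
    C_cruise = engineTSFC(Mach_cruise, altitude_cruise, airplane)
    Mf_cruise = np.exp(-range_cruise*C_cruise/(V_cruise*LD))   # Breguet range
    Mf_loiter = np.exp(-loiter_time*C_cruise/LD)               # Breguet endurance
    # Alternative (diversion) cruise, reusing the same L/D for brevity
    T_atm, p, rho, mi = atmosphere(altitude_altcruise)
    V_alt = Mach_altcruise*np.sqrt(1.4*287*T_atm)
    C_alt = engineTSFC(Mach_altcruise, altitude_altcruise, airplane)
    Mf_altcruise = np.exp(-range_altcruise*C_alt/(V_alt*LD))
    Mf_start, Mf_climb, Mf_descent, Mf_landing = 0.99, 0.98, 0.99, 0.992
    Mf = (Mf_start*Mf_climb*Mf_cruise*Mf_loiter*Mf_descent*
          Mf_altcruise*Mf_landing)
    Wf = 1.06*(1 - Mf)*W0_guess   # 6% allowance for trapped fuel (assumed)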
return Wf, Mf_cruise
#----------------------------------------
def weight(W0_guess, T0_guess, airplane):
# Unpacking dictionary
W_payload = airplane['W_payload']
W_crew = airplane['W_crew']
# Set iterator
delta = 1000
while abs(delta) > 10:
### ADD CODE FROM SECTION 3.6.4 HERE ###
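        # A hedged sketch of the MTOW fixed-point iteration (the actual
        # Section 3.6.4 code may differ): recompute empty and fuel weights
        # at the current guess until the takeoff weight stops changing.
        We, xcg_e = empty_weight(W0_guess, T0_guess, airplane)
        Wf, Mf_cruise = fuel_weight(W0_guess, airplane)
        W0 = We + Wf + W_payload + W_crew
        delta = W0 - W0_guess
        W0_guess = W0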
return W0, We, Wf, Mf_cruise, xcg_e
#----------------------------------------
def performance(W0, Mf_cruise, airplane):
'''
This function computes the required thrust and wing areas
required to meet takeoff, landing, climb, and cruise requirements.
OUTPUTS:
T0: real -> Total thrust required to meet all mission phases
S_wlan: real -> Wing area required for landing. The wing area (S_w) should
be greater than this value.
'''
# Unpacking dictionary
S_w = airplane['S_w']
n_engines = airplane['n_engines']
BPR = airplane['BPR']
TO_flap_def = airplane['TO_flap_def']
LD_flap_def = airplane['LD_flap_def']
TO_slat_def = airplane['TO_slat_def']
LD_slat_def = airplane['LD_slat_def']
h_ground = airplane['h_ground']
altitude_takeoff = airplane['altitude_takeoff']
distance_takeoff = airplane['distance_takeoff']
altitude_landing = airplane['altitude_landing']
distance_landing = airplane['distance_landing']
MLW_frac = airplane['MLW_frac']
altitude_cruise = airplane['altitude_cruise']
Mach_cruise = airplane['Mach_cruise']
### ADD CODE FROM SECTION 3.7.3 TO SECTION 3.7.5 HERE ###
### CLIMB
# Define standard function for climb analysis
def climb_analysis(grad, Ks, altitude, CLmax_guess,
lg_down, h_ground_climb, flap_def, slat_def, n_engines_failed, Mf,
kT):
'''
We need a guess for CLmax just to get an approximate drag polar for
speed computation. We will get the correct CLmax from the aerodynamics module
kT: Thrust decay factor (e.g. use 0.94 for maximum continuous thrust)
'''
### ADD CODE FROM SECTION 3.7.6 HERE ###
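        # A hedged sketch of the classical climb-gradient constraint (the
        # actual Section 3.7.6 code may differ; in particular, converting the
        # required thrust to sea-level static T0 via the engine thrust lapse
        # is omitted here).
        W = W0*Mf                                    # weight at this phase
        T_atm, p, rho, mi = atmosphere(altitude)
        Vstall = np.sqrt(2*W/(rho*S_w*CLmax_guess))  # stall speed estimate
        V = Ks*Vstall
        Mach = V/np.sqrt(1.4*287*T_atm)
        CD0, K, CLmax = aerodynamics(Mach, altitude, n_engines_failed,
                                     flap_def, slat_def, lg_down,
                                     h_ground_climb, W, airplane)
        CL = 2*W/(rho*V**2*S_w)
        CD = CD0 + K*CL**2
        T_req = W*(grad + CD/CL)                     # thrust from operative engines
        T0 = T_req*n_engines/((n_engines - n_engines_failed)*kT)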
return T0
### CONTINUE THE CODE FROM SECTION 3.7.6 HERE ###
### ADD CODE FROM SECTION 3.7.7 HERE ###
return T0, T0vec, deltaS_wlan, CLmaxTO
#----------------------------------------
def thrust_matching(W0_guess, T0_guess, airplane):
### ADD CODE FROM SECTION 3.8 HERE ###
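    # A hedged sketch of the weight & thrust matching loop (the actual
    # Section 3.8 code may differ): refresh the geometry, then iterate the
    # weight estimate and the performance-sized thrust until they settle.
    geometry(airplane)
    for _ in range(10):   # a handful of fixed-point passes is usually enough
        W0, We, Wf, Mf_cruise, xcg_e = weight(W0_guess, T0_guess, airplane)
        T0, T0vec, deltaS_wlan, CLmaxTO = performance(W0, Mf_cruise, airplane)
        W0_guess, T0_guess = W0, T0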
# Update dictionary
airplane['W0'] = W0
airplane['We'] = We
airplane['Wf'] = Wf
airplane['xcg_e'] = xcg_e
airplane['T0'] = T0
airplane['T0vec'] = T0vec
airplane['deltaS_wlan'] = deltaS_wlan
airplane['CLmaxTO'] = CLmaxTO
# Return
return None
#----------------------------------------
def balance(airplane):
# Unpack dictionary
W0 = airplane['W0']
W_payload = airplane['W_payload']
xcg_payload = airplane['xcg_payload']
W_crew = airplane['W_crew']
xcg_crew = airplane['xcg_crew']
We = airplane['We']
xcg_e = airplane['xcg_e']
Wf = airplane['Wf']
Mach_cruise = airplane['Mach_cruise']
S_w = airplane['S_w']
AR_w = airplane['AR_w']
sweep_w = airplane['sweep_w']
b_w = airplane['b_w']
xr_w = airplane['xr_w']
cr_w = airplane['cr_w']
ct_w = airplane['ct_w']
xm_w = airplane['xm_w']
cm_w = airplane['cm_w']
tcr_w = airplane['tcr_w']
tct_w = airplane['tct_w']
c_tank_c_w = airplane['c_tank_c_w']
x_tank_c_w = airplane['x_tank_c_w']
S_h = airplane['S_h']
AR_h = airplane['AR_h']
sweep_h = airplane['sweep_h']
b_h = airplane['b_h']
cr_h = airplane['cr_h']
ct_h = airplane['ct_h']
xm_h = airplane['xm_h']
cm_h = airplane['cm_h']
eta_h = airplane['eta_h']
Cvt = airplane['Cvt']
L_f = airplane['L_f']
D_f = airplane['D_f']
y_n = airplane['y_n']
T0 = airplane['T0']
n_engines = airplane['n_engines']
CLmaxTO = airplane['CLmaxTO']
rho_f = airplane['rho_f']
### ADD CODE FROM SECTION 3.9 HERE ###
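    # A hedged sketch of the CG-envelope bookkeeping only (Section 3.9 also
    # computes the fuel tank span, neutral point, static margins and the VT
    # lift for engine-out trim, which need data not reproduced here).
    # 'xcg_f' below is a hypothetical fuel CG at mid-tank:
    # xcg_f = xm_w + (x_tank_c_w + 0.5*c_tank_c_w)*cm_w
    # cg_cases = []
    # for W_pl, x_pl in [(0.0, 0.0), (W_payload, xcg_payload)]:
    #     for W_fu, x_fu in [(0.0, 0.0), (Wf, xcg_f)]:
    #         W_tot = We + W_crew + W_pl + W_fu
    #         cg_cases.append((We*xcg_e + W_crew*xcg_crew +
    #                          W_pl*x_pl + W_fu*x_fu)/W_tot)
    # xcg_fwd, xcg_aft = min(cg_cases), max(cg_cases)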
# Update dictionary
airplane['xcg_fwd'] = xcg_fwd
airplane['xcg_aft'] = xcg_aft
airplane['xnp'] = xnp
airplane['SM_fwd'] = SM_fwd
airplane['SM_aft'] = SM_aft
airplane['b_tank_b_w'] = b_tank_b_w
airplane['CLv'] = CLv
return None
#----------------------------------------
def landing_gear(airplane):
# Unpack dictionary
x_nlg = airplane['x_nlg']
x_mlg = airplane['x_mlg']
y_mlg = airplane['y_mlg']
z_lg = airplane['z_lg']
xcg_fwd = airplane['xcg_fwd']
xcg_aft = airplane['xcg_aft']
x_tailstrike = airplane['x_tailstrike']
z_tailstrike = airplane['z_tailstrike']
### ADD CODE FROM SECTION 3.10 HERE ###
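    # A hedged sketch using standard gear-placement relations (Raymer Ch. 11
    # style; the actual Section 3.10 code may differ). The CG is assumed to
    # lie on the fuselage centerline (z = 0).
    frac_nlg_fwd = (x_mlg - xcg_fwd)/(x_mlg - x_nlg)   # nose-gear load fraction, fwd CG
    frac_nlg_aft = (x_mlg - xcg_aft)/(x_mlg - x_nlg)   # nose-gear load fraction, aft CG
    alpha_tipback = np.arctan2(x_mlg - xcg_aft, -z_lg)
    alpha_tailstrike = np.arctan2(z_tailstrike - z_lg, x_tailstrike - x_mlg)
    # Overturn: CG height over its distance to the nose-to-main gear contact
    # line in plan view (the forward CG is the critical case)
    dist = y_mlg*(xcg_fwd - x_nlg)/np.sqrt((x_mlg - x_nlg)**2 + y_mlg**2)
    phi_overturn = np.arctan2(-z_lg, dist)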
# Update dictionary
airplane['frac_nlg_fwd'] = frac_nlg_fwd
airplane['frac_nlg_aft'] = frac_nlg_aft
airplane['alpha_tipback'] = alpha_tipback
airplane['alpha_tailstrike'] = alpha_tailstrike
airplane['phi_overturn'] = phi_overturn
return None
#========================================
# AUXILIARY FUNCTIONS
def plot3d(airplane, figname='3dview.png'):
'''
This function generates a 3D plot of the aircraft
'''
# Unpack dictionary
xr_w = airplane['xr_w']
zr_w = airplane['zr_w']
cr_w = airplane['cr_w']
xt_w = airplane['xt_w']
yt_w = airplane['yt_w']
zt_w = airplane['zt_w']
ct_w = airplane['ct_w']
xm_w = airplane['xm_w']
ym_w = airplane['ym_w']
zm_w = airplane['zm_w']
cm_w = airplane['cm_w']
xr_h = airplane['xr_h']
zr_h = airplane['zr_h']
cr_h = airplane['cr_h']
xt_h = airplane['xt_h']
yt_h = airplane['yt_h']
zt_h = airplane['zt_h']
ct_h = airplane['ct_h']
xm_h = airplane['xm_h']
ym_h = airplane['ym_h']
zm_h = airplane['zm_h']
cm_h = airplane['cm_h']
xr_v = airplane['xr_v']
zr_v = airplane['zr_v']
cr_v = airplane['cr_v']
xt_v = airplane['xt_v']
zt_v = airplane['zt_v']
ct_v = airplane['ct_v']
xm_v = airplane['xm_v']
zm_v = airplane['zm_v']
cm_v = airplane['cm_v']
L_f = airplane['L_f']
D_f = airplane['D_f']
x_n = airplane['x_n']
y_n = airplane['y_n']
z_n = airplane['z_n']
L_n = airplane['L_n']
D_n = airplane['D_n']
# Optional arguments
if 'xcg_fwd' in airplane:
xcg_fwd = airplane['xcg_fwd']
xcg_aft = airplane['xcg_aft']
else:
xcg_fwd = None
xcg_aft = None
if 'xnp' in airplane:
xnp = airplane['xnp']
else:
xnp = None
### PLOT
fig = plt.figure()
    ax = fig.add_subplot(projection='3d')  # fig.gca(projection=...) was removed in Matplotlib 3.6
# Wing plot
ax.plot([xr_w, xt_w, xt_w+ct_w, xr_w+cr_w, xt_w+ct_w, xt_w, xr_w],
[0.0, yt_w, yt_w, 0.0, -yt_w, -yt_w, 0.0],
[zr_w, zt_w, zt_w, zr_w, zt_w, zt_w, zr_w])
# HT plot
ax.plot([xr_h, xt_h, xt_h+ct_h, xr_h+cr_h, xt_h+ct_h, xt_h, xr_h],
[0.0, yt_h, yt_h, 0.0, -yt_h, -yt_h, 0.0],
[zr_h, zt_h, zt_h, zr_h, zt_h, zt_h, zr_h])
# VT plot
ax.plot([xr_v, xt_v, xt_v+ct_v, xr_v+cr_v, xr_v],
[0.0, 0.0, 0.0, 0.0, 0.0],
[zr_v, zt_v, zt_v, zr_v, zr_v])
# Fuselage plot
ax.plot([0.0, L_f],
[0.0, 0.0],
[0.0, 0.0])
# Nacelle 1 plot
ax.plot([x_n, x_n+L_n],
[y_n, y_n],
[z_n, z_n])
# Nacelle 2 plot
ax.plot([x_n, x_n+L_n],
[-y_n, -y_n],
[z_n, z_n])
# Mean aerodynamic chords
ax.plot([xm_w, xm_w+cm_w],
[ym_w, ym_w],
[zm_w, zm_w],'g')
ax.plot([xm_h, xm_h+cm_h],
[ym_h, ym_h],
[zm_h, zm_h],'g')
ax.plot([xm_v, xm_v+cm_v],
[0.0, 0.0],
[zm_v, zm_v],'g')
# Center of gravity
if xcg_fwd is not None:
ax.plot([xcg_fwd, xcg_aft],
[0.0, 0.0],
[0.0, 0.0],'o')
# Neutral point
if xnp is not None:
ax.plot([xnp, xnp],
[0.0, 0.0],
[0.0, 0.0],'o')
# Create cubic bounding box to simulate equal aspect ratio
X = np.array([xr_w, xt_h+ct_h, xt_v+ct_v])
Y = np.array([-yt_w, yt_w])
Z = np.array([zt_w, zt_h, zt_v])
max_range = np.array([X.max()-X.min(), Y.max()-Y.min(), Z.max()-Z.min()]).max()
Xb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][0].flatten() + 0.5*(X.max()+X.min())
Yb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][1].flatten() + 0.5*(Y.max()+Y.min())
Zb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][2].flatten() + 0.5*(Z.max()+Z.min())
    # Comment or uncomment the two lines below to test the fake bounding box:
for xb, yb, zb in zip(Xb, Yb, Zb):
ax.plot([xb], [yb], [zb], 'w')
# Set initial point of view
ax.view_init(45,-135)
    # Save the figure before showing it, since some backends clear the canvas on show
    fig.savefig(figname, dpi=300)
    plt.show()
#----------------------------------------
def atmosphere(z, Tba=288.15):
    '''
    Returns temperature, pressure, density, and viscosity for a given
    altitude z [m]. This function uses the standard atmosphere model
    with a ground temperature of Tba.
    '''
    # Layer base altitudes (for reference only):
    # 0  11019.1  20063.1  32161.9  47350.1  50396.4
# DEFINING CONSTANTS
# Earth radius
r = 6356766
# gravity
g0 = 9.80665
# air gas constant
R = 287.05287
# layer boundaries
Ht = [0, 11000, 20000, 32000, 47000, 50000]
# temperature slope in each layer
A = [-6.5e-3, 0, 1e-3, 2.8e-3, 0]
# pressure at the base of each layer
pb = [101325, 22632, 5474.87, 868.014, 110.906]
# temperature at the base of each layer
    Tstdb = [288.15, 216.65, 216.65, 228.65, 270.65]
# temperature correction
Tb = Tba-Tstdb[0]
# air viscosity
mi0 = 18.27e-6 # [Pa s]
T0 = 291.15 # [K]
C = 120 # [K]
# geopotential altitude
H = r*z/(r+z)
# selecting layer
if H < Ht[0]:
raise ValueError('Under sealevel')
elif H <= Ht[1]:
i = 0
elif H <= Ht[2]:
i = 1
elif H <= Ht[3]:
i = 2
elif H <= Ht[4]:
i = 3
elif H <= Ht[5]:
i = 4
else:
raise ValueError('Altitude beyond model boundaries')
# Calculating temperature
T = Tstdb[i]+A[i]*(H-Ht[i])+Tb
# Calculating pressure
if A[i] == 0:
p = pb[i]*np.exp(-g0*(H-Ht[i])/R/(Tstdb[i]+Tb))
else:
p = pb[i]*(T/(Tstdb[i]+Tb))**(-g0/A[i]/R)
# Calculating density
rho = p/R/T
# Calculating viscosity with Sutherland's Formula
mi=mi0*(T0+C)/(T+C)*(T/T0)**(1.5)
return T,p,rho,mi
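# Quick sanity check (sea level, ISA): atmosphere(0.0) should return
# T = 288.15 K, p = 101325 Pa, rho ~ 1.225 kg/m3.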
#----------------------------------------
def geo_change_sweep(x,y,sweep_x,panel_length,chord_root,chord_tip):
'''
This function converts sweep computed at chord fraction x into
sweep measured at chord fraction y
(x and y should be between 0 (leading edge) and 1 (trailing edge).
'''
sweep_y=sweep_x+np.arctan((x-y)*(chord_root-chord_tip)/panel_length)
return sweep_y
#----------------------------------------
def Cf_calc(Mach, altitude, length, rugosity, k_lam, Tba=288.15):
'''
This function computes the flat plate friction coefficient
for a given Reynolds number while taking transition into account
k_lam: float -> Fraction of the length (from 0 to 1) where
transition occurs
'''
    # Atmospheric data
    T, p, rho, mi = atmosphere(altitude, Tba)
    # Flow speed
    v = np.sqrt(1.4*287*T)*Mach
    # Reynolds number at the transition point
    Re_conv = rho*v*k_lam*length/mi
    Re_rug = 38.21*(k_lam*length/rugosity)**1.053
    Re_trans = min(Re_conv, Re_rug)
    # Reynolds number at the end of the surface
    Re_conv = rho*v*length/mi
    Re_rug = 38.21*(length/rugosity)**1.053
    Re_fim = min(Re_conv, Re_rug)
    # Friction coefficients
    # Laminar, up to the transition point
    Cf1 = 1.328/np.sqrt(Re_trans)
    # Turbulent, up to the transition point
    Cf2 = 0.455/(np.log10(Re_trans)**2.58*(1+0.144*Mach**2)**0.65)
    # Turbulent, over the full length
    Cf3 = 0.455/(np.log10(Re_fim)**2.58*(1+0.144*Mach**2)**0.65)
    # Weighted average
    Cf = (Cf1 - Cf2)*k_lam + Cf3
return Cf
#----------------------------------------
def FF_surface(Mach, tcr, tct, sweep, b, cr, ct, cm, x_c_max_tc=0.4):
'''
This function computes the form factor for lifting surfaces
INPUTS
tcr: float -> Thickness/chord ratio at the root
tct: float -> Thickness/chord ratio at the tip
sweep: float -> Quarter-chord sweep angle [rad]
b: float -> Wing span (considering both sides. Double this value for vertical tails if necessary)
cr: float -> Root chord
ct: float -> Tip chord
cm: float -> Mean aerodynamic chord
x_c_max_tc: float -> Chord fraction with maximum thickness
'''
    # Average thickness-to-chord ratio
    t_c = (tcr + tct)/2
    # Sweep at the maximum-thickness position
sweep_maxtc=geo_change_sweep(0.25, x_c_max_tc, sweep, b/2, cr, ct)
# Form factor
FF = 1.34*Mach**0.18*np.cos(sweep_maxtc)**0.28*(1 + 0.6*t_c/x_c_max_tc/cm + 100*(t_c)**4)
return FF
#----------------------------------------
def standard_airplane():
'''
The standard parameters refer to the Fokker 100, but they could be redefined for
any new aircraft.
'''
airplane = {'S_w' : 93.5, # Wing area [m2]
'AR_w' : 8.43, # Wing aspect ratio
'taper_w' : 0.235, # Wing taper ratio
'sweep_w' : 17.45*np.pi/180, # Wing sweep [rad]
'dihedral_w' : 5*np.pi/180, # Wing dihedral [rad]
'xr_w' : 13.5, # Longitudinal position of the wing (with respect to the fuselage nose) [m]
'zr_w' : 0.0, # Vertical position of the wing (with respect to the fuselage nose) [m]
'tcr_w' : 0.123, # t/c of the root section of the wing
'tct_w' : 0.096, # t/c of the tip section of the wing
'Cht' : 0.94, # Horizontal tail volume coefficient
'Lc_h' : 4.83, # Non-dimensional lever of the horizontal tail (lever/wing_mac)
'AR_h' : 4.64, # HT aspect ratio
'taper_h' : 0.39, # HT taper ratio
'sweep_h' : 26*np.pi/180, # HT sweep [rad]
'dihedral_h' : 2*np.pi/180, # HT dihedral [rad]
'zr_h' : 4.359, # Vertical position of the HT [m]
'tcr_h' : 0.1, # t/c of the root section of the HT
'tct_h' : 0.1, # t/c of the tip section of the HT
'eta_h' : 1.0, # Dynamic pressure factor of the HT
'Cvt' : 0.088, # Vertical tail volume coefficient
'Lb_v' : 0.55, # Non-dimensional lever of the vertical tail (lever/wing_span)
'AR_v' : 1.27, # VT aspect ratio
'taper_v' : 0.74, # VT taper ratio
'sweep_v' : 41*np.pi/180, # VT sweep [rad]
'zr_v' : 0.0, # Vertical position of the VT [m]
'tcr_v' : 0.1, # t/c of the root section of the VT
'tct_v' : 0.1, # t/c of the tip section of the VT
'L_f' : 32.5, # Fuselage length [m]
'D_f' : 3.3, # Fuselage diameter [m]
'x_n' : 23.2, # Longitudinal position of the nacelle frontal face [m]
'y_n' : 2.6, # Lateral position of the nacelle centerline [m]
'z_n' : 0.0, # Vertical position of the nacelle centerline [m]
'L_n' : 4.3, # Nacelle length [m]
'D_n' : 1.5, # Nacelle diameter [m]
'n_engines' : 2, # Number of engines
'n_engines_under_wing' : 0, # Number of engines installed under the wing
'BPR' : 3.04, # Engine bypass ratio
'Cbase' : None, # Base engine TSFC [1/s] (use 'None' for Howe's values)
'x_nlg' : 3.6, # Longitudinal position of the nose landing gear [m]
'x_mlg' : 17.8, # Longitudinal position of the main landing gear [m]
'y_mlg' : 2.47, # Lateral position of the main landing gear [m]
'z_lg' : -2.0, # Vertical position of the landing gear [m]
'x_tailstrike' : 23.68, # Longitudinal position of critical tailstrike point [m]
'z_tailstrike' : -0.84, # Vertical position of critical tailstrike point [m]
'c_tank_c_w' : 0.4, # Fraction of the wing chord occupied by the fuel tank
'x_tank_c_w' : 0.2, # Fraction of the wing chord where fuel tank starts
'clmax_w' : 2.1, # Maximum lift coefficient of wing airfoil
'TO_flap_def' : 20*np.pi/180, # Takeoff flap deflection [rad]
'LD_flap_def' : 40*np.pi/180, # Landing flap deflection [rad]
'flap_type' : 'double slotted', # Flap type
'c_flap_c_wing' : 1.2, # chord_with_deflected_flaps/chord_with_retracted_flaps
'b_flap_b_wing' : 0.6, # Fraction of the wing span occupied by flaps
'TO_slat_def' : 0*np.pi/180, # Takeoff slat deflection [rad]
'LD_slat_def' : 0*np.pi/180, # Landing slat deflection [rad]
'slat_type' : 'slat', # Slat type
'c_slat_c_wing' : 1.05, # chord_with_deflected_slats/chord_with_retracted_slats
'b_slat_b_wing' : 0.75, # Fraction of the wing span occupied by slats
'h_ground' : 35.0*ft2m, # Distance to the ground for ground effect computation [m]
'k_exc_drag' : 0.03, # Excrescence drag factor
'altitude_takeoff' : 0.0, # Altitude for takeoff computation [m]
'distance_takeoff' : 1800.0, # Required takeoff distance [m]
'altitude_landing' : 0.0, # Altitude for landing computation [m]
                'distance_landing' : 1800.0, # Required landing distance [m] (The actual Fokker 100 distance is 1350 m, but that is very restrictive compared to the historical regression, so I kept the takeoff distance since the aircraft should take off and land on the same runway)
'MLW_frac' : 38300/41500, # Max Landing Weight / Max Takeoff Weight
'altitude_cruise' : 35000*ft2m, # Cruise altitude [m]
'Mach_cruise' : 0.73, # Cruise Mach number
'range_cruise' : 1200*nm2m, # Cruise range [m]
'loiter_time' : 45*60, # Loiter time [s]
'altitude_altcruise' : 4572, # Alternative cruise altitude [m]
'Mach_altcruise' : 0.4, # Alternative cruise Mach number
'range_altcruise' : 200*nm2m, # Alternative cruise range [m]
'W_payload' : 107*91*gravity, # Payload weight [N]
'xcg_payload' : 14.4, # Longitudinal position of the Payload center of gravity [m]
'W_crew' : 5*91*gravity, # Crew weight [N]
'xcg_crew' : 2.5, # Longitudinal position of the Crew center of gravity [m]
'rho_f' : 804, # Fuel density kg/m3 (This is Jet A-1)
}
return airplane
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import xmlrpc.client
s = xmlrpc.client.ServerProxy('http://localhost:8000')
if len(sys.argv) != 4:
    print("%s 1: min, 2: max, 3: try count" % sys.argv[0])
    sys.exit(1)
for i in range(0, int(sys.argv[3])):
    print(s.random(int(sys.argv[1]), int(sys.argv[2])))
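# For reference, a minimal server this client could talk to (a sketch; the
# real service behind localhost:8000 is not part of this dump). Run it as a
# separate process:
#
# from xmlrpc.server import SimpleXMLRPCServer
# import random
# server = SimpleXMLRPCServer(('localhost', 8000))
# server.register_function(lambda lo, hi: random.randint(lo, hi), 'random')
# server.serve_forever()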
|
def name():
return "CSVtoVector"
def description():
return "This plugin has no real use."
def category():
return "Vector"
def version():
return "Version 0.1"
def qgisMinimumVersion():
return "2.0"
def authorName():
return "Alexander Lisovenko"
def classFactory(iface):
from csv2vector import Csv2VectorPlugin
return Csv2VectorPlugin(iface)
|
from selenium import webdriver
from openpyxl import Workbook, load_workbook
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
import time
# Start a fresh workbook with a 'Vehicles' sheet for this scrape
wb = Workbook()
ws = wb.create_sheet('Vehicles')
wb.save('vehicles.xlsx')
path = "G:/Project/Gta_base_scraper/chromedriver.exe"
args = ["hide_console"]
driver = webdriver.Chrome(path, service_args=args)
driver.get("https://www.gtabase.com/grand-theft-auto-v/vehicles/")
time.sleep(10)
list_of_links = []
condition = True
while condition:
    products = driver.find_elements_by_xpath("//a[@class='product-item-link']")
    for product in products:
        list_of_links.append(product.get_attribute('href'))
    driver.execute_script("window.scrollTo(0, 500);")
    try:
        # Advance to the next page; stop once no 'Next' button is left
        WebDriverWait(driver, 20).until(
            ec.element_to_be_clickable((By.XPATH, "//a[@title='Next']//parent::li[@class='item pages-item-next']"))).click()
        time.sleep(2)
    except Exception:
        condition = False
# print(list_of_links)
for link in list_of_links:
vc_s = ''
vf_s = ''
af_s = ''
driver.get(link)
name = driver.find_element_by_xpath("//h1[@class='contentheading']").text
# print name
vc_list = driver.find_elements_by_xpath("//span[contains(text(),'Vehicle Class')]//following-sibling::span[@class='field-value']//span//a")
for vc in vc_list:
vc_s += vc.text + ', '
# print vc_s
vf_list = driver.find_elements_by_xpath("//span[contains(text(),'Vehicle Features')]//following-sibling::span[@class='field-value']//span//a")
for vf in vf_list:
vf_s += vf.text + ', '
# print vf_s
af_list = driver.find_elements_by_xpath("//span[contains(text(),'Available from')]//following-sibling::span[@class='field-value']//span//a")
for af in af_list:
af_s += af.text + ', '
# print af_s
try:
price = driver.find_element_by_xpath("//span[contains(text(),'GTA Online Price')]//following-sibling::span[@class='field-value']").text
    except Exception:
price = 0
# print price
try:
real = driver.find_element_by_xpath("//span[contains(text(),'Based on (Real Life)')]//following-sibling::span[@class='field-value']").text
    except Exception:
real = 'none'
time.sleep(2)
ws.append([name, vc_s, price, vf_s, af_s, real])
time.sleep(2)
# print price
wb.save('vehicles.xlsx')
driver.close()
|
import numpy as np
import math
from qcodes.instrument.base import Instrument
from qcodes import validators as vals
#%% Create function for two-qubit readout and spectroscopy
class MultiQ_PulseBuilder(Instrument):
def __init__(self,name,number_read_freqs,alazar,alazar_ctrl,awg,qubit,cavity,**kwargs):
super().__init__(name, **kwargs)
self.awg = awg
self.qubit = qubit
self.cavity = cavity
self.alazar = alazar
self.alazar_ctrl = alazar_ctrl
self.filename = 'Waveforms'
self.x_val = lambda: np.linspace(0,1,10)
self.SR = 2.5e9
self.number_read_freqs = number_read_freqs
self.add_parameter('cycle_time',
label='Pulse Cycle Time',
unit='s',
set_cmd= lambda x : x,
vals=vals.Numbers(0,10e-3))
self.add_parameter('int_time',
label='Integration time',
unit='s',
set_cmd= lambda x : x,
vals=vals.Numbers(0,0.2e-3))
self.add_parameter('int_delay',
label='Readout Delay',
unit='s',
set_cmd= lambda x : x,
vals=vals.Numbers(0,0.2e-3))
self.add_parameter('readout_dur',
label='Readout Duration',
unit='s',
set_cmd= lambda x : x,
vals=vals.Numbers(0,0.2e-3))
self.add_parameter('marker_offset',
label='Marker Offset',
unit='s',
set_cmd= lambda x : x,
vals=vals.Numbers(-1e-5,1e-5))
self.add_parameter('averages',
label='Averages',
unit='',
set_cmd=self.num_averages,
vals=vals.Numbers(1,1e5))
for i in range(number_read_freqs):
self.add_parameter('readout_freq_{}'.format(i+1),
label='Readout Frequency {}'.format(i+1),
unit='Hz',
set_cmd= lambda x : x,
vals=vals.Numbers(0,12.5e9))
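    # A hedged usage sketch (not part of the original driver): all instrument
    # objects below (alazar, alazar_ctrl, awg, qubit_src, cavity_src) are
    # assumed to exist already in the station.
    # pb = MultiQ_PulseBuilder('pb', 2, alazar, alazar_ctrl, awg,
    #                          qubit_src, cavity_src)
    # pb.cycle_time(100e-6)
    # pb.readout_dur(2e-6)
    # pb.marker_offset(0.0)
    # pb.averages(1000)
    # pb.set_readout_freqs([7.1e9, 7.3e9])
    # pb.MultiQ_SSB_Spectroscopy(5.0e9, 5.2e9, 101)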
def MultiQ_SSB_Spectroscopy(self, start, stop, npts, custom_name = None):
qubitf = np.mean([start,stop])
self.qubit.frequency(qubitf)
self.x_val = lambda: np.linspace(start - qubitf,stop - qubitf,npts) + self.qubit.frequency()
# Here we decide whether we want to use "default" names or a user-specified custom name
        if custom_name is None:
readout_seq_name = 'Readout_Seq'
this_seq_name = self.filename
self.awg.clearSequenceList()
self.awg.clearWaveformList()
else:
this_seq_name = 'Waveforms_' + custom_name
readout_seq_name = 'Readout_Seq_' + custom_name
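        # Number of samples per cycle, padded up to a multiple of 64 samples
        # (presumably the AWG's waveform-length granularity)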
N = int((self.cycle_time()*self.SR+64) - self.cycle_time()*self.SR%64)
N_offset = int(self.marker_offset()*self.SR)
time = np.linspace(N/self.SR-self.readout_dur(), N/self.SR, int(self.readout_dur()*self.SR), endpoint=False)
# Create triggers
ZerosMarker = np.zeros(int(N))
TriggerMarker = np.zeros(int(N))
TriggerMarker[-int(self.readout_dur()*self.SR-N_offset):-int(self.readout_dur()*self.SR-N_offset-500e-9*self.SR)] = 1
# Create Drive tones
wfms = [[], []]
for i , f in enumerate(self.x_val()-qubitf):
# SSB drive tone
#sine_signal = np.concatenate((0.5*np.sin(f*2*np.pi*time),np.zeros(N-len(time))))
#cosine_signal = np.concatenate((0.5*np.cos(f*2*np.pi*time),np.zeros(N-len(time))))
sine_signal = np.concatenate((np.zeros(N-len(time)), 0.5*np.sin(f*2*np.pi*time)))
cosine_signal = np.concatenate((np.zeros(N-len(time)), 0.5*np.cos(f*2*np.pi*time)))
if i == 0:
wfm_ch1 = np.array([cosine_signal,TriggerMarker,TriggerMarker])
wfm_ch2 = np.array([sine_signal,TriggerMarker,TriggerMarker])
else:
wfm_ch1 = np.array([cosine_signal,ZerosMarker,ZerosMarker])
wfm_ch2 = np.array([sine_signal,ZerosMarker,ZerosMarker])
wfms[0].append(wfm_ch1)
wfms[1].append(wfm_ch2)
trig_waits = [0 for _ in range(npts)]
nreps = [1 for _ in range(npts)]
event_jumps = [0 for _ in range(npts)]
event_jump_to = [0 for _ in range(npts)]
go_to = [0 for _ in range(npts)]
go_to[-1] = 1 # Make the sequence loop back to first step
seqx = self.awg.makeSEQXFile(trig_waits,
nreps,
event_jumps,
event_jump_to,
go_to,
wfms,
[1, 1],
this_seq_name, custom_name = custom_name)
self.awg.sendSEQXFile(seqx, this_seq_name + '.seqx')
self.awg.loadSEQXFile(this_seq_name + '.seqx')
# Create Readout tones
self.update_readout_freqs()
# Create sequence for readout tones SLISt:SEQuence:NEW <sequence_name>,<number_of_steps> [,<number_of_tracks>]
#self.awg.write('SLISt:SEQuence:NEW \"Readout_Seq\", {}, 2'.format(npts))
#self.awg.write('SLISt:SEQuence:STEP{}:GOTO \"Readout_Seq\", 1'.format(npts))
self.awg.write('SLISt:SEQuence:NEW \"' + readout_seq_name + '\", {}, 2'.format(npts))
self.awg.write('SLISt:SEQuence:STEP{}:GOTO \"'.format(npts) + readout_seq_name + '\", 1')
# Fill Readout waveforms into sequence
for i in range(npts):
self.awg.write('SLISt:SEQuence:STEP{}:TASSet1:WAVeform \"'.format(str(i+1)) + readout_seq_name + '\", \"Readout_I\"')
self.awg.write('SLISt:SEQuence:STEP{}:TASSet2:WAVeform \"'.format(str(i+1)) + readout_seq_name + '\", \"Readout_Q\"')
# Assign waveforms
#self.awg.ch1.setSequenceTrack(self.filename, 1)
#self.awg.ch2.setSequenceTrack(self.filename, 2)
#self.awg.ch3.setSequenceTrack('Readout_Seq', 1)
#self.awg.ch4.setSequenceTrack('Readout_Seq', 2)
self.awg.ch1.setSequenceTrack(this_seq_name, 1)
self.awg.ch2.setSequenceTrack(this_seq_name, 2)
self.awg.ch3.setSequenceTrack(readout_seq_name, 1)
self.awg.ch4.setSequenceTrack(readout_seq_name, 2)
self.awg.ch2.state(1)
self.awg.play()
# Alazar labels
for ala_chan in self.alazar_ctrl.channels[2:4]:
ala_chan.records_per_buffer(npts)
ala_chan.data.setpoint_labels = ('SSB Drive frequency (Overlap)', ala_chan.data.setpoint_labels[1])
ala_chan.data.setpoint_units = ('Hz',ala_chan.data.setpoint_units[1])
for n, ala_chan in enumerate(self.alazar_ctrl.channels[12:24]): #AK:
ala_chan.records_per_buffer(npts)
ala_chan.data.setpoint_labels = ('SSB Drive frequency (Overlap)',)
ala_chan.data.setpoint_units = ('Hz',)
# prepare channels
self.num_averages(self._averages)
def MultiQ_SSB_Spec_NoOverlap(self, start, stop, npts, pulse_length = 2e-6, custom_name = None):
qubitf = np.mean([start,stop])
self.qubit.frequency(qubitf)
self.x_val = lambda: np.linspace(start - qubitf,stop - qubitf,npts) + self.qubit.frequency()
# Here we decide whether we want to use "default" names or a user-specified custom name
        if custom_name is None:
readout_seq_name = 'Readout_Seq'
this_seq_name = self.filename
self.awg.clearSequenceList()
self.awg.clearWaveformList()
else:
this_seq_name = 'Waveforms_' + custom_name
readout_seq_name = 'Readout_Seq_' + custom_name
N = int((self.cycle_time()*self.SR+64) - self.cycle_time()*self.SR%64)
N_offset = int(self.marker_offset()*self.SR)
time = np.linspace(0, pulse_length, int(pulse_length*self.SR), endpoint=False)
# Create triggers
ZerosMarker = np.zeros(int(N))
TriggerMarker = np.zeros(int(N))
TriggerMarker[-int(self.readout_dur()*self.SR-N_offset):-int(self.readout_dur()*self.SR-N_offset-500e-9*self.SR)] = 1
# Create Drive tones
wfms = [[], []]
for i , f in enumerate(self.x_val()-qubitf):
# SSB drive tone
sine_signal = np.concatenate((np.zeros(N-len(time)-int(self.readout_dur()*self.SR)), 0.5*np.sin(f*2*np.pi*time),np.zeros(int(self.readout_dur()*self.SR))))
cosine_signal = np.concatenate((np.zeros(N-len(time)-int(self.readout_dur()*self.SR)), 0.5*np.cos(f*2*np.pi*time),np.zeros(int(self.readout_dur()*self.SR))))
if i == 0:
wfm_ch1 = np.array([cosine_signal,TriggerMarker,TriggerMarker])
wfm_ch2 = np.array([sine_signal,TriggerMarker,TriggerMarker])
else:
wfm_ch1 = np.array([cosine_signal,ZerosMarker,ZerosMarker])
wfm_ch2 = np.array([sine_signal,ZerosMarker,ZerosMarker])
wfms[0].append(wfm_ch1)
wfms[1].append(wfm_ch2)
trig_waits = [0 for _ in range(npts)]
nreps = [1 for _ in range(npts)]
event_jumps = [0 for _ in range(npts)]
event_jump_to = [0 for _ in range(npts)]
go_to = [0 for _ in range(npts)]
go_to[-1] = 1 # Make the sequence loop back to first step
seqx = self.awg.makeSEQXFile(trig_waits,
nreps,
event_jumps,
event_jump_to,
go_to,
wfms,
[1, 1],
this_seq_name, custom_name = custom_name)
self.awg.sendSEQXFile(seqx, this_seq_name + '.seqx')
self.awg.loadSEQXFile(this_seq_name + '.seqx')
# Create Readout tones
self.update_readout_freqs()
# Create sequence for readout tones SLISt:SEQuence:NEW <sequence_name>,<number_of_steps> [,<number_of_tracks>]
#self.awg.write('SLISt:SEQuence:NEW \"Readout_Seq\", {}, 2'.format(npts))
#self.awg.write('SLISt:SEQuence:STEP{}:GOTO \"Readout_Seq\", 1'.format(npts))
self.awg.write('SLISt:SEQuence:NEW \"' + readout_seq_name + '\", {}, 2'.format(npts))
self.awg.write('SLISt:SEQuence:STEP{}:GOTO \"'.format(npts) + readout_seq_name + '\", 1')
# Fill Readout waveforms into sequence
for i in range(npts):
self.awg.write('SLISt:SEQuence:STEP{}:TASSet1:WAVeform \"'.format(str(i+1)) + readout_seq_name + '\", \"Readout_I\"')
self.awg.write('SLISt:SEQuence:STEP{}:TASSet2:WAVeform \"'.format(str(i+1)) + readout_seq_name + '\", \"Readout_Q\"')
# Assign waveforms
#self.awg.ch1.setSequenceTrack(self.filename, 1)
#self.awg.ch2.setSequenceTrack(self.filename, 2)
#self.awg.ch3.setSequenceTrack('Readout_Seq', 1)
#self.awg.ch4.setSequenceTrack('Readout_Seq', 2)
self.awg.ch1.setSequenceTrack(this_seq_name, 1)
self.awg.ch2.setSequenceTrack(this_seq_name, 2)
self.awg.ch3.setSequenceTrack(readout_seq_name, 1)
self.awg.ch4.setSequenceTrack(readout_seq_name, 2)
self.awg.ch2.state(1)
self.awg.play()
# Alazar labels
for ala_chan in self.alazar_ctrl.channels[2:4]:
ala_chan.records_per_buffer(npts)
ala_chan.data.setpoint_labels = ('SSB Drive frequency (Non-overlap)',ala_chan.data.setpoint_labels[1])
ala_chan.data.setpoint_units = ('Hz',ala_chan.data.setpoint_units[1])
for n, ala_chan in enumerate(self.alazar_ctrl.channels[12:24]):
ala_chan.records_per_buffer(npts)
ala_chan.data.setpoint_labels = ('SSB Drive frequency (Non-overlap)',)
ala_chan.data.setpoint_units = ('Hz',)
# prepare channels
self.num_averages(self._averages)
def MultiQ_SSB_Two_Seq_Pi(self, npts, carrier, freq1, dur1, freq2, dur2):
        # Note on the definition used by the spectroscopy methods above:
        #   self.x_val = lambda: np.linspace(start - qubitf, stop - qubitf, npts) + self.qubit.frequency()
        # It is a lambda (presumably) so that the x values track the RF source's
        # live setting: the fixed detunings are stored and the current carrier
        # frequency is added back each time x_val() is evaluated.
        # Set the carrier frequency of the `qubit` RF source and create the `x_val` array for looping through
self.qubit.frequency(carrier)
self.x_val = lambda: np.arange(npts)
# Clear AWG
self.awg.clearSequenceList()
self.awg.clearWaveformList()
# Determine number of points in a waveform
wf_npts = int((self.cycle_time()*self.SR+64) - self.cycle_time()*self.SR%64)
        N_offset = int(self.marker_offset() * self.SR)  # Marker offset in samples; shifts the readout trigger window
# Create triggers
ZerosMarker = np.zeros(int(wf_npts))
TriggerMarker = np.zeros(int(wf_npts))
TriggerMarker[-int(self.readout_dur()*self.SR - N_offset):-int(self.readout_dur()*self.SR-N_offset-500e-9*self.SR)] = 1
# Create Drive tones
wfms = [[], []]
freq1_mod = freq1 - carrier
freq2_mod = freq2 - carrier
freq1_time_array = np.linspace(0, dur1, int(dur1 * self.SR), endpoint=False)
freq2_time_array = np.linspace(0, dur2, int(dur2 * self.SR), endpoint=False)
        # for i , f in enumerate(self.x_val() - qubitf):
        # The loop below came from the poisoning code; no loop is needed here at all.
        # for i in np.arange(2):
wait_init = np.zeros(wf_npts - len(freq1_time_array) - len(freq2_time_array) - int(self.readout_dur() * self.SR))
pipulse1_sin = 0.5 * np.sin(freq1_mod * 2 * np.pi * freq1_time_array)
pipulse2_sin = 0.5 * np.sin(freq2_mod * 2 * np.pi * freq2_time_array)
pipulse1_cos = 0.5 * np.cos(freq1_mod * 2 * np.pi * freq1_time_array)
pipulse2_cos = 0.5 * np.cos(freq2_mod * 2 * np.pi * freq2_time_array)
wait_readout = np.zeros(int(self.readout_dur() * self.SR))
sine_signal = np.concatenate((wait_init, pipulse1_sin, pipulse2_sin, wait_readout))
cosine_signal = np.concatenate((wait_init, pipulse1_cos, pipulse2_cos, wait_readout))
wfm_ch1 = np.array([cosine_signal,TriggerMarker,TriggerMarker])
wfm_ch2 = np.array([sine_signal,TriggerMarker,TriggerMarker])
# Previously, we had double entries below, i.e.
# wfms = [[wfm_ch1, wfm_ch1], [wfm_ch2, wfm_ch2]]
        # so that one of them could loop (npts-1) times. Now that we are using `seq_mode = False` for these "histogram" measurements,
# I think it should be OK to simply loop one element npts times.
# Or actually, do we even need to loop?
# We can just let the sequence repeat ... With `seq_mode = False`, the measurement stops because, IIUC, the `records_per_buffer` has been met ...
wfms = [[wfm_ch1], [wfm_ch2]]
trig_waits = [0]
nreps = [1]
event_jumps = [0]
event_jump_to = [0]
go_to = [1]
# Setting the names for the AWG sequences
# This follows a modification I made previously, to enable storing waveforms on the AWG.
# But here, we stick with the standard sequence name, which is the value of `self.filename`
this_seq_name = self.filename
readout_seq_name = 'Readout_Seq'
        # The [1, 1] below is the `amplitudes` argument of `makeSEQXFile`.
# My impression is that we need [1, 1], not [1] because we are using both Ch1 and Ch2,
# i.e., our Sequence has two tracks (and we play Track 1 on Ch1 and Track 2 on Ch2).
seqx = self.awg.makeSEQXFile(trig_waits,
nreps,
event_jumps,
event_jump_to,
go_to,
wfms,
[1, 1],
this_seq_name)
self.awg.sendSEQXFile(seqx, this_seq_name + '.seqx')
self.awg.loadSEQXFile(this_seq_name + '.seqx')
# Create Readout tones
self.update_readout_freqs()
# Create sequence for readout tones SLISt:SEQuence:NEW <sequence_name>,<number_of_steps> [,<number_of_tracks>]
self.awg.write('SLISt:SEQuence:NEW \"' + readout_seq_name + '\", {}, 2'.format(1))
        # The line below says that, for our one-waveform-long sequence, the instrument should go (back) to Waveform 1 after Waveform 1
self.awg.write('SLISt:SEQuence:STEP{}:GOTO \"'.format(1) + readout_seq_name + '\", 1')
# Fill Readout waveforms into sequence
        # Here we only put one readout waveform (not 2, or npts many) into the
        # sequence; they are all the same either way.
self.awg.write('SLISt:SEQuence:STEP{}:TASSet1:WAVeform \"'.format(str(1)) + readout_seq_name + '\", \"Readout_I\"')
self.awg.write('SLISt:SEQuence:STEP{}:TASSet2:WAVeform \"'.format(str(1)) + readout_seq_name + '\", \"Readout_Q\"')
# Assign waveforms
self.awg.ch1.setSequenceTrack(this_seq_name, 1)
self.awg.ch2.setSequenceTrack(this_seq_name, 2)
self.awg.ch3.setSequenceTrack(readout_seq_name, 1)
self.awg.ch4.setSequenceTrack(readout_seq_name, 2)
self.awg.ch2.state(1)
self.awg.play()
# Alazar labels
for ala_chan in self.alazar_ctrl.channels[2:4]:
ala_chan.records_per_buffer(npts)
ala_chan.data.setpoint_labels = ('Records',ala_chan.data.setpoint_labels[1])
ala_chan.data.setpoint_units = ('',ala_chan.data.setpoint_units[1])
for n, ala_chan in enumerate(self.alazar_ctrl.channels[12:24]):
ala_chan.records_per_buffer(npts)
ala_chan.data.setpoint_labels = ('Records',)
ala_chan.data.setpoint_units = ('',)
# Prepare channels
self.num_averages(self._averages)
def MultiQ_Rabi(self, start, stop, npts, custom_name = None):
self.x_val = lambda: np.linspace(start ,stop ,npts)
self.awg.ch2.state(0)
# Here we decide whether we want to use "default" names or a user-specified custom name
        if custom_name is None:
readout_seq_name = 'Readout_Seq'
this_seq_name = self.filename
self.awg.clearSequenceList()
self.awg.clearWaveformList()
else:
this_seq_name = 'Waveforms_' + custom_name
readout_seq_name = 'Readout_Seq_' + custom_name
N = int((self.cycle_time()*self.SR+64) - self.cycle_time()*self.SR%64)
N_offset = int(self.marker_offset()*self.SR)
# Create triggers
ZerosMarker = np.zeros(int(N))
TriggerMarker = np.zeros(int(N))
TriggerMarker[-int(self.readout_dur()*self.SR-N_offset):-int(self.readout_dur()*self.SR-N_offset-500e-9*self.SR)] = 1
# Create Drive tones
wfms = [[]]
for i , t in enumerate(self.x_val()):
# SSB drive tone
drive = np.zeros(int(N))
drive[-int((self.readout_dur() + t)*self.SR):-int(self.readout_dur()*self.SR)] = 0.5
if i == 0:
wfm_ch1 = np.array([drive,TriggerMarker,TriggerMarker])
else:
wfm_ch1 = np.array([drive,ZerosMarker,ZerosMarker])
wfms[0].append(wfm_ch1)
trig_waits = [0 for _ in range(npts)]
nreps = [1 for _ in range(npts)]
event_jumps = [0 for _ in range(npts)]
event_jump_to = [0 for _ in range(npts)]
go_to = [0 for _ in range(npts)]
go_to[-1] = 1 # Make the sequence loop back to first step
seqx = self.awg.makeSEQXFile(trig_waits,
nreps,
event_jumps,
event_jump_to,
go_to,
wfms,
[1, 1],
this_seq_name, custom_name = custom_name)
self.awg.sendSEQXFile(seqx, this_seq_name + '.seqx')
self.awg.loadSEQXFile(this_seq_name + '.seqx')
# Create Readout tones
self.update_readout_freqs()
# Create sequence for readout tones SLISt:SEQuence:NEW <sequence_name>,<number_of_steps> [,<number_of_tracks>]
#self.awg.write('SLISt:SEQuence:NEW \"Readout_Seq\", {}, 2'.format(npts))
#self.awg.write('SLISt:SEQuence:STEP{}:GOTO \"Readout_Seq\", 1'.format(npts))
self.awg.write('SLISt:SEQuence:NEW \"' + readout_seq_name + '\", {}, 2'.format(npts))
self.awg.write('SLISt:SEQuence:STEP{}:GOTO \"'.format(npts) + readout_seq_name + '\", 1')
# Fill Readout waveforms into sequence
for i in range(npts):
self.awg.write('SLISt:SEQuence:STEP{}:TASSet1:WAVeform \"'.format(str(i+1)) + readout_seq_name + '\", \"Readout_I\"')
self.awg.write('SLISt:SEQuence:STEP{}:TASSet2:WAVeform \"'.format(str(i+1)) + readout_seq_name + '\", \"Readout_Q\"')
# Assign waveforms
self.awg.ch1.setSequenceTrack(this_seq_name, 1)
self.awg.ch3.setSequenceTrack(readout_seq_name, 1)
self.awg.ch4.setSequenceTrack(readout_seq_name, 2)
self.awg.play()
# Alazar labels
for ala_chan in self.alazar_ctrl.channels[2:4]:
ala_chan.records_per_buffer(npts)
ala_chan.data.setpoint_labels = ('Drive time',ala_chan.data.setpoint_labels[1])
ala_chan.data.setpoint_units = ('s',ala_chan.data.setpoint_units[1])
for n, ala_chan in enumerate(self.alazar_ctrl.channels[12:24]):
ala_chan.records_per_buffer(npts)
ala_chan.data.setpoint_labels = ('Drive time',)
ala_chan.data.setpoint_units = ('s',)
# prepare channels
self.num_averages(self._averages)
def MultiQ_Lifetime(self, start, stop, npts, pipulse = 10e-9):
self.x_val = lambda: np.linspace(start ,stop ,npts)
# Clear AWG
self.awg.ch2.state(0)
self.awg.clearSequenceList()
self.awg.clearWaveformList()
N = int((self.cycle_time()*self.SR+64) - self.cycle_time()*self.SR%64)
N_offset = int(self.marker_offset()*self.SR)
# Create triggers
ZerosMarker = np.zeros(int(N))
TriggerMarker = np.zeros(int(N))
TriggerMarker[-int(self.readout_dur()*self.SR-N_offset):-int(self.readout_dur()*self.SR-N_offset-500e-9*self.SR)] = 1
# Create Drive tones
wfms = [[]]
for i , t in enumerate(self.x_val()):
# SSB drive tone
drive = np.zeros(int(N))
drive[-int((self.readout_dur() + t + pipulse)*self.SR):-int((self.readout_dur() + t)*self.SR)] = 0.5
if i == 0:
wfm_ch1 = np.array([drive,TriggerMarker,TriggerMarker])
else:
wfm_ch1 = np.array([drive,ZerosMarker,ZerosMarker])
wfms[0].append(wfm_ch1)
trig_waits = [0 for _ in range(npts)]
nreps = [1 for _ in range(npts)]
event_jumps = [0 for _ in range(npts)]
event_jump_to = [0 for _ in range(npts)]
go_to = [0 for _ in range(npts)]
go_to[-1] = 1 # Make the sequence loop back to first step
seqx = self.awg.makeSEQXFile(trig_waits,
nreps,
event_jumps,
event_jump_to,
go_to,
wfms,
[1],
self.filename)
self.awg.sendSEQXFile(seqx, self.filename + '.seqx')
self.awg.loadSEQXFile(self.filename + '.seqx')
# Create Readout tones
self.update_readout_freqs()
# Create sequence for readout tones SLISt:SEQuence:NEW <sequence_name>,<number_of_steps> [,<number_of_tracks>]
self.awg.write('SLISt:SEQuence:NEW \"Readout_Seq\", {}, 2'.format(npts))
self.awg.write('SLISt:SEQuence:STEP{}:GOTO \"Readout_Seq\", 1'.format(npts))
# Fill Readout waveforms into sequence
for i in range(npts):
self.awg.write('SLISt:SEQuence:STEP{}:TASSet1:WAVeform \"Readout_Seq\", \"Readout_I\"'.format(str(i+1)))
self.awg.write('SLISt:SEQuence:STEP{}:TASSet2:WAVeform \"Readout_Seq\", \"Readout_Q\"'.format(str(i+1)))
# Assign waveforms
self.awg.ch1.setSequenceTrack(self.filename, 1)
self.awg.ch3.setSequenceTrack('Readout_Seq', 1)
self.awg.ch4.setSequenceTrack('Readout_Seq', 2)
self.awg.play()
        # Alazar labels
for ala_chan in self.alazar_ctrl.channels[2:4]:
ala_chan.records_per_buffer(npts)
ala_chan.data.setpoint_labels = ('Wait time',ala_chan.data.setpoint_labels[1])
ala_chan.data.setpoint_units = ('s',ala_chan.data.setpoint_units[1])
for n, ala_chan in enumerate(self.alazar_ctrl.channels[12:24]):
ala_chan.records_per_buffer(npts)
ala_chan.data.setpoint_labels = ('Wait time',)
ala_chan.data.setpoint_units = ('s',)
# prepare channels
self.num_averages(self._averages)
def MultiQ_Ramsey(self, start, stop, npts, pi_half_pulse = 5e-9):
self.x_val = lambda: np.linspace(start ,stop ,npts)
# Clear AWG
self.awg.ch2.state(0)
self.awg.clearSequenceList()
self.awg.clearWaveformList()
N = int((self.cycle_time()*self.SR+64) - self.cycle_time()*self.SR%64)
N_offset = int(self.marker_offset()*self.SR)
# Create triggers
ZerosMarker = np.zeros(int(N))
TriggerMarker = np.zeros(int(N))
TriggerMarker[-int(self.readout_dur()*self.SR-N_offset):-int(self.readout_dur()*self.SR-N_offset-500e-9*self.SR)] = 1
# Create Drive tones
wfms = [[]]
for i , t in enumerate(self.x_val()):
# SSB drive tone
drive = np.zeros(int(N))
drive[-int((self.readout_dur() + pi_half_pulse)*self.SR):-int(self.readout_dur()*self.SR)] = 0.5
drive[-int((self.readout_dur() + t + 2*pi_half_pulse)*self.SR):-int((self.readout_dur() + t + pi_half_pulse)*self.SR)] = 0.5
if i == 0:
wfm_ch1 = np.array([drive,TriggerMarker,TriggerMarker])
else:
wfm_ch1 = np.array([drive,ZerosMarker,ZerosMarker])
wfms[0].append(wfm_ch1)
trig_waits = [0 for _ in range(npts)]
nreps = [1 for _ in range(npts)]
event_jumps = [0 for _ in range(npts)]
event_jump_to = [0 for _ in range(npts)]
go_to = [0 for _ in range(npts)]
go_to[-1] = 1 # Make the sequence loop back to first step
seqx = self.awg.makeSEQXFile(trig_waits,
nreps,
event_jumps,
event_jump_to,
go_to,
wfms,
[1],
self.filename)
self.awg.sendSEQXFile(seqx, self.filename + '.seqx')
self.awg.loadSEQXFile(self.filename + '.seqx')
# Create Readout tones
self.update_readout_freqs()
# Create sequence for readout tones SLISt:SEQuence:NEW <sequence_name>,<number_of_steps> [,<number_of_tracks>]
self.awg.write('SLISt:SEQuence:NEW \"Readout_Seq\", {}, 2'.format(npts))
self.awg.write('SLISt:SEQuence:STEP{}:GOTO \"Readout_Seq\", 1'.format(npts))
# Fill Readout waveforms into sequence
for i in range(npts):
self.awg.write('SLISt:SEQuence:STEP{}:TASSet1:WAVeform \"Readout_Seq\", \"Readout_I\"'.format(str(i+1)))
self.awg.write('SLISt:SEQuence:STEP{}:TASSet2:WAVeform \"Readout_Seq\", \"Readout_Q\"'.format(str(i+1)))
# Assign waveforms
self.awg.ch1.setSequenceTrack(self.filename, 1)
self.awg.ch3.setSequenceTrack('Readout_Seq', 1)
self.awg.ch4.setSequenceTrack('Readout_Seq', 2)
self.awg.play()
        # Alazar labels
for ala_chan in self.alazar_ctrl.channels[2:4]:
ala_chan.records_per_buffer(npts)
ala_chan.data.setpoint_labels = ('Wait time',ala_chan.data.setpoint_labels[1])
ala_chan.data.setpoint_units = ('s',ala_chan.data.setpoint_units[1])
for n, ala_chan in enumerate(self.alazar_ctrl.channels[12:24]):
ala_chan.records_per_buffer(npts)
ala_chan.data.setpoint_labels = ('Wait time',)
ala_chan.data.setpoint_units = ('s',)
# prepare channels
self.num_averages(self._averages)
def MultiQ_Lambda(self, start, stop, npts, qubitf, delta, Delta, cont_drive_enabled = False, pidrive = 500e-9, wiggles = True, keep_old_seqs = False, histogram = False):
if histogram == False:
self.x_val = lambda: np.linspace(start, stop, npts)
else:
self.x_val = lambda: np.arange(npts)
self.awg.ch2.state(0) # Do not need Q modulation for `histogram` mode
self.qubit.frequency(qubitf)
TAU_DRIVE_AMPLITUDE = 0.1
PIPULSE_AMPLITUDE = 0.1
        if not wiggles:
            TAU_DRIVE_AMPLITUDE = 0
            PIPULSE_AMPLITUDE = 0.5
if keep_old_seqs == False:
# Clear AWG
self.awg.clearSequenceList()
self.awg.clearWaveformList()
N = int((self.cycle_time()*self.SR+64) - self.cycle_time()*self.SR%64)
N_offset = int(self.marker_offset()*self.SR)
# Create triggers
ZerosMarker = np.zeros(int(N))
TriggerMarker = np.zeros(int(N))
TriggerMarker[-int(self.readout_dur()*self.SR-N_offset):-int(self.readout_dur()*self.SR-N_offset-500e-9*self.SR)] = 1
# Create Drive tones
wfms = [[], []]
if cont_drive_enabled == False:
if histogram == False:
for i, t in enumerate(self.x_val()):
# SSB drive tone
sine_drive = np.zeros(int(N))
cosine_drive = np.zeros(int(N))
temp = np.linspace(0,t,int(self.SR*t))
l = len(temp)
cosine_drive[-l-int(self.readout_dur()*self.SR):-int(self.readout_dur()*self.SR)] = TAU_DRIVE_AMPLITUDE * np.cos(delta*2*np.pi*temp)+ TAU_DRIVE_AMPLITUDE * np.cos((Delta+delta)*2*np.pi*temp)
cosine_drive[-(int((self.readout_dur() + t+pidrive)*self.SR)):-int((self.readout_dur() + t)*self.SR)] = PIPULSE_AMPLITUDE
sine_drive[-l-int(self.readout_dur()*self.SR):-int(self.readout_dur()*self.SR)] = TAU_DRIVE_AMPLITUDE * np.sin(delta*2*np.pi*temp)+ TAU_DRIVE_AMPLITUDE * np.sin((Delta+delta)*2*np.pi*temp)
sine_drive[-(int((self.readout_dur() + t+pidrive)*self.SR)):-int((self.readout_dur() + t)*self.SR)] = 0
if i == 0:
wfm_ch1 = np.array([cosine_drive, TriggerMarker, TriggerMarker])
wfm_ch2 = np.array([sine_drive, TriggerMarker, TriggerMarker])
else:
wfm_ch1 = np.array([cosine_drive, ZerosMarker, ZerosMarker])
wfm_ch2 = np.array([sine_drive, ZerosMarker, ZerosMarker])
wfms[0].append(wfm_ch1)
wfms[1].append(wfm_ch2)
else:
                # This is the `histogram = True` case: the idea is simply to read
                # out after a pi pulse, so l = t = 0 here and the second channel
                # is ignored (we do not need Q modulation).
                wfms = [[]]
                for i in np.arange(2):
                    # Pi pulse placed immediately before the readout window; the
                    # tau-drive segment of the general case collapses to a
                    # zero-length slice. Both sequence steps trigger the readout.
                    drive = np.zeros(int(N))
                    drive[-(int((self.readout_dur() + pidrive)*self.SR)):-int((self.readout_dur())*self.SR)] = PIPULSE_AMPLITUDE
                    wfm_ch1 = np.array([drive, TriggerMarker, TriggerMarker])
wfms[0].append(wfm_ch1)
else:
for i, t in enumerate(self.x_val()):
# SSB drive tone
sine_drive = np.zeros(int(N))
cosine_drive = np.zeros(int(N))
temp = np.linspace(0,t,int(self.SR*t))
l = len(temp)
sine_drive[-l-int(self.readout_dur()*self.SR):-int(self.readout_dur()*self.SR)] = PIPULSE_AMPLITUDE + TAU_DRIVE_AMPLITUDE * np.sin(delta*2*np.pi*temp)+ TAU_DRIVE_AMPLITUDE * np.sin((Delta+delta)*2*np.pi*temp)
cosine_drive[-l-int(self.readout_dur()*self.SR):-int(self.readout_dur()*self.SR)] = PIPULSE_AMPLITUDE + TAU_DRIVE_AMPLITUDE * np.cos(delta*2*np.pi*temp)+ TAU_DRIVE_AMPLITUDE * np.cos((Delta+delta)*2*np.pi*temp)
if i == 0:
wfm_ch1 = np.array([cosine_drive, TriggerMarker, TriggerMarker])
wfm_ch2 = np.array([sine_drive, TriggerMarker, TriggerMarker])
else:
wfm_ch1 = np.array([cosine_drive, ZerosMarker, ZerosMarker])
wfm_ch2 = np.array([sine_drive, ZerosMarker, ZerosMarker])
wfms[0].append(wfm_ch1)
wfms[1].append(wfm_ch2)
if histogram == False:
trig_waits = [0 for _ in range(npts)]
nreps = [1 for _ in range(npts)]
event_jumps = [0 for _ in range(npts)]
event_jump_to = [0 for _ in range(npts)]
go_to = [0 for _ in range(npts)]
go_to[-1] = 1 # Make the sequence loop back to first step
else:
trig_waits = [0, 0]
nreps = [1, npts - 1]
event_jumps = [0, 0]
event_jump_to = [0, 0]
go_to = [0, 1]
this_seq_name = self.filename
if histogram == False:
amplitudes = [1, 1]
else:
amplitudes = [1]
seqx = self.awg.makeSEQXFile(trig_waits,
nreps,
event_jumps,
event_jump_to,
go_to,
wfms,
amplitudes,
this_seq_name)
self.awg.sendSEQXFile(seqx, this_seq_name + '.seqx')
self.awg.loadSEQXFile(this_seq_name + '.seqx')
# Create Readout tones
self.update_readout_freqs()
# Create sequence for readout tones SLISt:SEQuence:NEW <sequence_name>,<number_of_steps> [,<number_of_tracks>]
# OE: Changing npts -> 2 below, not sure if correct
self.awg.write('SLISt:SEQuence:NEW \"Readout_Seq\", {}, 2'.format(2))
self.awg.write('SLISt:SEQuence:STEP{}:GOTO \"Readout_Seq\", 1'.format(2))
# Fill Readout waveforms into sequence
for i in range(2):
self.awg.write('SLISt:SEQuence:STEP{}:TASSet1:WAVeform \"Readout_Seq\", \"Readout_I\"'.format(str(i+1)))
self.awg.write('SLISt:SEQuence:STEP{}:TASSet2:WAVeform \"Readout_Seq\", \"Readout_Q\"'.format(str(i+1)))
# Assign waveforms
self.awg.ch1.setSequenceTrack(this_seq_name, 1)
if histogram == False:
self.awg.ch2.setSequenceTrack(this_seq_name, 2)
self.awg.ch2.state(1)
self.awg.ch3.setSequenceTrack('Readout_Seq', 1)
self.awg.ch4.setSequenceTrack('Readout_Seq', 2)
self.awg.play()
# Alazar labels
if histogram == False:
for ala_chan in self.alazar_ctrl.channels[2:4]:
ala_chan.records_per_buffer(npts)
ala_chan.data.setpoint_labels = ('Lambda Drive time', ala_chan.data.setpoint_labels[1])
ala_chan.data.setpoint_units = ('s',ala_chan.data.setpoint_units[1])
for n, ala_chan in enumerate(self.alazar_ctrl.channels[12:24]):
ala_chan.records_per_buffer(npts)
ala_chan.data.setpoint_labels = ('Lambda Drive time',)
ala_chan.data.setpoint_units = ('s',)
else:
for ala_chan in self.alazar_ctrl.channels[2:4]:
ala_chan.records_per_buffer(npts)
ala_chan.data.setpoint_labels = ('Records', ala_chan.data.setpoint_labels[1])
ala_chan.data.setpoint_units = ('',ala_chan.data.setpoint_units[1])
for n, ala_chan in enumerate(self.alazar_ctrl.channels[12:24]):
ala_chan.records_per_buffer(npts)
ala_chan.data.setpoint_labels = ('Records',)
ala_chan.data.setpoint_units = ('',)
# prepare channels
self.num_averages(self._averages)
def MultiQ_Poisoning_Loop(self, npts, keep_old_seqs = False):
self.x_val = lambda: np.arange(npts)
self.awg.ch2.state(0)
# Clear AWG
if keep_old_seqs == False:
self.awg.clearSequenceList()
self.awg.clearWaveformList()
# Pad the record length up to the AWG's 64-sample waveform granularity
N = int((self.cycle_time()*self.SR+64) - self.cycle_time()*self.SR%64)
N_offset = int(self.marker_offset()*self.SR)
# Create triggers
ZerosMarker = np.zeros(int(N))
TriggerMarker = np.zeros(int(N))
TriggerMarker[-int(self.readout_dur()*self.SR-N_offset):-int(self.readout_dur()*self.SR-N_offset-500e-9*self.SR)] = 1
# Create Drive tones
wfms = [[]]
for i in np.arange(2):
# SSB drive tone
drive = np.zeros(int(N))
# Both steps carry the trigger, so no i == 0 special case is needed.
wfm_ch1 = np.array([drive, TriggerMarker, TriggerMarker])
wfms[0].append(wfm_ch1)
trig_waits = [0, 0]
nreps = [1, npts - 1]
event_jumps = [0, 0]
event_jump_to = [0, 0]
go_to = [0, 1]
seqx = self.awg.makeSEQXFile(trig_waits,
nreps,
event_jumps,
event_jump_to,
go_to,
wfms,
[1],
self.filename)
self.awg.sendSEQXFile(seqx, self.filename + '.seqx')
self.awg.loadSEQXFile(self.filename + '.seqx')
# Create Readout tones
self.update_readout_freqs()
# Create sequence for readout tones SLISt:SEQuence:NEW <sequence_name>,<number_of_steps> [,<number_of_tracks>]
# OE: Changing npts -> 2 below, not sure if correct
# --> This means that the readout waveforms loop within a pair; we do not include `npts` waveforms in the Readout_Seq.
# Since they all look the same, this should be fine. In fact, they might not need to be uploaded in the first place.
self.awg.write('SLISt:SEQuence:NEW \"Readout_Seq\", {}, 2'.format(2))
self.awg.write('SLISt:SEQuence:STEP{}:GOTO \"Readout_Seq\", 1'.format(2))
# Fill Readout waveforms into sequence
for i in range(2):
self.awg.write('SLISt:SEQuence:STEP{}:TASSet1:WAVeform \"Readout_Seq\", \"Readout_I\"'.format(str(i+1)))
self.awg.write('SLISt:SEQuence:STEP{}:TASSet2:WAVeform \"Readout_Seq\", \"Readout_Q\"'.format(str(i+1)))
# Assign waveforms
self.awg.ch1.setSequenceTrack(self.filename, 1)
self.awg.ch3.setSequenceTrack('Readout_Seq', 1)
self.awg.ch4.setSequenceTrack('Readout_Seq', 2)
self.awg.play()
# Alazar labels
for ala_chan in self.alazar_ctrl.channels[2:4]:
ala_chan.records_per_buffer(npts)
ala_chan.data.setpoint_labels = ('Records',ala_chan.data.setpoint_labels[1])
ala_chan.data.setpoint_units = ('',ala_chan.data.setpoint_units[1])
for n, ala_chan in enumerate(self.alazar_ctrl.channels[12:24]):
ala_chan.records_per_buffer(npts)
ala_chan.data.setpoint_labels = ('Records',)
ala_chan.data.setpoint_units = ('',)
# prepare channels
self.num_averages(self._averages)
def set_readout_freqs(self,readout_frequencies):
if len(readout_frequencies) != self.number_read_freqs:
raise ValueError('Number of given readout frequencies has to '
                 'be {}.'.format(self.number_read_freqs))
for i in range(self.number_read_freqs):
getattr(self,'readout_freq_{}'.format(i+1))(readout_frequencies[i])
self.update_readout_freqs()
def get_readout_freqs(self):
ret = []
for i in range(self.number_read_freqs):
ret.append(getattr(self,'readout_freq_{}'.format(i+1))())
return np.array(ret)
def update_readout_freqs(self):
readout_freqs = self.get_readout_freqs()
N = int((self.cycle_time()*self.SR+64) - self.cycle_time()*self.SR%64)
N_offset = int(self.marker_offset()*self.SR)
# Create trigger
TriggerMarker = np.zeros(N)
TriggerMarker[-int(self.readout_dur()*self.SR-N_offset):-int(self.readout_dur()*self.SR-N_offset-500e-9*self.SR)] = 1
# Readout tones
time = np.linspace(0, self.readout_dur(), int(self.readout_dur()*self.SR), endpoint=False)
cosine_readout = np.zeros(N)
sine_readout = np.zeros(N)
for fr in readout_freqs:
fi = fr - self.cavity.frequency()
cosine_readout += (0.5/len(readout_freqs))*np.concatenate((np.zeros(N-len(time)),np.cos(fi*2*np.pi*time)))
sine_readout += (0.5/len(readout_freqs))*np.concatenate((np.zeros(N-len(time)),np.sin(fi*2*np.pi*time)))
wfm_ch3 = np.array([cosine_readout,TriggerMarker,TriggerMarker])
wfm_ch4 = np.array([sine_readout,TriggerMarker,TriggerMarker])
state = self.awg.run_state()
self.awg.stop()
wfm_ch3_file = self.awg.makeWFMXFile(wfm_ch3, 1)
wfm_ch4_file = self.awg.makeWFMXFile(wfm_ch4, 1)
self.awg.sendWFMXFile(wfm_ch3_file, 'Readout_I.wfmx')
self.awg.sendWFMXFile(wfm_ch4_file, 'Readout_Q.wfmx')
self.awg.loadWFMXFile('Readout_I.wfmx')
self.awg.loadWFMXFile('Readout_Q.wfmx')
# Only start play if running to begin with
if state == 'Running':
self.awg.play()
# Set demod frequencies
for ala_chan in self.alazar_ctrl.channels[2:4]:
ala_chan.demod_freq(abs(readout_freqs[0]-self.cavity.frequency()))
for n, ala_chan in enumerate(self.alazar_ctrl.channels[4:12]):
try:
ala_chan.demod_freq(abs(readout_freqs[n//2]-self.cavity.frequency()))
except Exception:
    pass  # this channel may not expose a demod frequency at this index
for n, ala_chan in enumerate(self.alazar_ctrl.channels[12:24]):#changed to 12:24 from 12:20 to add rec5 and rec6, AK 8/10
try:
ala_chan.demod_freq(abs(readout_freqs[n//2]-self.cavity.frequency()))
except Exception:
    pass  # this channel may not expose a demod frequency at this index
def num_averages(self,value):
self._averages = value
self.alazar_ctrl.int_time(self.int_time())
self.alazar_ctrl.int_delay(self.int_delay())
for ala_chan in self.alazar_ctrl.channels[2:4]:
ala_chan.num_averages(value)
ala_chan.prepare_channel()
ala_chan.data.setpoints = (tuple(self.x_val()),ala_chan.data.setpoints[1])
for ala_chan in self.alazar_ctrl.channels[4:12]:
ala_chan.num_averages(value)
ala_chan.prepare_channel()
for i, ala_chan in enumerate(self.alazar_ctrl.channels[12:24]): #Changed to 24, AK 8/10
if i != 8:
ala_chan.num_averages(value)
ala_chan.prepare_channel()
ala_chan.data.setpoints = (tuple(self.x_val()),)
# New functions adding a duty-cycle measurement and partly overlapping spec and Rabi sequences. Albert Hertel, fall 2020 to January 2021.
def MultiQ_Duty_cycle(self, tmin, tmax, npts):
# Define array with different drive times
self.x_val = lambda: np.geomspace(tmin ,tmax , num = npts)
# I do not understand why, but we have to add this to the qubit drive to sync drive and readout
offset = self.cycle_time()-self.readout_dur()
# Calculate how many duty cycles one can fit into readout_dur
M = np.zeros(len(self.x_val()), dtype = int)
for k, l in enumerate(self.x_val()):
M[k] = math.floor(self.readout_dur()/l)
# Clear AWG
self.awg.ch2.state(0)
self.awg.clearSequenceList()
self.awg.clearWaveformList()
N = int((self.cycle_time()*self.SR+64) - self.cycle_time()*self.SR%64)
N_offset = int(self.marker_offset()*self.SR)
# Create triggers
ZerosMarker = np.zeros(int(N))
TriggerMarker = np.zeros(int(N))
TriggerMarker[-int(self.readout_dur()*self.SR-N_offset):-int(self.readout_dur()*self.SR-N_offset-500e-9*self.SR)] = 1
# Create Drive tones
wfms = [[]]
for i , t in enumerate(self.x_val()):
# SSB drive tone
drive = np.zeros(int(N))
for j in range(M[i]):
drive[int((j*t+offset)*self.SR):int((j*t+t/2+offset)*self.SR)] = 0.5
# print(np.where(drive!=0))
if i == 0:
wfm_ch1 = np.array([drive,TriggerMarker,TriggerMarker])
else:
wfm_ch1 = np.array([drive,ZerosMarker,ZerosMarker])
wfms[0].append(wfm_ch1)
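# Note (inferred from the indices above): each record plays M[i] half-duty
# square pulses of period t inside the readout window; `offset` aligns the
# first pulse with the start of the readout window.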
trig_waits = [0 for _ in range(npts)]
nreps = [1 for _ in range(npts)]
event_jumps = [0 for _ in range(npts)]
event_jump_to = [0 for _ in range(npts)]
go_to = [0 for _ in range(npts)]
go_to[-1] = 1 # Make the sequence loop back to first step
seqx = self.awg.makeSEQXFile(trig_waits,
nreps,
event_jumps,
event_jump_to,
go_to,
wfms,
[1],
self.filename)
self.awg.sendSEQXFile(seqx, self.filename + '.seqx')
self.awg.loadSEQXFile(self.filename + '.seqx')
# Create Readout tones
self.update_readout_freqs()
# Create sequence for readout tones SLISt:SEQuence:NEW <sequence_name>,<number_of_steps> [,<number_of_tracks>]
self.awg.write('SLISt:SEQuence:NEW \"Readout_Seq\", {}, 2'.format(npts))
self.awg.write('SLISt:SEQuence:STEP{}:GOTO \"Readout_Seq\", 1'.format(npts))
# Fill Readout waveforms into sequence
for i in range(npts):
self.awg.write('SLISt:SEQuence:STEP{}:TASSet1:WAVeform \"Readout_Seq\", \"Readout_I\"'.format(str(i+1)))
self.awg.write('SLISt:SEQuence:STEP{}:TASSet2:WAVeform \"Readout_Seq\", \"Readout_Q\"'.format(str(i+1)))
# Assign waveforms
self.awg.ch1.setSequenceTrack(self.filename, 1)
self.awg.ch3.setSequenceTrack('Readout_Seq', 1)
self.awg.ch4.setSequenceTrack('Readout_Seq', 2)
self.awg.play()
# Alazar labels
for ala_chan in self.alazar_ctrl.channels[2:4]:
ala_chan.records_per_buffer(npts)
ala_chan.data.setpoint_labels = ('Cycle time tau',ala_chan.data.setpoint_labels[1])
ala_chan.data.setpoint_units = ('s',ala_chan.data.setpoint_units[1])
for n, ala_chan in enumerate(self.alazar_ctrl.channels[12:24]):
ala_chan.records_per_buffer(npts)
ala_chan.data.setpoint_labels = ('Cycle time tau',)
ala_chan.data.setpoint_units = ('s',)
# prepare channels
self.num_averages(self._averages)
def MultiQ_SSB_Spec_SomeOverlap(self, start, stop, npts, pulse_length = 2e-6, overlap = 1e-6,custom_name = None):
qubitf = np.mean([start,stop])
self.qubit.frequency(qubitf)
self.x_val = lambda: np.linspace(start - qubitf,stop - qubitf,npts) + self.qubit.frequency()
# Here we decide whether we want to use "default" names or a user-specified custom name
if custom_name is None:
readout_seq_name = 'Readout_Seq'
this_seq_name = self.filename
self.awg.clearSequenceList()
self.awg.clearWaveformList()
else:
this_seq_name = 'Waveforms_' + custom_name
readout_seq_name = 'Readout_Seq_' + custom_name
N = int((self.cycle_time()*self.SR+64) - self.cycle_time()*self.SR%64)
N_offset = int(self.marker_offset()*self.SR)
time = np.linspace(0, pulse_length, int(pulse_length*self.SR), endpoint=False)
#overlap to be added to drive tone. Keep the trigger after the drive tone
overlap_offset = int(overlap*self.SR)
overlap_array = np.linspace(0, overlap, int(overlap*self.SR), endpoint=False)
# Create triggers
ZerosMarker = np.zeros(int(N))
TriggerMarker = np.zeros(int(N))
TriggerMarker[-int(self.readout_dur()*self.SR-N_offset):-int(self.readout_dur()*self.SR-N_offset-500e-9*self.SR)] = 1
# Create Drive tones
wfms = [[], []]
for i , f in enumerate(self.x_val()-qubitf):
# SSB drive tone
sine_signal = np.concatenate((np.zeros(N-len(time)+len(overlap_array)-int(self.readout_dur()*self.SR)), 0.5*np.sin(f*2*np.pi*time),np.zeros(int(self.readout_dur()*self.SR-len(overlap_array)))))
cosine_signal = np.concatenate((np.zeros(N-len(time)+len(overlap_array)-int(self.readout_dur()*self.SR)), 0.5*np.cos(f*2*np.pi*time),np.zeros(int(self.readout_dur()*self.SR-len(overlap_array)))))
if i == 0:
wfm_ch1 = np.array([cosine_signal,TriggerMarker,TriggerMarker])
wfm_ch2 = np.array([sine_signal,TriggerMarker,TriggerMarker])
else:
wfm_ch1 = np.array([cosine_signal,ZerosMarker,ZerosMarker])
wfm_ch2 = np.array([sine_signal,ZerosMarker,ZerosMarker])
wfms[0].append(wfm_ch1)
wfms[1].append(wfm_ch2)
trig_waits = [0 for _ in range(npts)]
nreps = [1 for _ in range(npts)]
event_jumps = [0 for _ in range(npts)]
event_jump_to = [0 for _ in range(npts)]
go_to = [0 for _ in range(npts)]
go_to[-1] = 1 # Make the sequence loop back to first step
seqx = self.awg.makeSEQXFile(trig_waits,
nreps,
event_jumps,
event_jump_to,
go_to,
wfms,
[1, 1],
this_seq_name, custom_name = custom_name)
self.awg.sendSEQXFile(seqx, this_seq_name + '.seqx')
self.awg.loadSEQXFile(this_seq_name + '.seqx')
# Create Readout tones
self.update_readout_freqs()
# Create sequence for readout tones SLISt:SEQuence:NEW <sequence_name>,<number_of_steps> [,<number_of_tracks>]
#self.awg.write('SLISt:SEQuence:NEW \"Readout_Seq\", {}, 2'.format(npts))
#self.awg.write('SLISt:SEQuence:STEP{}:GOTO \"Readout_Seq\", 1'.format(npts))
self.awg.write('SLISt:SEQuence:NEW \"' + readout_seq_name + '\", {}, 2'.format(npts))
self.awg.write('SLISt:SEQuence:STEP{}:GOTO \"'.format(npts) + readout_seq_name + '\", 1')
# Fill Readout waveforms into sequence
for i in range(npts):
self.awg.write('SLISt:SEQuence:STEP{}:TASSet1:WAVeform \"'.format(str(i+1)) + readout_seq_name + '\", \"Readout_I\"')
self.awg.write('SLISt:SEQuence:STEP{}:TASSet2:WAVeform \"'.format(str(i+1)) + readout_seq_name + '\", \"Readout_Q\"')
# Assign waveforms
#self.awg.ch1.setSequenceTrack(self.filename, 1)
#self.awg.ch2.setSequenceTrack(self.filename, 2)
#self.awg.ch3.setSequenceTrack('Readout_Seq', 1)
#self.awg.ch4.setSequenceTrack('Readout_Seq', 2)
self.awg.ch1.setSequenceTrack(this_seq_name, 1)
self.awg.ch2.setSequenceTrack(this_seq_name, 2)
self.awg.ch3.setSequenceTrack(readout_seq_name, 1)
self.awg.ch4.setSequenceTrack(readout_seq_name, 2)
self.awg.ch2.state(1)
self.awg.play()
# Alazar labels
for ala_chan in self.alazar_ctrl.channels[2:4]:
ala_chan.records_per_buffer(npts)
ala_chan.data.setpoint_labels = ('SSB Drive frequency (Some-overlap)',ala_chan.data.setpoint_labels[1])
ala_chan.data.setpoint_units = ('Hz',ala_chan.data.setpoint_units[1])
for n, ala_chan in enumerate(self.alazar_ctrl.channels[12:24]):
ala_chan.records_per_buffer(npts)
ala_chan.data.setpoint_labels = ('SSB Drive frequency (Some-overlap)',)
ala_chan.data.setpoint_units = ('Hz',)
# prepare channels
self.num_averages(self._averages)
def MultiQ_Rabi_overlap(self, start, stop, npts, overlap = 0 ,custom_name = None):
self.x_val = lambda: np.linspace(start ,stop ,npts)
self.awg.ch2.state(0)
# Here we decide whether we want to use "default" names or a user-specified custom name
if custom_name is None:
readout_seq_name = 'Readout_Seq'
this_seq_name = self.filename
self.awg.clearSequenceList()
self.awg.clearWaveformList()
else:
this_seq_name = 'Waveforms_' + custom_name
readout_seq_name = 'Readout_Seq_' + custom_name
N = int((self.cycle_time()*self.SR+64) - self.cycle_time()*self.SR%64)
N_offset = int(self.marker_offset()*self.SR)
overlap_array = np.linspace(0, overlap, int(overlap*self.SR), endpoint=False)
# Create triggers
ZerosMarker = np.zeros(int(N))
TriggerMarker = np.zeros(int(N))
TriggerMarker[-int(self.readout_dur()*self.SR-N_offset):-int(self.readout_dur()*self.SR-N_offset-500e-9*self.SR)] = 1
# Create Drive tones
wfms = [[]]
for i , t in enumerate(self.x_val()):
# SSB drive tone
drive = np.zeros(int(N))
drive[-int((self.readout_dur() + t-overlap)*self.SR):-int((self.readout_dur()-overlap)*self.SR)] = 0.5
if i == 0:
wfm_ch1 = np.array([drive,TriggerMarker,TriggerMarker])
else:
wfm_ch1 = np.array([drive,ZerosMarker,ZerosMarker])
wfms[0].append(wfm_ch1)
trig_waits = [0 for _ in range(npts)]
nreps = [1 for _ in range(npts)]
event_jumps = [0 for _ in range(npts)]
event_jump_to = [0 for _ in range(npts)]
go_to = [0 for _ in range(npts)]
go_to[-1] = 1 # Make the sequence loop back to first step
seqx = self.awg.makeSEQXFile(trig_waits,
nreps,
event_jumps,
event_jump_to,
go_to,
wfms,
[1, 1],
this_seq_name, custom_name = custom_name)
self.awg.sendSEQXFile(seqx, this_seq_name + '.seqx')
self.awg.loadSEQXFile(this_seq_name + '.seqx')
# Create Readout tones
self.update_readout_freqs()
# Create sequence for readout tones SLISt:SEQuence:NEW <sequence_name>,<number_of_steps> [,<number_of_tracks>]
#self.awg.write('SLISt:SEQuence:NEW \"Readout_Seq\", {}, 2'.format(npts))
#self.awg.write('SLISt:SEQuence:STEP{}:GOTO \"Readout_Seq\", 1'.format(npts))
self.awg.write('SLISt:SEQuence:NEW \"' + readout_seq_name + '\", {}, 2'.format(npts))
self.awg.write('SLISt:SEQuence:STEP{}:GOTO \"'.format(npts) + readout_seq_name + '\", 1')
# Fill Readout waveforms into sequence
for i in range(npts):
self.awg.write('SLISt:SEQuence:STEP{}:TASSet1:WAVeform \"'.format(str(i+1)) + readout_seq_name + '\", \"Readout_I\"')
self.awg.write('SLISt:SEQuence:STEP{}:TASSet2:WAVeform \"'.format(str(i+1)) + readout_seq_name + '\", \"Readout_Q\"')
# Assign waveforms
self.awg.ch1.setSequenceTrack(this_seq_name, 1)
self.awg.ch3.setSequenceTrack(readout_seq_name, 1)
self.awg.ch4.setSequenceTrack(readout_seq_name, 2)
self.awg.play()
# Alazar labels
for ala_chan in self.alazar_ctrl.channels[2:4]:
ala_chan.records_per_buffer(npts)
ala_chan.data.setpoint_labels = ('Drive time',ala_chan.data.setpoint_labels[1])
ala_chan.data.setpoint_units = ('s',ala_chan.data.setpoint_units[1])
for n, ala_chan in enumerate(self.alazar_ctrl.channels[12:24]):
ala_chan.records_per_buffer(npts)
ala_chan.data.setpoint_labels = ('Drive time',)
ala_chan.data.setpoint_units = ('s',)
# prepare channels
self.num_averages(self._averages)
|
# ----------------------------------------------
# -*- coding: utf-8 -*-
# @Time : 2019-11-07 18:50
# @Author : 吴林江
# @Email : wulinjiang1@kingsoft.com
# @File : test-sanic.py
# ----------------------------------------------
from sanic import Sanic
from sanic import response
from pprint import pprint
app = Sanic("test_sanic")  # recent Sanic versions require an application name
@app.route('/', methods=['POST'])
async def g(request):
data = request.json
resp = []
for k, v in data.items():  # iterating the dict directly would yield only keys
for d in v:
resp.append(sorted(d.items()))
pprint(sorted(resp))
return response.json(True)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=10000, debug=True)
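# Usage sketch (the payload shape is an assumption inferred from the handler:
# a JSON object mapping keys to lists of objects):
#   curl -X POST http://localhost:10000/ -H 'Content-Type: application/json' \
#        -d '{"group": [{"b": 2, "a": 1}]}'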
|
import logging
from typing import Dict, Optional, Tuple
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from transformers import (
AutoModel,
AutoTokenizer,
PreTrainedModel,
PreTrainedTokenizerBase,
)
from clrcmd.utils import masked_mean
logger = logging.getLogger(__name__)
def dist_all_gather(x: Tensor) -> Tensor:
"""Boilerplate code for all gather in distributed setting
The first dimension could be different
:param x: Tensor to be gathered
:type x: Tensor
:return: Tensor after gathering. For gradient flow, the current rank's
    slice is replaced by the original tensor
:rtype: Tensor
"""
assert dist.is_initialized(), "The process is not in DDP setting"
world_size = dist.get_world_size()
# 1. Get sizes across processes
x_numel_list = [torch.tensor(x.numel(), device=x.device) for _ in range(world_size)]
dist.all_gather(x_numel_list, torch.tensor(x.numel(), device=x.device))
# 2. Infer maximum size
max_size = max(n.item() for n in x_numel_list)  # n, not x, to avoid shadowing the input
# 3. Communicate tensors via padded versions
_x_list = [torch.empty((max_size,), device=x.device) for _ in range(world_size)]
_x = torch.cat(
(
x.contiguous().view(-1),
torch.empty((max_size - x.numel(),), device=x.device),
)
)
dist.all_gather(_x_list, _x)
# 4. Remove padded data to change original shape
x_list = [_x[:n].view(-1, *x.shape[1:]) for n, _x in zip(x_numel_list, _x_list)]
# Since `all_gather` results do not have gradients, we replace the
# current process's corresponding embeddings with original tensors
x_list[dist.get_rank()] = x
return torch.cat(x_list, dim=0)
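# Usage sketch (inside a DDP training step; `embeddings` is a hypothetical
# (local_batch, dim) tensor): per-rank batch sizes may differ, so the helper
# pads, gathers, then trims each slice back to its true size.
# gathered = dist_all_gather(embeddings)  # (sum of local batches, dim)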
ModelInput = Dict[str, Tensor]
class LastHiddenSentenceRepresentationModel(nn.Module):
def __init__(self, model: PreTrainedModel, head: bool = False):
super().__init__()
self.model = model
self.head = head
if head:
hidden_size = self.model.config.hidden_size
self.linear = nn.Linear(hidden_size, hidden_size)
def forward(self, inputs: ModelInput) -> Tuple[Tensor, Tensor]:
# Return representation with mask
outputs = self.model(**inputs).last_hidden_state
if self.head:
outputs = self.linear(outputs)
return outputs, inputs["attention_mask"].bool()
def compute_last_hidden(self, inputs: ModelInput) -> Tuple[Tensor, Tensor]:
return self.forward(inputs)
class CLSPoolingSentenceRepresentationModel(nn.Module):
def __init__(self, model: PreTrainedModel, head: bool = False):
super().__init__()
self.model = model
self.head = head
if head:
hidden_size = self.model.config.hidden_size
self.linear = nn.Linear(hidden_size, hidden_size)
def forward(self, inputs: ModelInput) -> Tensor:
outputs = self.model(**inputs).last_hidden_state[:, 0]
if self.head:
outputs = self.linear(outputs)
return outputs
def compute_last_hidden(self, inputs: ModelInput) -> Tuple[Tensor, Tensor]:
raise ValueError("No interpretable resource for cls pooling")
class AveragePoolingSentenceRepresentationModel(nn.Module):
def __init__(self, model: PreTrainedModel, head: bool = False):
super().__init__()
self.model = model
self.head = head
if head:
hidden_size = self.model.config.hidden_size
self.linear = nn.Linear(hidden_size, hidden_size)
def forward(self, inputs: ModelInput) -> Tensor:
mask = inputs["attention_mask"].bool().unsqueeze(2)
outputs = masked_mean(self.model(**inputs).last_hidden_state, mask, dim=1)
if self.head:
outputs = self.linear(outputs)
return outputs
def compute_last_hidden(self, inputs: ModelInput) -> Tuple[Tensor, Tensor]:
outputs = self.model(**inputs).last_hidden_state
if self.head:
outputs = self.linear(outputs)
return outputs, inputs["attention_mask"].bool()
class SentenceBertLearningModule(nn.Module):
def __init__(self, model: nn.Module, hidden_size: int):
super().__init__()
self.model = model
self.head = nn.Linear(3 * hidden_size, 3, bias=False)  # 3-way classification over [x1; x2; |x1 - x2|]
self.criterion = nn.CrossEntropyLoss()
def forward(self, inputs1: ModelInput, inputs2: ModelInput, labels: Tensor) -> Tuple[Tensor]:
x1, x2 = self.model(inputs1), self.model(inputs2)
pred = self.head(torch.cat((x1, x2, torch.abs(x1 - x2)), dim=-1))
return (self.criterion(pred, labels),)
class SentenceSimilarityModel(nn.Module):
def __init__(self, representation_model: nn.Module, similarity: nn.Module):
super().__init__()
self.representation_model = representation_model
self.similarity = similarity
def forward(self, inputs1: ModelInput, inputs2: ModelInput) -> Tensor:
"""Provide similarity between two sentences
:param inputs1: model input for sentence1.
:param inputs2: model input for sentence2.
:return: similarity score.
"""
x1, x2 = self.representation_model(inputs1), self.representation_model(inputs2)
return self.similarity(x1, x2)
def compute_heatmap(self, inputs1: ModelInput, inputs2: ModelInput) -> Tensor:
x1 = self.representation_model.compute_last_hidden(inputs1)
x2 = self.representation_model.compute_last_hidden(inputs2)
return self.similarity.compute_heatmap(x1, x2)
class SimcseLearningModule(nn.Module):
def __init__(self, model: nn.Module, pairwise_similarity: nn.Module, temp: float):
super().__init__()
self.model = model
self.pairwise_similarity = pairwise_similarity
self.temp = temp
self.criterion = nn.CrossEntropyLoss()
def forward(
self,
inputs1: ModelInput,
inputs2: ModelInput,
inputs_neg: Optional[ModelInput] = None,
) -> Tuple[Tensor]:
if inputs_neg is not None:
inputs = {
k: torch.cat((inputs1[k], inputs2[k], inputs_neg[k]), dim=0)
for k in inputs1.keys()
}
else:
inputs = {k: torch.cat((inputs1[k], inputs2[k]), dim=0) for k in inputs1.keys()}
x = self.model.representation_model(inputs)
if inputs_neg is not None:
sections = (
inputs1["input_ids"].shape[0],
inputs2["input_ids"].shape[0] + inputs_neg["input_ids"].shape[0],
)
else:
sections = inputs1["input_ids"].shape[0], inputs2["input_ids"].shape[0]
# NOTE: Really bad.. how to fix it?
if isinstance(x, Tensor):
x1, x2 = torch.split(x, sections)
else:
x1, x2 = list(zip(torch.split(x[0], sections), torch.split(x[1], sections)))
sim = self.pairwise_similarity(x1, x2)
sim = sim / self.temp
# (batch_size, batch_size)
labels = torch.arange(sim.shape[0], dtype=torch.long, device=sim.device)
return (self.criterion(sim, labels),)
def compute_alignment(
x1: Tensor, x2: Tensor, mask1: Tensor, mask2: Tensor
) -> Tuple[Tensor, Tensor]:
sim = F.cosine_similarity(x1.unsqueeze(-2), x2.unsqueeze(-3), dim=-1)
# Set similarity of invalid position to negative inf
inf = torch.tensor(float("-inf"), device=sim.device)
sim = torch.where(mask1.unsqueeze(-1), sim, inf)
sim = torch.where(mask2.unsqueeze(-2), sim, inf)
indice1 = torch.max(sim, dim=-1)[1]
indice2 = torch.max(sim, dim=-2)[1]
return indice1, indice2
class RelaxedWordMoverSimilarity(nn.Module):
def __init__(self):
super().__init__()
self.cos = nn.CosineSimilarity(dim=-1)
def forward(self, x1: Tuple[Tensor, Tensor], x2: Tuple[Tensor, Tensor]) -> Tensor:
"""Compute relaxed word mover similarity
:param x1: ((batch, seq_len1, hidden_dim), (batch, seq_len1)), torch.float
:param x2: ((batch, seq_len2, hidden_dim), (batch, seq_len2)), torch.float
:return: (batch)
"""
(x1, mask1), (x2, mask2) = x1, x2
sim = self.cos(x1[:, :, None, :], x2[:, None, :, :])
inf = torch.tensor(float("-inf"), device=sim.device)
sim = torch.where(mask1.unsqueeze(-1), sim, inf)
sim = torch.where(mask2.unsqueeze(-2), sim, inf)
# (batch, seq_len1, seq_len2)
sim1, sim2 = torch.max(sim, dim=2)[0], torch.max(sim, dim=1)[0]
sim1 = masked_mean(sim1, mask1, dim=1)
sim2 = masked_mean(sim2, mask2, dim=1)
sim = (sim1 + sim2) / 2
return sim
def compute_heatmap(self, x1: Tuple[Tensor, Tensor], x2: Tuple[Tensor, Tensor]) -> Tensor:
(x1, mask1), (x2, mask2) = x1, x2
sim = self.cos(x1[:, :, None, :], x2[:, None, :, :])
inf = torch.tensor(float("-inf"), device=sim.device)
sim = torch.where(mask1.unsqueeze(-1), sim, inf)
sim = torch.where(mask2.unsqueeze(-2), sim, inf)
# (batch, seq_len1, seq_len2)
sim1 = torch.mul(sim, (sim == torch.max(sim, dim=2, keepdim=True)[0]).float())
sim2 = torch.mul(sim, (sim == torch.max(sim, dim=1, keepdim=True)[0]).float())
sim1 = sim1 / torch.count_nonzero(mask1, dim=1)[:, None, None]
sim2 = sim2 / torch.count_nonzero(mask2, dim=1)[:, None, None]
sim = (sim1 + sim2) / 2
return sim
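# Shape sketch (assumed inputs): given token embeddings x of shape
# (batch, seq_len, hidden) and boolean masks m of shape (batch, seq_len),
# RelaxedWordMoverSimilarity()((x1, m1), (x2, m2)) returns a (batch,) tensor:
# the masked mean of best-match cosine similarities, averaged over both directions.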
class PairwiseRelaxedWordMoverSimilarity(nn.Module):
def __init__(self):
super().__init__()
self.cos = nn.CosineSimilarity(dim=-1)
def forward(self, x1: Tuple[Tensor, Tensor], x2: Tuple[Tensor, Tensor]) -> Tensor:
"""Compute relaxed word mover similarity
:param x1: ((batch1, seq_len1, hidden_dim), (batch1, seq_len1)), torch.float
:param x2: ((batch2, seq_len2, hidden_dim), (batch2, seq_len2)), torch.float
:return: (batch1, batch2)
"""
(x1, mask1), (x2, mask2) = x1, x2
batch1, seq_len1, hidden_dim = x1.shape
batch2, seq_len2, _ = x2.shape
# Compute max indice batchwise
with torch.no_grad():
indice1 = torch.empty((batch1, batch2, seq_len1), dtype=torch.long, device=x1.device)
indice2 = torch.empty((batch1, batch2, seq_len2), dtype=torch.long, device=x2.device)
for i in range(0, batch1, 8):
for j in range(0, batch2, 8):
_indice1, _indice2 = compute_alignment(
x1[i : i + 8, None, :, :],
x2[None, j : j + 8, :, :],
mask1[i : i + 8, None, :],
mask2[None, j : j + 8, :],
)
indice1[i : i + 8, j : j + 8, :] = _indice1
indice2[i : i + 8, j : j + 8, :] = _indice2
# Construct computational graph for RWMD
x1, x2 = x1.unsqueeze(1), x2.unsqueeze(0)
sim1 = self.cos(
x1, # (batch1, 1, seq_len1, hidden_dim)
torch.gather(
x2.expand((batch1, -1, -1, -1)),
dim=2,
index=indice1.unsqueeze(-1).expand((-1, -1, -1, hidden_dim)),
), # (batch1, batch2, seq_len1, hidden_dim)
)
# (batch1, batch2, seq_len1)
sim2 = self.cos(
torch.gather(
x1.expand((-1, batch2, -1, -1)),
dim=2,
index=indice2.unsqueeze(-1).expand((-1, -1, -1, hidden_dim)),
), # (batch1, batch2, seq_len2, hidden_dim)
x2, # (1, batch2, seq_len2, hidden_dim)
)
# (batch1, batch2, seq_len2)
sim1 = masked_mean(sim1, mask1.unsqueeze(1).expand_as(sim1), dim=-1)
sim2 = masked_mean(sim2, mask2.unsqueeze(0).expand_as(sim2), dim=-1)
sim = (sim1 + sim2) / 2
return sim
class DensePairwiseRelaxedWordMoverSimilarity(nn.Module):
def __init__(self):
super().__init__()
self.cos = nn.CosineSimilarity(dim=-1)
def forward(self, x1: Tuple[Tensor, Tensor], x2: Tuple[Tensor, Tensor]) -> Tensor:
"""Compute relaxed word mover similarity
:param x1: ((batch1, seq_len1, hidden_dim), (batch1, seq_len1)), torch.float
:param x2: ((batch2, seq_len2, hidden_dim), (batch2, seq_len2)), torch.float
:return: (batch1, batch2)
"""
(x1, mask1), (x2, mask2) = x1, x2
sim = self.cos(x1[:, None, :, None, :], x2[None, :, None, :, :])
# (batch1, batch2, seq_len1, seq_len2)
inf = torch.tensor(float("-inf"), device=sim.device)
sim = torch.where(mask1[:, None, :, None], sim, inf)
sim = torch.where(mask2[None, :, None, :], sim, inf)
sim1, sim2 = torch.max(sim, dim=3)[0], torch.max(sim, dim=2)[0]
# (batch1, batch2, seq_len1), (batch1, batch2, seq_len2)
sim1 = masked_mean(sim1, mask1[:, None, :], dim=-1)
sim2 = masked_mean(sim2, mask2[None, :, :], dim=-1)
sim = (sim1 + sim2) / 2
return sim
class CosineSimilarity(nn.Module):
def __init__(self, dim: int):
super().__init__()
self.cos = nn.CosineSimilarity(dim=dim)
def forward(self, x1: Tensor, x2: Tensor) -> Tensor:
return self.cos(x1, x2)
def compute_heatmap(self, x1: Tuple[Tensor, Tensor], x2: Tuple[Tensor, Tensor]) -> Tensor:
(x1, mask1), (x2, mask2) = x1, x2
s1 = masked_mean(x1, mask1.unsqueeze(2), dim=1) # (batch, hidden)
s2 = masked_mean(x2, mask2.unsqueeze(2), dim=1) # (batch, hidden)
sim = torch.einsum("bih,bjh->bij", x1, x2)
inf = torch.tensor(float("-inf"), device=sim.device)
sim = torch.where(mask1.unsqueeze(-1), sim, inf)
sim = torch.where(mask2.unsqueeze(-2), sim, inf)
sim = sim / torch.norm(s1, dim=1)[:, None, None]
sim = sim / torch.norm(s2, dim=1)[:, None, None]
sim = sim / torch.count_nonzero(mask1, dim=1)[:, None, None]
sim = sim / torch.count_nonzero(mask2, dim=1)[:, None, None]
return sim
class PairwiseCosineSimilarity(nn.Module):
def forward(self, x1: Tensor, x2: Tensor) -> Tensor:
return F.cosine_similarity(x1.unsqueeze(1), x2.unsqueeze(0), dim=-1)
def create_tokenizer(model_name: str) -> PreTrainedTokenizerBase:
if model_name.startswith("bert"):
return AutoTokenizer.from_pretrained("bert-base-uncased", use_fast=False)
elif model_name.startswith("roberta"):
return AutoTokenizer.from_pretrained("roberta-base", use_fast=False)
else:
raise ValueError(f"Undefined {model_name = }")
def create_similarity_model(model_name: str) -> nn.Module:
if model_name.startswith("bert"):
model = AutoModel.from_pretrained("bert-base-uncased")
elif model_name.startswith("roberta"):
model = AutoModel.from_pretrained("roberta-base")
else:
raise ValueError(f"Undefined {model_name = }")
if model_name.endswith("cls"):
model = CLSPoolingSentenceRepresentationModel(model, head=True)
model = SentenceSimilarityModel(model, CosineSimilarity(dim=-1))
elif model_name.endswith("avg"):
model = AveragePoolingSentenceRepresentationModel(model, head=True)
model = SentenceSimilarityModel(model, CosineSimilarity(dim=-1))
elif model_name.endswith("rcmd"):
model = LastHiddenSentenceRepresentationModel(model, head=True)
model = SentenceSimilarityModel(model, RelaxedWordMoverSimilarity())
else:
raise ValueError(f"Undefined {model_name = }")
return model
def create_contrastive_learning(
model_name: str, temp: float = 1.0, dense_rwmd: bool = False
) -> nn.Module:
model = create_similarity_model(model_name)
if model_name.endswith("cls"):
return SimcseLearningModule(model, PairwiseCosineSimilarity(), temp)
elif model_name.endswith("avg"):
return SimcseLearningModule(model, PairwiseCosineSimilarity(), temp)
elif model_name.endswith("rcmd"):
if dense_rwmd:
pairwise_similarity = DensePairwiseRelaxedWordMoverSimilarity()
else:
pairwise_similarity = PairwiseRelaxedWordMoverSimilarity()
return SimcseLearningModule(model, pairwise_similarity, temp)
else:
raise ValueError(f"Undefined {model_name = }")
|
from appium import webdriver
desired_caps = {
'platformName': 'Android',
'deviceName': 'emulator-5554',
'platformVersion': '7.0',
'appPackage': 'com.android.calculator2',
'appActivity': 'com.android.calculator2.Calculator'
}
driver = webdriver.Remote('http://localhost:4723/wd/hub',desired_caps)
driver.find_element_by_id("com.android.calculator2:id/digit_1").click()
driver.find_element_by_id("com.android.calculator2:id/digit_5").click()
driver.find_element_by_id("com.android.calculator2:id/digit_9").click()
driver.find_element_by_id("com.android.calculator2:id/del").click()
driver.find_element_by_id("com.android.calculator2:id/op_add").click()
driver.find_element_by_id("com.android.calculator2:id/digit_6").click()
driver.find_element_by_id("com.android.calculator2:id/eq").click()
driver.quit()  # quit the session
|
import pygame
from settings import SCREEN_WIDTH, SCREEN_HEIGHT
# FIXME: Bad naming
class Score:
global_score = 0
player_life = 3
@staticmethod
def add_score(score):
Score.global_score += score
@staticmethod
def draw_score(display_surface):
font = pygame.font.Font(None, 32)
text = font.render(f' Total points: {Score.global_score} ', True, 'white', 'black')
text_rect = text.get_rect()
text_rect.bottomright = (SCREEN_WIDTH - 20, SCREEN_HEIGHT - 20)
display_surface.blit(text, text_rect)
@staticmethod
def add_player_life():
Score.player_life += 1
@staticmethod
def remove_player_life():
Score.player_life -= 1
@staticmethod
def draw_player_life(display_surface):
font = pygame.font.Font(None, 32)
text = font.render(f' Player life: {Score.player_life} ', True, 'red', 'white')
text_rect = text.get_rect()
display_surface.blit(text, text_rect) |
from flask import Flask, render_template, request
from .utils.parser import Parser
from .utils.predictor import Predictor
from .config import API_KEY
from .database import database
import pyowm
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///clothes.db"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
database.init_app(app)
from .cli import create_all, insert, drop_all, populate
with app.app_context():
app.cli.add_command(create_all)
app.cli.add_command(drop_all)
app.cli.add_command(insert)
app.cli.add_command(populate)
@app.route("/")
def index():
return render_template("index.html")
@app.route("/weather", methods=["GET", "POST"])
def weather():
if request.method == "POST":
owm = pyowm.OWM(API_KEY)
place = request.form["location"]
if place:
try:
obs = owm.weather_at_place(place)
parser = Parser()
weather_data = parser.parse(obs)
predictor = Predictor(weather_data)
predictor.predict()
suggestion = predictor.suggest()
return render_template("weather.html", weather_data=weather_data, suggestion=suggestion)
except pyowm.exceptions.api_response_error.NotFoundError:
return render_template("error.html",
error="Looks like your location doesn't exist. Maybe try again?")
return render_template("error.html", error="Looks like you haven't selected the location. Maybe try again?")
if __name__ == "__main__":
app.debug = True
app.run()
|
import os
import json
# Read configuration values for the given list of keys
def load_config(keys):
configs = {}
cur_path = os.path.dirname(__file__) + '/'
with open(cur_path + '../config/config.json', 'r', encoding='utf-8') as json_file:
json_obj = json.load(json_file)
for key in keys:
try:
configs[key] = json_obj[key]
except Exception:
pass
return configs
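# Usage sketch (assumes ../config/config.json exists relative to this file;
# the key names below are hypothetical):
# configs = load_config(['host', 'port'])
# print(configs.get('host'))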
|
from rest_framework import serializers
from .models import Book, Assignment, User
class AssignmentSerializer(serializers.ModelSerializer):
class Meta:
model = Assignment
fields = ('id', 'user', 'book')
class BookSerializer(serializers.ModelSerializer):
class Meta:
model = Book
fields = ('id', 'title', 'description')
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'name', 'email')
|
import numpy as np
from math import ceil, floor
from random import shuffle
class Cluster:
"""
Immutable object containing properties of the cluster:
numIslands
nodesPerIsland
islandBandwidth
singleNodeBandwidth
"""
def __init__(self, numIslands=2, nodesPerIsland=8, islandBandwidth=7, singleNodeBandwidth=4):
self.numIslands = numIslands
self.nodesPerIsland = nodesPerIsland
self.islandBandwidth = islandBandwidth
self.singleNodeBandwidth = singleNodeBandwidth
self.numNodes = self.numIslands*self.nodesPerIsland
def __str__(self):
cString = "Cluster: " + str(self.numIslands) + " islands of " + str(self.nodesPerIsland) + " nodes.\n" \
+ "Max bandwidth between nodes is " + str(self.singleNodeBandwidth) + "GB/s. Max bandwidth between islands is " \
+ str(self.islandBandwidth) + "GB/s."
return cString
"""
Object holding properties of a single, unidirectional message passed from rank1 to rank2
rank1: sender id
rank2: receiver id
host1: sender host id
host2: receiver host id
bandwidth: bandwidth at which the message will be sent
"""
class Message:
def __init__(self, rank1, rank2, host1, host2):
self.rank1 = rank1
self.rank2 = rank2
self.host1 = host1
self.host2 = host2
self.bandwidth = 0
def setBandwidth(self, bandwidth):
self.bandwidth = bandwidth
def setIslands(self, island1, island2):
self.island1 = island1
self.island2 = island2
def __str__(self):
return "ranks " + str(self.rank1) + " and " +\
str(self.rank2) + " at " + str(self.bandwidth) + "GB/s"
"""
Object containing a collection of messages, represented by Message objects.
Includes functions to set the bandwidth of those messages given the assumption that all
messages will be sent simultaneously
"""
class MessageModel:
def __init__(self):
self.messages = []
def setPairedMessages(self, job):
"""
Create a group of messages that pair off with the rank
distance numRanks/2 away
"""
self.setPairedMessagesByDistance(job, job.numRanks // 2)  # integer distance for range()
def setPairedMessagesByDistance(self, job, distance):
"""
Create a group of messages that pair off with the rank the provided
distance away
"""
messages = []
hosts = job.getHosts()
numBlocks = job.numRanks // (2 * distance)  # range() needs an int on Python 3
blockIndex = 0
for block in range(numBlocks):
for rank in range(distance):
rank1 = blockIndex+rank
rank2 = blockIndex+rank+distance
message = Message(rank1, rank2, hosts[rank1], hosts[rank2])
messages.append(message)
blockIndex = blockIndex + distance*2
self.messages = messages
def setOneToAllMessages(self, job):
"""
Set rank 0 to communicate with all other ranks.
Probably don't actually want this as currently it will be treated as sending to
all ranks simultaneously
"""
messages = []
hosts = job.getHosts()
for rank in job.getRanks():
message = Message(0, rank, hosts[0], hosts[rank])
messages.append(message)
self.messages = messages
def setBandwidths(self, cluster):
"""
Given a group of messages and a model for the maximum point to point and
inter-island bandwidth, calculate the bandwidth that each message could
be sent at, assuming all those messages were sent simultaneously
"""
islandCounts = np.zeros(cluster.numIslands, dtype=int)
# calculate total number of ranks which need to use each edge switch
# (ie how many ranks in each island communicate with a different island)
for message in self.messages:
island1 = int(floor(message.host1/float(cluster.nodesPerIsland)))
island2 = int(floor(message.host2/float(cluster.nodesPerIsland)))
message.setIslands(island1, island2)
if (island1 != island2):
islandCounts[island1] = islandCounts[island1]+1
islandCounts[island2] = islandCounts[island2]+1
# set bandwidths based on the saturation of the islands in the network
for message in self.messages:
if (message.island1 != message.island2):
sharedIslandBw1 = cluster.islandBandwidth/float(islandCounts[message.island1])
bw1 = min(cluster.singleNodeBandwidth, sharedIslandBw1)
sharedIslandBw2 = cluster.islandBandwidth/float(islandCounts[message.island2])
bw2 = min(cluster.singleNodeBandwidth, sharedIslandBw2)
message.bandwidth = min(bw1, bw2)
else:
message.bandwidth = cluster.singleNodeBandwidth
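# Worked example (numbers illustrative): with islandBandwidth=7 and four
# inter-island messages touching an island, each such message gets
# min(singleNodeBandwidth=4, 7/4) = 1.75 GB/s.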
def __str__(self):
fullString = ""
count = 0
for message in self.messages:
fullString = fullString + "message " + str(count) + ": " + str(message) + "\n"
count = count + 1
return fullString
"""
Object representing a job to run on a cluster. Takes a number of ranks and allocates them
to host IDs using different allocation strategies:
allocateScattered
allocateSplitIslands
allocateRandom
"""
class Job:
def __init__(self, numRanks=8):
self.numRanks = numRanks
def allocatePacked(self, cluster):
"""
Allocate consecutive ranks to consecutive host ids.
Host IDs are integers [0:numIslands*nodesPerIsland]
"""
hosts = range(self.numRanks)
self.hosts = hosts
def allocateScattered(self, cluster):
"""
Allocate ranks spaced by the maximum possible regular interval, starting at host 0.
If the interval is one, pack from the left.
Host IDs are integers [0:numIslands*nodesPerIsland]
"""
hosts = []
interval = int(ceil(cluster.numNodes / float(self.numRanks)))  # float division so ceil takes effect
for i in range(self.numRanks):
if i*interval < cluster.numNodes:
hosts.append(i*interval)
else:
hosts.append(hosts[i-1]+1)
self.hosts = hosts
def allocateSplitIslands(self, cluster):
"""
Allocate ranks to hosts by dividing ranks evenly between islands.
Rank order is maintained and ranks are packed within an island.
The last island may contain fewer ranks if they don't split evenly.
Host IDs are integers [0:numIslands*nodesPerIsland]
"""
hosts = []
numRanksPerIsland = int(ceil(self.numRanks / float(cluster.numIslands)))  # float division so ceil takes effect
for i in range(cluster.numIslands):
for r in range(numRanksPerIsland):
host = i*cluster.nodesPerIsland + r
if host < cluster.numNodes:
hosts.append(host)
self.hosts = hosts
def allocateRandom(self, cluster):
"""
Allocate ranks to hosts maintaining rank order but spaced at random intervals.
Host IDs are integers [0:numIslands*nodesPerIsland]
"""
allHosts = list(range(cluster.numNodes))  # list() so shuffle works on Python 3
shuffle(allHosts)
hosts = allHosts[0:self.numRanks]
hosts.sort()
self.hosts = hosts
def getHosts(self):
return self.hosts
def getRanks(self):
return range(self.numRanks)
def __str__(self):
jString = "Job: " + str(self.numRanks) + " ranks on hosts " + str(self.getHosts())
return jString
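# Minimal driver (a sketch using only the classes above; parameter values are illustrative):
if __name__ == "__main__":
    cluster = Cluster(numIslands=2, nodesPerIsland=8)
    job = Job(numRanks=8)
    job.allocateSplitIslands(cluster)
    model = MessageModel()
    model.setPairedMessages(job)
    model.setBandwidths(cluster)
    print(cluster)
    print(job)
    print(model)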
|
def add(param1, param2):
return param1 + param2
###
def centuryFromYear(year):
return year // 100 + 1 if year % 100 != 0 else year // 100  # integer division for Python 3
###
def checkPalindrome(inputString):
return inputString == inputString[::-1]
###
def adjacentElementsProduct(inputArray):
output = float("-inf")  # sys.maxint was removed in Python 3
for i in range(1, len(inputArray)):
if inputArray[i]*inputArray[i-1] > output:
output = inputArray[i]*inputArray[i-1]
return output
###
def shapeArea(n):
area = 1
for i in range(n):
area += 4 * i
return area
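# Closed-form check of the loop above: area(n) = 1 + 4*(0 + 1 + ... + (n-1))
# = 2*n*n - 2*n + 1, e.g. shapeArea(3) == 13.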
###
def makeArrayConsecutive2(statues):
statues = sorted(statues)
end = statues[-1]
i = statues[0]
count = 0
while i < end:
i+= 1
if i not in statues:
count+= 1
return count
###
def almostIncreasingSequence(s):
i = 0
j = 1
count = 0
while i < len(s)-1 and j < len(s):
if s[i] >= s[j]:
if count == 1:
return False
count += 1
if i != 0:
if s[i-1] >= s[j]:
j += 1
continue
i = j
j += 1
return True
###
def matrixElementsSum(matrix):
output = 0
for i in range(len(matrix[0])):
for j in range(len(matrix)):
if matrix[j][i] == 0:
break
output += matrix[j][i]
return output
###
def allLongestStrings(inputArray):
longestLength = max([len(x) for x in inputArray])
output = []
for each in inputArray:
if len(each) == longestLength:
output.append(each)
return output
###
def isPowerOfTwo(n):
if n < 1:
    return False  # 0 and negatives are not powers of two; also avoids looping forever on 0
while n % 2 == 0:
    n >>= 1
return n == 1  # covers n == 1 (2**0), which the old loop missed
###
def isDivisibleBy6(inputString):
digitSum = 0
leftBound = ord('0')
rightBound = ord('9')
answer = []
mask = list(inputString)
asteriskPos = -1
for i in range(len(mask)):
if (leftBound <= ord(mask[i]) and
ord(mask[i]) <= rightBound):
digitSum += ord(mask[i]) - leftBound  # ord(), not int(): subtract character codes
else:
asteriskPos = i
for i in range(10):
if (digitSum + i) % 3 == 0:
mask[asteriskPos] = chr(leftBound + i)
if (ord(mask[len(mask) - 1]) - leftBound) % 2 == 0:
answer.append(''.join(mask))
return answer
###
def chessBoardSquaresUnderQueenAttack(a, b):
t = 0
for i in range(a):
for j in range(b):
for dx in range(a):
for dy in range(b):
if (i == dx or j == dy) or abs(i-dx) == abs(j-dy):
t+= 1
return t - a*b
###
def regularBracketSequence2(sequence):
stack = []
for i in range(len(sequence)):
if (len(stack) > 0
and stack[len(stack) - 1] == '(' and sequence[i] == ')'):
stack.pop()
continue
if (len(stack) > 0
and stack[len(stack) - 1] == '[' and sequence[i] == ']'):
stack.pop()
continue
stack.append(sequence[i])
if len(stack) != 0:
return False
return True
|
from channels.generic.websocket import WebsocketConsumer
import json
import time
from datetime import datetime
from django.core.exceptions import ObjectDoesNotExist
from lora_networks.models import Device_info, Device, Node
class LoraConsumer(WebsocketConsumer):
def connect(self):
self.accept()
def disconnect(self, close_code):
pass
def receive(self, text_data):
text_data_json = json.loads(text_data)
message = text_data_json['message']
print(message)
self.send(text_data=json.dumps({
'message': message
}))
class TestConsumer(WebsocketConsumer):
def connect(self):
self.accept()
def disconnect(self, close_code):
pass
def getDeviceId(self, name):
dev = Device.objects.get(name=name)
dev_id = dev.id
return dev_id
def receive(self, text_data):
text_data_json = json.loads(text_data)
dev_name = text_data_json['device']
date = text_data_json['date']
datetime_object = datetime.strptime(date, '%Y-%m-%d %H:%M:%S')
info = text_data_json['info']
dev_id = self.getDeviceId(dev_name)
print(dev_name)
print(dev_id)
print(str(datetime_object))
print(info)
devInfo = Device_info.objects.create(device_id=dev_id, date=date, info=info)
self.send(text_data=json.dumps({
'message': 'Yes!'
}))
class NodeConsumer(WebsocketConsumer):
def __init__(self, path):
WebsocketConsumer.__init__(self, path)
self.buf = b''
self.node_name = ''
self.sensors = dict()
self.response = dict()
self.response['message'] = 'ok'
def get_node_id(self, name):
dev_id = -1
try:
dev = Node.objects.get(name=name)
dev_id = dev.id
return dev_id
except ObjectDoesNotExist:
self.response['message'] = 'error'
self.response['error'] = 'NodeDoesNotExist'
return dev_id
def get_device_id(self, name):
dev_id = -1
try:
dev = Device.objects.get(name=name)
dev_id = dev.id
return dev_id
except ObjectDoesNotExist:
self.response['message'] = 'error'
self.response['error'] = 'DeviceDoesNotExist'
return dev_id
def update_node(self):
k = self.node_name
k += 'vdd'
vdd = self.sensors.pop(k, -1)
if vdd > -1:
Node.objects.filter(name=self.node_name).update(charge=vdd)
def create_sensor_data(self):
for key, value in self.sensors.items():
dev_id = self.get_device_id(key)
if dev_id > -1:
Device_info.objects.create(device_id=dev_id, date=datetime.now(), info=value)
def save_to_db(self):
if self.response.get('message') == 'ok':
self.update_node()
self.create_sensor_data()
def crc99(self, data_len):
b = 65535  # 16-bit mask for the intermediate CRC state
s = 255  # 8-bit mask for the final value
crc = 0
for i in range(data_len):
crc = (crc << 2) + crc + self.buf[i]
crc = crc & b
crc = (crc << 2) + crc + self.buf[i]
crc = crc & b
crc = (crc ^ (crc >> 8))
return crc & s
def parser(self):
message_length = len(self.buf)-1
i = 0
for c in reversed(self.buf):
if c == 35:
break
i += 1
message_length -= i-2
if message_length > 0:
sensors_count = (message_length - 25)//16
crc = self.buf[message_length-1]
p_crc = self.crc99(message_length-1)
if p_crc == crc:
self.node_name = self.buf[0:24].decode()
node_id = self.get_node_id(self.node_name)
if node_id >= 0:
j = 0
for i in range(sensors_count):
n = 25 + j
m = n + 12
name_sensor = self.node_name
name_sensor += self.buf[n:m].decode()
name_sensor = name_sensor.rstrip('\x00')
num = self.buf[38+j] << 8 | self.buf[39+j]
num /= 100
self.sensors[name_sensor] = num
j = j + 16
else:
self.response['message'] = 'error'
self.response['error'] = 'CRCError'
else:
self.response['message'] = 'error'
self.response['error'] = 'LengthError'
def connect(self):
self.accept()
def disconnect(self, close_code):
pass
def receive(self, text_data=None, bytes_data=None):
if text_data:
self.send(text_data=json.dumps({
'message': 'Has text'
}))
else:
self.buf = bytes_data
self.parser()
self.save_to_db()
self.send(text_data=json.dumps(self.response))
class TimeConsumer(WebsocketConsumer):
def __init__(self, path):
WebsocketConsumer.__init__(self, path)
self.buf = b''
self.response = dict()
self.response['message'] = 'ok'
def crc99(self, data_len):
b = 65535
s = 255
crc = 0
for i in range(data_len):
crc = (crc << 2) + crc + self.buf[i]
crc = crc & b
crc = (crc << 2) + crc + self.buf[i]
crc = crc & b
crc = (crc ^ (crc >> 8))
return crc & s
def connect(self):
self.accept()
def disconnect(self, close_code):
pass
def receive(self, text_data=None, bytes_data=None):
tim = round(time.time()*1000)
self.buf = bytes(str(tim), 'utf-8')
message_length = len(self.buf)-1
crc = self.crc99(message_length)
self.response['message'] = str(tim) + "#" + str(crc)
self.send(text_data=json.dumps(self.response))
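# Note (inferred from the handler above): any inbound frame triggers a reply
# of the form "<epoch millis>#<crc99 over all digits but the last>", so a
# client can recompute crc99 over the same bytes to validate the timestamp.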
|
import pytest
from yamlpath.enums import CollectorOperators
from yamlpath.path import CollectorTerms
class Test_path_CollectorTerms():
"""Tests for the CollectorTerms class."""
@pytest.mark.parametrize("path,operator,output", [
("abc", CollectorOperators.NONE, "(abc)"),
("abc", CollectorOperators.ADDITION, "+(abc)"),
("abc", CollectorOperators.SUBTRACTION, "-(abc)"),
])
def test_str(self, path, operator, output):
assert output == str(CollectorTerms(path, operator))
|
"""
Test ILPSolver in Cassiopeia.solver.
"""
import os
import unittest
import itertools
import networkx as nx
import numpy as np
import pandas as pd
import pathlib as pl
import cassiopeia as cas
from cassiopeia.solver.ILPSolver import ILPSolver
from cassiopeia.mixins import ILPSolverError
from cassiopeia.solver import ilp_solver_utilities
GUROBI_INSTALLED = True
try:
import gurobipy
except ModuleNotFoundError:
GUROBI_INSTALLED = False
def find_triplet_structure(triplet, T):
a, b, c = triplet[0], triplet[1], triplet[2]
a_ancestors = [node for node in nx.ancestors(T, a)]
b_ancestors = [node for node in nx.ancestors(T, b)]
c_ancestors = [node for node in nx.ancestors(T, c)]
ab_common = len(set(a_ancestors) & set(b_ancestors))
ac_common = len(set(a_ancestors) & set(c_ancestors))
bc_common = len(set(b_ancestors) & set(c_ancestors))
structure = "-"
if ab_common > bc_common and ab_common > ac_common:
structure = "ab"
elif ac_common > bc_common and ac_common > ab_common:
structure = "ac"
elif bc_common > ab_common and bc_common > ac_common:
structure = "bc"
return structure
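# Note (inferred from the logic above): the returned label names the pair of
# leaves sharing the most ancestors, i.e. the cherry of the rooted triplet;
# "-" marks an unresolved (star) triplet.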
class TestILPSolver(unittest.TestCase):
def assertIsFile(self, path):
if not pl.Path(path).resolve().is_file():
raise AssertionError("File does not exist: %s" % str(path))
def setUp(self):
# basic PP example with no missing data
cm = pd.DataFrame.from_dict(
{
"a": [1, 1, 0],
"b": [1, 2, 0],
"c": [1, 2, 1],
"d": [2, 0, 0],
"e": [2, 0, 2],
},
orient="index",
columns=["x1", "x2", "x3"],
)
# basic PP example with duplicates
cm_duplicates = pd.DataFrame.from_dict(
{
"a": [1, 1, 0],
"b": [1, 2, 0],
"c": [1, 2, 1],
"d": [2, 0, 0],
"e": [2, 0, 2],
"f": [1, 1, 0],
},
orient="index",
columns=["x1", "x2", "x3"],
)
# basic example with missing data
cm_missing = pd.DataFrame.from_dict(
{
"a": [1, 3, 1, 1],
"b": [1, 3, 1, -1],
"c": [1, 0, 1, 0],
"d": [1, 1, 3, 0],
"e": [1, 1, 0, 0],
"f": [2, 0, 0, 0],
"g": [2, 4, -1, -1],
"h": [2, 4, 2, 0],
},
orient="index",
)
dir_path = os.path.dirname(os.path.realpath(__file__))
open(os.path.join(dir_path, "test.log"), "a").close()
self.pp_tree = cas.data.CassiopeiaTree(cm, missing_state_indicator=-1)
self.duplicates_tree = cas.data.CassiopeiaTree(
cm_duplicates, missing_state_indicator=-1
)
self.missing_tree = cas.data.CassiopeiaTree(
cm_missing, missing_state_indicator=-1
)
self.logfile = os.path.join(dir_path, "test.log")
self.ilp_solver = cas.solver.ILPSolver(mip_gap=0.0)
# for the purposes of making sure we throw an error when a potential
# graph cannot be solved
self.ilp_solver_small = cas.solver.ILPSolver(mip_gap=0.0, maximum_potential_graph_layer_size=3)
def test_raises_error_on_ambiguous(self):
cm = pd.DataFrame.from_dict(
{
"c1": [5, (0, 1), 1, 2, -1],
"c2": [0, 0, 3, 2, -1],
"c3": [-1, 4, 0, 2, 2],
"c4": [4, 4, 1, 2, 0],
},
orient="index",
columns=["a", "b", "c", "d", "e"],
)
tree = cas.data.CassiopeiaTree(cm, missing_state_indicator=-1)
with self.assertRaises(ILPSolverError):
solver = cas.solver.ILPSolver()
solver.solve(tree)
def test_get_lca_cython(self):
# test single sample
cm = self.missing_tree.character_matrix.copy().astype(str)
lca = ilp_solver_utilities.get_lca_characters_cython(
cm.loc["a"].values, cm.loc["b"].values, 4, "-1"
)
self.assertEqual(lca, "1|3|1|1")
lca = ilp_solver_utilities.get_lca_characters_cython(
cm.loc["h"].values, cm.loc["b"].values, 4, "-1"
)
self.assertEqual(lca, "0|0|0|0")
def test_cython_hamming_dist(self):
sample1 = np.array(["1", "2", "3", "0", "0"])
sample2 = np.array(["1", "4", "0", "0", "1"])
dist = ilp_solver_utilities.simple_hamming_distance_cython(
sample1, sample2, "-"
)
self.assertEqual(dist, 3)
sample1 = np.array(["1", "2", "3", "0", "-"])
sample2 = np.array(["1", "-", "0", "0", "1"])
dist = ilp_solver_utilities.simple_hamming_distance_cython(
sample1, sample2, "-"
)
self.assertEqual(dist, 1)
@unittest.skipUnless(GUROBI_INSTALLED, "Gurobi installation not found.")
def test_single_sample_ilp(self):
# test single sample
cm = pd.DataFrame([1], index=["a"])
tree = cas.data.CassiopeiaTree(cm)
self.ilp_solver.solve(tree, logfile=self.logfile)
expected_leaves = ["a"]
self.assertCountEqual(expected_leaves, tree.leaves)
# test single unique sample
cm = pd.DataFrame([[1], [1], [1]], index=["a", "b", "c"])
tree = cas.data.CassiopeiaTree(cm)
self.ilp_solver.solve(tree, logfile=self.logfile)
expected_leaves = ["a", "b", "c"]
self.assertCountEqual(expected_leaves, tree.leaves)
def test_basic_ilp_constructor(self):
self.assertEqual(self.ilp_solver.convergence_time_limit, 12600)
self.assertEqual(
self.ilp_solver.maximum_potential_graph_layer_size, 10000
)
self.assertFalse(self.ilp_solver.weighted)
expected_character_matrix = pd.DataFrame.from_dict(
{
"a": [1, 1, 0],
"b": [1, 2, 0],
"c": [1, 2, 1],
"d": [2, 0, 0],
"e": [2, 0, 2],
},
orient="index",
columns=["x1", "x2", "x3"],
)
pd.testing.assert_frame_equal(
expected_character_matrix, self.pp_tree.character_matrix.copy()
)
def test_get_layer_for_potential_graph(self):
unique_character_matrix = (
self.pp_tree.character_matrix.drop_duplicates()
)
source_nodes = unique_character_matrix.values
dim = source_nodes.shape[1]
source_node_strings = np.array(
["|".join(arr) for arr in source_nodes.astype(str)]
)
(
layer_nodes,
layer_edges,
) = ilp_solver_utilities.infer_layer_of_potential_graph(
source_node_strings, 10
)
layer_nodes = np.array(
[node.split("|") for node in layer_nodes], dtype=int
)
layer_nodes = np.unique(layer_nodes, axis=0)
expected_next_layer = np.array(
[[1, 0, 0], [1, 2, 0], [0, 0, 0], [2, 0, 0]]
)
for sample in expected_next_layer:
self.assertIn(sample, layer_nodes)
layer_edges = np.array(
[edge.split("|") for edge in layer_edges], dtype=int
)
layer_edges = [(list(e[:dim]), list(e[dim:])) for e in layer_edges]
expected_edges = [
([1, 0, 0], [1, 1, 0]),
([1, 0, 0], [1, 2, 0]),
([1, 0, 0], [1, 2, 1]),
([1, 2, 0], [1, 2, 0]),
([1, 2, 0], [1, 2, 1]),
([0, 0, 0], [1, 1, 0]),
([0, 0, 0], [1, 2, 0]),
([0, 0, 0], [1, 2, 1]),
([0, 0, 0], [2, 0, 0]),
([0, 0, 0], [2, 0, 2]),
([2, 0, 0], [2, 0, 0]),
([2, 0, 0], [2, 0, 2]),
]
for edge in expected_edges:
self.assertIn(edge, layer_edges)
uniq_edges = []
for edge in layer_edges:
if edge not in uniq_edges:
uniq_edges.append(edge)
self.assertEqual(len(uniq_edges), len(expected_edges))
def test_simple_potential_graph_inference(self):
unique_character_matrix = (
self.pp_tree.character_matrix.drop_duplicates()
)
max_lca_height = 10
potential_graph = self.ilp_solver.infer_potential_graph(
unique_character_matrix,
0,
max_lca_height,
self.pp_tree.priors,
self.pp_tree.missing_state_indicator,
)
# expected nodes
expected_nodes = [
(1, 1, 0),
(1, 2, 0),
(1, 2, 1),
(2, 0, 0),
(2, 0, 2),
(1, 0, 0),
(1, 2, 0),
(0, 0, 0),
(2, 0, 0),
]
for node in expected_nodes:
self.assertIn(node, potential_graph.nodes())
# expected edges
expected_edges = [
((1, 0, 0), (1, 1, 0)),
((1, 0, 0), (1, 2, 0)),
((1, 0, 0), (1, 2, 1)),
((1, 2, 0), (1, 2, 1)),
((0, 0, 0), (1, 1, 0)),
((0, 0, 0), (1, 2, 0)),
((0, 0, 0), (1, 2, 1)),
((0, 0, 0), (2, 0, 0)),
((0, 0, 0), (2, 0, 2)),
((2, 0, 0), (2, 0, 2)),
((0, 0, 0), (1, 0, 0)),
]
for edge in expected_edges:
self.assertIn(edge, potential_graph.edges())
self.assertEqual(len(potential_graph.edges()), len(expected_edges))
def test_post_process_steiner_solution(self):
tree = nx.DiGraph()
tree.add_weighted_edges_from(
[
(6, "c1", 1),
(6, "c2", 1),
(8, "c3", 1),
(8, "c4", 1),
(7, "c5", 1),
(7, "c6", 1),
(7, "c7", 1),
(8, 6, 1),
(9, 7, 1),
(9, 8, 1),
(10, "c4", 1.5),
(11, 10, 1.5),
(9, 6, 1.5),
(8, "c2", 0.5),
]
)
processed_tree = self.ilp_solver.post_process_steiner_solution(tree, 9)
expected_tree = nx.DiGraph()
expected_tree.add_weighted_edges_from(
[
(6, "c1", 1),
(6, "c2", 1),
(8, "c3", 1),
(8, "c4", 1),
(7, "c5", 1),
(7, "c6", 1),
(7, "c7", 1),
(8, 6, 1),
(9, 7, 1),
(9, 8, 1),
]
)
self.assertEqual(set(processed_tree.edges), set(expected_tree.edges))
def test_append_sample_nodes_and_remove_spurious_leaves(self):
cm = self.duplicates_tree.character_matrix
tree = nx.DiGraph()
tree.add_edges_from(
[
((0, 0, 0), (1, 0, 0)),
((0, 0, 0), (2, 0, 0)),
((2, 0, 0), (2, 0, 2)),
((2, 0, 0), (2, 0, 1)),
((1, 0, 0), (1, 1, 0)),
((1, 0, 0), (1, 2, 0)),
((1, 2, 0), (1, 2, 1)),
((1, 2, 0), (1, 2, 2)),
((2, 0, 1), (2, 1, 1)),
((2, 0, 1), (2, 2, 1)),
((2, 0, 1), (2, 3, 1)),
((1, 0, 0), (1, 1, 2)),
((1, 0, 0), (1, 1, 1)),
]
)
processed_tree = self.ilp_solver._ILPSolver__append_sample_names_and_remove_spurious_leaves(
tree, cm
)
expected_tree = nx.DiGraph()
expected_tree.add_edges_from(
[
((0, 0, 0), (1, 0, 0)),
((0, 0, 0), (2, 0, 0)),
((2, 0, 0), "d"),
((2, 0, 0), (2, 0, 2)),
((2, 0, 2), "e"),
((1, 0, 0), (1, 1, 0)),
((1, 1, 0), "a"),
((1, 1, 0), "f"),
((1, 0, 0), (1, 2, 0)),
((1, 2, 0), "b"),
((1, 2, 0), (1, 2, 1)),
((1, 2, 1), "c"),
]
)
self.assertEqual(set(processed_tree.edges), set(expected_tree.edges))
@unittest.skipUnless(GUROBI_INSTALLED, "Gurobi installation not found.")
def test_ilp_solver_perfect_phylogeny(self):
self.ilp_solver.solve(self.pp_tree, logfile=self.logfile)
tree = self.pp_tree.get_tree_topology()
# make sure log file is created correctly
self.assertIsFile(self.logfile)
# make sure there's one root
roots = [n for n in tree if tree.in_degree(n) == 0]
self.assertEqual(len(roots), 1)
# make sure all samples are leaves
tree_leaves = self.pp_tree.leaves
expected_leaves = ["a", "b", "c", "d", "e"]
for leaf in expected_leaves:
self.assertIn(leaf, tree_leaves)
# make sure every node has at most one parent
multi_parents = [n for n in tree if tree.in_degree(n) > 1]
self.assertEqual(len(multi_parents), 0)
# make sure the resulting tree has no unifurcations
one_child = [
n for n in self.pp_tree.nodes if len(self.pp_tree.children(n)) == 1
]
self.assertEqual(len(one_child), 0)
# expected parsimony
expected_parsimony = 6
# apply camin-sokal parsimony
self.pp_tree.reconstruct_ancestral_characters()
observed_parsimony = 0
for e in self.pp_tree.depth_first_traverse_edges():
c1, c2 = (
self.pp_tree.get_character_states(e[0]),
self.pp_tree.get_character_states(e[1]),
)
observed_parsimony += cas.solver.dissimilarity.hamming_distance(
np.array(c1), np.array(c2)
)
self.assertEqual(observed_parsimony, expected_parsimony)
# expected tree structure
expected_tree = nx.DiGraph()
expected_tree.add_nodes_from(
["a", "b", "c", "d", "e", "root", "6", "7", "8", "9"]
)
expected_tree.add_edges_from(
[
("root", "9"),
("9", "8"),
("9", "7"),
("7", "6"),
("7", "a"),
("6", "b"),
("6", "c"),
("8", "e"),
("8", "d"),
]
)
triplets = itertools.combinations(["a", "b", "c", "d", "e"], 3)
for triplet in triplets:
expected_triplet = find_triplet_structure(triplet, expected_tree)
observed_triplet = find_triplet_structure(triplet, tree)
self.assertEqual(expected_triplet, observed_triplet)
# make sure that the tree can be converted to newick format
tree_newick = self.pp_tree.get_newick()
def test_potential_graph_inference_with_duplicates(self):
unique_character_matrix = (
self.duplicates_tree.character_matrix.drop_duplicates()
)
max_lca_height = 10
potential_graph = self.ilp_solver.infer_potential_graph(
unique_character_matrix,
0,
max_lca_height,
self.duplicates_tree.priors,
self.duplicates_tree.missing_state_indicator,
)
# expected nodes
expected_nodes = [
(1, 1, 0),
(1, 2, 0),
(1, 2, 1),
(2, 0, 0),
(2, 0, 2),
(1, 0, 0),
(1, 2, 0),
(0, 0, 0),
(2, 0, 0),
]
for node in expected_nodes:
self.assertIn(node, potential_graph.nodes())
# expected edges
expected_edges = [
((1, 0, 0), (1, 1, 0)),
((1, 0, 0), (1, 2, 0)),
((1, 0, 0), (1, 2, 1)),
((1, 2, 0), (1, 2, 1)),
((0, 0, 0), (1, 1, 0)),
((0, 0, 0), (1, 2, 0)),
((0, 0, 0), (1, 2, 1)),
((0, 0, 0), (2, 0, 0)),
((0, 0, 0), (2, 0, 2)),
((2, 0, 0), (2, 0, 2)),
((0, 0, 0), (1, 0, 0)),
]
for edge in expected_edges:
self.assertIn(edge, potential_graph.edges())
self.assertEqual(len(potential_graph.edges()), len(expected_edges))
@unittest.skipUnless(GUROBI_INSTALLED, "Gurobi installation not found.")
def test_ilp_solver_with_duplicates(self):
self.ilp_solver.solve(self.duplicates_tree, logfile=self.logfile)
tree = self.duplicates_tree.get_tree_topology()
# make sure log file is created correctly
self.assertIsFile(self.logfile)
# make sure there's one root
roots = [n for n in tree if tree.in_degree(n) == 0]
self.assertEqual(len(roots), 1)
# make sure all samples are leaves
tree_leaves = self.duplicates_tree.leaves
expected_leaves = ["a", "b", "c", "d", "e", "f"]
for leaf in expected_leaves:
self.assertIn(leaf, tree_leaves)
# make sure every node has at most one parent
multi_parents = [n for n in tree if tree.in_degree(n) > 1]
self.assertEqual(len(multi_parents), 0)
# make sure the resulting tree has no unifurcations
one_child = [
n
for n in self.duplicates_tree.nodes
if len(self.duplicates_tree.children(n)) == 1
]
self.assertEqual(len(one_child), 0)
# expected parsimony
expected_parsimony = 6
self.duplicates_tree.reconstruct_ancestral_characters()
observed_parsimony = 0
for e in self.duplicates_tree.depth_first_traverse_edges():
c1, c2 = (
self.duplicates_tree.get_character_states(e[0]),
self.duplicates_tree.get_character_states(e[1]),
)
observed_parsimony += cas.solver.dissimilarity.hamming_distance(
np.array(c1), np.array(c2)
)
self.assertEqual(observed_parsimony, expected_parsimony)
# expected tree structure
expected_tree = nx.DiGraph()
expected_tree.add_nodes_from(
["a", "b", "c", "d", "e", "f", "root", "6", "7", "8", "9"]
)
expected_tree.add_edges_from(
[
("root", "9"),
("9", "8"),
("9", "7"),
("7", "6"),
("7", "5"),
("7", "5"),
("5", "a"),
("5", "f"),
("6", "b"),
("6", "c"),
("8", "e"),
("8", "d"),
]
)
triplets = itertools.combinations(["a", "b", "c", "d", "e", "f"], 3)
for triplet in triplets:
expected_triplet = find_triplet_structure(triplet, expected_tree)
observed_triplet = find_triplet_structure(triplet, tree)
self.assertEqual(expected_triplet, observed_triplet)
self.ilp_solver.solve(
self.duplicates_tree,
logfile=self.logfile,
collapse_mutationless_edges=True,
)
tree = self.duplicates_tree.get_tree_topology()
for triplet in triplets:
expected_triplet = find_triplet_structure(triplet, expected_tree)
observed_triplet = find_triplet_structure(triplet, tree)
self.assertEqual(expected_triplet, observed_triplet)
# make sure that the tree can be converted to newick format
tree_newick = self.duplicates_tree.get_newick()
@unittest.skipUnless(GUROBI_INSTALLED, "Gurobi installation not found.")
def test_ilp_solver_missing_data(self):
self.ilp_solver.solve(self.missing_tree, logfile=self.logfile)
tree = self.missing_tree.get_tree_topology()
# make sure log file is created correctly
self.assertIsFile(self.logfile)
# make sure there's one root
roots = [n for n in tree if tree.in_degree(n) == 0]
self.assertEqual(len(roots), 1)
# make sure all samples are leaves
tree_leaves = [n for n in tree if tree.out_degree(n) == 0]
expected_leaves = ["a", "b", "c", "d", "e", "f", "g", "h"]
for leaf in expected_leaves:
self.assertIn(leaf, tree_leaves)
expected_tree = nx.DiGraph()
expected_tree.add_edges_from(
[
("node0", "node1"),
("node0", "node2"),
("node1", "node3"),
("node1", "node4"),
("node3", "c"),
("node3", "node6"),
("node6", "a"),
("node6", "b"),
("node4", "d"),
("node4", "e"),
("node2", "f"),
("node2", "node5"),
("node5", "g"),
("node5", "h"),
]
)
triplets = itertools.combinations(
["a", "b", "c", "d", "e", "f", "g", "h"], 3
)
for triplet in triplets:
expected_triplet = find_triplet_structure(triplet, expected_tree)
observed_triplet = find_triplet_structure(triplet, tree)
self.assertEqual(expected_triplet, observed_triplet)
self.ilp_solver.solve(
self.missing_tree,
logfile=self.logfile,
collapse_mutationless_edges=True,
)
tree = self.missing_tree.get_tree_topology()
for triplet in triplets:
expected_triplet = find_triplet_structure(triplet, expected_tree)
observed_triplet = find_triplet_structure(triplet, tree)
self.assertEqual(expected_triplet, observed_triplet)
@unittest.skipUnless(GUROBI_INSTALLED, "Gurobi installation not found.")
def test_ilp_throws_error_when_potential_graph_is_not_found(self):
with self.assertRaises(ILPSolverError):
self.ilp_solver_small.solve(self.missing_tree, logfile=self.logfile)
def tearDown(self):
os.remove(self.logfile)
if __name__ == "__main__":
unittest.main()
|
import argparse
import json
import bisect
from itertools import chain
__FORMAT_TYPE__ = "bp-corpus"
__FORMAT_VERSION__ = "v8f"
IGNORE_QUAD_CLASS = True # applicable for Abstract events
class Corpus:
def __init__(self,data):
self.__corpus_id = data.get('corpus-id', '')
self.__format_type = data.get('format-type', '')
self.__format_version = data.get('format-version', '')
self.__provenance = data.get('provenance', '')
# assert(self.__format_type == __FORMAT_TYPE__)
# assert(self.__format_version == __FORMAT_VERSION__)
# should be a list of supported types
self.__segments = []
for entry_id, entry_value in data['entries'].items():
assert(entry_id == entry_value['entry-id'])
self.add_entry(entry_value)
@staticmethod
def from_file(filepath):
with open(filepath, 'r', encoding='utf8') as f:
data = json.load(f)
return Corpus(data)
def add_entry(self, entry_dict):
if entry_dict['segment-type'] == 'sentence':
segment = Segment(doc_id=entry_dict['doc-id'], entry_dict=entry_dict)
segment.add_segment_section_entry("Sentence", 0, len(segment.text) - 1)
elif entry_dict['segment-type'] in {'document','highlight'}:
segment = Segment(doc_id=entry_dict['doc-id'], entry_dict=entry_dict)
else:
raise RuntimeError(
'segment-type: {} not implemented!'.format(
entry_dict['segment-type']
)
)
self.__segments.append(segment)
@property
def format_type(self):
return self.__format_type
@property
def format_version(self):
return self.__format_version
@property
def segments(self):
return self.__segments
def clear_annotation(self):
for segment in self.segments:
segment.clear_annotation()
def save(self, output_file):
entries = {}
for segment in self.segments:
corpus_entries = segment.to_json_dict()
entry_id = corpus_entries['entry-id']
assert(entry_id not in entries)
entries[entry_id] = corpus_entries
data = {
'corpus-id': self.__corpus_id,
'entries': entries,
'format-type': self.__format_type,
'format-version': self.__format_version,
'provenance': self.__provenance
}
with open(output_file, 'w', encoding='utf-8') as output:
json.dump(
data, output, ensure_ascii=False, indent=2, sort_keys=True)
class SegmentSection:
def __init__(self, structural_element):
self.__structural_element = structural_element
self.__entries = []
def add_entry(self, start, end):
bisect.insort(self.__entries, (start, end))
@property
def entries(self):
return self.__entries
class Segment:
def __init__(self, *, doc_id, entry_dict):
self.__abstract_events = dict()
self.__basic_events = dict()
self.__granular_templates = dict()
self.__span_sets = dict()
self.__relations = dict()
self.__coref_events = []
self.__doc_id = doc_id
self.__text = entry_dict['segment-text']
self.__entry_id = entry_dict.get('entry-id', "")
self.__sent_id = entry_dict.get('sent-id', "")
self.__segment_sections = dict()
if "segment-sections" in entry_dict:
segment_sections = entry_dict.get("segment-sections", [])
for segment_section_entry in segment_sections:
structural_element = segment_section_entry["structural-element"]
segment_section_obj = self.__segment_sections.get(
structural_element,
SegmentSection(structural_element)
)
segment_section_obj.add_entry(
segment_section_entry['start'],
segment_section_entry['end']
)
self.__segment_sections[structural_element] = segment_section_obj
# read abstract-events
if "abstract-events" in entry_dict.get('annotation-sets', {}):
events = entry_dict.get('annotation-sets', {}).get('abstract-events', {})
events_data = events.get('events', {})
spans_sets_data = events.get('span-sets', {})
for span_set_name, span_set_value in spans_sets_data.items():
spans = []
for span_data in span_set_value.get('spans', []):
# Data is often missing from span, use defaults when missing
hstring = None
string = None
start = -1
end = -1
hinferred = None
hstart = -1
hend = -1
synclass = None
if 'hstring' in span_data:
hstring = span_data['hstring']
if 'string' in span_data:
string = span_data['string']
if 'start' in span_data:
start = span_data['start']
if 'end' in span_data:
end = span_data['end']
if 'hinferred' in span_data:
hinferred = span_data['hinferred']
if 'hstart' in span_data:
hstart = span_data['hstart']
if 'hend' in span_data:
hend = span_data['hend']
if 'synclass' in span_data:
synclass = span_data['synclass']
spans.append(Span(hstring=hstring, string=string,
start=start, end=end,
hinferred=hinferred, hstart=hstart,
hend=hend, synclass=synclass))
self.__span_sets[span_set_name] = SpanSet(
span_set_name=span_set_name,
spans=spans
)
for event_name, event_dict in events_data.items():
agents = []
patients = []
for agent_span_set_id in event_dict['agents']:
agents.append(self.span_sets[agent_span_set_id])
for patient_span_set_id in event_dict['patients']:
patients.append(self.span_sets[patient_span_set_id])
if IGNORE_QUAD_CLASS is True:
helpful_harmful_value = 'neutral'
material_verbal_value = 'material'
else:
helpful_harmful_value = event_dict['helpful-harmful']
material_verbal_value = event_dict['material-verbal']
abstract_event = AbstractEvent(
event_id=event_dict['eventid'],
helpful_harmful=helpful_harmful_value,
material_verbal=material_verbal_value,
anchor_span_set=self.span_sets[event_dict['anchors']],
agent_span_sets=agents,
patient_span_sets=patients,
agent_offsets=event_dict.get('agents_offsets', {}),
patient_offsets=event_dict.get('patients_offsets', {}),
anchor_offsets=event_dict.get('anchor_offsets', {}),
ref_events=event_dict.get('ref-events', [])
)
self.add_abstract_event(abstract_event)
# read basic-events
if "basic-events" in entry_dict.get('annotation-sets', {}):
events = entry_dict.get('annotation-sets', {}).get('basic-events', {})
events_data = events.get('events', {})
spans_sets_data = events.get('span-sets', {})
relations_data = events.get('includes-relations', {})
coref_events_data = events.get('template-filler-coref-events', []) # list of (list of event-ids)
for span_set_name, span_set_value in spans_sets_data.items():
spans = []
for span_data in span_set_value.get('spans', []):
if 'hstring' in span_data:
spans.append(
Span(
hstring=span_data['hstring'],
hinferred=span_data.get('hinferred', None),
string=span_data['string'],
start=span_data['start'],
end=span_data['end'],
hstart=span_data['hstart'],
hend=span_data['hend'],
synclass=span_data.get('synclass', None)
)
)
else:
spans.append(
Span(
hstring=None,
string=span_data['string'],
start=span_data['start'],
end=span_data['end'],
synclass=span_data.get('synclass', None)
)
)
self.__span_sets[span_set_name] = SpanSet(
span_set_name=span_set_name,
spans=spans
)
# cache span set IDs for events
event_to_span_set = dict()
for event_name, event_dict in events_data.items():
event_id = event_dict['eventid']
anchor_span_set = self.span_sets[event_dict['anchors']]
event_to_span_set[event_id] = anchor_span_set
for event_name, event_dict in events_data.items():
agents = []
patients = []
money = []
arg = []
for agent_span_set_id in event_dict['agents']:
agents.append(self.span_sets[agent_span_set_id])
for patient_span_set_id in event_dict['patients']:
patients.append(self.span_sets[patient_span_set_id])
for ref_event_id in event_dict['ref-events']:
arg.append(event_to_span_set[ref_event_id])
soa = False
if 'state-of-affairs' in event_dict:
soa = event_dict['state-of-affairs']
if 'money' in event_dict:
for money_span_set_id in event_dict['money']:
money.append(self.span_sets[money_span_set_id])
basic_event = BasicEvent(
event_id=event_dict['eventid'],
event_type=event_dict['event-type'],
anchor_span_set=self.span_sets[event_dict['anchors']],
agent_span_sets=agents,
patient_span_sets=patients,
agent_offsets=event_dict.get('agents_offsets', {}),
patient_offsets=event_dict.get('patients_offsets', {}),
anchor_offsets=event_dict.get('anchor_offsets', {}),
ref_events=event_dict.get('ref-events', []),
ref_event_span_sets=arg,
state_of_affairs=soa,
money_span_sets=money,
money_offsets=event_dict.get('money_offsets', {}),
)
self.add_basic_event(basic_event)
for ssid, ssids_related in relations_data.items():
self.__relations[ssid] = []
for entry in ssids_related:
self.__relations[ssid].append(
self.__span_sets[entry]
)
for coref_events in coref_events_data:
# coref_anchor_span_sets = []
# for event_id in coref_events:
# assert event_id in event_to_span_set
# # event_to_span_set[event_id] gets me the anchor span_set for the event_id
# coref_anchor_span_sets.append(event_to_span_set[event_id])
# self.__coref_events.append(coref_anchor_span_sets)
self.__coref_events.append(coref_events)
# read granular-templates
if "granular-templates" in events:
templates_data = events.get('granular-templates', {})
for template_name, template_dict in templates_data.items():
template_anchor_span_set = None
template_id = None
template_type = None
type_ = None
completion = None
over_time = None
coordinated = None
project_type = None
role_to_args = dict()
for arg_role, arg_dict_set in template_dict.items():
if arg_role == "template-anchor":
template_anchor_span_set = self.span_sets[arg_dict_set]
elif arg_role == "template-id":
template_id = arg_dict_set
elif arg_role == "template-type":
template_type = arg_dict_set
elif arg_role == "type":
type_ = arg_dict_set
elif arg_role == "completion":
completion = arg_dict_set
elif arg_role == "over-time":
over_time = arg_dict_set
elif arg_role == "coordinated":
coordinated = arg_dict_set
elif arg_role == "project-type":
project_type = arg_dict_set
else:
# if arg_role not in role_to_args:
# role_to_args[arg_role] = []
args = []
for arg_dict in arg_dict_set:
arg = []
# if it's event ID
if "event-id" in arg_dict:
ref_event_id = arg_dict["event-id"]
arg = {
'span_set': event_to_span_set[ref_event_id],
'event': self.basic_events[ref_event_id]
}
for key, value in arg_dict.items():
if key != "event-id":
arg[key] = value
# if it's span set ID
elif "ssid" in arg_dict:
arg = {
'span_set': self.span_sets[arg_dict["ssid"]]
}
for key, value in arg_dict.items():
if key != "ssid":
arg[key] = value
args.append(arg)
role_to_args[arg_role] = args
granular_template = GranularTemplate(
event_id=template_id,
event_type=template_type,
type_=type_,
completion=completion,
over_time=over_time,
coordinated=coordinated,
project_type=project_type,
anchor_span_set=template_anchor_span_set,
role_to_args=role_to_args
)
self.add_granular_template(granular_template)
def add_segment_section_entry(self, structural_element, start, end):
segment_section_obj = self.__segment_sections.get(
structural_element,
SegmentSection(structural_element)
)
segment_section_obj.add_entry(
start,
end
)
self.__segment_sections[structural_element] = segment_section_obj
@property
def abstract_events(self):
return self.__abstract_events
@property
def basic_events(self):
return self.__basic_events
@property
def granular_templates(self):
return self.__granular_templates
@property
def segment_sections(self):
return self.__segment_sections
@property
def span_sets(self):
return self.__span_sets
@property
def text(self):
return self.__text
@property
def entry_id(self):
return self.__entry_id
@property
def sent_id(self):
return self.__sent_id
@property
def doc_id(self):
return self.__doc_id
@property
def relations(self):
return self.__relations
@property
def coref_events(self):
return self.__coref_events
# Creates a span set and returns the span set id. If an identical span set
# already existed, that span set id is returned instead of creating a new
# one.
def add_span_set(self, *, span_strings):
spans = []
for span_string in span_strings:
assert(span_string in self.text)
spans.append(Span(hstring=None, string=span_string))
for ss_id, span_set in self.span_sets.items():
if spans == span_set.spans:
return ss_id
new_ss_id = 'ss-' + str(len(self.span_sets) + 1)
self.span_sets[new_ss_id] = SpanSet(span_set_name=new_ss_id,
spans=spans)
return new_ss_id
# Add a new abstract event that references span sets that already exist on
# this object
def add_abstract_event(self, abstract_event):
# We have to cast to string because MITRE was mixing strings and ints
key = str(abstract_event.event_id)
assert(key not in self.abstract_events)
self.__abstract_events[key] = abstract_event
def add_basic_event(self, basic_event):
# We have to cast to string because MITRE was mixing strings and ints
key = str(basic_event.event_id)
assert(key not in self.basic_events)
self.__basic_events[key] = basic_event
def add_granular_template(self, granular_template):
# We have to cast to string because MITRE was mixing strings and ints
key = str(granular_template.event_id)
assert(key not in self.granular_templates)
self.__granular_templates[key] = granular_template
def clear_annotation(self):
self.abstract_events.clear()
self.span_sets.clear()
def to_json_dict(self):
events = {}
span_sets = {}
for event_id, event in self.abstract_events.items():
events[event_id] = event.to_json_dict()
for ss_id, span_set in self.span_sets.items():
span_sets[ss_id] = span_set.to_json_dict()
abstract_events = {
'events': events,
'span-sets': span_sets
}
annotation_sets = {
'abstract-events': abstract_events
}
data = {
'annotation-sets': annotation_sets,
'doc-id': self.__doc_id,
'entry-id': self.entry_id,
'segment-text': self.text,
'segment-type': 'sentence',
'sent-id': str(self.sent_id)
}
return data
class Event:
def __init__(self, *, event_id, anchor_span_set, anchor_offsets=None):
self.__event_id = event_id
self.__anchors = anchor_span_set
self.__anchor_offsets = anchor_offsets
@property
def anchors(self):
return self.__anchors
@property
def anchor_offsets(self):
return self.__anchor_offsets
@property
def event_id(self):
return self.__event_id
class AbstractEvent(Event):
# Removed SPECIFIED and NOT as they no longer show up as of 8d
HELPFUL_HARMFUL_TYPES = {'helpful', 'harmful', 'neutral', 'unk'}
MATERIAL_VERBAL_TYPES = {'material', 'verbal', 'both', 'unk'}
def __init__(self, *, event_id, helpful_harmful, material_verbal,
anchor_span_set, agent_span_sets, patient_span_sets,
anchor_offsets=None, agent_offsets=None, patient_offsets=None, ref_events=()):
super().__init__(event_id=event_id, anchor_span_set=anchor_span_set,
anchor_offsets=anchor_offsets)
if helpful_harmful not in self.HELPFUL_HARMFUL_TYPES:
raise RuntimeError(
'Unexpected helpful-harmful value: ' + helpful_harmful)
if material_verbal not in self.MATERIAL_VERBAL_TYPES:
raise RuntimeError(
'Unexpected material-verbal value: ' + material_verbal)
self.__helpful_harmful = helpful_harmful
self.__material_verbal = material_verbal
self.__agents = agent_span_sets
self.__patients = patient_span_sets
self.__ref_events = ref_events
self.__agent_offsets = agent_offsets
self.__patient_offsets = patient_offsets
@property
def agents(self):
return self.__agents
@property
def agent_offsets(self):
return self.__agent_offsets
@property
def patients(self):
return self.__patients
@property
def patient_offsets(self):
return self.__patient_offsets
@property
def ref_events(self):
return self.__ref_events
def add_ref_event_id(self, event_id):
self.__ref_events.append(event_id)
@property
def helpful_harmful(self):
return self.__helpful_harmful
@property
def material_verbal(self):
return self.__material_verbal
def to_json_dict(self):
data = {
'agents': sorted([x.ss_id for x in self.agents]),
'anchors': self.anchors.ss_id,
'eventid': self.event_id,
'helpful-harmful': self.helpful_harmful,
'material-verbal': self.material_verbal,
'patients': sorted([x.ss_id for x in self.patients]),
'ref-events': sorted(self.ref_events),
'anchor_offsets': self.anchor_offsets,
'agent_offsets': self.__agent_offsets,
'patient_offsets': self.__patient_offsets
}
return data
class BasicEvent(Event):
def __init__(self, *, event_id, event_type,
anchor_span_set, agent_span_sets, patient_span_sets,
anchor_offsets=None, agent_offsets=None, patient_offsets=None,
ref_events=None,
ref_event_span_sets=None,
state_of_affairs=None,
money_span_sets=None, money_offsets=None,):
super().__init__(event_id=event_id, anchor_span_set=anchor_span_set, anchor_offsets=anchor_offsets)
self.__event_type = event_type
self.__agent_span_sets = agent_span_sets
self.__patient_span_sets = patient_span_sets
self.__agent_offsets = agent_offsets
self.__patient_offsets = patient_offsets
self.__ref_events = ref_events
if self.__ref_events is None:
self.__ref_events = []
self.__ref_event_span_sets = ref_event_span_sets
self.__state_of_affairs = state_of_affairs
self.__money_span_sets = money_span_sets
self.__money_offsets = money_offsets
@property
def agent_span_sets(self):
return self.__agent_span_sets
@property
def patient_span_sets(self):
return self.__patient_span_sets
@property
def agent_offsets(self):
return self.__agent_offsets
@property
def patient_offsets(self):
return self.__patient_offsets
@property
def event_type(self):
return self.__event_type
@property
def ref_event_span_sets(self):
return self.__ref_event_span_sets
@property
def ref_events(self):
return self.__ref_events
@property
def state_of_affairs(self):
return self.__state_of_affairs
@property
def money_span_sets(self):
return self.__money_span_sets
@property
def money_offsets(self):
return self.__money_offsets
class GranularTemplate(Event):
def __init__(self, *, event_id, event_type,
type_, completion, over_time, coordinated, project_type,
anchor_span_set, anchor_offsets=None,
role_to_args=None
):
super().__init__(event_id=event_id, anchor_span_set=anchor_span_set, anchor_offsets=anchor_offsets)
self.__event_type = event_type
self.__type = type_
self.__completion = completion
self.__over_time = over_time
self.__coordinated = coordinated
self.__project_type = project_type
self.__anchor_span_set = anchor_span_set
self.__role_to_args = role_to_args
@property
def event_type(self):
return self.__event_type
@property
def type_(self):
return self.__type
@property
def completion(self):
return self.__completion
@property
def over_time(self):
return self.__over_time
@property
def coordinated(self):
return self.__coordinated
@property
def project_type(self):
return self.__project_type
@property
def anchor_span_set(self):
return self.__anchor_span_set
@property
def role_to_args(self):
return self.__role_to_args
class SpanSet:
def __init__(self, *, span_set_name, spans):
self.__spans = spans
self.__ss_id = span_set_name
@property
def spans(self):
return self.__spans
@property
def ss_id(self):
return self.__ss_id
def to_json_dict(self):
spans = []
for span in self.spans:
if span.hstring is None:
spans.append({'string': span.string})
else:
spans.append({
'hstring': span.hstring,
'string': span.string
})
data = {
'spans': spans,
'ssid': self.ss_id
}
return data
def __repr__(self):
return json.dumps(self.to_json_dict())
class Span:
def __init__(self, *, hstring, string, start=-1, end=-1, hinferred=None, hstart=-1, hend=-1, synclass=None):
# hstring is the head of the span, but it is not always provided. If it
# was not provided, it will be None
self.__hstring = hstring
self.__hinferred = hinferred
self.__string = string
self.__start = start
self.__end = end
self.__hstart = hstart
self.__hend = hend
self.__synclass = synclass
def __eq__(self, other):
if isinstance(other, Span):
return (self.__hstring == other.__hstring
and self.__string == other.__string)
return NotImplemented
def __hash__(self):
return hash(
(self.__hstring, self.__hinferred,
self.__string, self.__start,
self.__end, self.__hstart,
self.__hend, self.__synclass,))
@property
def hstring(self):
return self.__hstring
@property
def string(self):
return self.__string
@property
def start(self):
return self.__start
@property
def end(self):
return self.__end
@property
def hinferred(self):
return self.__hinferred
@property
def hstart(self):
return self.__hstart
@property
def hend(self):
return self.__hend
@property
def synclass(self):
return self.__synclass
def _main(args):
corpus = Corpus.from_file(args.input_file)
print('Read {} segments'.format(len(corpus.segments)))
corpus.save(args.output_file)
def _parser_setup():
parser = argparse.ArgumentParser(
description="Test ingestion and serialization of MITRE's JSON format")
parser.add_argument('input_file', help='Input file')
parser.add_argument('output_file', help='Output file')
return parser
def test2():
f = "/d4m/better/data/auto_ir_dryrun_112320/app/ir-tasks.json"
with open(f) as fp:
ir_tasks = json.load(fp)
docid_to_annotation = dict()
for task in ir_tasks:
docid_to_annotation.update(task['task-docs'].items())
for request in task['requests']:
docid_to_annotation.update(request['req-docs'].items())
corpus_obj = Corpus({
"corpus-id":"From IR",
"entries":docid_to_annotation,
"format-type":__FORMAT_TYPE__,
"format-version":__FORMAT_VERSION__
})
if __name__ == '__main__':
_main(_parser_setup().parse_args())
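# A minimal round-trip sketch of the classes above (file paths are hypothetical):
#   corpus = Corpus.from_file('input.bp.json')
#   print('{} segments'.format(len(corpus.segments)))
#   ss_id = corpus.segments[0].add_span_set(span_strings=[corpus.segments[0].text[:5]])
#   corpus.save('output.bp.json')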
|
import hashlib
# obj = hashlib.md5(b"jflkasdjklfjaskljfdfjdsakljfklajslfjaskljfklasjklasj") # 加盐
# obj.update("123456".encode("utf-8")) # 把要加密的内容给md5
print(hashlib.md5(b"jflkasdjklfjaskljfdfjdsakljfklajslfjaskljfkcclasjklasj").hexdigest())
|
def no_of_pies(N, pie_weight, total_pie, rack_cap, total_rack):
# Drop the heaviest pie until the total pie weight fits the total rack
# capacity, then report how many pies remain.
if total_rack >= total_pie:
print N
return N
else:
total_pie = total_pie - max(pie_weight)
pie_weight.remove(max(pie_weight))
N = N - 1
return no_of_pies(N, pie_weight, total_pie, rack_cap, total_rack)
T = input()
for t in xrange(T):
N = input()
pie_weight = []
rack_cap = []
total_pie,total_rack = 0,0
for n in xrange(N):
pie = input()
pie_weight.append(pie)
total_pie = total_pie + pie
for n in xrange(N):
rack = input()
rack_cap.append(rack)
total_rack = total_rack + rack
no_of_pies(N,pie_weight,total_pie,rack_cap,total_rack)
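# Worked example of the logic above (hypothetical values): with
# pie_weight = [4, 2, 6] (total 12) and total_rack = 9, the heaviest pie (6)
# is dropped, leaving a total of 6 <= 9, so no_of_pies returns 2.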
|
from datetime import date
from typing import List
from fastapi import FastAPI, UploadFile, File
from fastapi.responses import RedirectResponse
from easydoc_api.config.config import app_config
from easydoc_api.models.api_models import BaseResponse, ResponseStatus
from easydoc_api.models.db_models import ExpenseByTime
from easydoc_api.models.models import Invoice, InvoiceInfo, InvoiceItem
from easydoc_api.products.invoice.invoice_parser import parse_invoice
from easydoc_api.services.ai_service import AIService
from easydoc_api.services.db_service import DBService
from easydoc_api.settings import app_settings
app = FastAPI()
app_settings(app)
ai_service = AIService(app_config.ai_client_id, app_config.ai_client_secret)
db_service = DBService(**app_config.db_config.dict())
@app.get('/', response_class=RedirectResponse, include_in_schema=False)
async def home():
"""
Redirect home page to API docs
"""
return '/docs'
@app.get('/health', status_code=200)
async def health():
"""
Health endpoint for liveness probe.
"""
return 'ok'
@app.post('/extract_invoice', response_model=Invoice)
async def extract_invoice(file: UploadFile = File(...)):
raw_invoice = ai_service.extract_invoice(file.file)
return parse_invoice(raw_invoice)
@app.post('/invoice', response_model=BaseResponse)
async def save_invoice(request: Invoice):
db_service.save_invoice_info(request.info)
db_service.save_invoice_items(request.items)
return BaseResponse(status=ResponseStatus.SUCCESS)
@app.post('/invoice_info', response_model=BaseResponse)
async def save_invoice_info(request: InvoiceInfo):
db_service.save_invoice_info(request)
return BaseResponse(status=ResponseStatus.SUCCESS)
@app.post('/invoice_items', response_model=BaseResponse)
async def save_invoice_items(request: List[InvoiceItem]):
db_service.save_invoice_items(request)
return BaseResponse(status=ResponseStatus.SUCCESS)
@app.get('/stats/daily_expense', response_model=List[ExpenseByTime])
async def get_daily_expense(start_time: date, end_time: date):
return db_service.get_daily_expense(start_time, end_time)
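# A minimal liveness-check sketch using FastAPI's TestClient (assumes the
# easydoc_api package and its DB/AI config are importable here, since both
# services are constructed at module import time):
# from fastapi.testclient import TestClient
# client = TestClient(app)
# assert client.get('/health').status_code == 200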
|
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from PIL import Image
from datetime import date
import dateutil
from django.core.files.storage import default_storage as storage
# Create your models here.
class Profile(models.Model):
# creating a one-to-one relation with the User model
user = models.OneToOneField(User, on_delete=models.CASCADE)
avatar = models.ImageField(default='default.jpg', upload_to='profile_pics')
dob = models.DateField(blank=True, null=True)
bio = models.TextField(max_length=400, blank=True)
following = models.ManyToManyField('self', related_name='follows', symmetrical=False, blank=True)  # null=True has no effect on ManyToManyField, so it is omitted
#follow = models.ManyToManyField('self', related_name='followed', symmetrical=False)
joined_at = models.DateField(auto_now_add=True)
COLLEGE_CHOICES = (
('ABSU', 'Abia State University, Uturu, Abia State'),
('EBSU', 'Ebonyi State University, Abakaliki'),
('UNILAG', 'University of Lagos, Idi Araba, Lagos State'),
('OAU', 'Obafemi Awolowo University, Ile Ife, Osun State'),
('UNIPORT', 'University of Port-Harcourt, Rivers State'),
('UDUSOK', 'Usman Dan Fodiyo University, Sokoto, Sokoto State'),
('UI', 'University of Ibadan, Oyo State'),
('UNILORIN', 'University of Ilorin, Kwara State'),
('UNICAL', 'University of Calabar, Cross River State'),
('LASU', 'Lagos State University, Ikeja, Lagos State'),
('AAU', 'Ambrose Alli University, Ekpoma, Edo State'),
('UNIJOS', 'University of Jos, Plateau State'),
('BUK', 'Bayero University, Kano, Kano State'),
('OOU', 'Olabisi Onabanjo (formerly Ogun State) University, Ago Iwoye, Ogun State'),
('IMSU', 'Imo State University, Owerri, Imo State'),
('MADONNA', 'Madonna University Okija'),
('UNIBEN', 'University of Benin, Benin-City, Edo State'),
('OBA OKUNADE', 'Oba Okunade College of Health Sciences Igbinedion University Okada, Benin -City, Edo State'),
('UNN', 'University of Nigeria, Ozara-Ituku. Enugu State'),
('UNIZIK', 'Nnamdi Azikiwe University, Nnewi, Anambra State'),
('ABU', 'Ahmadu Bello University Zaria, Kaduna State'),
('UNIMAID', 'University of Maiduguri, Borno State'),
('DELSU', 'Delta State University, Abraka, Delta State'),
('ESUT', 'Enugu State University of Science and Technology, Enugu, Enugu State'),
('UNIUYO', 'University of Uyo, Uyo, Cross River State'),
('BINGHAM UNI', 'Bingham University Karu, Nasarawa.State'),
('NIGER-DELTA UNI', 'Niger Delta University, Wilberforce Island, Bayelsa State'),
('BENSU', 'Benue State University, Makurdi, Benue State'),
('BABCOCK', 'Babcock University Ilishan-Remo, Ogun State'),
('UNIABJ', 'University of Abuja'),
('AFE-BABALOLA', 'Afe Babalola University Ado-Ekiti, Ekiti State'),
)
YEAR_IN_COLLEGE_CHOICES = (
('100', '100 Level'),
('200', '200 Level'),
('300', '300 Level'),
('400', '400 Level'),
('500', '500 Level'),
('600', '600 Level'),
)
year_in_college = models.CharField(max_length=3, choices=YEAR_IN_COLLEGE_CHOICES, default='')
college = models.CharField(max_length=15, choices=COLLEGE_CHOICES, default='')
# def get_no_of_followers(self):
# all_users = User.objects.all()
# for user in all_users:
# if self in user.profile.follows.all():
# self.followers.add(user.profile)
# return self.followers.count()
def get_full_name(self):
return self.user.first_name + ' ' + self.user.last_name
def calculate_age(self):
if self.dob:
today = date.today()
return today.year - self.dob.year - ((today.month, today.day) < (self.dob.month, self.dob.day))
else:
return False
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
# img = Image.open(self.avatar.path
img = Image.open(storage.open(self.avatar.name))
output_size = (200, 200)
img.thumbnail(output_size)
# img.save(self.avatar.url)
fh = storage.open(self.avatar.name, "w")
format = 'png' # You need to set the correct image format here
img.save(fh, format)
fh.close()
def __str__(self):
return f'Profile of {self.user.username}'
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
instance.profile.save()
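# Behavior sketch (Django shell; user data is hypothetical): creating a User
# fires post_save, so a Profile exists immediately afterwards.
# >>> u = User.objects.create_user('ada', password='secret')
# >>> u.profile.calculate_age()  # False until dob is set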
|
#!/usr/bin/env python
from tthAnalysis.HiggsToTauTau.safe_root import ROOT
from tthAnalysis.HiggsToTauTau.common import SmartFormatter, logging
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.backends.backend_pdf as backend_pdf
import os
import argparse
import collections
import re
PILEUP_HISTOGRAM_NAME = 'pileup'
DATA_PILEUP = {
2016 : os.path.join(os.environ['CMSSW_BASE'], 'src/PhysicsTools/NanoAODTools/python/postprocessing/data/pileup/PileupData_ReRecoJSON_Full2016.root'),
2017 : os.path.join(os.environ['CMSSW_BASE'], 'src/PhysicsTools/NanoAODTools/python/postprocessing/data/pileup/PileupData_ReRecoJSON_v1_Full2017.root'),
2018 : os.path.join(os.environ['CMSSW_BASE'], 'src/PhysicsTools/NanoAODTools/python/postprocessing/data/pileup/PileupData_EarlyReRecoJSON_2018ABC_PromptEraD_Full2018.root'),
}
MC_PILEUP = [
os.path.join(os.environ['CMSSW_BASE'], 'src/tthAnalysis/HiggsToTauTau/data/pileup_{era}.root'),
os.path.join(os.environ['CMSSW_BASE'], 'src/hhAnalysis/multilepton/data/pileup_hh_{era}.root'),
os.path.join(os.environ['CMSSW_BASE'], 'src/hhAnalysis/bbww/data/pileup_hh_{era}.root'),
os.path.join(os.environ['CMSSW_BASE'], 'src/hhAnalysis/bbww/data/pileup_hh_{era}_ttbar.root'),
]
def get_histogram(filename, read_mc):
if not os.path.isfile(filename):
raise RuntimeError("No such file: %s" % filename)
fptr = ROOT.TFile.Open(filename, 'read')
histogram_names = [ key.GetName() for key in fptr.GetListOfKeys() ]
histograms = collections.OrderedDict()
for histogram_name in histogram_names:
if not read_mc and histogram_name != PILEUP_HISTOGRAM_NAME:
continue
histogram = fptr.Get(histogram_name)
histogram.Scale(1. / histogram.Integral())
histograms[histogram_name] = [ histogram.GetBinContent(bin_idx) for bin_idx in range(1, histogram.GetNbinsX() + 1) ]
fptr.Close()
return histograms[PILEUP_HISTOGRAM_NAME] if not read_mc else histograms
def plot(era, pdf, threshold, samples, plot_all):
logging.info("Processing era {}".format(era))
data_filename = DATA_PILEUP[era]
data = get_histogram(data_filename, False)
data_len = len(data)
plot_x = list(range(0, data_len))
for mc_filename_idx, mc_filename_expr in enumerate(MC_PILEUP):
mc_filename = mc_filename_expr.format(era = era)
mc = get_histogram(mc_filename, True)
for sample_idx, sample_name in enumerate(mc.keys()):
logging.info('Processing {} {}/{} ({}/{})'.format(sample_name, sample_idx + 1, len(mc), mc_filename_idx + 1, len(MC_PILEUP)))
if samples and not any(re.match(sample, sample_name) for sample in samples):
continue
mc_histogram = mc[sample_name]
assert(data_len == len(mc_histogram))
ratios = [
data[bin_idx] / mc_histogram[bin_idx] if mc_histogram[bin_idx] > 0. else 1. for bin_idx in range(data_len)
]
ratios_over_threshold = [ ratio for ratio in enumerate(ratios) if ratio[1] > threshold ]
if not ratios_over_threshold and not plot_all:
continue
plt.figure(figsize = (10, 8), dpi = 150)
plt.step(plot_x, data, label = 'Data', where = 'post')
plt.step(plot_x, mc_histogram, label = 'MC', where = 'post')
plt.xlim(0, data_len)
plt.yscale('log')
plt.grid(True)
for ratio in ratios_over_threshold:
ratio_idx = ratio[0]
rec = matplotlib.patches.Rectangle((ratio_idx - 1, 0), 1, 1, color = 'yellow', alpha = 0.5)
plt.gca().add_patch(rec)
plt.title('{} ({})'.format(sample_name, era))
plt.legend(loc = 'lower left')
plt.xlabel('# PU interactions')
plt.ylabel('Number of events, normalized to 1')
plt.savefig(pdf, format = 'pdf', bbox_inches = 'tight')
plt.close()
parser = argparse.ArgumentParser(
formatter_class = lambda prog: SmartFormatter(prog, max_help_position = 35)
)
parser.add_argument('-e', '--era',
type = int, dest = 'era', metavar = 'year', required = False, nargs = '+', choices = list(DATA_PILEUP.keys()),
default = list(DATA_PILEUP.keys()),
help = 'R|Era',
)
parser.add_argument('-o', '--output',
type = str, dest = 'output', metavar = 'path', required = True,
help = 'R|PDF file name',
)
parser.add_argument('-t', '--threshold',
type = float, dest = 'threshold', metavar = 'number', required = False, default = 3.0,
help = 'R|PU weight threshold for highlighting',
)
parser.add_argument('-s', '--sample',
type = str, dest = 'sample', metavar = 'name', required = False, nargs = '+', default = [],
help = 'R|Plot specific samples',
)
parser.add_argument('-a', '--all-plot',
dest = 'all_plot', action = 'store_true', default = False,
help = 'R|Also include PU profiles not exceeding the threshold',
)
args = parser.parse_args()
eras = args.era
output = os.path.abspath(args.output)
threshold = args.threshold
sample = args.sample
plot_all = args.all_plot
if not output.lower().endswith('.pdf'):
raise ValueError("Output file can be in PDF format only: %s" % output)
if not os.path.isdir(os.path.dirname(output)):
raise ValueError("Cannot create file %s because its directory does not exist" % output)
with backend_pdf.PdfPages(output) as pdf:
for era in eras:
plot(era, pdf, threshold, sample, plot_all)
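# Example invocation of the parser defined above (script name and paths are
# hypothetical):
#   ./plot_pileup_profiles.py -e 2017 2018 -o /tmp/pileup_check.pdf -t 3.0 -a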
|
# Following map_filter_01.py
# Dictionary comprehension
dic_number = {10112: 'jean', 15324: 'eric', 21654: 'martine'}
print(dic_number)
print('User with number 15324: {}'.format(dic_number[15324]))
print()
# Now we want to know the number for a name,
# so we invert the mapping: keys become values and values become keys
dic_name = {dic_number[key]: key for key in dic_number}
print(dic_name)
print('User "martine" have the number: {}'.format(dic_name['martine']))
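# The same inversion written over key/value pairs directly; iterating
# .items() avoids the extra dic_number[key] lookup inside the comprehension.
dic_name_items = {name: number for number, name in dic_number.items()}
print(dic_name_items == dic_name)  # True: both map names back to numbers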
|
"""
Climate support for Toon thermostat.
Only for the rooted version.
configuration.yaml
climate:
- platform: toon_climate
name: Toon Thermostat
host: <IP_ADDRESS>
port: 80
scan_interval: 10
min_temp: 6.0
max_temp: 30.0
logger:
default: info
logs:
custom_components.toon_climate: debug
More details:
- https://developers.home-assistant.io/docs/core/entity/climate/
- https://github.com/cyberjunky/home-assistant-toon_climate
"""
import asyncio
import logging
from typing import Any, Dict, List, Optional
import aiohttp
import async_timeout
import voluptuous as vol
from homeassistant.components.climate.const import (
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
HVAC_MODE_AUTO,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_COMFORT,
PRESET_ECO,
PRESET_HOME,
PRESET_SLEEP,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import (
ATTR_TEMPERATURE,
CONF_HOST,
CONF_NAME,
CONF_PORT,
TEMP_CELSIUS,
)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
try:
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateEntity
except ImportError:
from homeassistant.components.climate import (
PLATFORM_SCHEMA,
ClimateDevice as ClimateEntity,
)
_LOGGER = logging.getLogger(__name__)
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
"""
Supported preset modes:
PRESET_AWAY: The device is in away mode
PRESET_HOME: The device is in home mode
PRESET_COMFORT: The device is in comfort mode
PRESET_SLEEP: The device is in Sleep mode
PRESET_ECO: The device runs in a continuous energy savings mode. If
configured as one of the supported presets this mode can
be used to activate the vacation mode
"""
SUPPORT_PRESETS = [PRESET_AWAY, PRESET_HOME, PRESET_COMFORT, PRESET_SLEEP,
PRESET_ECO]
"""
Supported hvac modes:
- HVAC_MODE_HEAT: Heat to a target temperature (schedule off)
- HVAC_MODE_AUTO: Follow the configured schedule
- HVAC_MODE_OFF: The device runs in a continuous energy savings mode. If
configured as one of the supported hvac modes this mode
can be used to activate the vacation mode
"""
SUPPORT_MODES = [HVAC_MODE_HEAT, HVAC_MODE_AUTO]
DEFAULT_NAME = "Toon Thermostat"
BASE_URL = "http://{0}:{1}{2}"
"""
The Toon device can be set to a minimum of 6 degrees Celsius and a maximum
of 30 degrees Celsius. The min and max values below should not be changed.
"""
DEFAULT_MIN_TEMP = 6.0
DEFAULT_MAX_TEMP = 30.0
CONF_MIN_TEMP = "min_temp"
CONF_MAX_TEMP = "max_temp"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=80): cv.positive_int,
vol.Optional(CONF_MIN_TEMP, default=DEFAULT_MIN_TEMP):
vol.Coerce(float),
vol.Optional(CONF_MAX_TEMP, default=DEFAULT_MAX_TEMP):
vol.Coerce(float),
}
)
async def async_setup_platform(hass, config, add_devices, discovery_info=None):
"""
Setup the Toon thermostat
"""
session = async_get_clientsession(hass)
add_devices([ThermostatDevice(session, config)], True)
class ThermostatDevice(ClimateEntity):
"""
Representation of a Toon climate device
"""
def __init__(self, session, config) -> None:
"""
Initialize the Toon climate device
"""
self._session = session
self._name = config.get(CONF_NAME)
self._host = config.get(CONF_HOST)
self._port = config.get(CONF_PORT)
self._min_temp = (config.get(CONF_MIN_TEMP)
if config.get(CONF_MIN_TEMP) >= DEFAULT_MIN_TEMP
else DEFAULT_MIN_TEMP)
self._max_temp = (config.get(CONF_MAX_TEMP)
if config.get(CONF_MAX_TEMP) <= DEFAULT_MAX_TEMP
else DEFAULT_MAX_TEMP)
self._attr_unique_id = f"climate_{self._name}_{self._host}"
"""
Standard thermostat data for the first and second edition of Toon
"""
self._data = None
self._active_state = None
self._burner_info = None
self._modulation_level = None
self._current_setpoint = None
self._current_temperature = None
self._ot_comm_error = None
self._program_state = None
self._hvac_mode = None
"""
Dynamically construct valid preset list
0: PRESET_COMFORT
1: PRESET_HOME
2: PRESET_SLEEP
3: PRESET_AWAY
4: PRESET_ECO
"""
self._valid_presets = {}
if PRESET_COMFORT in SUPPORT_PRESETS:
self._valid_presets[0] = PRESET_COMFORT
if PRESET_HOME in SUPPORT_PRESETS:
self._valid_presets[1] = PRESET_HOME
if PRESET_SLEEP in SUPPORT_PRESETS:
self._valid_presets[2] = PRESET_SLEEP
if PRESET_AWAY in SUPPORT_PRESETS:
self._valid_presets[3] = PRESET_AWAY
if PRESET_ECO in SUPPORT_PRESETS:
self._valid_presets[4] = PRESET_ECO
_LOGGER.info("%s: Supported hvac modes %s. "
"Supported preset modes %s. "
"Temperature can be set between %s°C and %s°C",
self._name, SUPPORT_MODES,
SUPPORT_PRESETS,
self._min_temp, self._max_temp)
@staticmethod
async def do_api_request(name, session, url):
"""
Do an API request
"""
try:
async with async_timeout.timeout(5):
response = await session.get(
url, headers={"Accept-Encoding": "identity"}
)
except aiohttp.ClientError:
_LOGGER.error("Cannot poll %s using url: %s",
name, url)
return None
except asyncio.TimeoutError:
_LOGGER.error(
"Timeout error occurred while polling %s using url: %s",
name, url
)
return None
try:
response = await response.json(content_type="text/javascript")
_LOGGER.debug("Data received from %s: %s",
name, response)
except (TypeError, KeyError) as err:
_LOGGER.error("Cannot parse data received from %s: %s",
name, err)
return None
return response
@property
def should_poll(self):
"""
Polling needed for thermostat
"""
return True
async def async_update(self) -> None:
"""
Update local data with thermostat data (Toon 1 and Toon 2)
"""
_LOGGER.debug(
"%s: request 'getThermostatInfo'", self._name,
)
self._data = await self.do_api_request(
self._name, self._session,
BASE_URL.format(
self._host, self._port,
"/happ_thermstat?action=getThermostatInfo"
),
)
if self._data:
self._active_state = int(self._data["activeState"])
self._burner_info = int(self._data["burnerInfo"])
self._modulation_level = int(self._data["currentModulationLevel"])
self._current_setpoint = int(self._data["currentSetpoint"]) / 100
self._current_temperature = int(self._data["currentTemp"]) / 100
self._ot_comm_error = int(self._data["otCommError"])
self._program_state = int(self._data["programState"])
if self._active_state == 4:
self._hvac_mode = HVAC_MODE_OFF
elif self._program_state == 0:
self._hvac_mode = HVAC_MODE_HEAT
elif self._program_state == 1 or self._program_state == 2:
self._hvac_mode = HVAC_MODE_AUTO
else:
self._hvac_mode = None
@property
def supported_features(self) -> int:
"""
Return the list of supported features
"""
return SUPPORT_FLAGS
@property
def name(self) -> str:
"""
Return the name of the thermostat
"""
return self._name
@property
def temperature_unit(self) -> str:
"""
Return the unit of measurement (Celsius by default)
"""
return TEMP_CELSIUS
@property
def min_temp(self) -> float:
"""
Return the minimum temperature
"""
return self._min_temp
@property
def max_temp(self) -> float:
"""
Return the maximum temperature
"""
return self._max_temp
@property
def current_temperature(self) -> Optional[float]:
"""
Return the current temperature
"""
return self._current_temperature
@property
def target_temperature(self) -> Optional[float]:
"""
Return current target temperature (temp we try to reach)
"""
return self._current_setpoint
async def async_set_temperature(self, **kwargs) -> None:
"""
Set target temperature
"""
target_temperature = kwargs.get(ATTR_TEMPERATURE)
if target_temperature is None:
return
value = int(target_temperature * 100)
if (target_temperature < self._min_temp or
target_temperature > self._max_temp):
_LOGGER.warning(
"%s: set target temperature to %s°C is not supported. "
"The temperature can be set between %s°C and %s°C",
self._name, str(target_temperature),
self._min_temp, self._max_temp)
return
_LOGGER.info(
"%s: set target temperature to %s°C",
self._name, str(target_temperature),
)
_LOGGER.debug(
"%s: request 'setSetpoint' with 'Setpoint' value %s",
self._name, str(value),
)
self._data = await self.do_api_request(
self._name, self._session,
BASE_URL.format(
self._host, self._port,
"/happ_thermstat?action=setSetpoint"
"&Setpoint=" + str(value),
),
)
self._current_setpoint = target_temperature
@property
def hvac_action(self) -> Optional[str]:
"""
Return the current running hvac operation
Toon burnerInfo values
- 0: Burner is off
- 1: Burner is on (heating for current setpoint)
- 2: Burner is on (heating for generating warm water)
- 3: Burner is on (preheating for next setpoint)
"""
if (self._burner_info == 1) or (self._burner_info == 3):
return CURRENT_HVAC_HEAT
return CURRENT_HVAC_IDLE
@property
def preset_modes(self) -> List[str]:
"""
Return the list of available preset modes
"""
return SUPPORT_PRESETS
@property
def preset_mode(self) -> Optional[str]:
"""
Return the current preset mode
Toon activeState values
- 0: Comfort
- 1: Home
- 2: Sleep
- 3: Away
- 4: Vacation (not a default Home Assistant climate state); instead we
use PRESET_ECO if it is configured as one of the supported presets
"""
try:
return self._valid_presets[self._active_state]
except KeyError:
return None
async def async_set_preset_mode(self, preset_mode) -> None:
"""
Set new preset mode (comfort, home, sleep, away, eco)
Toon programState values
- 0: Program mode is off (manually changing presets)
- 1: Program mode is on (automatically changing presets)
- 2: Program mode is on, but the setpoint/preset is only changed until
the next scheduled preset is automatically activated
- 8: Not an official program mode; according to the Toon API doc this
would be state 4, but testing revealed it only works when we use
an 8. This makes the program state return to its original state
when another preset is selected.
Toon activeState values
- 0: Comfort
- 1: Home
- 2: Sleep
- 3: Away
- 4: Vacation (eco)
The requested preset will only be set if it is part of the
defined SUPPORT_PRESETS list
"""
if not preset_mode.lower() in SUPPORT_PRESETS:
_LOGGER.warning(
"%s: set preset mode to '%s' is not supported. "
"Supported preset modes are %s",
self._name, str(preset_mode.lower()), SUPPORT_PRESETS)
return None
if preset_mode.lower() == PRESET_COMFORT:
scheme_temp = 0
scheme_state = 2
elif preset_mode.lower() == PRESET_HOME:
scheme_temp = 1
scheme_state = 2
elif preset_mode.lower() == PRESET_SLEEP:
scheme_temp = 2
scheme_state = 2
elif preset_mode.lower() == PRESET_AWAY:
scheme_temp = 3
scheme_state = 2
elif preset_mode.lower() == PRESET_ECO:
scheme_temp = 4
scheme_state = 8
else:
scheme_temp = -1
scheme_state = 2
_LOGGER.info(
"%s: set preset mode to '%s'",
self._name, str(preset_mode.lower()),
)
_LOGGER.debug(
"%s: request 'changeSchemeState' with 'state' value %s "
"and 'temperatureState' value %s",
self._name, str(scheme_state),
str(scheme_temp),
)
self._data = await self.do_api_request(
self._name, self._session,
BASE_URL.format(
self._host, self._port,
"/happ_thermstat?action=changeSchemeState"
"&state=" + str(scheme_state) +
"&temperatureState=" + str(scheme_temp),
),
)
@property
def hvac_modes(self) -> List[str]:
"""
Return the list of available hvac operation modes
"""
return SUPPORT_MODES
@property
def hvac_mode(self) -> Optional[str]:
"""
Return the current operation mode
"""
return self._hvac_mode
async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""
Set new target hvac mode
Support modes:
- HVAC_MODE_HEAT: Heat to a target temperature (manual mode)
- HVAC_MODE_AUTO: Follow the configured schedule (schedule mode)
- HVAC_MODE_OFF: Vacation mode (continuous energy savings)
The requested hvac mode will only be set if it is part of the
defined SUPPORT_MODES list
"""
if hvac_mode not in SUPPORT_MODES:
_LOGGER.warning(
"%s: set hvac mode to '%s' is not supported. "
"Supported hvac modes are %s",
self._name, str(hvac_mode), SUPPORT_MODES)
return None
_LOGGER.info("%s: set hvac mode to '%s'", self._name, str(hvac_mode))
if (hvac_mode == HVAC_MODE_HEAT) and (self._active_state == 4):
_LOGGER.debug(
"%s: request 'changeSchemeState' with 'state' value %s "
"and 'temperatureState' value %s",
self._name, str(0), str(1),
)
self._data = await self.do_api_request(
self._name, self._session,
BASE_URL.format(
self._host, self._port,
"/happ_thermstat?action=changeSchemeState"
"&state=0"
"&temperatureState=1",
),
)
elif hvac_mode == HVAC_MODE_HEAT:
_LOGGER.debug(
"%s: request 'changeSchemeState' with 'state' value %s ",
self._name, str(0)
)
self._data = await self.do_api_request(
self._name, self._session,
BASE_URL.format(
self._host, self._port,
"/happ_thermstat?action=changeSchemeState"
"&state=0",
),
)
elif hvac_mode == HVAC_MODE_AUTO:
_LOGGER.debug(
"%s: request 'changeSchemeState' with 'state' value %s ",
self._name, str(1)
)
self._data = await self.do_api_request(
self._name, self._session,
BASE_URL.format(
self._host, self._port,
"/happ_thermstat?action=changeSchemeState"
"&state=1",
),
)
elif hvac_mode == HVAC_MODE_OFF:
_LOGGER.debug(
"%s: request 'changeSchemeState' with 'state' value %s "
"and 'temperatureState' value %s",
self._name, str(8), str(4),
)
self._data = await self.do_api_request(
self._name, self._session,
BASE_URL.format(
self._host, self._port,
"/happ_thermstat?action=changeSchemeState"
"&state=8"
"&temperatureState=4",
),
)
@property
def extra_state_attributes(self) -> Dict[str, Any]:
"""
Return additional Toon Thermostat status details
The information will be available in Home Assistant for reporting
or automations based on the provided information
"""
return {
"burner_info": self._burner_info,
"modulation_level": self._modulation_level,
}
|
# Generated by Django 3.0.5 on 2020-05-04 02:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('books', '0006_auto_20200426_2254'),
]
operations = [
migrations.AddField(
model_name='chapter',
name='game_icon',
field=models.ImageField(blank=True, null=True, upload_to='game_icons/'),
),
]
|
num_customers = [1,2,3,4,5,6,7,8,9,10]
# Use float division so the averages are not truncated to integers
print 'average of first 7: '+str(sum(num_customers[:7])/7.0)
print 'average of last 7: '+str(sum(num_customers[-7:])/7.0)
print 'max no: '+str(max(num_customers))
print 'min no: '+str(min(num_customers))
|
#!/usr/bin/env python
from bluepy import btle
import binascii
def dump_services(dev):
services = sorted(dev.services, key=lambda s: s.hndStart)
for s in services:
if s.hndStart == s.hndEnd:
continue
chars = s.getCharacteristics()
for i, c in enumerate(chars):
props = c.propertiesToString()
if 'READ' in props:
val = c.read()
if c.uuid == btle.AssignedNumbers.device_name:
string = '\'' + val.decode('utf-8') + '\''
elif c.uuid == btle.AssignedNumbers.device_information:
string = repr(val)
else:
string = '<s' + binascii.b2a_hex(val).decode('utf-8') + '>'
else:
string = ''
if 'Anova' in string:
print ("%s, MAC: %s" % (string, dev.addr))
class ScanAnova(btle.DefaultDelegate):
def __init__(self):
btle.DefaultDelegate.__init__(self)
def handleDiscovery(self, dev, isNewDev, isNewData):
if dev.rssi < -128:
return
def main():
btle.Debugging = False
scanner = btle.Scanner(0).withDelegate(ScanAnova())
devices = scanner.scan(4)
for d in devices:
if not d.connectable or d.rssi < -128:
continue
try:
dev = btle.Peripheral(d)
dump_services(dev)
dev.disconnect()
except btle.BTLEException:
continue
if __name__ == "__main__":
main()
|
import pytest
from otx.mpa import Stage
from otx.mpa.cls.stage import ClsStage
from tests.test_suite.e2e_test_system import e2e_pytest_unit
from tests.unit.algorithms.classification.test_helper import setup_mpa_task_parameters
class TestOTXClsStage:
@pytest.fixture(autouse=True)
def setup(self) -> None:
self.model_cfg, self.data_cfg, recipe_cfg = setup_mpa_task_parameters(
task_type="incremental", create_test=True, create_val=True
)
self.stage = ClsStage(name="", mode="train", config=recipie_cfg, common_cfg=None, index=0)
@e2e_pytest_unit
def test_configure(self, mocker):
mock_cfg_model = mocker.patch.object(ClsStage, "configure_model")
mock_cfg_ckpt = mocker.patch.object(ClsStage, "configure_ckpt")
mock_cfg_data = mocker.patch.object(ClsStage, "configure_data")
mock_cfg_task = mocker.patch.object(ClsStage, "configure_task")
fake_arg = {"pretrained": True, "foo": "bar"}
returned_value = self.stage.configure(self.model_cfg, "", self.data_cfg, True, **fake_arg)
mock_cfg_model.assert_called_once_with(self.stage.cfg, self.model_cfg, True, **fake_arg)
mock_cfg_ckpt.assert_called_once_with(self.stage.cfg, "", fake_arg.get("pretrained", None))
mock_cfg_data.assert_called_once_with(self.stage.cfg, self.data_cfg, True, **fake_arg)
mock_cfg_task.assert_called_once_with(self.stage.cfg, True, **fake_arg)
assert returned_value == self.stage.cfg
@e2e_pytest_unit
def test_configure_model(self):
fake_arg = {"ir_model_path": {"ir_weight_path": "", "ir_weight_init": ""}}
self.stage.configure_model(self.stage.cfg, self.model_cfg, True, **fake_arg)
assert self.stage.cfg.model_task
@e2e_pytest_unit
def test_configure_data(self, mocker):
mock_super_cfg_data = mocker.patch.object(Stage, "configure_data")
self.stage.configure_data(self.stage.cfg, self.data_cfg, True, pretrained=None)
mock_super_cfg_data.assert_called_once()
assert self.stage.cfg.data
assert self.stage.cfg.data.train
assert self.stage.cfg.data.val
@e2e_pytest_unit
def test_configure_task(self, mocker):
mock_cfg_classes = mocker.patch.object(ClsStage, "configure_classes")
self.stage.configure_task(self.stage.cfg, True)
mock_cfg_classes.assert_called_once()
@e2e_pytest_unit
def test_configure_classes(self, mocker):
mocker.patch.object(Stage, "get_model_classes", return_value=["foo", "bar"])
mocker.patch.object(Stage, "get_data_cfg", return_value=self.data_cfg)
self.stage.configure_classes(self.stage.cfg)
assert self.stage.model_classes == ["foo", "bar"]
@e2e_pytest_unit
def test_configure_topk(self):
self.stage.cfg.model.head.num_classes = 2
self.stage.configure_topk(self.stage.cfg)
assert self.stage.cfg.model.head.topk == (1,)
|
from flask import Flask, render_template, request, redirect, json, url_for, abort
from flask_dance.contrib.google import make_google_blueprint, google
from flask_dance.contrib.github import make_github_blueprint, github
import os
import sqlite3
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
application = app = Flask(__name__)
app.secret_key = "mysecretkey"
google_blueprint = make_google_blueprint(client_id='58639801113-hpseuv9faul6e21npea4qh55eqar7jrj.apps.googleusercontent.com', client_secret='g5rknjPxezf4sR-MC5G1qo4i', scope=['profile', 'email'], redirect_url='/google')
app.register_blueprint(google_blueprint, url_prefix='/google_login')
github_blueprint = make_github_blueprint(client_id='2ff15c228d87b9cdc1e9', client_secret='383b00b2fafca9bb1bb87fea5bd247b77b7e37e2', redirect_url='/github')
app.register_blueprint(github_blueprint, url_prefix='/github_login')
@application.route('/')
def home():
return render_template('homepage.html')
@application.route('/google')
def google_login():
if not google.authorized:
return redirect(url_for('google.login'))
resp = google.get("/oauth2/v2/userinfo")
assert resp.ok, resp.text
email=resp.json()["email"]
name=resp.json()["name"]
picture=resp.json()["picture"]
return render_template('welcome.html',email=email,name=name,picture=picture)
@application.route('/github')
def github_login():
if not github.authorized:
return redirect(url_for('github.login'))
resp = github.get("/user")
assert resp.ok, resp.text
email=resp.json()["email"]
name=resp.json()["login"]
picture=resp.json()["avatar_url"]
return render_template('welcome.html',email=email,name=name,picture=picture)
@application.route('/login_action', methods=["POST","GET"])
def login_action():
user_name=request.form['username']
pass_word=request.form['password']
database='users.db'
conn = sqlite3.connect(database)
c = conn.cursor()
c.execute("Select * from user where username='%s' and password='%s'" %(user_name, pass_word))
data=c.fetchall()
conn.close()
return "<h1>Wecome {}</h1>".format(data)
@application.route('/comment', methods=["POST","GET"])
def comment():
comm=request.form['comment']
return comm
@application.route('/Welcome')
def welcome():
return render_template('welcome.html')
if __name__ == "__main__":
application.debug=True
application.run(host='0.0.0.0', port=5000)
|
import random
def name_to_number(name):
"""
Helper function to convert number to string
"""
if name == "rock":
return 0
elif name == "Spock":
return 1
elif name == "paper":
return 2
elif name == "lizard":
return 3
elif name == "scissors":
return 4
else:
print "name to number ERROR"
def number_to_name(number):
"""
Helper function to convert string to number
"""
if number == 0:
return "rock"
elif number == 1:
return "Spock"
elif number == 2:
return "paper"
elif number == 3:
return "lizard"
elif number == 4:
return "scissors"
else:
print "number to name ERROR"
def rpsls(player_choice):
"""
This is the main function of the game.
It takes the player's choice as input and randomly makes a choice for the computer.
Finally it computes and prints the winner of the game.
"""
#prints a blank line
print()
#prints player's choice and assigns a number to it
print("Player chooses", player_choice)
player_number = name_to_number(player_choice)
#computes a number for the computer, converts it to a choice and prints it
comp_number = random.randrange(0,5)
comp_choice = number_to_name(comp_number)
print "Computer chooses", comp_choice
#computes the difference of the choices mod five
#this choice determines the winner
difference = (comp_number - player_number) % 5
#if the difference is 1 or 2, the computer wins
#if the difference is 3 or 4, the player wins
if (difference == 1) or (difference == 2):
print("Computer wins!")
elif (difference == 3) or (difference == 4):
print("Player wins!")
elif (difference == 0):
print("Player and computer tie!")
else:
print("rpsls ERROR")
# test calls
rpsls("rock")
rpsls("Spock")
rpsls("paper")
rpsls("lizard")
rpsls("scissors")
|
# Unit tests should be fast, isolated, and repeatable
# The response of the LUIS engine depends on a specific context
# in the future, add CI/CD workflows and batch testing
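# A minimal sketch of the `mock_requests_get` fixture these tests assume.
# The nested attribute shape (prediction.topIntent, entities.Order.Quantity,
# intents.prediction.ModifyOrder.score) is hypothetical: it simply mirrors
# the assertions below, not the real LUIS response schema.
import pytest
from types import SimpleNamespace

@pytest.fixture
def mock_requests_get():
    # stub out only the pieces of the LUIS response that the tests inspect
    modify_order = SimpleNamespace(score=0.92)
    return SimpleNamespace(
        prediction=SimpleNamespace(topIntent='ModifyOrder'),
        entities=SimpleNamespace(Order=SimpleNamespace(Quantity='two')),
        intents=SimpleNamespace(prediction=SimpleNamespace(ModifyOrder=modify_order)),
    )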
class TestRetrieveLuisIntent(object):  # pytest only collects classes named Test* by default
'''
tests :
- mock the api call
- topIntent
- entities
- scores exceeds a threshold defined
optimise :
- use @pytest.mark.parametrize() for multiple tests
- use pytest-benchmark to test the speed
- NLU.DevOps is recommended for testing
'''
def test_intent(self,mock_requests_get):
'''
test the top intent
'''
assert mock_requests_get.prediction.topIntent == 'ModifyOrder'
def test_entities(self,mock_requests_get):
'''
test entities found
'''
assert mock_requests_get.entities.Order.Quantity == 'two'
def test_scores(self,mock_requests_get):
'''
test if scores exceeds 0.75 threshold
'''
assert mock_requests_get.intents.prediction.ModifyOrder.score > 0.75 |
import pandas
import requests
movies = pandas.read_csv('https://raw.githubusercontent.com/davidbailey/Notes/master/Movies.csv')
for movie in movies.Title:
# passing params lets requests URL-encode titles; note that OMDb now requires an apikey query parameter for most requests
r = requests.get('https://www.omdbapi.com/', params={'t': movie})
data = r.json()
if 'Title' in data:
print(data['Title'], data['Year'])
|
#!/usr/bin/env python3
"""
@author lsipii
"""
import sys, getopt
from flask import Flask
from apps.DeviceApp.DeviceApp import DeviceApp
from apps.DeviceApp.http.controllers.CoffeesHasWeController import CoffeesHasWeController
# Creates the device app
app = DeviceApp()
# Creates the flask router app
routerApp = Flask(__name__)
# The app controller
controller = CoffeesHasWeController(app)
# Defines the app routes
@routerApp.route('/', methods=controller.knownHttpMethods)
@routerApp.route('/<path>', methods=controller.knownHttpMethods)
def request(path = None):
return controller.getCoffeeResponse(path)
# App runner
if __name__ == '__main__':
argv = sys.argv[1:]
# Sets the app debug mode
debugMode = True
# Help texts
def printHelp():
print(app.getFullAppName())
print("Mission statement: "+ app.getAppMissionStatement(), "\n")
print("Usage: zoinks.py --help|--version|--production")
exit()
try:
opts, args = getopt.getopt(argv, "hvp", ["help", "version", "production"])
except getopt.GetoptError:
printHelp()
for opt, arg in opts:
if opt in ("-h", "--help"):
printHelp()
if opt in ("-v", "--version"):
print(app.getFullAppName())
exit()
if opt in ("-p", "--production"):
debugMode = False
# Run app
controller.setDebugMode(debugMode)
controller.validateZoinksFunctionality()
routerApp.run(debug=debugMode)
|
import pandas as pd
import psycopg2
import sqlalchemy
import matplotlib.pyplot as plt
#%matplotlib inline
from sqlalchemy import create_engine
from sqlalchemy_utils import create_database, database_exists, drop_database
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import bpy
from ..rig import RigInfo
class MHC_OT_PoseRightOperator(bpy.types.Operator):
"""This is a diagnostic operator, which poses both the capture & final armatures one frame at a time."""
bl_idname = 'mh_community.pose_right'
bl_label = 'Next Frame'
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
from ..mocap.sensor_runtime import Sensor
armature = context.object
problemMsg = None
rigInfo = RigInfo.determineRig(armature)
if rigInfo is None:
problemMsg = 'Unknown rigs are not supported.'
elif not rigInfo.isMocapCapable():
problemMsg = 'Rig is not capable of motion capture.'
elif len(context.scene.MhSensorAnimations) == 0:
problemMsg = 'No current capture being buffered.'
if problemMsg is not None:
self.report({'ERROR'}, problemMsg)
else:
Sensor.oneRight(rigInfo, context.scene.MhSensorAnimation_index)
return {'FINISHED'}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@classmethod
def poll(cls, context):
ob = context.object
return ob is not None and ob.type == 'ARMATURE' |
# -*- coding: utf-8 -*-
from app import db
import datetime
class Repo(db.Model):
id = db.Column(db.Integer, primary_key = True)
name = db.Column(db.String(64), index = True, unique = True)
path = db.Column(db.String(120), index = True, unique = True)
comment = db.Column(db.Text(), index = False, unique = False)
created_on = db.Column(db.DateTime(timezone=True), default=datetime.datetime.now)
updated_on = db.Column(db.DateTime(timezone=True), default=datetime.datetime.now, onupdate=datetime.datetime.now)
snapshots = db.relationship('Snapshot', backref='repo',
lazy='dynamic')
def __init__(self, name, path, comment):
self.name = name
self.path = path
self.comment = comment
def __repr__(self):
return '<Repo %r>' % (self.name)
class Snapshot(db.Model):
id = db.Column(db.Integer, primary_key = True)
name = db.Column(db.String(64), index = True, unique = True)
path = db.Column(db.String(120), index = True, unique = True)
type = db.Column(db.String(64), index = False, unique = False)
comment = db.Column(db.Text(), index = False, unique = False)
repo_id = db.Column(db.Integer, db.ForeignKey('repo.id'), nullable=False, index=True)
created_on = db.Column(db.DateTime(timezone=True), default=datetime.datetime.now)
def __init__(self, name, type='current', path='', comment='', repo_id=None):
self.name = name
self.type = type
self.comment = comment
self.path = path
self.repo_id = repo_id
def __repr__(self):
return '<Snapshot %r>' % (self.name) |
# use dynamic programming, bottom up
# space O(n) if original data can't be overwritten
# time O(n^2), where n is the length of row
class Solution(object):
def minimumTotal(self, triangle):
"""
:type triangle: List[List[int]]
:rtype: int
"""
if not triangle: return 0
dp = triangle[-1]
for row in range(len(triangle)-2, -1, -1): # bottom up
for i, num in enumerate(triangle[row]):
dp[i] = num + min(dp[i], dp[i+1])
return dp[0]
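# Quick usage check with the classic example triangle; the rows are copied
# first because this bottom-up DP reuses the input's last row as its buffer.
if __name__ == "__main__":
    triangle = [[2], [3, 4], [6, 5, 7], [4, 1, 8, 3]]
    print(Solution().minimumTotal([row[:] for row in triangle]))  # 2 + 3 + 5 + 1 = 11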
|
from smartninja_nosql.odm import Model
class User(Model):
def __init__(self, id, name, email, secret_number, **kwargs):
self.id = id
self.name = name
self.email = email
self.secret_number = secret_number
super().__init__(**kwargs)
|
t = (3, 30, 2019, 9, 25)
hour = str(t[0]).zfill(2)
minutes = str(t[1]).zfill(2)
year = str(t[2]).zfill(4)
month = str(t[3]).zfill(2)
day = str(t[4]).zfill(2)
print("{month}/{day}/{year} {hour}:{minutes}".format(month=month, day=day, year=year, hour=hour, minutes=minutes)) |
import logging
from urllib.parse import urlencode
from ewt.scraper import EWTScraper
class FantasyFootballCalculatorScraper(EWTScraper):
'''
Obtains html content of NFL fantasy projections or ADP page of fantasyfootballcalculator.com
Example:
s = FantasyFootballCalculatorScraper()
content = s.get_projections()
content = s.get_projections(url)
content = s.get_projections(url, fn)
content = s.get_adp()
content = s.get_adp(url)
content = s.get_adp(url, fn)
'''
def __init__(self, **kwargs):
'''
Args:
**kwargs: projections_url (str)
'''
# see http://stackoverflow.com/questions/8134444/python-constructor-of-derived-class
EWTScraper.__init__(self, **kwargs)
if 'format' in kwargs:
self.format = kwargs['format']
else:
self.format = 'standard'
if 'projections_url' in kwargs:
self.projections_url = kwargs['projections_url']
else:
self.projections_url = 'https://fantasyfootballcalculator.com/rankings'
if 'teams' in kwargs:
self.teams = kwargs['teams']
else:
self.teams = '14'
if 'year' in kwargs:
self.year = kwargs['year']
else:
self.year = '2015'
def _adp_url(self):
'''
URL encode parameters for url for average draft position(ADP) page
'''
base_url = 'https://fantasyfootballcalculator.com/adp_xml.php?'
params = {
'format': self.format,
'teams': self.teams,
}
url = base_url + urlencode(params)
logging.debug('adp url is %s' % url)
return url
def get_adp(self, url=None, fname=None):
'''
Fetch adp url, try cache, then file, then web
Args:
url (str): url for the fantasy football calculator ADP page
Returns:
Str if successful, None otherwise.
'''
if not url:
url = self._adp_url()
if not url and not fname:
raise ValueError('invalid or missing URL or filename')
return self.get(url, fname)
def get_projections(self, url=None, fname=None):
'''
Fetch projections url, try cache, then file, then web
Args:
url (str): url for the fantasy football calculator projections page
Returns:
Str if successful, None otherwise.
'''
if not url:
url = self.projections_url
return self.get(url, fname)
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
s = FantasyFootballCalculatorScraper()
content = s.get_adp()
logging.debug(content)
|
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D  # registers the '3d' projection on older matplotlib; not used directly
'''
demonstrating a support whose dimension is lower than the space in which it is embedded
'''
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
'''
# Cylinder
x=np.linspace(-1, 1, 100)
z=np.linspace(0, 1.0/(2.0*3.14159), 100)
Xc, Zc=np.meshgrid(x, z)
Yc = np.sqrt(1-Xc**2)
# Draw parameters
rstride = 20
cstride = 10
ax.plot_surface(Xc, Yc, Zc, alpha=0.3, rstride=1, cstride=1, cmap='coolwarm')
ax.plot_surface(Xc, -Yc, Zc, alpha=0.3, rstride=1, cstride=1, cmap='coolwarm')
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
plt.show()
'''
theta = np.linspace(0, 2 * np.pi, 201)
x = np.cos(theta)
y = np.sin(theta)
z = (1 / (2.0 * np.pi)) + np.sin(0.5 * theta)
ax.plot(x,y,z)
#plotting the support
#circle = plt.Circle((0,0), 1.0)
fig,ax = plt.subplots()
plt.plot(x,y)
plt.axis('scaled')
#plt.ylim((-2.0,2.0))
#plt.xlim((-2.0,2.0))
#ax.add_patch(circle)
plt.show()
|
# Generated by Django 3.0.2 on 2020-06-28 21:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(help_text='Nickname', max_length=12, unique=True)),
('realname', models.CharField(help_text='Name', max_length=24)),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('message', models.TextField(max_length=300)),
('id_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='C.User')),
],
),
]
|
from django.core.cache import cache
from TechSekai.forms import RegisterDjangoUserForm, LoginDjangoUserForm
from TechSekai.models import *
from TechSekai.templatetags.auth_extras import *
def category_context_processor(request):
categories = cache.get('categories')
if not categories:
categories = Category.objects.all()
cache.set('categories', categories)
content = {'all_categories': categories.exclude(name='Other')}
if request.user.is_authenticated and request.user.username != 'Admin' and not has_group(request.user, 'shops'):
try:
u1 = User.objects.get(django_user=request.user)
except User.DoesNotExist:
u1 = User(django_user=request.user)
u1.save()
cart = Cart(user=u1)
wishlist = WishList(user=u1)
cart.save()
wishlist.save()
c = Cart.objects.filter(user=u1)
if c.exists():
cart = Cart_Item.objects.filter(cart=c[0]).count()
content.update({'cart': cart})
if WishList.objects.filter(user=u1).exists():
w = WishList.objects.get(user=u1)
wishList = len(w.prods.all())
content.update({'wishList': wishList})
viewed = []
if 'viewed' in request.session:
for i in request.session['viewed']:
viewed.append(Product.objects.get(id=i))
content.update({'viewed': viewed})
return content
|
from flask import Flask
from flask import render_template, request
from function import artist_data, get_recommandation, recommandation_for_you, for_you, for_you_sim
import pandas as pd
import numpy as np
app = Flask(__name__)
@app.route('/', methods=['GET','POST'])
def index():
artist = artist_data()
if request.method == 'POST':
#print(request.form.getlist("artistcheckbox"))
return 'Done'
return render_template('index.html', artist = artist)
@app.route('/page1', methods=['POST'])
def artist_recommande():
# form data is only present on POST, so this route now accepts POST only
artist_choice = request.form.getlist("artistcheckbox")
recommandation = get_recommandation(artist_choice)
recom_for_you = recommandation_for_you(artist_choice)
For_you = for_you(artist_choice)
For_you_sim = for_you_sim(artist_choice)
return render_template("page.html", recommandation= recommandation, recom_for_you = recom_for_you
, For_you=For_you, For_you_sim = For_you_sim)
|
def switch(a, n):
# flip the first n pancakes: reverse the prefix and toggle each +/- sign
d = ""
for x in reversed(a[:n]):
d += x
d2 = ""
for l in d:
if l=="+":
d2 += "-"
else:
d2 += "+"
for q in a[n:]:
d2 += q
return d2
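# Sanity check (a sketch): flipping the first two pancakes of "--+-"
# reverses that prefix and toggles each sign, giving "+++-".
assert switch("--+-", 2) == "+++-"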
with open("/Users/danielvebman/Downloads/pancake.in.txt", "r") as input:
cases = []
for line in input:
if "+" in line.rstrip() or "-" in line.rstrip():
cases.append(line.rstrip())
stackNum = 0
for stack in cases:
stackNum += 1
switches = 0
while "-" in stack:
init = stack[0]
n = 0
c = True
for p in stack:
if c:
if p == init:
n += 1
else:
c = False
stack = switch(stack, n)
switches += 1
with open("/Users/danielvebman/Downloads/pancake.out.txt", "a") as output:
output.write("Case #"+str(stackNum)+": " + str(switches) + "\n")
"""
init = "|"
while("-" in stack):
n = 0
for c in stack:
if init=="|":
init = c
n+=1
elif c is init:
n+=1
else:
break
switch(stack, n)
switches += 1
""" |
# -*- coding: utf-8 -*-
import pymysql
from maotuying_test import settings
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
class MaotuyingTestPipeline(object):
def __init__(self):
self.connect = pymysql.connect(
host=settings.MYSQL_HOST,
db=settings.MYSQL_DBNAME,
user=settings.MYSQL_USER,
passwd=settings.MYSQL_PASSWD,
charset='utf8',
use_unicode=True)
self.cursor = self.connect.cursor()
def process_item(self, item, spider):
try:
self.cursor.execute(
"""insert into restaurant(restaurant_rank,
restaurant_name,restaurant_evaluate,restaurant_addr,restaurant_phone,restaurant_pic_num,restaurant_food_tpye
)
value(%s,%s,%s,%s,%s,%s,%s)""",
(item['restaurant_rank'],
item['restaurant_name'],
item['restaurant_evaluate'],
item['restaurant_addr'],
item['restaurant_phone'],
item['restaurant_pic_num'],
item['restaurant_food_tpye']
#item['restaurant_special'],
#item['restaurant_time']))
))
self.connect.commit()
except Exception as err:
print("重复插入了==》错误信息:" + str(err))
return item
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def addTwoNumbers(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
if not l1 and not l2:
return None
if not l1:
return l2
if not l2:
return l1
stack1, stack2 = [], []
it1, it2 = l1, l2
while it1:
stack1.append(it1.val)
it1 = it1.next
while it2:
stack2.append(it2.val)
it2 = it2.next
carry = 0
prev = None
while stack1 or stack2 or carry:
if stack1:
carry += stack1.pop()
if stack2:
carry += stack2.pop()
node = ListNode(carry % 10)
node.next = prev
prev = node
carry = carry // 10
return node |
"""
Design an algorithm and write code to remove the duplicate characters in a string
without using any additional buffer. NOTE: One or two additional variables are fine.
An extra copy of the array is not.
FOLLOW UP
Write the test cases for this method.
"""
def remove_duplicate_characters(string):
    # strings are immutable, so edit a list of chars in place, keeping only
    # the first occurrence of each character (no extra buffer beyond indexes)
    chars = list(string)
    tail = 0  # one past the last unique character kept so far
    for i in range(len(chars)):
        duplicate = False
        for j in range(tail):
            if chars[j] == chars[i]:
                duplicate = True
                break
        if not duplicate:
            chars[tail] = chars[i]
            tail += 1
    return "".join(chars[:tail])
print(remove_duplicate_characters("recepient"))  # -> recpint
|
# Generated by Django 3.1.7 on 2021-03-26 10:53
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Gallery',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=255)),
('slugid', models.SlugField(blank=True, max_length=16)),
('image', models.ImageField(upload_to='gallery_images/')),
],
),
migrations.CreateModel(
name='LightLabImage',
fields=[
('name', models.CharField(blank=True, max_length=255)),
('image', models.ImageField(upload_to='light_lab_images/')),
('image_id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
],
),
]
|
# encoding UTF-8
# Author: Mauricio Medrano Castro, A01272273
# Compute the rental cost for new-release and regular movies
def calcularRenta(numeroEstrenos,numeroNormales): #computes the total due for new releases and regular movies
numeroEstrenos = numeroEstrenos * 45
numeroNormales = numeroNormales * 27
pagoTotal = numeroEstrenos + numeroNormales
return pagoTotal
def main(): #main function
numeroEstrenos = int(input("New-release movies: "))
numeroNormales = int(input("Regular movies: "))
pagoTotal = calcularRenta(numeroEstrenos,numeroNormales)
print("Number of new-release movies rented:", numeroEstrenos)
print("Number of regular movies rented:", numeroNormales)
print("Total to pay: $","%.2f" % pagoTotal)
main()
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def reorderList(self, head):
"""
:type head: ListNode
:rtype: void Do not return anything, modify head in-place instead.
"""
if head is None or head.next is None:
return head
p=head
l=0
while p!=None:
l+=1
p=p.next
mid=(l-1)//2
p=head.next
count=1
while count<mid:
p=p.next
count += 1
q=p.next
while q!=None:
t=q
q=q.next
t.next=p.next
p.next=t
sorthead=p.next
self.printlinklist(head)
p.next=None
h=head
q=sorthead
# self.printlinklist(q)
while h!=None and q!=None:
t=q
q=q.next
t.next=h.next
h.next=q
h=t.next
def printlinklist(self,head):
while head!=None:
print(head.val)
head=head.next
|
# https://leetcode.com/problems/insert-interval/
class Solution(object):
def insert(self, intervals, new):
"""
:type intervals: List[List[int]]
:type newInterval: List[int]
:rtype: List[List[int]]
"""
# find last idx nums[idx][1] < interval[0]
# find first idx nums[idx][1] >= interval[0], l - 1
# find first idx nums[idx][0] > interval[1]
n = len(intervals)
def bisect(func):
l, r = 0, n
while l < r:
mid = (l + r) >> 1
if func(mid):
r = mid
else:
l = mid + 1
return l
start = bisect(lambda x: intervals[x][1] >= new[0]) - 1
end = bisect(lambda x: intervals[x][0] > new[1])
if start < n - 1: new[0] = min(new[0], intervals[start + 1][0])
if end > 0: new[1] = max(new[1], intervals[end - 1][1])
return intervals[:start + 1] + [new] + intervals[end:]
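# Quick sanity check against the standard example for this problem:
if __name__ == "__main__":
    print(Solution().insert([[1, 3], [6, 9]], [2, 5]))  # -> [[1, 5], [6, 9]]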
|
no_of_properties = 5
no_of_combinations = 5
no_of_rules = 15
properties = [["Red", "Green", "Yellow", "Blue", "White"], ["Dogs", "Cats", "Fish", "Birds", "Horses"], ["Tea", "Water", "Beer", "Milk", "Coffee"], ["Brit", "Swede", "Dane", "Norwegian", "German"], ["Pall Mall", "Dunhill", "Blend", "Bluemaster", "Prince"]]
relations = ["Left", "Right", "Has", "Neighbor", "At"]
locations = ["1", "2", "3", "4", "5"]
def search(p, ps):
return any(p in group for group in ps)
class Node():
def __init__(self, action):
self.action = action
self.children = []
def up(self):
pass
def down(self, target):
pass
class Rule():
def __init__(self, p1, r, p2, grid):
self.p1 = p1
self.p2 = p2
self.r = r
self.grid = grid
def validate(self):
if self.r != "At":
p1_cells = self.grid.find_property(self.p1)
p2_cells = self.grid.find_property(self.p2)
if self.r == "Left":
for p1_cell in p1_cells:
ok = False
for p2_cell in p2_cells:
if p1_cell[1] == p2_cell[1] - 1:
ok = True
break
if not ok:
self.grid.values[p1_cell[0]][p1_cell[1]] = ""
for p2_cell in p2_cells:
ok = False
for p1_cell in p1_cells:
if p2_cell[1] == p1_cell[1] + 1:
ok = True
break
if not ok:
self.grid.values[p2_cell[0]][p2_cell[1]] = ""
elif self.r == "Right":
for p1_cell in p1_cells:
ok = False
for p2_cell in p2_cells:
if p1_cell[1] == p2_cell[1] + 1:
ok = True
break
if not ok:
self.grid.values[p1_cell[0]][p1_cell[1]] = ""
for p2_cell in p2_cells:
ok = False
for p1_cell in p1_cells:
if p2_cell[1] == p1_cell[1] - 1:
ok = True
break
if not ok:
self.grid.values[p2_cell[0]][p2_cell[1]] = ""
elif self.r == "Has":
for p1_cell in p1_cells:
ok = False
for p2_cell in p2_cells:
if p2_cell[1] == p1_cell[1]:
ok = True
break
if not ok:
self.grid.values[p1_cell[0]][p1_cell[1]] = ""
for p2_cell in p2_cells:
ok = False
for p1_cell in p1_cells:
if p1_cell[1] == p2_cell[1]:
ok = True
break
if not ok:
self.grid.values[p2_cell[0]][p2_cell[1]] = ""
elif self.r == "Neighbor":
for p1_cell in p1_cells:
ok = False
for p2_cell in p2_cells:
if p1_cell[1] == p2_cell[1] - 1 or p1_cell[1] == p2_cell[1] + 1:
ok = True
break
if not ok:
self.grid.values[p1_cell[0]][p1_cell[1]] = ""
for p2_cell in p2_cells:
ok = False
for p1_cell in p1_cells:
if p2_cell[1] == p1_cell[1] - 1 or p2_cell[1] == p1_cell[1] + 1:
ok = True
break
if not ok:
self.grid.values[p2_cell[0]][p2_cell[1]] = ""
else:
for row in range(0, len(self.grid.values)):
for col in range(0, no_of_combinations):
test = self.grid.values[row][col]
ok = False
for property in properties:
if test in property and self.p1 in property:
ok = True
if ok:
if self.grid.values[row][col] != self.p1 and str(col + 1) == self.p2:
self.grid.values[row][col] = ""
elif self.grid.values[row][col] == self.p1 and str(col + 1) != self.p2:
self.grid.values[row][col] = ""
class Grid():
def __init__(self):
self.values = []
self.rules = []
for property in properties:
for each in property:
temp = [each, each, each, each, each]
self.values.append(temp)
def __str__(self):
l = 0
output = ""
for row in self.values:
for col in row:
if len(col) > l:
l = len(col)
for i in range(0, no_of_properties):
output += "+"
output += "-" * l
output += "+"
output += "\n"
for loc in locations:
output += "|"
output += "{message: <{fill}}".format(message=str(loc), fill=str(l))
output += "|"
output += "\n"
for i in range(0, no_of_properties):
output += "+"
output += "-" * l
output += "+"
output += "\n"
for row in self.values:
for col in row:
output += "|"
output += "{message: <{fill}}".format(message=col, fill=str(l))
output += "|"
output += "\n"
if self.values.index(row) % no_of_properties == 4:
for i in range(0, no_of_properties):
output += "+"
output += "-" * l
output += "+"
output += "\n"
return output.rstrip()
def add_rule(self, p1, r, p2):
if r == "At":
if search(p1, properties) and search(p2, locations) and search(r, relations):
self.rules.append(Rule(p1, r, p2, self))
else:
print("Invalid properties:",p1,r,p2)
else:
if search(p1, properties) and search(p2, properties) and search(r, relations):
self.rules.append(Rule(p1, r, p2, self))
else:
print("Invalid properties:",p1,r,p2)
def find_property(self, p):
results = []
if not search(p, properties):
print("Invalid property:", p)
else:
for row in range(0, len(self.values)):
for col in range(0, no_of_properties):
if self.values[row][col] == p:
results.append([row,col])
return results
def validate(self):
for rule in self.rules:
rule.validate()
counter = 0
for row in range(0, len(self.values)):
if self.values[row].count("") == len(self.values[row]) - 1:
for property in self.values[row]:
if property != "":
p = property
i = self.values[row].index(p)
break
for row2 in range(0, len(self.values)):
for col in range(0, no_of_properties):
if col == i:
if self.values[row2][col] != "" and self.values[row2][col] != p:
ok = False
for property in properties:
if self.values[row2][col] in property and p in property:
ok = True
if ok:
counter += 1
self.values[row2][col] = ""
for prop in range(0, no_of_properties):
for col in range(0, no_of_properties):
n = 0
for row in range(0 + (prop * no_of_combinations), no_of_combinations + (prop * no_of_combinations)):
if self.values[row][col] != "":
n += 1
i = row
j = col
if n > 1:
break
if n == 1:
for col2 in range(0, no_of_properties):
if col2 != j and self.values[i][col2] != "":
counter += 1
self.values[i][col2] = ""
if counter > 0:
self.validate()
class Main():
def __init__(self):
puzzle = Grid()
puzzle.add_rule("Brit", "Has", "Red")
puzzle.add_rule("Swede", "Has", "Dogs")
puzzle.add_rule("Dane", "Has", "Tea")
puzzle.add_rule("Green", "Left", "White")
puzzle.add_rule("Green", "Has", "Coffee")
puzzle.add_rule("Pall Mall", "Has", "Birds")
puzzle.add_rule("Yellow", "Has", "Dunhill")
puzzle.add_rule("Milk", "At", "3")
puzzle.add_rule("Norwegian", "At", "1")
puzzle.add_rule("Blend", "Neighbor", "Cats")
puzzle.add_rule("Dunhill", "Neighbor", "Horses")
puzzle.add_rule("Bluemaster", "Has", "Beer")
puzzle.add_rule("German", "Has", "Prince")
puzzle.add_rule("Norwegian", "Neighbor", "Blue")
puzzle.add_rule("Blend", "Neighbor", "Water")
puzzle.validate()
print(puzzle)
Main()
|
# %%
import re
import sys
import os
import glob
from collections import Counter
from quantities import units
from quantities.units.area import D
from quantities.unitquantity import UnitQuantity as UQ
import scipy
import pandas as pd
import spacy
import corenlp
import sklearn_crfsuite
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import make_scorer
from sklearn.model_selection import cross_val_score
from sklearn_crfsuite import scorers
from sklearn_crfsuite import metrics
# %%
tags = {'QUANT' : 'Quantity', 'ME' : 'MeasuredEntity', 'MP' : 'MeasuredProperty'}
lang_model = spacy.load('en_core_web_sm')
# %%
units_list = []
train_raw_files = glob.glob("train/text/*.txt")
train_tsv_files = glob.glob("train/tsv/*.tsv")
test_raw_files = glob.glob("trial/txt/*.txt")
test_tsv_files = glob.glob("trial/tsv/*.tsv")
eval_raw_files = glob.glob("eval/text/*.txt")
# %%
def raw_text_to_df(raw_files):
"""converting raw training files to dataframes"""
# dict to be exported as dataframe
documents_of_interest = {
'document_name': [],
'sentence': [],
'entities': [],
'np': []
}
# filling the dict
for raw_file in raw_files:
with open(raw_file, "r") as file:
doc_name = file.name.split("/")[2]
doc_name = doc_name.split('.')[0]
file_content = lang_model(file.read())
for sentence in file_content.sents:
sentence_pos_tags = [word.tag_ for word in sentence]
documents_of_interest['document_name'].append(doc_name)
documents_of_interest['sentence'].append(sentence)
entities = []
for measurement in sentence.ents:
entities.append((measurement.label_,(measurement.start, measurement.end)))
documents_of_interest['entities'].append(
entities
)
noun_phrases = []
for chunk in sentence.noun_chunks:
noun_phrases.append((chunk.text, (chunk.start, chunk.end)))
documents_of_interest['np'].append(noun_phrases)
# break
dataframe = pd.DataFrame(
documents_of_interest,
columns=['document_name', 'sentence', 'entities', 'np'],
)
return dataframe
# %%
def get_text_labels(text_dataframe, tsv_dataframe):
labels = []
for _, text_row in text_dataframe.iterrows():
sentence_tag_placeholders = []
for word in text_row['sentence']:
sentence_tag_placeholders.append(
['O', (word.idx, (word.idx + len(word)))]
)
# O means not a quantity QUANT means quantity
document_name = text_row['document_name']
doc_id = tsv_dataframe['docId'] == document_name
for _, annot_row in tsv_dataframe[doc_id].iterrows():
annotType = annot_row['annotType']
if annotType == 'Qualifier':
continue
for key, value in tags.items():
if annotType == value:
annotType = key
break
for i, item in enumerate(sentence_tag_placeholders):
if item[0] != 'O':
continue
if (annot_row['startOffset'] <= item[1][0] < annot_row['endOffset']) or (annot_row['startOffset'] < item[1][1] <= annot_row['endOffset']):
sentence_tag_placeholders[i][0] = annotType
labels.append([label for label, _ in sentence_tag_placeholders])
return labels
# %%
def get_units() :
units_list = ['%'] # Add possible unit symbols
for key, value in units.__dict__.items():
if isinstance(value, UQ):
if key not in units_list :
units_list.append(key.lower())
if value.name not in units_list :
units_list.append(value.name.lower())
return units_list
def is_unit(token):
return token.lower_ in units_list or token.lemma_ in units_list
# %%
def features_word(word, entities, nouns, length, pos):
features = {
'bias': 1.0,
'lemma': word.lemma_,
'upper': word.is_upper,
'title': word.is_title,
'digit': word.is_digit,
'numlike': word.like_num,
'unit': is_unit(word),
'postag': word.tag_,
'dep': word.dep_
}
for entity in entities:
if entity[1][0] <= word.i < entity[1][1]:
features['entity'] = entity[0]
break
for noun in nouns:
if noun[1][0] <= word.i < noun[1][1]:
features['np'] = list(noun[0])
break
if pos >= 1 :
new_word = word.nbor(-1)
features.update({
'-lemma': new_word.lemma_,
'-upper': new_word.is_upper,
'-title': new_word.is_title,
'-digit': new_word.is_digit,
'-numlike': new_word.like_num,
'-unit': is_unit(new_word),
'-postag': new_word.tag_,
'-dep': new_word.dep_
})
else:
features['start'] = True
if pos <= length-2 :
new_word = word.nbor(1)
features.update({
'+lemma': new_word.lemma_,
'+upper': new_word.is_upper,
'+title': new_word.is_title,
'+digit': new_word.is_digit,
'+numlike': new_word.like_num,
'+unit': is_unit(new_word),
'+postag': new_word.tag_,
'+dep': new_word.dep_
})
else:
features['end'] = True
window_3 = {'lower': [], 'lemma': [], 'pos': [], 'dep': []}
window_5 = {'lower': [], 'lemma': [], 'pos': [], 'dep': []}
# window_8 = {'lower': [], 'lemma': [],'pos': [], 'dep': []}
for x in range(-5, 6):
if x == 0 or (pos + x < 0) or (pos + x > length - 1):
continue
window_word = word.nbor(x)
if x in range(-3, 4):
window_3['lemma'].append(window_word.lemma_)
window_3['pos'].append(window_word.pos_)
window_3['dep'].append(window_word.dep_)
# if x in range(-2, 3):
# window_2['lower'].append(window_word.lower_)
# window_2['lemma'].append(window_word.lemma_)
# window_2['pos'].append(window_word.pos_)
# window_2['dep'].append(window_word.dep_)
window_5['lemma'].append(window_word.lemma_)
window_5['pos'].append(window_word.pos_)
window_5['dep'].append(window_word.dep_)
features.update({
'window_3_words:lemma': window_3['lemma'],
'window_3_words:pos': window_3['pos'],
'window_3_words:dep': window_3['dep'],
'window_5_words:lemma': window_5['lemma'],
'window_5_words:pos': window_5['pos'],
'window_5_words:dep': window_5['dep']
})
return features
# %%
def features_sentence(sentence, entities, nouns):
sentence_features = []
for i in range(0, len(sentence)) :
word_features = features_word(sentence[i], entities, nouns, len(sentence), i)
sentence_features.append(word_features)
return sentence_features
# %%
def find_closest(span, start, end):
if len(span) == 0:
return -1
ind = -1
min_dist = 100000
for i in range(len(span)):
dist = 100000
if (start > span[i][1]) :
dist = start - span[i][1]
else :
dist = span[i][0] - end
if dist <= min_dist :
min_dist = dist
ind = i
return ind
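# Sketch check: the span [0, 2] is closer to offsets 4..5 than [10, 12] is.
assert find_closest([[0, 2], [10, 12]], 4, 5) == 0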
# %%
# module-level counters shared by merge_tags and write_predictions_to_tsv
# (a bare `global` statement at module level has no effect, so initialise instead)
annot_set_index = 1
annot_index = 1
# %%
def merge_tags(output_list, quant_list, mp_list, me_list, quantspan, mpspan, mespan):
global annot_set_index
global annot_index
if (len(quant_list) <= 0) :
return output_list
me_ind = []
mp_ind = []
for i in range(len(quant_list)):
me_ind.append([])
mp_ind.append([])
for i in range(len(mpspan)):
closest = find_closest(quantspan, mpspan[i][0], mpspan[i][1])
mp_ind[closest].append(i)
for i in range(len(mespan)):
closest = find_closest(quantspan, mespan[i][0], mespan[i][1])
me_ind[closest].append(i)
for i in range(len(quant_list)):
# quant_list[i][1] = annot_set_index
quant_list[i][5] = annot_index
# annot_set_index += 1
annot_index += 1
output_list.append(quant_list[i])
if len(mp_ind[i]) > 0:
for x in mp_ind[i]:
mp_list[x][1] = quant_list[i][1]
mp_list[x][5] = annot_index
annot_index += 1
mp_list[x][7] = '{"HasQuantity": "' + str(quant_list[i][5]) + '"}'
output_list.append(mp_list[x])
if len(me_ind[i]) > 0:
for x in me_ind[i]:
me_list[x][1] = quant_list[i][1]
me_list[x][5] = annot_index
annot_index += 1
me_list[x][7] = '{"HasQuantity": "' + str(quant_list[i][5]) + '"}'
output_list.append(me_list[x])
return output_list
# %%
def write_predictions_to_tsv(text_dataframe, y_pred, dirname, units):
tsv_columns = [
"docId",
"annotSet",
"annotType",
"startOffset",
"endOffset",
"annotId",
"text",
"other"
]
# save the results in appropriate format in a new dataframe
row_id = 0
output_list = []
prev_file = ""
global annot_set_index
annot_set_index = 1
global annot_index
annot_index = 1
for i, text_row in text_dataframe.iterrows():
quant_list = []
mp_list = []
me_list = []
quantspan = []
mpspan = []
mespan = []
file_name = text_row['document_name']
if i > 0 and file_name != prev_file:
result_df = pd.DataFrame(output_list, columns=tsv_columns)
result_df.to_csv(dirname + prev_file + '.tsv', sep="\t", index=False)
output_list = []
annot_set_index = 1
annot_index = 1
pred_pos = y_pred[row_id]
row_id += 1
sentence = text_row['sentence']
word_ind = 0
while word_ind < len(pred_pos) :
if pred_pos[word_ind] != 'QUANT':
word_ind += 1
continue
start_ind = word_ind
while word_ind < len(pred_pos) and pred_pos[word_ind] == 'QUANT':
word_ind += 1
end_ind = word_ind - 1
quant_text = sentence.doc[sentence[start_ind].i : sentence[end_ind].i + 1]
unit = 'default'
flag = False
for i in range(start_ind, end_ind + 1):
if sentence[i].text in units:
unit = sentence[i].text
flag = True
if not flag:
unit = sentence[end_ind].text
quant_list.append([file_name, annot_set_index, tags['QUANT'], sentence[start_ind].idx, sentence[end_ind].idx + len(sentence[end_ind]), 0, quant_text, '{"unit": "' + unit + '"}'])
quantspan.append([sentence[start_ind].i, sentence[end_ind].i])
annot_set_index += 1
word_ind = 0
while word_ind < len(pred_pos) :
if pred_pos[word_ind] != 'MP':
word_ind += 1
continue
start_ind = word_ind
while word_ind < len(pred_pos) and pred_pos[word_ind] == 'MP':
word_ind += 1
end_ind = word_ind - 1
mp_text = sentence.doc[sentence[start_ind].i : sentence[end_ind].i + 1]
mp_list.append([file_name, 0, tags['MP'], sentence[start_ind].idx, sentence[end_ind].idx + len(sentence[end_ind]), 0, mp_text, '{"HasQuantity": "0"}'])
mpspan.append([sentence[start_ind].i, sentence[end_ind].i])
word_ind = 0
while word_ind < len(pred_pos) :
if pred_pos[word_ind] != 'ME':
word_ind += 1
continue
start_ind = word_ind
while word_ind < len(pred_pos) and pred_pos[word_ind] == 'ME':
word_ind += 1
end_ind = word_ind - 1
me_text = sentence.doc[sentence[start_ind].i : sentence[end_ind].i + 1]
me_list.append([file_name, 0, tags['ME'], sentence[start_ind].idx, sentence[end_ind].idx + len(sentence[end_ind]), 0, me_text, '{"HasQuantity": "0"}'])
mespan.append([sentence[start_ind].i, sentence[end_ind].i])
output_list = merge_tags(output_list, quant_list, mp_list, me_list, quantspan, mpspan, mespan)
# output_list = tmp_ret[0]
# annot_set_index = tmp_ret[1]
# annot_index = tmp_ret[2]
# print("Here", annot_set_index)
prev_file = file_name
result_df = pd.DataFrame(output_list, columns=tsv_columns)
result_df.to_csv(dirname + prev_file + '.tsv', sep="\t", index=False)
return
# %%
def modify_tsv(dirname):
_, _, filenames = next(os.walk(dirname))
tsv_columns = [
"docId",
"annotSet",
"annotType",
"startOffset",
"endOffset",
"annotId",
"text",
"other"
]
for file in filenames:
df = pd.read_csv(dirname + file, sep="\t", encoding="utf-8")
data = df.values.tolist()
prop_id = '0'
for i in range(len(data)):
item = data[i]
if item[2] == 'Quantity':
prop_id = '0'
elif item[2] == 'MeasuredProperty':
prop_id = str(item[5])
elif item[2] == 'MeasuredEntity':
if prop_id != '0':
data[i][7] = '{"HasProperty": "' + prop_id + '"}'
result_df = pd.DataFrame(data, columns=tsv_columns)
result_df.to_csv(dirname + file, sep="\t", index=False)
# %%
def print_transitions(trans_features):
for (label_from, label_to), weight in trans_features:
print("%-6s -> %-7s %0.6f" % (label_from, label_to, weight))
# %%
def print_state_features(state_features):
for (attr, label), weight in state_features:
print("%0.6f %-8s %s" % (weight, label, attr))
# %%
units_list = get_units()
train_text_dataframe = raw_text_to_df(train_raw_files)
# train_text_dataframe.to_csv("./CSV/train_text_dataframe.csv")
each_file_df = []
for tsv_file in train_tsv_files:
each_file_df.append(pd.read_csv(tsv_file, sep="\t", header=0))
train_tsv_dataframe = pd.concat(each_file_df)
# train_tsv_dataframe.to_csv("./CSV/train_tsv_dataframe.csv")
test_text_dataframe = raw_text_to_df(test_raw_files)
# test_text_dataframe.to_csv("./CSV/test_text_dataframe.csv")
each_file_test = []
for tsv_file in test_tsv_files:
each_file_test.append(pd.read_csv(tsv_file, sep="\t", header=0))
test_tsv_dataframe = pd.concat(each_file_test)
# test_tsv_dataframe.to_csv("./CSV/test_tsv_dataframe.csv")
# %%
X_train = []
for _, row in train_text_dataframe.iterrows() :
features = features_sentence(row['sentence'], row['entities'], row['np'])
X_train.append(features)
y_train = get_text_labels(train_text_dataframe, train_tsv_dataframe)
# %%
X_test = []
for _, row in test_text_dataframe.iterrows() :
features = features_sentence(row['sentence'], row['entities'], row['np'])
X_test.append(features)
y_test = get_text_labels(test_text_dataframe, test_tsv_dataframe)
# %%
X_train = X_train + X_test
y_train = y_train + y_test
# %%
eval_text_dataframe = raw_text_to_df(eval_raw_files)
X_eval = []
for _, row in eval_text_dataframe.iterrows() :
features = features_sentence(row['sentence'], row['entities'], row['np'])
X_eval.append(features)
# %%
crf = sklearn_crfsuite.CRF(
algorithm='lbfgs',
c1=0.1,
c2=0.1,
max_iterations=300,
all_possible_transitions=True
)
crf.fit(X_train, y_train)
# %%
labels = list(crf.classes_)
labels.remove('O')
# %%
y_pred = crf.predict(X_eval)
sorted_labels = sorted(
labels,
key=lambda name: (name[1:], name[0])
)
# %%
# if not os.path.exists('results_task3'):
# os.makedirs('results_task3')
# write_predictions_to_tsv(eval_text_dataframe, y_pred, 'results_task3/', units_list)
# modify_tsv('results_task3/')
# %%
crf = sklearn_crfsuite.CRF(
algorithm='lbfgs',
max_iterations=150,
all_possible_transitions=True
)
params_space = {
'c1': scipy.stats.expon(scale=2),
'c2': scipy.stats.expon(scale=2),
}
f1_scorer = make_scorer(metrics.flat_f1_score,
average='weighted', labels=labels)
rs = RandomizedSearchCV(crf, params_space,
cv=3,
verbose=1,
n_jobs=-1,
n_iter=50,
scoring=f1_scorer)
rs.fit(X_train, y_train)
# %%
crf = rs.best_estimator_
y_pred = crf.predict(X_eval)
# print(metrics.flat_classification_report(
# y_test, y_pred, labels=sorted_labels, digits=3
# ))
# %%
if not os.path.exists('results_task3'):
os.makedirs('results_task3')
write_predictions_to_tsv(eval_text_dataframe, y_pred, 'results_task3/', units_list)
modify_tsv('results_task3/')
# %%
print('best params:', rs.best_params_)
print('best CV score:', rs.best_score_)
print('model size: {:0.2f}M'.format(rs.best_estimator_.size_ / 1000000))
# %%
print("Top likely transitions:")
print_transitions(Counter(crf.transition_features_).most_common(4))
# %%
print("\nTop unlikely transitions:")
print_transitions(Counter(crf.transition_features_).most_common()[-4:])
# %%
print("Top positive:")
print_state_features(Counter(crf.state_features_).most_common(20))
# %%
print("\nTop negative:")
print_state_features(Counter(crf.state_features_).most_common()[-10:]) |
# Ask the user to enter a series of numbers separated by spaces;
# Count how many even and odd numbers the user entered;
# Print a message with the counts of even and odd numbers.
# Hint: to check whether a number is even or odd, look at its remainder when divided by 2.
# An even number leaves a remainder of 0, an odd number leaves 1. The "%" operator returns the remainder:
# 8 % 2 returns 0, while 9 % 2 returns 1.
print('Enter a series of numbers separated by spaces:')
input_str = input()
list_nums = input_str.split(' ')
even_nums = 0
odd_nums = 0
for item in list_nums:
if int(item) % 2:
odd_nums += 1
else:
even_nums +=1
print('Even numbers: {}, Odd numbers: {}'.format(even_nums, odd_nums)) |
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import numpy as np
from akg.utils import kernel_exec as utils
from test_op import conv_mansch
from test_run.conv_utils import conv_forward_naive
from test_run.conv_utils import random_gaussian
from akg.utils.result_analysis import result_compare
def conv_run_mansch(FMap_shape, Filter_shape, Pad, Stride, Dilation=None, use_bias=False, bypass_L1=False,
dump_data=False, Tile=None, attrs=None):
conv_dtype = 'float16'
fp32_mad = True
if attrs is not None and 'fp32mmad' in attrs:
fp32_mad = attrs['fp32mmad']
mod = conv_mansch.test_CCE_Conv(FMap_shape, Filter_shape, Pad, Stride,
Tile[0], Tile[1], Tile[2], Tile[3], Tile[4],
use_bias=use_bias, fp32_mad=fp32_mad, kernel_name="conv_mansch")
source_code = mod.imported_modules[0].get_source()
utils.create_code("conv_mansch", ".", source_code)
A, B, bias_data, expect = gen_data(FMap_shape, Filter_shape, Pad, Stride, Dilation, use_bias)
expect = expect.reshape((expect.shape[0], expect.shape[1], expect.shape[2]*expect.shape[3],expect.shape[4])) # output on conv2d is in 4d format
out_data = 60000.0*np.ones(expect.shape).astype(conv_dtype)
if use_bias:
out_data = utils.mod_launch(mod, [A.astype(conv_dtype), B.astype(conv_dtype), bias_data.astype(conv_dtype),
out_data.astype(conv_dtype)], expect=expect)
else:
out_data = utils.mod_launch(mod, [A.astype(conv_dtype), B.astype(conv_dtype), out_data.astype(conv_dtype)],
expect=expect)
np.set_printoptions(threshold=sys.maxsize)
assert_res = True
try:
assert_res = result_compare(out_data, expect, r_tol=5e-3)
np.testing.assert_allclose(out_data, expect, rtol=5e-02, atol=1e-2, equal_nan=True, verbose=True)
print("conv_test_Succeed")
except BaseException as e:
data_len = expect.size
np.savetxt("actual.txt", out_data.reshape(data_len))
np.savetxt("expect.txt", expect.reshape(data_len))
print(str(e))
return (A, B), out_data, expect, assert_res
def gen_data(fm_shape, w_shape, pad, stride, dilation, bias):
if isinstance(stride, int):
stride = [stride] * 2
elif isinstance(stride, (list, tuple)) and len(stride) == 1:
stride = list(stride) * 2
elif isinstance(stride, (list, tuple)) and len(stride) == 2:
pass
else:
raise RuntimeError('stride parameter illegal!')
if isinstance(pad, int):
pad = [pad] * 4
elif isinstance(pad, (list, tuple)) and len(pad) == 1:
pad = list(pad) * 4
elif isinstance(pad, (list, tuple)) and len(pad) == 4:
pass
else:
raise RuntimeError('pad parameter illegal!')
if isinstance(dilation, int):
dilation = [dilation] * 2
elif isinstance(dilation, (list, tuple)) and len(dilation) == 1:
dilation = list(dilation) * 2
elif isinstance(dilation, (list, tuple)) and len(dilation) == 2:
pass
else:
raise RuntimeError('dilation parameter illegal!')
S_h,S_w = stride
P_top,P_bottom,P_left,P_right = pad
D_h,D_w = dilation
IN, IC, IH, IW = fm_shape
C0 = 16
IC = ((IC+C0-1)//C0)*C0
WN, WC, WH, WW = w_shape
WN = ((WN+C0-1)//C0)*C0
WC = ((WC+C0-1)//C0)*C0
ON = IN
OC = WN
WHD = (WH - 1) * D_h + 1
WWD = (WW - 1) * D_w + 1
OH = (IH + P_top+P_bottom - WHD)//S_h + 1
OW = (IW + P_left+P_right - WWD)//S_w + 1
x = random_gaussian((IN, IC, IH, IW), miu=1, sigma=0.1).astype(np.float16)
w = random_gaussian((WN, WC, WH, WW), miu=0.5, sigma=0.01).astype(np.float16)
if bias:
b = np.random.rand(WN).astype(np.float16, copy=False)
else:
b = (np.array(np.zeros(WN))).astype(np.float16, copy=False)
conv_param = {'stride': stride, 'pad': pad, 'dilation': dilation}
out = conv_forward_naive(x, w, b, conv_param)
''' transpose to 5D - NC1HWC0 '''
feature = x.reshape(IN, IC//C0, C0, IH, IW).transpose(0, 1, 3, 4, 2).copy()
''' transpose to 5D - C1HWNC0 '''
filter = w.reshape(WN, WC//C0, C0, WH, WW).transpose(1, 3, 4, 0, 2).copy()
filter = filter.reshape(WC//C0*WH*WW, WN//16, 16,C0)
bb = b.reshape(1,WN//16,1,1,16)
''' transpose to 5D - NC1HWC0 '''
output = out.reshape(ON, OC//C0, C0, OH, OW).transpose(0, 1, 3, 4, 2).copy()
return feature, filter, bb, output
|
from boltons import dictutils
alcoholic_drinks = [
("distilled", "Gin"),
("undistilled", "Beer"),
("distilled", "Brandy"),
("distilled", "Whiskey"),
("undistilled", "Wine"),
("distilled", "Rum"),
("undistilled", "Hard Cider"),
("distilled", "Rum"),
("undistilled", "Hard Cider"),
("distilled", "Tequila"),
("undistilled", "Mead"),
("distilled", "Vodka"),
("undistilled", "Sake"),
("distilled", "Absinthe"),
("distilled", "Everclear"),
]
omd = dictutils.OrderedMultiDict(alcoholic_drinks)
print(omd.todict(multi=True))
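# Expected shape (a sketch): keys in first-insertion order, each mapped to ALL
# of its values with duplicates preserved, e.g.
# {'distilled': ['Gin', 'Brandy', 'Whiskey', ...], 'undistilled': ['Beer', 'Wine', ...]}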
|
"""
This type stub file was generated by pyright.
"""
from .vtkAbstractWidget import vtkAbstractWidget
class vtkPolyLineWidget(vtkAbstractWidget):
"""
vtkPolyLineWidget - widget for vtkPolyLineRepresentation.
Superclass: vtkAbstractWidget
vtkPolyLineWidget is the vtkAbstractWidget subclass for
vtkPolyLineRepresentation which manages the interactions with
vtkPolyLineRepresentation. This is based on vtkPolyLineWidget.
This widget allows the creation of a polyline interactively by adding
or removing points based on mouse position and a modifier key.
- ctrl+click inserts a new point on the selected line
- shift+click deletes the selected point
- alt+click adds a new point anywhere depending on last selected
point. If the first point is selected, the new point is added at
the beginning, else it is added at the end.
@sa
vtkPolyLineRepresentation, vtkPolyLineWidget
"""
def CreateDefaultRepresentation(self):
"""
V.CreateDefaultRepresentation()
C++: void CreateDefaultRepresentation() override;
Create the default widget representation if one is not set. By
default, this is an instance of the vtkPolyLineRepresentation
class.
"""
...
def GetNumberOfGenerationsFromBase(self, string):
"""
V.GetNumberOfGenerationsFromBase(string) -> int
C++: vtkIdType GetNumberOfGenerationsFromBase(const char *type)
override;
Standard macros implementing standard VTK methods.
"""
...
def GetNumberOfGenerationsFromBaseType(self, string):
"""
V.GetNumberOfGenerationsFromBaseType(string) -> int
C++: static vtkIdType GetNumberOfGenerationsFromBaseType(
const char *type)
Standard macros implementing standard VTK methods.
"""
...
def IsA(self, string):
"""
V.IsA(string) -> int
C++: vtkTypeBool IsA(const char *type) override;
Standard macros implementing standard VTK methods.
"""
...
def IsTypeOf(self, string):
"""
V.IsTypeOf(string) -> int
C++: static vtkTypeBool IsTypeOf(const char *type)
Standard macros implementing standard VTK methods.
"""
...
def NewInstance(self):
"""
V.NewInstance() -> vtkPolyLineWidget
C++: vtkPolyLineWidget *NewInstance()
Standard macros implementing standard VTK methods.
"""
...
def SafeDownCast(self, vtkObjectBase):
"""
V.SafeDownCast(vtkObjectBase) -> vtkPolyLineWidget
C++: static vtkPolyLineWidget *SafeDownCast(vtkObjectBase *o)
Standard macros implementing standard VTK methods.
"""
...
def SetEnabled(self, p_int):
"""
V.SetEnabled(int)
C++: void SetEnabled(int enabling) override;
Override superclasses' SetEnabled() method because the line
widget must enable its internal handle widgets.
"""
...
def SetRepresentation(self, vtkPolyLineRepresentation):
"""
V.SetRepresentation(vtkPolyLineRepresentation)
C++: void SetRepresentation(vtkPolyLineRepresentation *r)
Specify an instance of vtkWidgetRepresentation used to represent
this widget in the scene. Note that the representation is a
subclass of vtkProp so it can be added to the renderer
independent of the widget.
"""
...
def __delattr__(self, *args, **kwargs):
""" Implement delattr(self, name). """
...
def __getattribute__(self, *args, **kwargs):
""" Return getattr(self, name). """
...
def __init__(self, *args, **kwargs) -> None:
...
@staticmethod
def __new__(*args, **kwargs):
""" Create and return a new object. See help(type) for accurate signature. """
...
def __repr__(self, *args, **kwargs):
""" Return repr(self). """
...
def __setattr__(self, *args, **kwargs):
""" Implement setattr(self, name, value). """
...
def __str__(self, *args, **kwargs) -> str:
""" Return str(self). """
...
__this__ = ...
__dict__ = ...
__vtkname__ = ...
|
import logging
import re
import networkx as nx
import matplotlib.pyplot as plt
import os
import pathlib
import copy
import pygraphviz
import math
from datetime import datetime
def add_root(G, maxnode):
"""
break the edge between the max node and next largest node connected to max node
insert a rootnode between the nodes
"""
rootnode = maxnode + 1
a = maxnode
b = maxnode - 1
while not G.has_edge(a, b):
b -= 1
G.remove_edge(a, b)
G.add_edge(rootnode, a)
G.add_edge(rootnode, b)
nx.set_node_attributes(G, {rootnode: 0}, name="leaf")
nx.set_node_attributes(G, {rootnode: ""}, name="char")
nx.set_node_attributes(G, {rootnode: 0}, name="tag")
def parsimony(G, leafnodes, maxnode):
"""
compute the min parsimony score for graph
"""
g_ref = copy.deepcopy(G)
graph_list = []
seq_sample = G.nodes[0]["char"]
score_sum = 0
# make a separate tree for each nucleotide position
for pos in range(len(seq_sample)):
GG = copy.deepcopy(G)
# update leaf node values
for node in leafnodes:
newchar = GG.nodes[node]["char"][pos]
nx.set_node_attributes(GG, {node: 1}, name="leaf")
nx.set_node_attributes(GG, {node: newchar}, name="char")
nx.set_node_attributes(GG, {node: 1}, name="tag")
dct = {}
for sym in ["A", "C", "G", "T"]:
dct[sym] = 1 if newchar != sym else 0
nx.set_node_attributes(GG, {node: dct}, name="score")
# run small parsimony on the tree
small_parsimony(GG)
# get the root symbol and score
root_char, root_score = min(GG.nodes[maxnode]["score"].items(), key=lambda x: x[1])
GG.nodes[maxnode]["char"] = root_char
# fill all internal node symbols
char_filler(GG, len(leafnodes), maxnode)
# store graph and the score
graph_list.append(GG)
score_sum += root_score
# make a single tree with entire sequence at each node
combine_g = combine_graphs(graph_list, maxnode)
return score_sum, combine_g
def combine_graphs(graph_list, maxnode):
"""
combine trees of single nucleotide nodes into a tree of sequence nodes
"""
GG = graph_list[0]
# combine chars in each tree to make a tree with sequences
for tree in graph_list[1:]:
for node in range(maxnode + 1):
GG.nodes[node]["char"] += tree.nodes[node]["char"]
return GG
def small_parsimony(G):
"""
complete a partial small parsimony graph that has only leaf sequences filled in
"""
ripes = ripe_nodes(G)
while ripes:
v = ripes[0]
G.nodes[v]["tag"] = 1
nx.set_node_attributes(G, {v: score_builder(G, v)}, name="score")
ripes = ripe_nodes(G)
def score_builder(G, v):
"""
build score map for internal nodes
"""
v_score = {}
for v_sym in ["A", "C", "G", "T"]:
v_sco = 0
for child in G.successors(v):
min_sco = math.inf
for child_sym in ["A", "C", "G", "T"]:
alpha = 1 if v_sym != child_sym else 0
sco = G.nodes[child]["score"][child_sym] + alpha
min_sco = min(min_sco, sco)
v_sco += min_sco
v_score[v_sym] = v_sco
return v_score
def ripe_nodes(G):
"""
get all the ripe nodes (nodes whose children all have tag == 1)
"""
ripes = []
for n in G.nodes:
if not G.nodes[n]["leaf"] and G.nodes[n]["tag"] == 0:
succ = list(G.successors(n))
if sum([G.nodes[s]["tag"] for s in succ]) == len(succ):
ripes.append(n)
return ripes
def char_filler(G, n, start_node):
"""
adds a char to intermediate nodes
for each node, if char=parent_sym has lowest score, use parent_sym for the node's char
else use the char with the best score
"""
explore = list(G.successors(start_node))
while explore:
new_explore = []
for node in explore:
parent = list(G.predecessors(node))[0]
parent_sym = G.nodes[parent]["char"]
scores = G.nodes[node]["score"]
min_sco = min(scores.values())
if scores[parent_sym] == min_sco:
G.nodes[node]["char"] = parent_sym
else:
best_char, best_score = min(G.nodes[node]["score"].items(), key=lambda x: x[1])
G.nodes[node]["char"] = best_char
new_explore += G.successors(node)
explore = new_explore
def edge_maker(G):
"""
creates the edges in the format for submit, with each line as an item in the list
"""
string_edges = []
for edge in G.edges:
a, b = edge[0], edge[1]
dist = str(ham_dist(G.nodes[a]["char"], G.nodes[b]["char"]))
string_edges.append(G.nodes[a]["char"] + "->" + G.nodes[b]["char"] + ":" + dist)
string_edges.append(G.nodes[b]["char"] + "->" + G.nodes[a]["char"] + ":" + dist)
return sorted(string_edges)
def ham_dist(p, q):
"""
hamming distance is the number of mismatches between equal length sequences p and q
returns integer
"""
count = 0
for i in range(len(p)):
if p[i] != q[i]:
count += 1
return count
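# For example (hypothetical sequences): ham_dist("ACGT", "ACCT") compares the
# strings position by position and finds a single mismatch (G vs C) at index 2,
# so it returns 1.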
def draw_graph(G):
"""
draw graph G to file
"""
## draw graph boilerplate ##
# pos = nx.kamada_kawai_layout(G)
pos = nx.nx_agraph.graphviz_layout(G, prog="dot")
nx.draw(G, pos, with_labels=True)
pos_off = copy.deepcopy(pos)
for k in pos_off.keys():
pos_off[k] = (pos_off[k][0], pos_off[k][1] - 10)
# labels = nx.get_node_attributes(G, "char")
labels = nx.get_node_attributes(G, "score")
for key, val in labels.items():
labels[key] = str(list(val.values())) + "\n" + str(G.nodes[key]["char"])
node_labels = labels
nx.draw_networkx_labels(G, pos_off, labels=node_labels)
edge_labels = nx.get_edge_attributes(G, "dist")
nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels)
p = os.path.join(pathlib.Path(__file__).parent.resolve(), "logs", "graph%s.png" % (datetime.now().strftime("%Y-%m-%dT%H%M%S.%f")[:-3]))
plt.savefig(p)
plt.clf()
## end graph ##
def g_nearest_neighbors(G, e1, e2):
"""
make graphs of the nearest neighbors of graph G at edge e1-e2
"""
print("edge", e1, e2)
e1n = G.neighbors(e1)
e2n = G.neighbors(e2)
list1 = [e for e in list(e1n) if e not in [e1, e2]]
list2 = [e for e in list(e2n) if e not in [e1, e2]]
glist = []
for i, _ in enumerate(list1):
for j, _ in enumerate(list2):
GG = copy.deepcopy(G)
GG.remove_edge(e1, list1[i])
GG.add_edge(e2, list1[i])
GG.remove_edge(e2, list2[j])
GG.add_edge(e1, list2[j])
glist.append(GG)
return glist
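# A hedged sketch of one interchange (node numbers invented): for an internal
# edge 4-5 with extra neighbors [0, 1] on node 4 and [2, 3] on node 5, the
# i=0, j=0 swap detaches 0 from node 4 and reattaches it to node 5, while 2
# moves from node 5 to node 4: one nearest-neighbor interchange of the tree.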
def get_internal_edges(G, leafnodes):
"""
get all internal edges
"""
all_edges = G.edges
internals = []
for edge in all_edges:
if not any(leaf in edge for leaf in leafnodes):
internals.append(edge)
return internals
if __name__ == "__main__":
# open text file
with open("dataset_10336_8 (5).txt") as f:
data = f.read().splitlines()
n = int(data[0])
d = data[1:]
# clean up data
d = sorted(d)
d = d[:-n]
internals = []
leafs = []
for line in d:
has_letters = re.search("[a-zA-Z]", line)
if has_letters:
leafs.append(line)
continue
a, b = [int(c) for c in line.split("->")]
if a > b:
internals.append(line)
leafs.sort()
internals.sort()
# print a sample leaf sequence from the file
seq_sample = leafs[1].split("->")[1]
print("seq_sample", seq_sample)
maxnode = int(internals[-1].split("->")[0]) # highest number node
# build the starting graph
G = nx.DiGraph()
# for leaf nodes, add to graph node number, edge to parent, char
leafnodes = []
for node, line in enumerate(leafs):
leafnodes.append(node)
parent, sequence = line.split("->")
parent = int(parent)
G.add_node(node)
char = sequence
nx.set_node_attributes(G, {node: 1}, name="leaf")
nx.set_node_attributes(G, {node: char}, name="char")
nx.set_node_attributes(G, {node: 1}, name="tag")
dct = {}
for sym in ["A", "C", "G", "T"]:
dct[sym] = 1 if char != sym else 0
nx.set_node_attributes(G, {node: dct}, name="score")
G.add_edge(parent, node)
# for non-leaf, add nodes and edges to graph, add blank char
for line in internals:
a, b = line.split("->")
a, b = int(a), int(b)
G.add_edge(a, b)
nx.set_node_attributes(G, {a: 0, b: 0}, name="leaf")
nx.set_node_attributes(G, {a: "", b: ""}, name="char")
nx.set_node_attributes(G, {a: 0, b: 0}, name="tag")
# loop through graphs
best_score = 2 ** 32
this_score = 2 ** 32 - 1
edges_collection = []
epoch = 0
while best_score > this_score:
epoch += 1
print("epoch", epoch)
best_score = this_score
# create all nearest neighbor interchanges
graph_neighbors = [G]
for edge in get_internal_edges(G, leafnodes):
e1, e2 = edge
graph_neighbors += g_nearest_neighbors(G, e1, e2)
# modify graphs to add root node
for G in graph_neighbors:
add_root(G, maxnode)
maxnode += 1 # maxnode is root node
ith_g = -1
min_gscore = math.inf
G_save = None
# run through each graph and save best score
for i, G in enumerate(graph_neighbors):
g_score, combine_g = parsimony(G, leafnodes, maxnode)
if g_score < min_gscore:
ith_g = i
G_save = combine_g
min_gscore = g_score
print("ith graph", i, "g_score", g_score)
print("best graph", ith_g, "min_gscore", min_gscore)
# remove extra node
a, b = G_save.successors(maxnode)
G_save.remove_edge(maxnode, a)
G_save.remove_edge(maxnode, b)
G_save.add_edge(max(a, b), min(a, b))
G_save.remove_node(maxnode)
maxnode -= 1
# add to results
draw_graph(G_save)
edges_collection.append([str(min_gscore)] + edge_maker(G_save))
# reset graph info
for node in G_save.nodes:
if not G_save.nodes[node]["leaf"]:
G_save.nodes[node]["tag"] = 0
G_save.nodes[node]["char"] = 0
G_save.nodes[node]["score"] = {}
G = G_save
this_score = min_gscore
## write out result file
with open("dataset_10336_8out.txt", "w") as f:
for string_edges in edges_collection[:-1]:
for line in string_edges:
f.write(line + "\n")
f.write("\n")
|
# Generated by Django 2.0.6 on 2018-10-25 20:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('moretvtime', '0008_auto_20181024_2328'),
]
operations = [
migrations.CreateModel(
name='TrackingSummary',
fields=[
],
options={
'verbose_name': 'TrackingSummary',
'verbose_name_plural': 'Tracking Summary',
'proxy': True,
'indexes': [],
},
bases=('moretvtime.tracking',),
),
migrations.AlterModelOptions(
name='tracking',
options={'ordering': ['-date']},
),
migrations.AddIndex(
model_name='tracking',
index=models.Index(fields=['sub_id', '-date'], name='moretvtime__sub_id_877007_idx'),
),
migrations.AddIndex(
model_name='tracking',
index=models.Index(fields=['ip', '-date'], name='moretvtime__ip_583604_idx'),
),
migrations.AddIndex(
model_name='tracking',
index=models.Index(fields=['sign', '-date'], name='moretvtime__sign_465d9d_idx'),
),
]
|
from flask import Flask, render_template, request, redirect
from flask import Blueprint
from models.booking import Booking
import repositories.booking_repository as booking_repository
import repositories.member_repository as member_repository
import repositories.yogaclass_repository as yogaclass_repository
bookings_blueprint = Blueprint("bookings", __name__)
@bookings_blueprint.route("/bookings/successful")
def booking_successful():
return render_template("bookings/successful.html", title="Booking Succesful!")
@bookings_blueprint.route("/bookings/unsuccessful")
def booking_unsuccessful():
return render_template("bookings/unsuccessful.html", title="Class is Full!")
@bookings_blueprint.route("/bookings/new", methods=['GET'])
def new_booking():
members = member_repository.select_all()
yogaclasses = yogaclass_repository.select_all()
return render_template("bookings/new.html", title="Make a Booking", all_members = members, all_yogaclasses = yogaclasses)
@bookings_blueprint.route("/bookings", methods=['POST'])
def create_booking():
member_id = request.form['member_id']
yogaclass_id = request.form['yogaclass_id']
member = member_repository.select(member_id)
yogaclass = yogaclass_repository.select(yogaclass_id)
members = yogaclass_repository.members(yogaclass)
booking = Booking(member, yogaclass)
yogaclass.check_if_capacity(members)
if yogaclass.available:
booking_repository.save(booking)
return redirect('/bookings/successful')
else:
return redirect('/bookings/unsuccessful')
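# A hedged example request for the POST route above (the field names come from
# the handler; host and port are assumptions for a local dev server):
#   curl -X POST -d "member_id=1&yogaclass_id=2" http://localhost:5000/bookings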
@bookings_blueprint.route("/bookings/<id>/delete", methods=['POST'])
def delete_booking(id):
yogaclass_id = booking_repository.select(id).yogaclass.id
booking_repository.delete(id)
return redirect(f'/yogaclasses/{yogaclass_id}')
|
import platform
import sys
import os
script_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(script_dir)
from robots import Wikipedia
from robots import Diretorios
from robots import Text_Robots
from robots import Docx
if __name__ == '__main__':
def clear():
so = platform.system()
if so == 'Windows':
os.system('cls')
elif so == 'Linux':
os.system('clear')
else:
print('Unsupported operating system')
sys.exit()
clear()
print('''
========================WikiText========================
Version: 1.4
Author: Raphael Nascimento
ID: Nask!
Notes: to exit, type 'exit' or select option 4.
''')
termo = input('Enter a term to search on Wikipedia: ')
if termo == 'exit' or termo == 'sair':
print('Exiting')
sys.exit()
print()
wiki = Wikipedia.Wikipedia(termo)
if wiki.searchs() is True:
possibleSearchs = wiki.search()
print('Choose an item to search for:')
for index, item in enumerate(possibleSearchs):  # iterate over the list to number the options
print(index + 1, item)  # print each item offset by one so the numbering does not start at 0
choice = input('>> ')
if choice == '!EXIT' or choice == '!SAIR':
print('FINISH')
sys.exit()
else:
termo = possibleSearchs[int(choice) - 1]
wiki.page(termo)
dire = Diretorios.start(termo)
content = wiki.content()
robot = Text_Robots.TextRobots()
robot.write(content, dire + wiki.title())
texto = robot.formatarTexto(dire + wiki.title(), wiki.references())
robot.resumir(texto, dire + wiki.title())
lista = robot.read(dire + wiki.title())
doc = Docx.Docx()
doc.docx(lista, dire, wiki.title())
robot.apagar(dire + wiki.title())
print('\nFINISH') |
#!/usr/bin/python
import os
import urllib2
import numpy as np
import theano
import theano.tensor as T
import pdb
#from ipdb import set_trace
def download_embeddings(embedding_name, target_file):
'''
Downloads a file over HTTP with a progress report.
Adapted from Stack Overflow:
http://stackoverflow.com/questions/22676/how-do-i-download-a-file-over-http
-using-python
'''
# Embedding download URLs
if embedding_name == 'senna_50':
# senna_50 embeddings
source_url = 'http://lxmls.it.pt/2015/wp-content/uploads/2015/senna_50'
else:
raise ValueError("I do not have embeddings %s for download"
% embedding_name)
target_file_name = os.path.basename(target_file)
u = urllib2.urlopen(source_url)
with open(target_file, 'wb') as f:
meta = u.info()
file_size = int(meta.getheaders("Content-Length")[0])
file_size_dl = 0
block_sz = 8192
print "Downloading: %s Bytes: %s" % (target_file_name, file_size)
while True:
text_buffer = u.read(block_sz)
if not text_buffer:
break
file_size_dl += len(text_buffer)
f.write(text_buffer)
status = r"%10d [%3.2f%%]" % (file_size_dl,
file_size_dl*100./file_size)
status = status + chr(8)*(len(status)+1)
print status,
print ""
def extract_embeddings(embedding_path, word_dict):
'''
Given embeddings in text form and a word dictionary construct embedding
matrix. Words with no embedding get initialized to random.
'''
with open(embedding_path) as fid:
for i, line in enumerate(fid.readlines()):
# Initialize
if i == 0:
N = len(line.split()[1:])
E = np.random.uniform(size=(N, len(word_dict)))
n = 0
word = line.split()[0].lower()
if word[0].upper() + word[1:] in word_dict:
idx = word_dict[word[0].upper() + word[1:]]
E[:, idx] = np.array(line.strip().split()[1:]).astype(float)
n += 1
elif word in word_dict:
idx = word_dict[word]
E[:, idx] = np.array(line.strip().split()[1:]).astype(float)
n += 1
print "\rGetting embeddings for the vocabulary %d/%d" % (n, len(word_dict)),
OOV_perc = (1-n*1./len(word_dict))*100
print "\n%2.1f%% OOV, missing embeddings set to random" % OOV_perc
return E
class NumpyRNN():
def __init__(self, W_e, n_hidd, n_tags, seed=None):
'''
W_e numpy.array Word embeddings of size (n_emb, n_words)
n_hidd int Size of the recurrent layer
n_tags int Total number of tags
seed int Seed to random initialization of parameters (default=None)
'''
# Dimension of the embeddings
n_emb = W_e.shape[0]
# MODEL PARAMETERS
np.random.seed(seed)
W_x = np.random.uniform(size=(n_hidd, n_emb)) # Input layer
W_h = np.random.uniform(size=(n_hidd, n_hidd)) # Recurrent layer
W_y = np.random.uniform(size=(n_tags, n_hidd)) # Output layer
# Class variables
self.n_hidd = n_hidd
self.param = [W_e, W_x, W_h, W_y]
#self.param_names = ['W_e', 'W_x', 'W_h', 'W_y']
self.activation_function = 'logistic' # 'tanh' 'relu' 'logistic'
def apply_activation(self, x, function_name):
'''
apply the activation function named by function_name elementwise to x
'''
if function_name == 'logistic':
z = 1 / (1 + np.exp(-x))
elif function_name == 'tanh':
z = np.tanh(x)
elif function_name == 'relu':
z = x
ind = np.where(z < 0.)
z[ind] = 0.
else:
raise NotImplementedError
return z
def derivate_activation(self, z, function_name):
'''
derivative of the activation function, written in terms of its output z
'''
if function_name == 'logistic':
dx = z * (1. - z)
elif function_name == 'tanh':
dx = (1. - z * z)
elif function_name == 'relu':
dx = (np.sign(z)+1)/2.
else:
raise NotImplementedError
#pdb.set_trace()
return dx
def soft_max(self, x, alpha=1.0):
'''
softmax of x with temperature alpha
'''
e = np.exp(x / alpha)
return e / np.sum(e)
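# A quick sanity check of soft_max (values invented): with x = [0, log(3)] and
# alpha = 1 the exponentials are [1, 3], so the output is [0.25, 0.75]; raising
# alpha flattens the distribution towards uniform.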
def forward(self, x, allOuts=False, outputs=[]):
'''
Forward pass
allOuts=True returns the intermediate activations needed to compute backpropagation
'''
# Get parameters in nice form
W_e, W_x, W_h, W_y = self.param
z1, h, y, p, p_y = {}, {}, {}, {}, {}
h[-1] = np.zeros(self.n_hidd)
loss = 0.
for t in xrange(len(x)):
z1[t] = W_e[:, x[t]].T
h[t] = self.apply_activation( W_x.dot(z1[t]) + W_h.dot(h[t-1]),
self.activation_function)
y[t] = W_y.dot(h[t])
ymax = max(y[t])
logsum = ymax + np.log(sum(np.exp(y[t]-ymax)))
p[t] = np.exp(y[t] - logsum)
p_y[t] = p[t] / np.sum(p[t])
# # Another way of computing p_y[t]
# p_y[t] = self.soft_max(y[t])
if outputs:
loss += -np.log(p_y[t][outputs[t]]) # Cross-entropy loss.
loss = loss/len(x) # Normalize to get the mean
if allOuts:
return loss, p_y, p, y, h, z1, x
else:
return p_y
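# Note on the loss computation above: subtracting ymax before exponentiating is
# the standard log-sum-exp trick, so log(sum(exp(y))) is evaluated without
# overflow; p[t] = exp(y[t] - logsum) is then already a normalized softmax,
# which is why p_y[t] only differs from p[t] by numerical round-off.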
def grads(self, x, outputs):
'''
Compute gradients with the back-propagation method
inputs:
x: vector with the (embedding) indices of the words of a sentence
outputs: vector with the indices of the tags for each word of the sentence
outputs:
nabla_params: list with the parameter gradients
'''
# Get parameters
W_e, W_x, W_h, W_y = self.param
loss, p_y, p, y, h, z1, x = self.forward(x, allOuts=True, outputs=outputs)
# Initialize gradients with zero entrances
nabla_W_e = np.zeros(W_e.shape)
nabla_W_x = np.zeros(W_x.shape)
nabla_W_h = np.zeros(W_h.shape)
nabla_W_y = np.zeros(W_y.shape)
# backward pass, with gradient computation
dh_next = np.zeros_like(h[0])
for t in reversed(xrange(len(x))):
dy = np.copy(p[t])
dy[outputs[t]] -= 1. # backprop into y (softmax grad).
nabla_W_y += dy[:,None].dot(h[t][None,:])
dh = W_y.T.dot(dy) + dh_next # backprop into h.
# backprop through nonlinearity.
dh_raw = self.derivate_activation(h[t], self.activation_function) * dh
nabla_W_h += dh_raw[:,None].dot(h[t-1][None,:])
nabla_W_x += dh_raw[:,None].dot(z1[t][None,:])
d_z1 = W_x.T.dot(dh_raw)
nabla_W_e[:,x[t]] += d_z1
dh_next = W_h.T.dot(dh_raw)
# Normalize to be in agreement with the loss
nabla_params = [nabla_W_e/len(x), nabla_W_x/len(x), nabla_W_h/len(x), nabla_W_y/len(x)]
return nabla_params
def save(self, model_path):
'''
Save model
'''
pass
# par = self.params + self.actvfunc
# with open(model_path, 'wb') as fid:
# cPickle.dump(par, fid, cPickle.HIGHEST_PROTOCOL)
def load(self, model_path):
'''
Load model
'''
pass
# with open(model_path) as fid:
# par = cPickle.load(fid, cPickle.HIGHEST_PROTOCOL)
# params = par[:len(par)/2]
# actvfunc = par[len(par)/2:]
# return params, actvfunc
class RNN():
def __init__(self, W_e, n_hidd, n_tags, seed=None):
'''
W_e numpy.array Word embeddings of size (n_emb, n_words)
n_hidd int Size of the recurrent layer
n_tags int Total number of tags
'''
# Dimension of the embeddings
n_emb = W_e.shape[0]
# MODEL PARAMETERS
np.random.seed(seed)
W_x = np.random.uniform(size=(n_hidd, n_emb)) # Input layer
W_h = np.random.uniform(size=(n_hidd, n_hidd)) # Recurrent layer
W_y = np.random.uniform(size=(n_tags, n_hidd)) # Output layer
# Cast to theano GPU-compatible type
W_e = W_e.astype(theano.config.floatX)
W_x = W_x.astype(theano.config.floatX)
W_h = W_h.astype(theano.config.floatX)
W_y = W_y.astype(theano.config.floatX)
# Store as shared parameters
_W_e = theano.shared(W_e, borrow=True)
_W_x = theano.shared(W_x, borrow=True)
_W_h = theano.shared(W_h, borrow=True)
_W_y = theano.shared(W_y, borrow=True)
# Class variables
self.n_hidd = n_hidd
self.param = [_W_e, _W_x, _W_h, _W_y]
def _forward(self, _x, _h0=None):
# Default initial hidden state is always set to zero
if _h0 is None:
h0 = np.zeros((1, self.n_hidd)).astype(theano.config.floatX)
_h0 = theano.shared(h0, borrow=True)
# COMPUTATION GRAPH
# Get parameters in nice form
_W_e, _W_x, _W_h, _W_y = self.param
# NOTE: Since _x contains the indices rather than full one-hot vectors,
# use _W_e[:, _x].T instead of T.dot(_x, _W_e.T)
###########################
# Solution to Exercise 6.3
# Embedding layer
_z1 = _W_e[:, _x].T
# This defines what to do at each step
def rnn_step(_x_tm1, _h_tm1, _W_x, _W_h):
return T.nnet.sigmoid(T.dot(_x_tm1, _W_x.T) + T.dot(_h_tm1, _W_h.T))
# This creates the variable-length computation graph (unrolls the RNN)
_h, updates = theano.scan(fn=rnn_step,
sequences=_z1,
outputs_info=dict(initial=_h0),
non_sequences=[_W_x ,_W_h])
# Remove intermediate empty dimension
_z2 = _h[:,0,:]
# End of solution to Exercise 6.3
###########################
# Output layer
_p_y = T.nnet.softmax(T.dot(_z2, _W_y.T))
return _p_y
class LSTM():
def __init__(self, W_e, n_hidd, n_tags):
# Dimension of the embeddings
n_emb = W_e.shape[0]
# MODEL PARAMETERS
W_x = np.random.uniform(size=(4*n_hidd, n_emb)) # RNN Input layer
W_h = np.random.uniform(size=(4*n_hidd, n_hidd)) # RNN recurrent var
W_c = np.random.uniform(size=(3*n_hidd, n_hidd)) # Second recurrent var
W_y = np.random.uniform(size=(n_tags, n_hidd)) # Output layer
# Cast to theano GPU-compatible type
W_e = W_e.astype(theano.config.floatX)
W_x = W_x.astype(theano.config.floatX)
W_h = W_h.astype(theano.config.floatX)
W_c = W_c.astype(theano.config.floatX)
W_y = W_y.astype(theano.config.floatX)
# Store as shared parameters
_W_e = theano.shared(W_e, borrow=True)
_W_x = theano.shared(W_x, borrow=True)
_W_h = theano.shared(W_h, borrow=True)
_W_c = theano.shared(W_c, borrow=True)
_W_y = theano.shared(W_y, borrow=True)
# Class variables
self.n_hidd = n_hidd
self.param = [_W_e, _W_x, _W_h, _W_c, _W_y]
def _forward(self, _x, _h0=None, _c0=None):
# Default initial hidden state is always set to zero
if _h0 is None:
h0 = np.zeros((1, self.n_hidd)).astype(theano.config.floatX)
_h0 = theano.shared(h0, borrow=True)
if _c0 is None:
c0 = np.zeros((1, self.n_hidd)).astype(theano.config.floatX)
_c0 = theano.shared(c0, borrow=True)
# COMPUTATION GRAPH
# Get parameters in nice form
_W_e, _W_x, _W_h, _W_c, _W_y = self.param
H = self.n_hidd
# Embedding layer
_z1 = _W_e[:, _x].T
# Per loop operation
def _step(_x_tm1, _h_tm1, _c_tm1, _W_x, _W_h, _W_c):
# LINEAR TRANSFORMS
# Note that all transformations per variable are stacked for
# efficiency; each individual variable is then selected below using
# slices of size H
_z_x = T.dot(_x_tm1, _W_x.T)
_z_h = T.dot(_h_tm1, _W_h.T)
_z_c = T.dot(_c_tm1, _W_c.T)
# GATES
# Note the subtlety: _x_tm1 and hence _z_x are flat (1-D), while
# _h_tm1 and _c_tm1 have shape (1, H), hence the differing slice syntax
_i_t = T.nnet.sigmoid(_z_x[:H] +_z_h[:, :H] +_z_c[:, :H])
_f_t = T.nnet.sigmoid(_z_x[H:2*H] +_z_h[:, H:2*H] +_z_c[:, H:2*H])
_o_t = T.nnet.sigmoid(_z_x[3*H:4*H] +_z_h[:, 3*H:4*H] +_z_c[:, 2*H:3*H])
# HIDDENS
_c_t = _f_t*_c_tm1 + _i_t*T.tanh(_z_x[2*H:3*H] +_z_h[:, 2*H:3*H])
_h_t = _o_t*T.tanh(_c_t)
return _h_t, _c_t
# Unroll the loop
_h, updates = theano.scan(_step,
sequences=_z1,
outputs_info=[_h0, _c0],
non_sequences=[_W_x, _W_h, _W_c])
# Keep only the hidden state (first scan output), remove intermediate empty dimension
_z2 = _h[0][:, 0, :]
# Output layer
_p_y = T.nnet.softmax(T.dot(_z2, _W_y.T))
return _p_y
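# A hedged usage sketch (not part of the original file): compiling the RNN
# forward pass into a callable. `emb` is assumed to be an (n_emb, n_words)
# numpy array of embeddings, e.g. the matrix returned by extract_embeddings().
#
#   _x = T.ivector('x')                       # word indices for one sentence
#   rnn = RNN(emb, n_hidd=20, n_tags=12)
#   forward = theano.function([_x], rnn._forward(_x))
#   p_y = forward(np.array([0, 3, 5], dtype='int32'))  # shape (3, n_tags)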
|
import numpy as np
import torch
from torch import nn
from torch.autograd import Variable
from gym.spaces import Dict
from rllab.misc.instrument import VariantGenerator
import rlkit.torch.pytorch_util as ptu
from rlkit.envs.wrappers import NormalizedBoxEnv
from rlkit.launchers.launcher_util import setup_logger, set_seed
from rlkit.envs import get_meta_env, get_meta_env_params_iters
from rlkit.torch.irl.disc_models.airl_disc import StandardMetaDisc
# from rlkit.torch.irl.disc_models.airl_disc import ThirdVersionSingleColorFetchCustomDisc
# from rlkit.torch.irl.disc_models.airl_disc import TransferVersionSingleColorFetchCustomDisc
from rlkit.torch.irl.policy_optimizers.sac import NewSoftActorCritic
from rlkit.torch.sac.policies import ReparamTanhMultivariateGaussianPolicy
from rlkit.torch.networks import FlattenMlp
from rlkit.torch.irl.meta_fairl import MetaFAIRL
from rlkit.torch.irl.encoders.mlp_encoder import TimestepBasedEncoder
# from rlkit.torch.irl.encoders.conv_trivial_encoder import TrivialTrajEncoder, TrivialR2ZMap, TrivialNPEncoder
# from rlkit.torch.irl.encoders.trivial_encoder import TrivialTrajEncoder, TrivialR2ZMap, TrivialNPEncoder
import yaml
import argparse
import importlib
import psutil
import os
from os import path
import joblib
from time import sleep
EXPERT_LISTING_YAML_PATH = '/h/kamyar/oorl_rlkit/rlkit/torch/irl/experts.yaml'
def experiment(variant):
with open(EXPERT_LISTING_YAML_PATH, 'r') as f:
listings = yaml.load(f.read())
expert_dir = listings[variant['expert_name']]['exp_dir']
specific_run = listings[variant['expert_name']]['seed_runs'][variant['expert_seed_run_idx']]
file_to_load = path.join(expert_dir, specific_run, 'extra_data.pkl')
extra_data = joblib.load(file_to_load)
# this script is for the non-meta-learning AIRL
train_context_buffer, train_test_buffer = extra_data['meta_train']['context'], extra_data['meta_train']['test']
test_context_buffer, test_test_buffer = extra_data['meta_test']['context'], extra_data['meta_test']['test']
# set up the envs
env_specs = variant['env_specs']
meta_train_env, meta_test_env = get_meta_env(env_specs)
meta_train_env.seed(variant['seed'])
meta_test_env.seed(variant['seed'])
# set up the policy and training algorithm
if isinstance(meta_train_env.observation_space, Dict):
if variant['algo_params']['policy_uses_pixels']:
raise NotImplementedError('Not implemented pixel version of things!')
else:
obs_dim = int(np.prod(meta_train_env.observation_space.spaces['obs'].shape))
else:
obs_dim = int(np.prod(meta_train_env.observation_space.shape))
action_dim = int(np.prod(meta_train_env.action_space.shape))
print('obs dim: %d' % obs_dim)
print('act dim: %d' % action_dim)
sleep(3)
# make the disc model
if variant['algo_params']['state_only']: print('\n\nUSING STATE ONLY DISC\n\n')
disc_model = StandardMetaDisc(
2*obs_dim + action_dim + variant['algo_params']['z_dim'] if not variant['algo_params']['state_only'] else 2*obs_dim + variant['algo_params']['z_dim'],
num_layer_blocks=variant['disc_num_blocks'],
hid_dim=variant['disc_hid_dim'],
hid_act=variant['disc_hid_act'],
use_bn=variant['disc_use_bn'],
clamp_magnitude=variant['disc_clamp_magnitude']
)
print(disc_model)
print(disc_model.clamp_magnitude)
if variant['algo_params']['use_target_disc']:
target_disc = disc_model.copy()
else:
target_disc = None
print(disc_model)
print(disc_model.clamp_magnitude)
z_dim = variant['algo_params']['z_dim']
policy_net_size = variant['policy_net_size']
hidden_sizes = [policy_net_size] * variant['num_hidden_layers']
qf1 = FlattenMlp(
hidden_sizes=hidden_sizes,
input_size=obs_dim + action_dim + z_dim,
output_size=1,
)
qf2 = FlattenMlp(
hidden_sizes=hidden_sizes,
input_size=obs_dim + action_dim + z_dim,
output_size=1,
)
vf = FlattenMlp(
hidden_sizes=hidden_sizes,
input_size=obs_dim + z_dim,
output_size=1,
)
policy = ReparamTanhMultivariateGaussianPolicy(
hidden_sizes=hidden_sizes,
obs_dim=obs_dim + z_dim,
action_dim=action_dim,
)
policy_optimizer = NewSoftActorCritic(
policy=policy,
qf1=qf1,
qf2=qf2,
vf=vf,
wrap_absorbing=variant['algo_params']['wrap_absorbing'],
**variant['policy_params']
)
# make the encoder
encoder = TimestepBasedEncoder(
2*obs_dim + action_dim, #(s,a,s')
variant['algo_params']['r_dim'],
variant['algo_params']['z_dim'],
variant['algo_params']['enc_hid_dim'],
variant['algo_params']['r2z_hid_dim'],
variant['algo_params']['num_enc_layer_blocks'],
hid_act='relu',
use_bn=True,
)
train_task_params_sampler, test_task_params_sampler = get_meta_env_params_iters(env_specs)
algorithm = MetaFAIRL(
meta_test_env, # env is the test env, training_env is the training env (following rlkit original setup)
policy,
disc_model,
train_context_buffer,
train_test_buffer,
test_context_buffer,
test_test_buffer,
encoder,
policy_optimizer,
training_env=meta_train_env, # the env used for generating trajectories
train_task_params_sampler=train_task_params_sampler,
test_task_params_sampler=test_task_params_sampler,
target_disc=target_disc,
**variant['algo_params']
)
if ptu.gpu_enabled():
algorithm.cuda()
algorithm.train()
return 1
if __name__ == '__main__':
# Arguments
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--experiment', help='experiment specification file')
args = parser.parse_args()
with open(args.experiment, 'r') as spec_file:
spec_string = spec_file.read()
exp_specs = yaml.load(spec_string)
if exp_specs['use_gpu']:
print('\n\nUSING GPU\n\n')
ptu.set_gpu_mode(True)
exp_id = exp_specs['exp_id']
exp_prefix = exp_specs['exp_name']
seed = exp_specs['seed']
set_seed(seed)
setup_logger(exp_prefix=exp_prefix, exp_id=exp_id, variant=exp_specs)
experiment(exp_specs)
|
import sys
inputs = sys.stdin.readline().split()
a = int(inputs[0])
b = int(inputs[1])
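# A hedged note on the code below (mechanics only; Python 2 semantics, where
# "/" on ints is floor division): each pass adds the whole quotient a / b to
# result, reduces a to the remainder, then shrinks b by that remainder and
# counts one extra step, stopping once b reaches 1.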
if (b == 1):
print a
exit()
div = a / b
result = div
a = a - div * b
while (True):
b = b - a
result += 1
if (b == 1):
result += a/b
break
div = a / b
result += div
a = a - div * b
print result
|
# -*- coding: utf-8 -*-
"""
Kakao Hangul Analyzer III
__version__ = '0.4'
__author__ = 'Kakao Corp.'
__copyright__ = 'Copyright (C) 2018-, Kakao Corp. All rights reserved.'
__license__ = 'Apache 2.0'
__maintainer__ = 'Jamie'
__email__ = 'jamie.lim@kakaocorp.com'
"""
###########
# imports #
###########
from distutils.command.build import build
import os
import shutil
import subprocess
import zipfile
from setuptools import setup
#############
# constants #
#############
_SRC_NAME = 'khaiii-0.4'
#########
# types #
#########
class CustomBuild(build):
"""
custom handler for 'build' command
"""
def run(self):
"""
run build command
"""
with zipfile.ZipFile('{}.zip'.format(_SRC_NAME), 'r') as src_zip:
src_zip.extractall()
build_dir = '{}/build'.format(_SRC_NAME)
os.makedirs(build_dir, exist_ok=True)
subprocess.check_call('cmake ..', cwd=build_dir, shell=True)
subprocess.check_call('make all resource', cwd=build_dir, shell=True)
shutil.rmtree('khaiii/lib', ignore_errors=True)
shutil.copytree('{}/lib'.format(build_dir), 'khaiii/lib')
shutil.rmtree('khaiii/share', ignore_errors=True)
shutil.copytree('{}/share'.format(build_dir), 'khaiii/share')
shutil.rmtree(_SRC_NAME)
build.run(self)
#############
# functions #
#############
#def readme():
# """
# read content from README.md file
# Returns:
# long description (content of README.md)
# """
# return open('./README.md', 'r', encoding='UTF-8').read()
#########
# setup #
#########
setup(
name='khaiii',
version='0.4',
description='Kakao Hangul Analyzer III',
#long_description=readme(),
url='https://github.com/kakao/khaiii',
author='Kakao Corp.',
author_email='jamie.lim@kakaocorp.com',
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
],
license='Apache 2.0',
packages=['khaiii', ],
include_package_data=True,
install_requires=[],
setup_requires=['pytest-runner', ],
tests_require=['pytest', ],
zip_safe=False,
cmdclass={'build': CustomBuild}
)
|
import PySimpleGUI as sg
from operator import itemgetter
from src.enums.guiState import GuiState
from src.enums.modelType import ModelType
from src.enums.scalerType import ScalerType
from src.helpers.datasetHelper import DatasetHelper
from src.helpers.plotterHelper import PlotterHelper
from src.models.dataset import Dataset
from src.models.model import Model
from src.models.scaler import Scaler
from src.store import Store
from src.helpers.randomHelper import RandomHelper
class Gui:
_store = Store()
_window = None
_state = GuiState.GenerateData
_models = [{"key": "model" + str(x.value), "value": x}
for x in ModelType]
_selectedModel = ModelType.LinearRegression
_subscriptMap = {
"0": "₀",
"1": "₁",
"2": "₂",
"3": "₃",
"4": "₄",
"5": "₅",
"6": "₆",
"7": "₇",
"8": "₈",
"9": "₉",
"10": "₁₀",
"11": "₁₁",
"12": "₁₂",
"13": "₁₃",
"14": "₁₄",
"15": "₁₅",
"16": "₁₆",
"17": "₁₇",
"18": "₁₈",
"19": "₁₉",
"20": "₂₀",
}
def start(self):
sg.theme('DarkBlue')
self._window = sg.Window('Regression Benchmark', self.createLayout(),
icon="./assets/ico.ico", finalize=True)
self._window.read(timeout=10)
self._window['numberOfVariables'].update(self._store.numberOfVariables)
self._window['maxCoeff'].update(self._store.maxCoeff)
self._window['maxExp'].update(self._store.maxExp)
self._window['minValue'].update(self._store.minValue)
self._window['maxValue'].update(self._store.maxValue)
self._window['numberOfSamples'].update(self._store.numberOfSamples)
self.updateDisplayedModel(self._selectedModel)
while True: # Event Loop
event, values = self._window.read(timeout=10)
if event in (None, 'Exit'): # exits event loop
break
if event == 'updateValues':
self.updateValues(values)
if event == 'randomizeValues':
self.randomizeValues()
self.updateDisplayedValues()
if event == 'updateValues' or event == 'randomizeValues':
self._window['action'].update("Generate data")
self._window['action'].update(visible=True)
self._window['runall'].update(visible=True)
self._state = GuiState.GenerateData
if event == 'runall':
self.updateValues(values)
self._store.resetDatasetAndModel()
self._state = GuiState.GenerateData
self._window['action'].update("Generate data")
self.runAllModels()
if event == 'action':
if self._state == GuiState.GenerateData:
self._window['action'].update("Generating...")
self.generateData()
self.createDataset()
self._window['action'].update("Train Model")
self._state = GuiState.TrainModel
elif self._state == GuiState.TrainModel:
self._window['action'].update("Training...")
self.trainModel()
self._window['action'].update("Test")
self._state = GuiState.Test
elif self._state == GuiState.Test:
self._window['action'].update("Testing...")
self._window['score'].update(self.testModel())
self._window['action'].update("Show output")
self._state = GuiState.Output
else:
self.showOutput()
self._window.close()
def createLayout(self):
return [
[
sg.Text('Maximum coefficient:', size=(20, 1)),
sg.Input(size=(20, 1), key='maxCoeff'),
sg.Text(size=(20, 1)),
sg.Text('Maximum exponent:', size=(20, 1)),
sg.Input(size=(20, 1), key='maxExp'),
sg.Text(size=(30, 1)),
sg.Button(size=(20, 1), button_color=("white", "black"),
button_text="Update", key='updateValues'),
sg.Button(size=(20, 1), button_color=("white", "black"),
button_text="Randomize", key='randomizeValues')
],
[
sg.Text('Domain minimum value:', size=(20, 1)),
sg.Input(size=(20, 1), key='minValue'),
sg.Text(size=(20, 1)),
sg.Text('Domain maximum value:', size=(20, 1)),
sg.Input(size=(20, 1), key='maxValue'),
sg.Text(size=(20, 1)),
sg.Text('Number of samples:', size=(20, 1)),
sg.Input(size=(20, 1), key='numberOfSamples')
],
[
sg.Text('Regression model:', size=(20, 1))
] + [
sg.Radio(model.name, "radio_group1", key="model" + str(model.value)) for model in ModelType
],
[
sg.Text('Number of variables:', size=(20, 1)),
sg.Input(size=(20, 1), key='numberOfVariables'),
],
self.createParameterLayout('coeff', 'Coefficients:'),
self.createParameterLayout('exp', 'Exponents:'),
[
sg.Text('Resulting function:', size=(20, 2)),
sg.Text('', key='resultingFunction', size=(120, 2))
],
[
sg.Text('Regression score:', size=(20, 2)),
sg.Text('', key='score', size=(120, 2))
],
[
sg.Text(size=(77, 1)),
sg.Text(size=(77, 1)),
sg.Col([
[
sg.Button(size=(20, 1), button_color=(
"white", "black"), button_text="Auto run all models", key='runall', visible=False),
sg.Button(size=(20, 1), button_color=(
"white", "black"), button_text="Start", key='action', visible=False)
]
])
]
]
# Workaround for a tkinter bug that misaligns invisible items
@staticmethod
def inputColumn(*args, **kwargs):
return sg.Col([[sg.Input(*args, **kwargs)]], pad=(0, 0))
def createParameterLayout(self, parameterName, title):
arr = [self.inputColumn(size=(7, 1), key=f'{parameterName}{x}', visible=False)
for x in range(self._store.maxNumberOfVariables)]
arr.insert(0, sg.Text(title, size=(20, 1),
key=parameterName, visible=True))
return arr
def updateValues(self, values):
try:
temp = int(values['numberOfVariables'])
if (temp != self._store.numberOfVariables):
self._store.numberOfVariables = temp
if (self._store.numberOfVariables > self._store.maxNumberOfVariables):
self._store.numberOfVariables = self._store.maxNumberOfVariables
elif self._store.numberOfVariables < self._store.minNumberOfVariables:
self._store.numberOfVariables = self._store.minNumberOfVariables
except ValueError:
self._store.numberOfVariables = self._store.minNumberOfVariables
finally:
self.updateParameterValues('coeff', values)
self.updateParameterValues('exp', values)
self.updateDisplayedValues()
try:
self._store.maxCoeff = float(values['maxCoeff'])
except ValueError:
self._window['maxCoeff'].update(self._store.maxCoeff)
try:
self._store.maxExp = float(values['maxExp'])
except ValueError:
self._window['maxExp'].update(self._store.maxExp)
try:
self._store.minValue = int(values['minValue'])
except ValueError:
self._window['minValue'].update(self._store.minValue)
try:
self._store.maxValue = int(values['maxValue'])
except ValueError:
self._window['maxValue'].update(self._store.maxValue)
try:
self._store.numberOfSamples = int(values['numberOfSamples'])
except ValueError:
self._window['numberOfSamples'].update(self._store.numberOfSamples)
for model in self._models:
if values[model["key"]]:
self._selectedModel = model["value"]
def updateDisplayedModel(self, value):
for model in self._models:
if model["value"] == value:
self._window[model["key"]].update(True)
else:
self._window[model["key"]].update(False)
def updateDisplayedValues(self):
self._window['numberOfVariables'].update(self._store.numberOfVariables)
self.updateParameterVisibility('coeff')
self.updateParameterVisibility('exp')
self.updateDisplayedParameterValues('exp')
self.updateDisplayedParameterValues('coeff')
self.updateResultingFunction()
def updateDisplayedParameterValues(self, parameterName):
for x in range(self._store.maxNumberOfVariables):
self._window[f'{parameterName}{x}'].update(
self._store.parametersArr[x][parameterName])
def updateParameterVisibility(self, parameterName):
for x in range(self._store.numberOfVariables):
self._window[f'{parameterName}{x}'].update(visible=True)
for x in range(self._store.numberOfVariables, self._store.maxNumberOfVariables):
self._window[f'{parameterName}{x}'].update(visible=False)
def updateParameterValues(self, parameterName, values):
for x in range(self._store.maxNumberOfVariables):
try:
self._store.parametersArr[x][parameterName] = int(
values[f'{parameterName}{x}'])
except ValueError:
try:
self._store.parametersArr[x][parameterName] = float(
values[f'{parameterName}{x}'])
except ValueError:
self._window[f'{parameterName}{x}'].update(1)
def resultingFunction(self):
resultingFunction = ''
for x in range(self._store.numberOfVariables):
coeff = self._store.parametersArr[x]['coeff']
if coeff > 0 and x > 0:
resultingFunction += ' + '
resultingFunction += f'{coeff}X{self._subscriptMap[f"{x}"]}^'
exp = self._store.parametersArr[x]['exp']
resultingFunction += f'{exp}'  # a negative exponent already prints its own sign
self._store.resultingFunction = resultingFunction
def updateResultingFunction(self):
self.resultingFunction()
self._window['resultingFunction'].update(self._store.resultingFunction)
def randomizeValues(self):
self._store.numberOfVariables = RandomHelper.randomInt(
self._store.minNumberOfVariables, self._store.maxNumberOfVariables)
for x in range(self._store.numberOfVariables):
self._store.parametersArr[x]['coeff'] = RandomHelper.randomFloat(
self._store.maxCoeff)
self._store.parametersArr[x]['exp'] = RandomHelper.randomFloat(
self._store.maxExp)
def generateData(self):
DatasetHelper.generateDataset(self._store)
def createDataset(self):
self._store.dataSet = Dataset(self._store.label, self._store.dataFrame)
def trainModel(self, model: ModelType = None):
self._store.scaler = Scaler(
ScalerType.StandardScaler, self._store.dataSet.getFeaturesData())
X = self._store.scaler.transform(self._store.dataSet.getFeaturesData())
y = self._store.dataSet.getLabelData()
if model is None:
self._store.model = Model(self._selectedModel, X, y)
else:
self._store.model = Model(model, X, y)
def testModel(self):
X = self._store.scaler.transform(self._store.dataSet.getFeaturesData())
y = self._store.dataSet.getLabelData()
return round(self._store.model.evaluate(X, y), 2)
def showOutput(self, show=True):
PlotterHelper.plotFormula(
self._store, self._store.model.getAlgorithmUsed(), show)
def runAllModels(self):
self.generateData()
scores = {}
figures = []
for model in self._models:
self.createDataset()
self.trainModel(model['value'])
scores[self._store.model.getAlgorithmUsed()] = self.testModel()
figures.append(self.showOutput(False))
figures.append(PlotterHelper.plotEvaluations(list(map(itemgetter(0), scores.items())),
list(
map(itemgetter(1), scores.items())),
'All scores'))
self._window['score'].update(' '.join(str(e) for e in scores.values()))
PlotterHelper.show()
|
# arithmetic operators
# Addition                     5+2 == 7
# Subtraction                  5-2 == 3
# Multiplication               5*2 == 10
# Division                     5/2 == 2.5
# Exponentiation               5**2 == 25
# Integer division             5//2 == 2
# Division remainder (modulo)  5%2 == 1
# Example: 5+3*2 == 11     evaluated as 5+(3*2)
# Example: 3*5+4**2 == 31  evaluated as ((3*5)+(4**2))
# Example: 3*(5+4)**2 == 243
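# A runnable check of the precedence examples above (Python 3 semantics):
print(5 + 3 * 2)         # 11: multiplication binds tighter than addition
print(3 * 5 + 4 ** 2)    # 31: exponentiation first, then multiplication, then addition
print(3 * (5 + 4) ** 2)  # 243: parentheses override precedence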
|
from utilities import *
def read_diary(day=time.strftime('%d'),month=time.strftime('%m'),year=time.strftime('%Y')):
try:
filename="diary/"+day+"_"+month+"_"+year+".wav1"
play_wav(filename)
except IOError:
voice("Oops! Couldnot find, a diary entry, for,this date!")
read_diary('22','07','2017')
|
# Generated by Django 2.1 on 2018-09-03 00:32
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('events', '0003_auto_20180902_1817'),
]
operations = [
migrations.RemoveField(
model_name='events',
name='city',
),
]
|
# Generated by Django 3.1.7 on 2021-04-10 08:27
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('account_profile', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='awardhistory',
name='create_time',
),
migrations.RemoveField(
model_name='awardhistory',
name='is_delete',
),
migrations.RemoveField(
model_name='awardhistory',
name='update_time',
),
migrations.RemoveField(
model_name='competitionrecord',
name='create_time',
),
migrations.RemoveField(
model_name='competitionrecord',
name='is_delete',
),
migrations.RemoveField(
model_name='competitionrecord',
name='update_time',
),
migrations.RemoveField(
model_name='honortitle',
name='create_time',
),
migrations.RemoveField(
model_name='honortitle',
name='is_delete',
),
migrations.RemoveField(
model_name='honortitle',
name='update_time',
),
migrations.RemoveField(
model_name='paperrecord',
name='create_time',
),
migrations.RemoveField(
model_name='paperrecord',
name='is_delete',
),
migrations.RemoveField(
model_name='paperrecord',
name='update_time',
),
migrations.RemoveField(
model_name='patentrecord',
name='create_time',
),
migrations.RemoveField(
model_name='patentrecord',
name='is_delete',
),
migrations.RemoveField(
model_name='patentrecord',
name='update_time',
),
migrations.RemoveField(
model_name='publishrecord',
name='create_time',
),
migrations.RemoveField(
model_name='publishrecord',
name='is_delete',
),
migrations.RemoveField(
model_name='publishrecord',
name='update_time',
),
migrations.RemoveField(
model_name='researchproject',
name='create_time',
),
migrations.RemoveField(
model_name='researchproject',
name='is_delete',
),
migrations.RemoveField(
model_name='researchproject',
name='update_time',
),
]
|
from flask import Flask, request, render_template
from datetime import datetime
from pytz import timezone
app = Flask(__name__)
@app.route("/")
def index():
return render_template("index_06.html")
|