seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
17055643944 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class MaskedUserCertView(object):
    """Masked user certification view model for the Alipay OpenAPI SDK."""

    # Field names shared by serialization and deserialization.
    _FIELDS = ('is_certified', 'user_id', 'user_name')

    def __init__(self):
        self._is_certified = None
        self._user_id = None
        self._user_name = None

    @property
    def is_certified(self):
        return self._is_certified

    @is_certified.setter
    def is_certified(self, value):
        self._is_certified = value

    @property
    def user_id(self):
        return self._user_id

    @user_id.setter
    def user_id(self, value):
        self._user_id = value

    @property
    def user_name(self):
        return self._user_name

    @user_name.setter
    def user_name(self, value):
        self._user_name = value

    def to_alipay_dict(self):
        """Serialize every truthy field into a plain dict.

        Nested SDK objects (anything exposing to_alipay_dict) are
        serialized recursively; plain values are copied through.
        """
        params = {}
        for key in self._FIELDS:
            value = getattr(self, key)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[key] = value.to_alipay_dict()
            else:
                params[key] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a MaskedUserCertView from a response dict; None for empty input."""
        if not d:
            return None
        view = MaskedUserCertView()
        for key in MaskedUserCertView._FIELDS:
            if key in d:
                setattr(view, key, d[key])
        return view
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/MaskedUserCertView.py | MaskedUserCertView.py | py | 1,861 | python | en | code | 241 | github-code | 13 |
6374990783 | import scrapy
from ..items import TestItem
class TestSpider(scrapy.Spider):
    """Scrape new stock issue IDs and prices from Sina Finance, page by page."""

    name = "test"
    allowed_domains = ["www.runoob.com"]
    start_urls = ["http://vip.stock.finance.sina.com.cn/corp/view/vRPD_NewStockIssue.php"]

    def parse(self, response):
        # Every <tr> of the NewStockTable is one stock record.
        table = response.xpath('//*[@id="NewStockTable"]')
        for row in table.xpath('tr'):
            item = TestItem()
            item['id'] = row.xpath('./td[1]/div/text()').extract()
            item['price'] = row.xpath('./td[8]/div/text()').extract()
            yield item
        # Follow the "next page" (下一页) link and parse it with this same callback.
        next_href = response.xpath('//*[@id="con02-0"]/table[2]/tr[1]/td/a[text()="下一页"]/@href').extract()[0]
        next_url = "http://vip.stock.finance.sina.com.cn" + next_href
        yield scrapy.Request(url=next_url, callback=self.parse, dont_filter=True)
| JohnKingm123/DailyTraining | 20230920/Proj/ttt/ttt/spiders/test.py | test.py | py | 1,134 | python | en | code | 0 | github-code | 13 |
class Student:
    """A student identified by name and school, accumulating marks over time."""

    def __init__(self, name: str, school: str):
        # Normalize both identifiers to leading-capital form.
        self.name, self.school = name.capitalize(), school.capitalize()
        self.marks = []

    def average_mark(self):
        """Arithmetic mean of the recorded marks (raises ZeroDivisionError if none)."""
        marks = self.marks
        return sum(marks) / len(marks)

    @classmethod
    def friend(cls, origin, friend_name: str, *args, **kwargs):
        """Create a student of the same (sub)class attending origin's school.

        Extra positional/keyword arguments are forwarded to the subclass
        constructor, so WorkingStudent.friend(...) works too.
        """
        return cls(friend_name, origin.school, *args, **kwargs)
class WorkingStudent(Student):
    """A Student who additionally holds a paid job."""

    def __init__(self, name: str, school: str, salary: float, job_title: str):
        # Delegate name/school normalization to Student.
        super().__init__(name, school)
        self.job_title = job_title.capitalize()
        self.salary = salary
# Demo: build a working student plus a friend and print their attributes.
# AttributeError/TypeError are reported identically, so share one handler.
try:
    anna = WorkingStudent('anna', "Oxford", 1800.0, 'server')
    print(anna.salary)
    friend = WorkingStudent.friend(anna, 'greg', 800.0, "cleaner")
    print(friend.name)
    print(friend.school)
    print(friend.salary)
except (AttributeError, TypeError) as err:
    print("{0}".format(err))
| ikostan/automation_with_python | IntroToPython/inheritance.py | inheritance.py | py | 1,047 | python | en | code | 0 | github-code | 13 |
21102824071 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 23 09:42:57 2021
@author: rotoapanta
"""
"""
crear una funcion de nombre fibonacci, que reciba como parámetro un numero
n fibonacci(n) y que genera los numeros contenidos entre [0,n]
correspondiente a la serie o sucesión de Fibonacci
"""
def fibonacci(n):
    """Print the Fibonacci numbers in [0, n), each followed by a space, then a newline."""
    terms = []
    a, b = 0, 1
    while a < n:
        terms.append(a)
        a, b = b, a + b
    for term in terms:
        print(term, end=' ')
    print()
#fibonacci(8)
| rotoapanta/programacion_python_aplicada_ingenieria | test2/fibonacci.py | fibonacci.py | py | 442 | python | es | code | 0 | github-code | 13 |
29834703955 | from spider.exam_handler import *
import requests
# 考试相关查询爬虫
class Exam(object):
    """Scrapers for exam-related lookups: admission results, Mandarin test, CET."""

    def __init__(self):
        # Endpoints used by the queries below; one shared session keeps cookies.
        self.url = {
            'ch_test': 'http://www.cltt.org/StudentScore/ScoreResult',
            'admit_query': 'http://zsjy.gzhu.edu.cn/gklqcxjgy.jsp?wbtreeid=1080'
        }
        self.client = requests.session()

    def admit_query(self, stu_id, stu_name):
        """Guangzhou University college-entrance admission lookup.

        Returns a dict with stu_id/stu_name/major, or {} when the page
        does not contain the expected three fields.
        """
        payload = {
            'stuID1': stu_id,
            'stuName1': stu_name
        }
        page = self.client.post(self.url["admit_query"], payload)
        fields = re.findall(r'align="left" >(.+?)</td>', page.text)
        try:
            return {
                'stu_id': fields[0],
                'stu_name': fields[1],
                'major': fields[2]
            }
        except:
            return {}

    def ch_test_query(self, post_data):
        """Putonghua (Mandarin) proficiency test score lookup."""
        page = self.client.post(url=self.url['ch_test'], data=post_data)
        # The site returns this sentinel text when no record matches.
        if '对不起没有查询到相关信息' in page.text:
            return '对不起没有查询到相关信息'
        return ch_test_handler(page)

    def cetTestQueryGetImg(self, id_num, name):
        """Fetch the CET captcha image together with the session cookies."""
        img = get_img(self.client, id_num)
        return {
            'img': img,
            'cookies': requests.utils.dict_from_cookiejar(self.client.cookies),
        }

    def cetTestQueryGetScore(self, id_num, name, capcha, cookies):
        """Fetch the CET score using a previously solved captcha and cookies."""
        return get_score(self.client, id_num, name, capcha, cookies)

    def cet_get_captcha(self, id_num, name):
        """Fetch the CET captcha image only."""
        return get_img(self.client, id_num)

    def cet_get_score(self, id_num, name, capcha):
        """Fetch the CET score using this session's cookies."""
        return get_score(self.client, id_num, name, capcha)
# 普通话考试测试
# test = Exam()
# testData = {
# 'name': '杨泰桦',
# 'stuID': '',
# 'idCard': '440402199811059055'
# }
# print(test.ch_test_query(testData))
# #cet考试测试
# test=EX()
# print(test.cetTestQueryGetImg('440070182205601','肖镇'))
# capcha=input()
# cookiesTest={'BIGipServercache.neea.edu.cn_pool': '2543896586.39455.0000'}
# test1=Exam()
# print(test1.cetTestQueryGetScore('440070182205601','肖镇',cookiesTest))
'''
#录取查询
test=EX()
test.admitQuery('18440981203067','林婳婳')
'''
| vancece/GZHU-Pi | Server_py/spider/exam_spider.py | exam_spider.py | py | 2,609 | python | en | code | 19 | github-code | 13 |
37019452046 | # This is a sample Python script.
from mdb_bp import driver
from datetime import datetime
import csv
# Names of the demo database and the three blockchains created below.
databaseName = "main"
productBlockchainName = "product"
materialsBlockchainName = "material"
projectBlockchainName = "project"
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    # Connect to the database
    # NOTE(review): credentials/host are hard-coded for the demo environment.
    conn = driver.connect(
        username="system",
        password="biglove",
        connection_protocol="tcp",
        server_address="localhost",
        server_port=5461,
        database_name="master",
        parameters={"interpolate_params": True},
    )
    # Check for an existing database — any returned row means it already exists.
    dbInitialized = False
    rows = conn.query("SELECT name FROM sys_database WHERE sys_database.name = ?", [databaseName])
    itr = iter(rows)
    for row in itr:
        dbInitialized = True
    if not dbInitialized:
        result = conn.prepare(
            "CREATE DATABASE %s" % databaseName).exec()
        print(result)
    # Switch the session to the demo database.
    result = conn.prepare(
        "USE %s" % databaseName).exec()
    print(result)
    # Check for an existing blockchain (product)
    productsInitialized = False
    rows = conn.query("SELECT sys_blockchain_id FROM sys_blockchain WHERE sys_blockchain.name = ?",
                      [productBlockchainName])
    itr = iter(rows)
    for row in itr:
        productsInitialized = True
    if not productsInitialized:
        conn.prepare(
            "CREATE BLOCKCHAIN %s.%s TRADITIONAL " % (databaseName, productBlockchainName) +
            "(product_id UINT64 PRIMARY KEY AUTO INCREMENT," +
            " product_name STRING SIZE = 25 PACKED," +
            " price_per_square_foot FLOAT32)"
        ).exec()
    # Check for an existing blockchain (project)
    projectsInitialized = False
    rows = conn.query("SELECT sys_blockchain_id FROM sys_blockchain WHERE sys_blockchain.name = ?",
                      [projectBlockchainName])
    itr = iter(rows)
    for row in itr:
        projectsInitialized = True
    if not projectsInitialized:
        conn.prepare(
            "CREATE BLOCKCHAIN %s.%s TRADITIONAL " % (databaseName, projectBlockchainName) +
            "(project_id UINT64 PRIMARY KEY AUTO INCREMENT," +
            " project_name STRING SIZE = 25 PACKED," +
            " project_location UINT64)"
        ).exec()
    # Check for an existing blockchain (material, links projects to products)
    materialsInitialized = False
    rows = conn.query("SELECT sys_blockchain_id FROM sys_blockchain WHERE sys_blockchain.name = ?",
                      [materialsBlockchainName])
    itr = iter(rows)
    for row in itr:
        materialsInitialized = True
    if not materialsInitialized:
        conn.prepare(
            "CREATE BLOCKCHAIN %s.%s TRADITIONAL " % (databaseName, materialsBlockchainName) +
            "(material_id UINT64 PRIMARY KEY AUTO INCREMENT," +
            " project_id UINT64 FOREIGN [main.project, project_id]," +
            " product_id UINT64 FOREIGN [main.product, product_id]," +
            " volume FLOAT32)"
        ).exec()
    # Add products using the csv file; remember the id assigned to "steel".
    steelProductId = 0
    products = csv.reader(open('files/products.csv'), delimiter=',')
    for product in products:
        res = conn.prepare(
            "INSERT %s.%s (product_name, price_per_square_foot) VALUES " % (databaseName, productBlockchainName) +
            "(?, ?)"
        ).exec([product[0], product[1]])
        if product[0] == "steel":
            steelProductId = res.insert_id
    # Add project
    res = conn.prepare(
        "INSERT %s.%s (project_name, project_location) VALUES (?, ?) " %
        (databaseName, projectBlockchainName)
    ).exec(["San Diego", 92131])
    # Add materials (100 units of steel for the project just inserted)
    conn.prepare(
        "INSERT %s.%s (project_id, product_id, volume) VALUES (?, ?, 100) " %
        (databaseName, materialsBlockchainName)
    ).exec([res.insert_id, steelProductId])
    # Get the total cost of all projects (python doesn't support FLOAT64, must cast to FLOAT32 before returning)
    rows = conn.query("SELECT project.project_id, (SUM(material.volume * product.price_per_square_foot)) " +
                      "FROM project JOIN material" +
                      " ON project.project_id = material.project_id " +
                      " JOIN product ON product.product_id = material.product_id " +
                      " GROUP BY project.project_id")
    itr = iter(rows)
    for row in itr:
        print(row)
    # Change the price of steel (AMEND appends a new version on the blockchain)
    steelProductId = conn.prepare(
        "AMEND %s.%s (product_id, product_name, price_per_square_foot) VALUES " % (databaseName, productBlockchainName) +
        "(?, ?, ?)"
    ).exec([steelProductId, "steel", 12.0]).insert_id
    # Get the new cost of the project
    rows = conn.query("SELECT *, STRING(price_per_square_foot), sys_timestamp FROM product")
    itr = iter(rows)
    for row in itr:
        print(row)
| blockpointSystems/python-example | main.py | main.py | py | 4,863 | python | en | code | 0 | github-code | 13 |
28680538035 | import sys
input = sys.stdin.readline
# BOJ 2156 "wine tasting": maximize the total amount drunk when no three
# consecutive glasses may be taken. Reads N and then N amounts from stdin.
N = int(input())
ns = [0] + [int(input()) for _ in range(N)]  # 1-indexed via sentinel 0
# dp[i] = best total using glasses 1..i.
dp = [0] * (N + 1)
if N >= 1:
    dp[1] = ns[1]
if N >= 2:
    dp[2] = ns[1] + ns[2]
# Transitions: skip glass i; take i after skipping i-1; take i and i-1 after
# skipping i-2. (The range is empty for N < 3, so no guard is needed.)
for i in range(3, N + 1):
    dp[i] = max(dp[i - 1], dp[i - 2] + ns[i], dp[i - 3] + ns[i - 1] + ns[i])
print(dp[-1])
| hodomaroo/BOJ-Solve | 백준/Silver/2156. 포도주 시식/포도주 시식.py | 포도주 시식.py | py | 323 | python | en | code | 2 | github-code | 13 |
39477635724 | import pygame
import sys
class Grid:
    """Uniform spatial-hash grid of cell size R over a width x height area.

    Each entity index is registered into its own cell and all neighbouring
    cells, so candidates() only needs to look at a single cell.
    """

    def __init__(self, width, height, R):
        """Create a W x H grid of square cells with side length R."""
        self.width = width
        self.height = height
        self.W = int(width / R) + 1   # number of columns
        self.H = int(height / R) + 1  # number of rows
        self.R = R
        self.boxes = {}               # flat cell key -> list of indices

    def add(self, vec, ind):
        """Register index `ind` in the cell containing `vec` and its neighbours.

        `vec` must expose .x and .y spatial coordinates. Cells on the grid
        border get a clamped (2-wide) neighbourhood instead of a 3-wide one.
        """
        x = int(vec.x / self.R)
        y = int(vec.y / self.R)
        xVar = [x - 1, x, x + 1]
        if x == 0:
            xVar = [0, 1]
        elif x == self.W - 1:
            xVar = [self.W - 2, self.W - 1]
        yVar = [y - 1, y, y + 1]
        # BUGFIX: both border branches previously assigned xVar (and the
        # y == 0 branch used the x clamp), which clobbered the x-neighbourhood
        # and produced out-of-range y cells on the top/bottom rows.
        if y == 0:
            yVar = [0, 1]
        elif y == self.H - 1:
            yVar = [self.H - 2, self.H - 1]
        for a in xVar:
            for b in yVar:
                key = a * self.H + b  # flatten (column, row) to one dict key
                self.boxes.setdefault(key, []).append(ind)

    def candidates(self, vec):
        """Return the indices registered near vec's cell ([] when none)."""
        x = int(vec.x / self.R)
        y = int(vec.y / self.R)
        return self.boxes.get(x * self.H + y, [])

    def reset(self):
        """Drop all registered indices."""
        self.boxes = {}
| BrownestAndStickyest/Some-fun-programming | Fluid simulations/Grid.py | Grid.py | py | 1,009 | python | en | code | 0 | github-code | 13 |
18751279978 | # -*- coding=utf-8
import time
from qcloud_cos import CosConfig
from qcloud_cos import CosS3Client
import sys
import logging
import os
# 腾讯云COSV5Python SDK, 目前可以支持Python2.6与Python2.7以及Python3.x
# https://cloud.tencent.com/document/product/436/48987
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
# User credentials and region settings.
# The appid is no longer part of the config; include it in the Bucket
# parameter instead — a bucket name has the form "<bucketname>-<appid>".
# Credentials are read from environment variables here; replace them with
# your own keys when testing locally.
secret_id = os.environ["SECRETID"]  # SecretId — manage at https://console.cloud.tencent.com/cam/capi
secret_key = os.environ["SECRETKEY"]  # SecretKey — manage at https://console.cloud.tencent.com/cam/capi
region = 'ap-chongqing'  # region of the bucket, see https://console.cloud.tencent.com/cos5/bucket
# Full list of regions supported by COS: https://www.qcloud.com/document/product/436/6224
token = None  # only needed for temporary credentials, see https://cloud.tencent.com/document/product/436/14048
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token, Scheme='https')  # client configuration
client = CosS3Client(config)
bucket_name = 'demo-1250000000'
def ci_get_media_bucket():
    """Query which buckets have media processing enabled."""
    resp = client.ci_get_media_bucket(Regions=region,
                                      BucketName='demo',
                                      BucketNames=bucket_name,
                                      PageSize="1",
                                      PageNumber="1")
    print(resp)
    return resp
def ci_get_media_queue():
    """List the media-processing queues of the bucket."""
    resp = client.ci_get_media_queue(Bucket=bucket_name)
    print(resp)
    return resp
def ci_get_pic_bucket():
    """Query which buckets have the asynchronous picture-processing service enabled."""
    resp = client.ci_get_pic_bucket(Regions=region,
                                    BucketName='demo',
                                    BucketNames=bucket_name,
                                    PageSize="1",
                                    PageNumber="1")
    print(resp)
    return resp
def ci_get_ai_bucket():
    """Query which buckets have the asynchronous AI-processing service enabled."""
    resp = client.ci_get_ai_bucket(Regions=region,
                                   BucketName='demo',
                                   BucketNames=bucket_name,
                                   PageSize="1",
                                   PageNumber="1")
    print(resp)
    return resp
def ci_get_ai_queue():
    """List the AI-processing queues of the bucket."""
    resp = client.ci_get_ai_queue(Bucket=bucket_name)
    print(resp)
    return resp
def ci_put_ai_queue():
    """Update an AI-processing queue's state and callback configuration."""
    body = {
        'Name': 'ai-queue',
        'QueueID': 'pa2c2afbe68xxxxxxxxxxxxxxxxxxxxxx',
        'State': 'Active',
        'NotifyConfig': {
            'Type': 'Url',
            'Url': 'http://www.demo.callback.com',
            'Event': 'TaskFinish',
            'State': 'On',
            'ResultFormat': 'JSON',
        }
    }
    resp = client.ci_update_ai_queue(Bucket=bucket_name,
                                     QueueId='pa2c2afbe68c44xxxxxxxxxxxxxxxxxxxx',
                                     Request=body,
                                     ContentType='application/xml')
    print(resp)
    return resp
def ci_get_media_pic_queue():
    """List the picture-processing queues of the bucket."""
    resp = client.ci_get_media_pic_queue(Bucket=bucket_name)
    print(resp)
    return resp
def ci_put_media_queue():
    # Update a media-processing queue's state and callback configuration.
    body = {
        'Name': 'media-queue',
        'QueueID': 'p5135bc6xxxxxxxxxxxxxxxxf047454',
        'State': 'Active',
        'NotifyConfig': {
            'Type': 'Url',
            'Url': 'http://www.demo.callback.com',
            'Event': 'TaskFinish',
            'State': 'On',
            'ResultFormat': 'JSON',
            # TDMQ callback configuration (alternative to URL callbacks):
            # Region of the message queue.
            # Required. Supported regions: sh (Shanghai), bj (Beijing),
            # gz (Guangzhou), cd (Chengdu), hk (Hong Kong, China).
            # 'MqRegion': 'bj',
            # # Message-queue mode.
            # # Required. Topic subscription: Topic; queue service: Queue.
            # 'MqMode': 'Queue',
            # # TDMQ topic name. Required.
            # 'MqName': 'queueName'
        }
    }
    response = client.ci_update_media_queue(
        Bucket=bucket_name,
        QueueId='p5135bcxxxxxxxxxxxxxxxxf047454',
        Request=body,
        ContentType='application/xml'
    )
    print(response)
    return response
def ci_put_media_pic_queue():
    """Update a picture-processing queue's state and callback configuration."""
    body = {
        'Name': 'media-pic-queue',
        'QueueID': 'peb83bdxxxxxxxxxxxxxxxxa21c7d68',
        'State': 'Active',
        'NotifyConfig': {
            'Type': 'Url',
            'Url': 'http://www.demo.callback.com',
            'Event': 'TaskFinish',
            'State': 'On',
            'ResultFormat': 'JSON'
        }
    }
    resp = client.ci_update_media_pic_queue(Bucket=bucket_name,
                                            QueueId='peb83bdxxxxxxxxxxxxxxxxxx4a21c7d68',
                                            Request=body,
                                            ContentType='application/xml')
    print(resp)
    return resp
def ci_create_media_transcode_with_digital_watermark_jobs():
    # Create a transcode job that also embeds an invisible digital watermark.
    body = {
        'Input': {
            'Object': 'demo.mp4'
        },
        'Tag': 'Transcode',
        'Operation': {
            'Output': {
                'Bucket': bucket_name,
                'Region': region,
                'Object': 'transcode_with_digital_watermark_output.mp4'
            },
            'TemplateId': 't04e1ab86554984f1aa17c062fbf6c007c',
            'DigitalWatermark': {
                'Type': 'Text',
                'Message': '123456789ab',
                'Version': 'V1',
                'IgnoreError': 'false',
            },
        }
    }
    response = client.ci_create_media_jobs(
        Bucket=bucket_name,
        Jobs=body,
        Lst={},
        ContentType='application/xml'
    )
    print(response)
    return response
def ci_create_media_transcode_with_watermark_jobs():
    # Create a transcode job that burns in a text and an image watermark.
    body = {
        'Input': {
            'Object': 'demo.mp4'
        },
        'Tag': 'Transcode',
        'Operation': {
            'Output': {
                'Bucket': bucket_name,
                'Region': region,
                'Object': 'transcode_with_watermark_output.mp4'
            },
            'TemplateId': 't04e1ab86554984f1aa17c062fbf6c007c',
            # "WatermarkTemplateId": ["", ""],
            'Watermark': [
                {
                    'Type': 'Text',
                    'Pos': 'TopRight',
                    'LocMode': 'Absolute',
                    'Dx': '64',
                    'Dy': '64',
                    'StartTime': '0',
                    'EndTime': '1000.5',
                    'Text': {
                        'Text': '水印内容',
                        'FontSize': '90',
                        'FontType': 'simfang.ttf',
                        'FontColor': '0xFFEEFF',
                        'Transparency': '100',
                    },
                },
                {
                    'Type': 'Image',
                    'Pos': 'TopLeft',
                    'LocMode': 'Absolute',
                    'Dx': '100',
                    'Dy': '100',
                    'StartTime': '0',
                    'EndTime': '1000.5',
                    'Image': {
                        'Url': 'http://' + bucket_name + ".cos." + region + ".myqcloud.com/watermark.png",
                        'Mode': 'Fixed',
                        'Width': '128',
                        'Height': '128',
                        'Transparency': '100',
                    },
                }
            ]
        }
    }
    # List-valued tags in the dict need explicit open/close markers so the
    # XML serializer emits them as repeated elements.
    lst = [
        '<Watermark>',
        '<WatermarkTemplateId>',
        '</WatermarkTemplateId>',
        '</Watermark>'
    ]
    response = client.ci_create_media_jobs(
        Bucket=bucket_name,
        Jobs=body,
        Lst=lst,
        ContentType='application/xml'
    )
    print(response)
    return response
def ci_create_media_hls_transcode_jobs():
    # Create an HLS transcode job with HLS encryption enabled.
    body = {
        'Input': {
            'Object': 'demo.mp4'
        },
        'Tag': 'Transcode',
        'Operation': {
            "Transcode": {
                "Container": {
                    "Format": "hls"
                },
                "Video": {
                    "Codec": "H.264",
                    "Profile": "high",
                    "Bitrate": "1000",
                    "Width": "1280",
                    "Fps": "30",
                    "Preset": "medium",
                    "Bufsize": "1000",
                    "Maxrate": "10"
                },
                "Audio": {
                    "Codec": "aac",
                    "Samplerate": "44100",
                    "Bitrate": "128",
                    "Channels": "4"
                },
                "TransConfig": {
                    'HlsEncrypt': {
                        'IsHlsEncrypt': 'true',
                        'UriKey': 'http://www.demo.com'
                    }
                },
            },
            'Output': {
                'Bucket': bucket_name,
                'Region': region,
                'Object': 'transcode_output.mp4'
            },
            # 'TemplateId': 't02db40900dc1c43ad9bdbd8acec6075c5'
        }
    }
    response = client.ci_create_media_jobs(
        Bucket=bucket_name,
        Jobs=body,
        Lst={},
        ContentType='application/xml'
    )
    print(response)
    return response
def ci_create_media_transcode_jobs():
    # Create a plain transcode job (mp4 output, explicit codec settings).
    body = {
        'Input': {
            'Object': 'demo.mp4'
        },
        'Tag': 'Transcode',
        'Operation': {
            "Transcode": {
                "Container": {
                    "Format": "mp4"
                },
                "Video": {
                    "Codec": "H.264",
                    "Profile": "high",
                    "Bitrate": "1000",
                    "Width": "1280",
                    "Fps": "30",
                    "Preset": "medium",
                    "Bufsize": "1000",
                    "Maxrate": "10"
                },
                "Audio": {
                    "Codec": "aac",
                    "Samplerate": "44100",
                    "Bitrate": "128",
                    "Channels": "4"
                },
                "TransConfig": {
                    "AdjDarMethod": "scale",
                    "IsCheckReso": "false",
                    "ResoAdjMethod": "1"
                },
                # Only transcode the first 60 seconds.
                "TimeInterval": {
                    "Start": "0",
                    "Duration": "60"
                }
            },
            'Output': {
                'Bucket': bucket_name,
                'Region': region,
                'Object': 'transcode_output.mp4'
            },
            # 'FreeTranscode': 'true',
            # 'TemplateId': 't02db40900dc1c43ad9bdbd8acec6075c5'
        }
    }
    response = client.ci_create_media_jobs(
        Bucket=bucket_name,
        Jobs=body,
        Lst={},
        ContentType='application/xml'
    )
    print(response)
    return response
def ci_create_media_snapshot_jobs():
    # Create a snapshot job (interval screenshots plus a sprite sheet).
    body = {
        'Input': {
            'Object': 'demo.mp4'
        },
        'Tag': 'Snapshot',
        'Operation': {
            'Snapshot': {
                'Mode': 'Interval',
                'Width': '1280',
                'Height': '1280',
                'Start': '0',
                'TimeInterval': '',
                'Count': '1',
                'SnapshotOutMode': 'SnapshotAndSprite',
                'SpriteSnapshotConfig': {
                    "CellHeight": "128",
                    "CellWidth": "128",
                    "Color": "White",
                    "Columns": "10",
                    "Lines": "10",
                    "Margin": "0",
                    "Padding": "0"
                }
            },
            'Output': {
                'Bucket': bucket_name,
                'Region': region,
                # ${Number} is substituted with the snapshot sequence number.
                'Object': 'snapshot-${Number}.jpg',
                'SpriteObject': 'sprite-snapshot-${Number}.jpg'
            },
            # 'TemplateId': 't02db40900dc1c43ad9bdbd8acec6075c5'
        }
    }
    response = client.ci_create_media_jobs(
        Bucket=bucket_name,
        Jobs=body,
        Lst={},
        ContentType='application/xml'
    )
    print(response)
    return response
def ci_create_media_animation_jobs():
    # Create a video-to-animated-image (gif) job.
    body = {
        'Input': {
            'Object': 'demo.mp4'
        },
        'Tag': 'Animation',
        'Operation': {
            "Animation": {
                "Container": {
                    "Format": "gif"
                },
                "Video": {
                    "Codec": "gif",
                    "Width": "1280",
                    "Fps": "15",
                    "AnimateOnlyKeepKeyFrame": "true"
                },
                # Only convert the first 60 seconds.
                "TimeInterval": {
                    "Start": "0",
                    "Duration": "60"
                }
            },
            'Output': {
                'Bucket': bucket_name,
                'Region': region,
                'Object': 'snapshot.gif'
            },
            # 'TemplateId': 't02db40900dc1c43ad9bdbd8acec6075c5'
        }
    }
    response = client.ci_create_media_jobs(
        Bucket=bucket_name,
        Jobs=body,
        Lst={},
        ContentType='application/xml'
    )
    print(response)
    return response
def ci_create_media_concat_jobs():
    # Create a concat job. Only part of the parameters are shown here; see the
    # API doc for the full set: https://cloud.tencent.com/document/product/460/84788
    body = {
        # Job type; concat jobs use the tag "Concat".
        'Tag': 'Concat',
        'Operation': {
            # Concat parameters.
            "ConcatTemplate": {
                # Fragments to join.
                "ConcatFragment": [
                    {
                        # URL of the object to concatenate.
                        "Url": "http://demo-1xxxxxxxxx.cos.ap-chongqing.myqcloud.com/1.mp4",
                        # Position of this fragment; optional, defaults to 0.
                        "FragmentIndex": "0",
                        # Start/end time: only the StartTime-EndTime span of
                        # the fragment is used. Optional; ignored when
                        # Request.Operation.ConcatTemplate.DirectConcat is true.
                        # This example uses seconds 0-1 of 1.mp4.
                        "StartTime": "0",
                        "EndTime": "1"
                    },
                    {
                        "Url": "http://demo-1xxxxxxxxx.cos.ap-chongqing.myqcloud.com/2.mp4",
                        "FragmentIndex": "1",
                    }
                ],
                # Audio settings of the output file; optional.
                "Audio": {
                    "Codec": "mp3"
                },
                # Video settings of the output file; optional.
                "Video": {
                    "Codec": "H.264",
                    "Bitrate": "1000",
                    "Width": "1280",
                    "Fps": "30"
                },
                # Container of the output file.
                "Container": {
                    # Container formats: mp4, flv, hls, ts, mp3, aac
                    "Format": "mp4"
                },
                # Scene-transition settings.
                "SceneChangeInfo": {
                    # Transition mode:
                    # Default: no transition effect
                    # FADE: fade in/out
                    # GRADIENT: gradient
                    "Mode": "Default",
                    # Transition duration in seconds; optional, default 3.
                    # Range: (0, 5], fractions allowed.
                    "Time": "3",
                },
                # Plain concatenation without transcoding; when true, the
                # video and audio parameters above are ignored.
                "DirectConcat": "false",
            },
            'Output': {
                'Bucket': bucket_name,
                'Region': region,
                'Object': 'concat-result.mp4'
            },
        }
    }
    # ConcatFragment is list-valued and needs explicit open/close markers.
    lst = ['<ConcatFragment>', '</ConcatFragment>']
    response = client.ci_create_media_jobs(
        Bucket=bucket_name,
        Jobs=body,
        Lst=lst,
        ContentType='application/xml'
    )
    print(response)
    return response
def ci_create_media_smart_cover_jobs():
    # Create a smart-cover job (AI-selected cover images).
    body = {
        'Input': {
            'Object': 'demo.mp4'
        },
        'Tag': 'SmartCover',
        'Operation': {
            'SmartCover': {
                'Format': 'jpg',
                'Width': '128',
                'Height': '128',
                'Count': '3',
                'DeleteDuplicates': 'true'
            },
            'Output': {
                'Bucket': bucket_name,
                'Region': region,
                # ${Number} is substituted with the cover sequence number.
                'Object': 'smart-cover-${Number}.jpg'
            },
            # 'TemplateId': 't02db40900dc1c43ad9bdbd8acec6075c5'
        }
    }
    response = client.ci_create_media_jobs(
        Bucket=bucket_name,
        Jobs=body,
        Lst={},
        ContentType='application/xml'
    )
    print(response)
    return response
def ci_create_media_video_process_jobs():
    # Create a video-enhancement job (color enhancement + sharpening),
    # output encoded with the given transcode template.
    body = {
        'Input': {
            'Object': 'demo.mp4'
        },
        'Tag': 'VideoProcess',
        'Operation': {
            "VideoProcess": {
                "ColorEnhance": {
                    "Enable": "true",
                    "Contrast": "10",
                    "Correction": "10",
                    "Saturation": "10"
                },
                "MsSharpen": {
                    "Enable": "true",
                    "SharpenLevel": "1"
                }
            },
            'Output': {
                'Bucket': bucket_name,
                'Region': region,
                'Object': 'video-process.mp4'
            },
            # 'TemplateId': 't02db40900dc1c43ad9bdbd8acec6075c5',
            'TranscodeTemplateId': 't04e1ab86554984f1aa17c062fbf6c007c'
        }
    }
    response = client.ci_create_media_jobs(
        Bucket=bucket_name,
        Jobs=body,
        Lst={},
        ContentType='application/xml'
    )
    print(response)
    return response
def ci_create_media_video_montage_jobs():
    # Create a highlights (video montage) job with background-audio mixing.
    body = {
        'Input': {
            'Object': 'demo.mp4'
        },
        'Tag': 'VideoMontage',
        'Operation': {
            "VideoMontage": {
                "Container": {
                    "Format": "mp4"
                },
                "Video": {
                    "Codec": "H.264",
                    "Bitrate": "1000",
                    "Width": "1280",
                    "Height": "1280"
                },
                "Audio": {
                    "Codec": "aac",
                    "Samplerate": "44100",
                    "Bitrate": "128",
                    "Channels": "4"
                },
                "AudioMix": {
                    "AudioSource": "https://demo-xxxxxxxxxxxx.cos.ap-chongqing.myqcloud.com/1.mp4",
                    "MixMode": "Once",
                    "Replace": "true"
                },
                "Duration": "1"
            },
            'Output': {
                'Bucket': bucket_name,
                'Region': region,
                'Object': 'video-montage.mp4'
            },
            # 'TemplateId': 't02db40900dc1c43ad9bdbd8acec6075c5',
        }
    }
    response = client.ci_create_media_jobs(
        Bucket=bucket_name,
        Jobs=body,
        Lst={},
        ContentType='application/xml'
    )
    print(response)
    return response
def ci_create_media_voice_separate_jobs():
    # Create a vocal/background separation job.
    body = {
        'Input': {
            'Object': 'demo.mp4'
        },
        'Tag': 'VoiceSeparate',
        'Operation': {
            "VoiceSeparate": {
                "AudioMode": "IsAudio",
                "AudioConfig": {
                    "Codec": "mp3",
                    "Samplerate": "44100",
                    "Bitrate": "12",
                    "Channels": "2"
                }
            },
            'Output': {
                'Bucket': bucket_name,
                'Region': region,
                # Background track and vocal (AuObject) track outputs.
                'Object': 'voice-separate.mp3',
                'AuObject': 'voice-separate-audio.mp3'
            },
            # 'TemplateId': 't02db40900dc1c43ad9bdbd8acec6075c5',
        }
    }
    response = client.ci_create_media_jobs(
        Bucket=bucket_name,
        Jobs=body,
        Lst={},
        ContentType='application/xml'
    )
    print(response)
    return response
def ci_create_media_sdr2hdr_jobs():
    # Create an SDR-to-HDR conversion job (HLG mode).
    body = {
        'Input': {
            'Object': 'demo.mp4'
        },
        'Tag': 'SDRtoHDR',
        'Operation': {
            "SDRtoHDR": {
                "HdrMode": "HLG",
            },
            'Output': {
                'Bucket': bucket_name,
                'Region': region,
                'Object': 'sdr2hdr.mp4'
            },
            'TranscodeTemplateId': 't04e1ab86554984f1aa17c062fbf6c007c'
        }
    }
    response = client.ci_create_media_jobs(
        Bucket=bucket_name,
        Jobs=body,
        Lst={},
        ContentType='application/xml'
    )
    print(response)
    return response
def ci_create_media_digital_watermark_jobs():
    # Create a job that embeds an invisible digital watermark.
    body = {
        'Input': {
            'Object': 'demo.mp4'
        },
        'Tag': 'DigitalWatermark',
        'Operation': {
            "DigitalWatermark": {
                "Type": "Text",
                "Message": "123456789ab",
                "Version": "V1"
            },
            'Output': {
                'Bucket': bucket_name,
                'Region': region,
                'Object': 'digital.mp4'
            },
        }
    }
    response = client.ci_create_media_jobs(
        Bucket=bucket_name,
        Jobs=body,
        Lst={},
        ContentType='application/xml'
    )
    print(response)
    return response
def ci_create_media_extract_digital_watermark_jobs():
    # Create a job that extracts a previously embedded digital watermark.
    body = {
        'Input': {
            'Object': 'digital.mp4'
        },
        'Tag': 'ExtractDigitalWatermark',
        'Operation': {
            "ExtractDigitalWatermark": {
                "Type": "Text",
                "Version": "V1"
            },
        }
    }
    response = client.ci_create_media_jobs(
        Bucket=bucket_name,
        Jobs=body,
        Lst={},
        ContentType='application/xml'
    )
    print(response)
    return response
def ci_create_media_super_resolution_jobs():
    # Create a super-resolution upscale job (SD to HD).
    body = {
        'Input': {
            'Object': 'demo.mp4'
        },
        'Tag': 'SuperResolution',
        'Operation': {
            "SuperResolution": {
                "Resolution": "sdtohd",
                "EnableScaleUp": "true"
            },
            'TranscodeTemplateId': 't04e1ab86554984f1aa17c062fbf6c007c',
            'Output': {
                'Bucket': bucket_name,
                'Region': region,
                'Object': 'super.mp4'
            },
        },
    }
    response = client.ci_create_media_jobs(
        Bucket=bucket_name,
        Jobs=body,
        Lst={},
        ContentType='application/xml'
    )
    print(response)
    return response
def ci_create_media_video_tag_jobs():
    # Create a video-tagging (content recognition) job.
    body = {
        'Input': {
            'Object': 'demo.mp4'
        },
        'Tag': 'VideoTag',
        'Operation': {
            "VideoTag": {
                "Scenario": "Stream"
            },
        },
    }
    response = client.ci_create_media_jobs(
        Bucket=bucket_name,
        Jobs=body,
        Lst={},
        ContentType='application/xml'
    )
    print(response)
    return response
def ci_create_media_segment_jobs():
    # Create a remux/segmentation job (5-second mp4 segments).
    body = {
        'Input': {
            'Object': 'demo.mp4'
        },
        'Tag': 'Segment',
        'Operation': {
            "Segment": {
                "Format": "mp4",
                "Duration": "5",
            },
            'Output': {
                'Bucket': bucket_name,
                'Region': region,
                # ${Number} is substituted with the segment sequence number.
                'Object': 'segment-${Number}.mp4'
            },
        },
    }
    response = client.ci_create_media_jobs(
        Bucket=bucket_name,
        Jobs=body,
        Lst={},
        ContentType='application/xml'
    )
    print(response)
    return response
def ci_create_multi_jobs():
    # Create one request carrying several operations (segment + SDRtoHDR)
    # on the same input object.
    body = {
        'Input': {
            'Object': '117374C.mp4'
        },
        'Operation': [
            {
                'Tag': 'Segment',
                "Segment": {
                    "Format": "mp4",
                    "Duration": "50",
                },
                'Output': {
                    'Bucket': bucket_name,
                    'Region': region,
                    'Object': 'multi-segment-${Number}.mp4'
                },
            },
            {
                'Tag': 'SDRtoHDR',
                "SDRtoHDR": {
                    "HdrMode": "HLG",
                },
                'Output': {
                    'Bucket': bucket_name,
                    'Region': region,
                    'Object': 'multi-sdr2hdr.mp4'
                },
                'TranscodeTemplateId': 't04e1ab86554984f1aa17c062fbf6c007c'
            }
        ],
    }
    # Operation is list-valued and needs explicit open/close markers.
    lst = ['<Operation>', '</Operation>']
    response = client.ci_create_media_jobs(
        Bucket=bucket_name,
        Jobs=body,
        Lst=lst,
        ContentType='application/xml'
    )
    print(response)
    return response
def ci_create_get_media_info_jobs():
    # Create an asynchronous media-info retrieval job.
    body = {
        'Input': {
            'Object': 'demo.mp4'
        },
        'Tag': 'MediaInfo',
    }
    response = client.ci_create_media_jobs(
        Bucket=bucket_name,
        Jobs=body,
        Lst={},
        ContentType='application/xml'
    )
    print(response)
    return response
def ci_create_media_pic_jobs():
    # Create an asynchronous picture-processing job (rotate 90 degrees).
    body = {
        'Input': {
            'Object': '1.png'
        },
        'Tag': 'PicProcess',
        'Operation': {
            "PicProcess": {
                "IsPicInfo": "true",
                "ProcessRule": "imageMogr2/rotate/90",
            },
            'Output': {
                'Bucket': bucket_name,
                'Region': region,
                'Object': 'pic-process-result.png'
            },
        }
    }
    response = client.ci_create_media_pic_jobs(
        Bucket=bucket_name,
        Jobs=body,
        Lst={},
        ContentType='application/xml'
    )
    print(response)
    return response
def ci_list_media_transcode_jobs():
    """List media jobs filtered by tag, creation-time window and state."""
    resp = client.ci_list_media_jobs(Bucket=bucket_name,
                                     Tag='DigitalWatermark',
                                     ContentType='application/xml',
                                     StartCreationTime='2022-05-27T00:00:00+0800',
                                     EndCreationTime='2022-05-31T00:00:00+0800',
                                     States='Success')
    print(resp)
    return resp
def ci_get_media_jobs():
    """Fetch the details of one or more media jobs by their IDs."""
    resp = client.ci_get_media_jobs(Bucket=bucket_name,
                                    JobIDs='jc46435e40bcc11ed83d6e19dd89b02cc',
                                    ContentType='application/xml')
    print(resp)
    return resp
def ci_list_media_pic_jobs():
    """List picture-processing jobs filtered by tag, time window and state."""
    resp = client.ci_list_media_pic_jobs(Bucket=bucket_name,
                                         Tag='PicProcess',
                                         ContentType='application/xml',
                                         StartCreationTime='2022-05-30T23:30:00+0800',
                                         EndCreationTime='2022-05-31T01:00:00+0800',
                                         States='Success')
    print(resp)
    return resp
def ci_get_media_pic_jobs():
    """Fetch the details of one or more picture-processing jobs by ID."""
    resp = client.ci_get_media_pic_jobs(Bucket=bucket_name,
                                        JobIDs='c01742xxxxxxxxxxxxxxxxxx7438e39',
                                        ContentType='application/xml')
    print(resp)
    return resp
def get_media_info():
    """Print the media metadata of demo.mp4 (synchronous request)."""
    resp = client.get_media_info(Bucket=bucket_name, Key='demo.mp4')
    print(resp)
def get_snapshot():
    """Take a synchronous video snapshot and stream it to a local file."""
    resp = client.get_snapshot(Bucket=bucket_name,
                               Key='demo.mp4',
                               Time='1.5',
                               Width='480',
                               Format='png')
    print(resp)
    resp['Body'].get_stream_to_file('snapshot.jpg')
def get_pm3u8():
    """Get a signed playlist for private m3u8/ts resources and save it locally."""
    resp = client.get_pm3u8(Bucket=bucket_name,
                            Key='demo.m3u8',
                            Expires='3600')
    print(resp)
    resp['Body'].get_stream_to_file('pm3u8.m3u8')
def ci_trigger_workflow():
    """Trigger a workflow run on a single object."""
    resp = client.ci_trigger_workflow(Bucket=bucket_name,
                                      WorkflowId='w1b4ffd6900a343c3a2fe5b92b1fb7ff6',
                                      Key='test.mp4')
    print(resp)
    return resp
def ci_get_workflowexecution():
    """Fetch a single workflow run by its run ID."""
    resp = client.ci_get_workflowexecution(Bucket=bucket_name,
                                           RunId='id1f94868688111eca793525400ca1839')
    print(resp)
    return resp
def ci_list_workflowexecution():
    """List the runs of one workflow."""
    resp = client.ci_list_workflowexecution(Bucket=bucket_name,
                                            WorkflowId='w1b4ffd6900a343c3a2fe5b92b1fb7ff6')
    print(resp)
    return resp
def ci_create_quality_estimate_jobs():
    # Create a video quality-scoring job.
    body = {
        'Input': {
            'Object': 'gaobai.mp4'
        },
        'Tag': 'QualityEstimate',
        'Operation': {
            # Optional caller data echoed back in the callback/result.
            "UserData": "This is my data",
        },
        # Optional callback URL.
        'CallBack': 'http://callback.demo.com',
        # Optional callback format.
        'CallBackFormat': 'JSON'
    }
    response = client.ci_create_media_jobs(
        Bucket=bucket_name,
        Jobs=body,
        Lst={},
        ContentType='application/xml'
    )
    print(response)
    return response
def ci_create_segment_video_body_jobs():
    """Create a video body-segmentation (matting) job."""
    body = {
        # Source object to process
        'Input': {
            # Input object key
            'Object': 'gaobai.mp4'
        },
        # Job type, fixed value SegmentVideoBody
        'Tag': 'SegmentVideoBody',
        # Processing rules
        'Operation': {
            # Video matting configuration
            'SegmentVideoBody': {
                # Output mode; currently supported:
                # Mask (alpha-channel result),
                # Foreground (foreground video),
                # Combination (foreground composited onto a custom background)
                'Mode': 'Mask',
                # Optional: segmentation type - HumanSeg (default), GreenScreenSeg, SolidColorSeg
                # 'SegmentType': 'GreenScreenSeg',
                # Optional (Mode=Foreground): background blue component, 0-255, default 0
                # 'BackgroundBlue': '255',
                # Optional (Mode=Foreground): background red component, 0-255, default 0
                # 'BackgroundRed': '255',
                # Optional (Mode=Foreground): background green component, 0-255, default 0
                # 'BackgroundGreen': '255',
                # Optional (required when Mode=Combination): background file URL; must be
                # in the same bucket as the input object
                # 'BackgroundLogoUrl': 'http://testpic-1253960454.cos.ap-chongqing.myqcloud.com'
                # Optional: threshold to tune the alpha-channel edge, 0-255, default 0
                # 'BinaryThreshold': '200',
                # Optional (SegmentType=SolidColorSeg): background red component, 0-255, default 0
                # 'RemoveRed': '200',
                # Optional (SegmentType=SolidColorSeg): background green component, 0-255, default 0
                # 'RemoveGreen': '200',
                # Optional (SegmentType=SolidColorSeg): background blue component, 0-255, default 0
                # 'RemoveBlue': '200'
            },
            # Output configuration
            'Output': {
                # Destination bucket
                'Bucket': bucket_name,
                # Destination region
                'Region': region,
                # Destination object key
                'Object': 'result.mp4'
            },
            # Optional: user data passed through to the callback
            "UserData": "This is my data",
        },
        # Optional callback URL
        # 'CallBack': 'http://callback.demo.com',
        # Optional callback format, JSON or XML
        # 'CallBackFormat': 'JSON'
    }
    response = client.ci_create_media_jobs(
        Bucket=bucket_name,
        Jobs=body,
        Lst={},
        ContentType='application/xml'
    )
    print(response)
    return response
def ci_create_and_get_live_recognition_jobs():
    """Create a live-stream recognition (VideoTargetRec) job and poll until it finishes."""
    body = {
        # Live stream to process
        'Input': {
            # Pull URL of the live stream
            'Url': 'http://demo.liveplay.com/demo.m3u8',
            # Input type, fixed to LiveStream for live streams
            'SourceType': 'LiveStream'
        },
        # Job type, fixed value VideoTargetRec
        'Tag': 'VideoTargetRec',
        # Processing rules
        'Operation': {
            # Recognition configuration
            'VideoTargetRec': {
                # Required for live-stream recognition; must be 'true'
                'CarPlate': 'true',
                # Snapshot interval in seconds; optional, default 1, range [1, 300]
                'SnapshotTimeInterval': '1',
            },
            # Output: persist the live stream to COS as HLS with 3s ts segments
            'Output': {
                # Destination bucket
                'Bucket': bucket_name,
                # Destination region
                'Region': region,
                # Destination object key
                'Object': 'result.m3u8'
            },
            # Optional: user data passed through to the callback
            "UserData": "This is my data",
        },
        # Optional callback URL
        'CallBack': 'https://www.callback.com',
        # Optional callback format, JSON or XML
        # 'CallBackFormat': 'JSON'
    }
    response = client.ci_create_media_jobs(
        Bucket=bucket_name,
        Jobs=body,
        Lst={},
        ContentType='application/xml'
    )
    print(response)
    print("create job success")

    # Poll the job every 5 seconds, printing recognition results as they
    # appear, until the job reaches a terminal state.
    job_id = response['JobsDetail'][0]['JobId']
    while True:
        time.sleep(5)
        response = client.ci_get_media_jobs(
            Bucket=bucket_name,
            JobIDs=job_id,
            ContentType='application/xml'
        )
        if 'VideoTargetRecResult' in response['JobsDetail'][0]["Operation"]:
            if 'CarPlateRecognition' in response['JobsDetail'][0]["Operation"][
                "VideoTargetRecResult"] and response['JobsDetail'][0]["Operation"]["VideoTargetRecResult"][
                "CarPlateRecognition"] is not None \
                    and \
                    response['JobsDetail'][0]["Operation"]["VideoTargetRecResult"][
                        "CarPlateRecognition"]['CarPlateInfo'] is not None:
                print("result:" + str(response['JobsDetail'][0]["Operation"]["VideoTargetRecResult"]["CarPlateRecognition"]))
            else:
                print("don't have result: " + str(response['JobsDetail'][0]["Operation"]["VideoTargetRecResult"]))

        state = response['JobsDetail'][0]['State']
        if state == 'Success' or state == 'Failed' or state == 'Cancel':
            print(response)
            break
def ci_cancel_jobs():
    """Cancel a media job by ID (the original comment wrongly said "transcode job detail")."""
    response = client.ci_cancel_jobs(
        Bucket=bucket_name,
        JobID='a65xxxxxxxxxxxxxxxx1f213dcd0151',
        ContentType='application/xml'
    )
    print(response)
    return response
def ci_create_workflow_image_inspect():
    """Create an abnormal-image-inspection workflow."""
    # Workflow configuration
    body = {
        # Fixed top-level workflow element
        'MediaWorkflow': {
            # Workflow name; allows Chinese, letters, digits, - and _,
            # up to 128 chars. Required.
            'Name': 'image-inspect',
            # Whether COS upload-event notification is enabled on creation.
            # Active / Paused. Optional, default Paused (disabled).
            'State': 'Active',
            # Workflow topology. Required.
            'Topology': {
                # Node dependency graph. Required.
                'Dependencies': {
                    # Start: entry node holding callback/prefix/suffix settings (exactly one)
                    # End: terminal node
                    # ImageInspectNode: abnormal-image inspection node
                    # This example encodes Start -> ImageInspectNode -> End
                    'Start': 'ImageInspectNode',
                    'ImageInspectNode': 'End',
                },
                # Per-node configuration. Required.
                'Nodes': {
                    # Entry node configuration
                    'Start': {
                        # Node type, fixed to Start. Required.
                        'Type': 'Start',
                        # Workflow input. Required.
                        'Input': {
                            # Object key prefix; only uploads matching the prefix
                            # trigger the workflow (here: keys starting with "test").
                            # Required.
                            'ObjectPrefix': 'test',
                            # Custom callback settings; when set, a callback is sent
                            # to the URL/TDMQ when the workflow or one of its tasks
                            # finishes. Optional.
                            'NotifyConfig': {
                                # Callback type: Url or TDMQ
                                'Type': 'Url',
                                # Callback URL, effective when Type is Url
                                'Url': 'http://www.callback.com',
                                # Callback events, comma separated
                                'Event': 'WorkflowFinish,TaskFinish',
                                # Callback format, XML or JSON; optional, default XML
                                'ResultFormat': '',
                                # TDMQ region (Type=TDMQ); supported regions:
                                # https://cloud.tencent.com/document/product/406/12667
                                'MqRegion': '',
                                # TDMQ mode (Type=TDMQ): Topic (subscription) or Queue
                                'MqMode': '',
                                # TDMQ topic name (Type=TDMQ)
                                'MqName': '',
                            },
                            # File-suffix filter restricting which files are processed.
                            # Optional.
                            'ExtFilter': {
                                # Enable suffix filtering, On/Off; optional, default Off
                                'State': '',
                                # Restrict to video suffixes, false/true; optional, default false
                                'Video': '',
                                # Restrict to audio suffixes, false/true; optional, default false
                                'Audio': '',
                                # Restrict to image suffixes, false/true; optional, default false
                                'Image': '',
                                # Enable ContentType restriction, false/true; optional, default false
                                'ContentType': '',
                                # Enable custom suffixes, false/true; optional, default false
                                'Custom': '',
                                # Custom suffixes (Custom=true), separated by /, at most 10
                                'CustomExts': 'jpg/png',
                                # Match all files, false/true; optional, default false
                                'AllFile': '',
                            }
                        }
                    },
                    # Abnormal-image inspection node configuration
                    'ImageInspectNode': {
                        # Node type, fixed to ImageInspect
                        'Type': 'ImageInspect',
                        # Node operations. Optional.
                        'Operation': {
                            # Inspection details
                            'ImageInspect': {
                                # Auto-handle images detected as abnormal,
                                # false/true; optional, default false
                                'AutoProcess': 'true',
                                # Action taken on abnormal images:
                                # BackupObject: move to abnormal_images_backup/ (auto-created)
                                # SwitchObjectToPrivate: make the image private
                                # DeleteObject: delete the image
                                # Optional, default BackupObject
                                'ProcessType': 'BackupObject'
                            }
                        }
                    },
                },
            },
        },
    }
    response = client.ci_create_workflow(
        Bucket=bucket_name,  # bucket name
        Body=body,  # workflow configuration
        ContentType='application/xml'
    )
    print(response)
    print("workflowId is: " + response['MediaWorkflow']['WorkflowId'])
    return response
def ci_update_workflow():
    """Update a workflow's configuration.

    A workflow's configuration can only be updated while its state is Paused,
    so pause the workflow before calling this.
    """
    # Workflow configuration
    body = {
        # Fixed top-level workflow element
        'MediaWorkflow': {
            # Workflow name; allows Chinese, letters, digits, - and _,
            # up to 128 chars. Required.
            'Name': 'image-inspect',
            # Whether COS upload-event notification is enabled.
            # Active / Paused. Optional, default Paused (disabled).
            'State': 'Active',
            # Workflow topology. Required.
            'Topology': {
                # Node dependency graph. Required.
                'Dependencies': {
                    # Start: entry node holding callback/prefix/suffix settings (exactly one)
                    # End: terminal node
                    # ImageInspectNode: abnormal-image inspection node
                    # This example encodes Start -> ImageInspectNode -> End
                    'Start': 'ImageInspectNode',
                    'ImageInspectNode': 'End',
                },
                # Per-node configuration. Required.
                'Nodes': {
                    # Entry node configuration
                    'Start': {
                        # Node type, fixed to Start. Required.
                        'Type': 'Start',
                        # Workflow input. Required.
                        'Input': {
                            # Object key prefix; only uploads matching the prefix
                            # trigger the workflow (here: keys starting with "test").
                            # Required.
                            'ObjectPrefix': 'test',
                            # Custom callback settings; when set, a callback is sent
                            # to the URL/TDMQ when the workflow or one of its tasks
                            # finishes. Optional.
                            'NotifyConfig': {
                                # Callback type: Url or TDMQ
                                'Type': 'Url',
                                # Callback URL, effective when Type is Url
                                'Url': 'http://www.callback.com',
                                # Callback events, comma separated
                                'Event': 'WorkflowFinish,TaskFinish',
                                # Callback format, XML or JSON; optional, default XML
                                'ResultFormat': '',
                                # TDMQ region (Type=TDMQ); supported regions:
                                # https://cloud.tencent.com/document/product/406/12667
                                'MqRegion': '',
                                # TDMQ mode (Type=TDMQ): Topic (subscription) or Queue
                                'MqMode': '',
                                # TDMQ topic name (Type=TDMQ)
                                'MqName': '',
                            },
                            # File-suffix filter restricting which files are processed.
                            # Optional.
                            'ExtFilter': {
                                # Enable suffix filtering, On/Off; optional, default Off
                                'State': 'On',
                                # Restrict to video suffixes, false/true; optional, default false
                                'Video': '',
                                # Restrict to audio suffixes, false/true; optional, default false
                                'Audio': '',
                                # Restrict to image suffixes, false/true; optional, default false
                                'Image': 'true',
                                # Enable ContentType restriction, false/true; optional, default false
                                'ContentType': '',
                                # Enable custom suffixes, false/true; optional, default false
                                'Custom': '',
                                # Custom suffixes (Custom=true), separated by /, at most 10
                                'CustomExts': 'jpg/png',
                                # Match all files, false/true; optional, default false
                                'AllFile': '',
                            }
                        }
                    },
                    # Abnormal-image inspection node configuration
                    'ImageInspectNode': {
                        # Node type, fixed to ImageInspect
                        'Type': 'ImageInspect',
                        # Node operations. Optional.
                        'Operation': {
                            # Inspection details
                            'ImageInspect': {
                                # Auto-handle images detected as abnormal,
                                # false/true; optional, default false
                                'AutoProcess': 'true',
                                # Action taken on abnormal images:
                                # BackupObject: move to abnormal_images_backup/ (auto-created)
                                # SwitchObjectToPrivate: make the image private
                                # DeleteObject: delete the image
                                # Optional, default BackupObject
                                'ProcessType': 'SwitchObjectToPrivate'
                            }
                        }
                    },
                },
            },
        },
    }
    response = client.ci_update_workflow(
        Bucket=bucket_name,  # bucket name
        WorkflowId='wd34ca394909xxxxxxxxxxxx4d',  # ID of the workflow to update
        Body=body,  # workflow configuration
        ContentType='application/xml'
    )
    print(response)
    print("workflowId is: " + response['MediaWorkflow']['WorkflowId'])
    return response
def ci_update_workflow_state():
    """Enable or pause a workflow."""
    response = client.ci_update_workflow_state(
        Bucket=bucket_name,  # bucket name
        WorkflowId='wd34ca3949090xxxxxxxxxx44d',  # ID of the workflow to update
        UpdateState='paused',  # target state: active (enable) / paused (disable)
        ContentType='application/xml'
    )
    print(response)
    return response
def ci_get_workflow():
    """Query workflow configuration details."""
    response = client.ci_get_workflow(
        Bucket=bucket_name,  # bucket name
        Ids='wd34ca394909xxxxxxxxxxxx4d',  # workflow IDs to query, comma separated
        Name='image-inspect',  # workflow name to query
        # PageNumber='6',  # pagination: page number
        # PageSize='3',  # pagination: page size
        ContentType='application/xml'
    )
    print(response)
    return response
def ci_delete_workflow():
    """Delete a specific workflow."""
    response = client.ci_delete_workflow(
        Bucket=bucket_name,  # bucket name
        WorkflowId='wd34ca39490904xxxxxxxxxx744d',  # ID of the workflow to delete
    )
    print(response)
    return response
def ci_create_image_inspect_jobs():
    """Create an abnormal-image inspection job."""
    body = {
        # Source object to process
        'Input': {
            # Input object key
            'Object': 'heichan.png'
        },
        # Job type, fixed value ImageInspect
        'Tag': 'ImageInspect',
        # Processing rules. Optional.
        'Operation': {
            # Inspection configuration. Optional.
            'ImageInspect': {
                # Auto-handle images detected as abnormal, false/true;
                # optional, default false
                'AutoProcess': 'true',
                # Action taken on abnormal images:
                # BackupObject: move to abnormal_images_backup/ (auto-created)
                # SwitchObjectToPrivate: make the image private
                # DeleteObject: delete the image
                # Optional, default BackupObject
                'ProcessType': 'SwitchObjectToPrivate'
            },
            # Optional: user data passed through to the callback
            "UserData": "This is my data",
        },
        # Optional callback URL
        # 'CallBack': 'http://callback.demo.com',
        # Optional callback format, JSON or XML
        # 'CallBackFormat': 'JSON'
    }
    response = client.ci_create_media_jobs(
        Bucket=bucket_name,
        Jobs=body,
        Lst={},
        ContentType='application/xml'
    )
    print(response)
    return response
def ci_create_image_inspect_batch_jobs():
    """Create a batch (inventory-trigger) job that runs abnormal-image inspection."""
    body = {
        # Batch job name; allows Chinese, letters, digits, - and _,
        # up to 128 chars. Required.
        'Name': 'image-inspect-auto-move-batch-process',
        # Batch job type: Job (standalone async job) or Workflow
        'Type': 'Job',
        # Objects to process. Required.
        'Input': {
            # 'UrlFile': 'https://ziroom-tech-1255976291.cos.ap-beijing.myqcloud.com/ci_ziroom-tech_scankey_url.txt',
            # Object key prefix
            'Prefix': '/',
        },
        # Processing rules. Required.
        'Operation': {
            # Job type to create; fixed to ImageInspect here. Required.
            'Tag': 'ImageInspect',
            # Job parameters. Required.
            'JobParam': {
                # Inspection configuration. Optional.
                'ImageInspect': {
                    # Auto-handle images detected as abnormal, false/true;
                    # optional, default false
                    'AutoProcess': 'true',
                    # Action taken on abnormal images:
                    # BackupObject: move to abnormal_images_backup/ (auto-created)
                    # SwitchObjectToPrivate: make the image private
                    # DeleteObject: delete the image
                    # Optional, default BackupObject
                    'ProcessType': 'BackupObject'
                },
            },
        },
    }
    response = client.ci_create_inventory_trigger_jobs(
        Bucket=bucket_name,
        JobBody=body,
        ContentType='application/xml'
    )
    print(response)
    return response
def ci_create_image_inspect_workflow_batch_jobs():
    """Create a batch (inventory-trigger) job that triggers an image-inspection workflow."""
    body = {
        # Batch job name; allows Chinese, letters, digits, - and _,
        # up to 128 chars. Required.
        'Name': 'image-inspect-auto-move-batch-process',
        # Batch job type: Job (standalone async job) or Workflow
        'Type': 'Workflow',
        # Objects to process. Required.
        'Input': {
            # 'UrlFile': 'https://ziroom-tech-1255976291.cos.ap-beijing.myqcloud.com/ci_ziroom-tech_scankey_url.txt',
            # Object key prefix
            'Prefix': '/',
        },
        # Processing rules. Required.
        'Operation': {
            # Only process objects whose last-modified time falls within this
            # range. Optional.
            'TimeInterval': {
                'Start': '2023-05-01T00:00:00+0800',
                'End': '2023-06-01T00:00:00+0800'
            },
            # Workflow IDs to trigger. Required.
            'WorkflowIds': 'w2504f47ad46exxxxxxxxxxxxxx',
        },
    }
    response = client.ci_create_inventory_trigger_jobs(
        Bucket=bucket_name,
        JobBody=body,
        ContentType='application/xml'
    )
    print(response)
    return response
def ci_list_inventory_trigger_jobs():
    """List batch (inventory-trigger) jobs."""
    response = client.ci_list_inventory_trigger_jobs(
        Bucket=bucket_name,  # bucket name
    )
    print(response)
    return response
def ci_get_inventory_trigger_jobs():
    """Query a specific batch (inventory-trigger) job by ID."""
    response = client.ci_get_inventory_trigger_jobs(
        Bucket=bucket_name,  # bucket name
        JobID='bb16331089f7c11ecb10252540019ee59',
    )
    print(response)
    return response
def ci_delete_inventory_trigger_jobs():
    """Delete a specific batch (inventory-trigger) job.

    The original trailing comment wrongly described the ID as a workflow ID.
    """
    response = client.ci_delete_inventory_trigger_jobs(
        Bucket=bucket_name,  # bucket name
        JobId='b97c37f492adf11xxxxxxxxxxxx',  # ID of the batch job to delete
    )
    print(response)
    return response
def ci_create_sound_hound_jobs():
    """Create a song-recognition (SoundHound) job."""
    body = {
        # Source object to process
        'Input': {
            # Input object key
            'Object': '1.mp3'
        },
        # Job type, fixed value SoundHound
        'Tag': 'SoundHound',
        # Processing rules. Optional.
        'Operation': {
            # Optional: user data passed through; printable ASCII, at most 1024 chars
            "UserData": "This is my data",
        },
        # Optional callback URL
        # 'CallBack': 'http://callback.demo.com',
        # Optional callback format, JSON or XML
        # 'CallBackFormat': 'JSON'
        # Optional callback type, Url or TDMQ (default Url); takes priority
        # over the queue's callback type
        # 'CallBackType': 'Url',
        # TDMQ callback settings; required when CallBackType is TDMQ
        # 'CallBackMqConfig': {
        #     # TDMQ region. Required. Supported: sh (Shanghai), bj (Beijing),
        #     # gz (Guangzhou), cd (Chengdu), hk (Hong Kong, China)
        #     'MqRegion': 'bj',
        #     # TDMQ mode. Required. Topic (subscription) or Queue
        #     'MqMode': 'Queue',
        #     # TDMQ topic name. Required.
        #     'MqName': 'queueName'
        # },
    }
    response = client.ci_create_media_jobs(
        Bucket=bucket_name,
        Jobs=body,
        ContentType='application/xml'
    )
    print(response)
    return response
def ci_create_noise_reduction_jobs():
    """Create an audio noise-reduction job."""
    body = {
        'Input': {
            'Object': 'demo.mp3'
        },
        'Tag': 'NoiseReduction',
        'Operation': {
            'Output': {
                'Bucket': bucket_name,
                'Region': region,
                'Object': 'noise_reduction_result.mp3',
            },
            # 'TemplateId': 't02db40900dc1c43ad9bdbd8acec6075c5'
        }
    }
    response = client.ci_create_media_jobs(
        Bucket=bucket_name,
        Jobs=body,
        Lst={},
        ContentType='application/xml'
    )
    print(response)
    return response
def ci_create_stream_extract_jobs():
    """Create a stream-extraction job (split each stream into its own object)."""
    body = {
        'Input': {
            'Object': 'demo.mp4'
        },
        'Tag': 'StreamExtract',
        'Operation': {
            'Output': {
                'Bucket': bucket_name,
                'Region': region,
                # One output object per extracted stream index
                'StreamExtract': [
                    {
                        'Index': '0',
                        'Object': 'stream-1.mp4'
                    },
                    {
                        'Index': '1',
                        'Object': 'stream-2.mp4'
                    }
                ]
            },
        }
    }
    response = client.ci_create_media_jobs(
        Bucket=bucket_name,
        Jobs=body,
        Lst={},
        ContentType='application/xml'
    )
    print(response)
    return response
def ci_create_tts_jobs():
    """Create a text-to-speech (Tts) job."""
    body = {
        'Tag': 'Tts',
        'Input': {
            'Object': 'demo.mp4'
        },
        'Operation': {
            'TtsTpl': {
                # Processing mode: Asyc (asynchronous) or Sync (synchronous).
                # When Asyc, codec only supports pcm. Default Asyc.
                'Mode': '',
                # Audio format: wav, mp3 or pcm.
                # Default wav (sync) / pcm (async).
                'Codec': '',
                # Voice timbre. Default ruxue.
                'VoiceType': '',
                # Volume, range [-10, 10], default 0
                'Volume': '',
                # Speed, range [50, 200], default 100
                'Speed': '',
            },
            'TemplateId': '',
            'TtsConfig': {
                'InputType': 'Text',
                'Input': '床前明月光,疑是地上霜',
            },
            'Output': {
                'Bucket': bucket_name,
                'Region': region,
                'Object': 'result.mp3'
            },
        }
    }
    response = client.ci_create_media_jobs(
        Bucket=bucket_name,
        Jobs=body,
        Lst={},
        ContentType='application/xml'
    )
    print(response)
    return response
def ci_create_translation_jobs():
    """Create a document translation job (en -> zh)."""
    body = {
        'Tag': 'Translation',
        'Input': {
            'Object': 'demo.txt',
            'Lang': 'en',
            'Type': 'txt',
            # 'BasicType': ''
        },
        'Operation': {
            'Translation': {
                'Lang': 'zh',
                'Type': 'txt',
            },
            'Output': {
                'Bucket': bucket_name,
                'Region': region,
                'Object': 'result.txt'
            },
        }
    }
    response = client.ci_create_media_jobs(
        Bucket=bucket_name,
        Jobs=body,
        Lst={},
        ContentType='application/xml'
    )
    print(response)
    return response
def ci_create_words_generalize_jobs():
    """Create a word-segmentation (WordsGeneralize) job."""
    body = {
        'Tag': 'WordsGeneralize',
        'Input': {
            'Object': 'demo.txt',
        },
        'Operation': {
            'WordsGeneralize': {
                'NerMethod': 'DL',
                'SegMethod': 'MIX',
            },
        }
    }
    response = client.ci_create_media_jobs(
        Bucket=bucket_name,
        Jobs=body,
        Lst={},
        ContentType='application/xml'
    )
    print(response)
    return response
def ci_get_presigned_download_url():
    """Build a pre-signed URL for synchronous transcoding download (/convert)."""
    param = {
        # COS object key to process (absolute path supported). Required.
        "object": "test1026.gif",
        # Output stream format; currently only mp4 is supported. Required.
        "format": "mp4",
        # Output width, range (0, 4096], default 0. Optional.
        # "width": "200",
        # Output height, range (0, 4096], default 0. Optional.
        # When width and height are both 0 the source size is used; if only
        # one of them is 0 it is derived from the source aspect ratio.
        # (The original comment misspelled the key as "heigth".)
        # "height": "200"
    }
    url = client.get_presigned_download_url(
        Bucket=bucket_name,  # bucket name
        Key="/convert",  # request uri, fixed to /convert for synchronous transcoding
        Expired=3600,  # signature expiry in seconds
        Params=param,  # processing parameters
        UseCiEndPoint=True,  # use the CI (data processing) request domain
    )
    if token is not None:
        url = url + "&x-cos-security-token=" + token
    print(url)
if __name__ == "__main__":
    # Each line below exercises one demo helper; uncomment the calls you want
    # to run. By default only the pre-signed download URL demo executes.
    # ci_get_media_queue()
    # ci_get_media_jobs()
    # ci_create_media_transcode_jobs()
    # get_media_info()
    # get_snapshot()
    # ci_trigger_workflow()
    # ci_list_workflowexecution()
    # ci_get_workflowexecution()
    # ci_get_media_bucket()
    # get_pm3u8()
    # ci_create_media_snapshot_jobs()
    # ci_create_media_animation_jobs()
    # ci_create_media_smart_cover_jobs()
    # ci_create_media_video_process_jobs()
    # ci_create_media_video_montage_jobs()
    # ci_create_media_voice_separate_jobs()
    # ci_create_media_sdr2hdr_jobs()
    # ci_create_media_super_resolution_jobs()
    # ci_create_media_concat_jobs()
    # ci_create_media_digital_watermark_jobs()
    # ci_create_media_extract_digital_watermark_jobs()
    # ci_create_media_video_tag_jobs()
    # ci_create_media_segment_jobs()
    # ci_create_multi_jobs()
    # ci_create_media_pic_jobs()
    # ci_get_media_pic_jobs()
    # ci_create_get_media_info_jobs()
    # ci_put_media_queue()
    # ci_create_media_transcode_with_watermark_jobs()
    # ci_create_media_transcode_with_digital_watermark_jobs()
    # ci_create_media_hls_transcode_jobs()
    # ci_list_media_transcode_jobs()
    # ci_list_media_pic_jobs()
    # ci_get_media_pic_queue()
    # ci_put_media_pic_queue()
    # ci_create_quality_estimate_jobs()
    # ci_create_segment_video_body_jobs()
    # ci_create_and_get_live_recognition_jobs()
    # ci_cancel_jobs()
    # ci_create_workflow_image_inspect()
    # ci_get_workflow()
    # ci_update_workflow()
    # ci_update_workflow_state()
    # ci_delete_workflow()
    # ci_create_image_inspect_jobs()
    # ci_create_sound_hound_jobs()
    # ci_list_inventory_trigger_jobs()
    # ci_get_pic_bucket()
    # ci_get_inventory_trigger_jobs()
    # ci_get_ai_bucket()
    # ci_get_ai_queue()
    # ci_put_ai_queue()
    # ci_create_noise_reduction_jobs()
    # ci_create_stream_extract_jobs()
    # ci_create_tts_jobs()
    # ci_create_translation_jobs()
    # ci_create_words_generalize_jobs()
    ci_get_presigned_download_url()
| tencentyun/cos-python-sdk-v5 | demo/ci_media.py | ci_media.py | py | 66,969 | python | zh | code | 173 | github-code | 13 |
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.utils.rnn as rnn_utils
from pyhealth.datasets import SampleEHRDataset
from pyhealth.models import BaseModel
# VALID_OPERATION_LEVEL = ["visit", "event"]
class RETAINLayer(nn.Module):
    """RETAIN layer.

    Paper: Edward Choi et al. RETAIN: An Interpretable Predictive Model for
    Healthcare using Reverse Time Attention Mechanism. NIPS 2016.

    This layer is used in the RETAIN model, but it can also be used as a
    standalone layer. Two GRUs run over the time-reversed input: one yields a
    scalar (visit-level) attention weight per step (alpha), the other a
    per-feature attention vector (beta); the attended inputs are summed over
    time into a single context vector.

    Args:
        feature_size: the hidden feature size.
        dropout: dropout rate. Default is 0.5.

    Examples:
        >>> from pyhealth.models import RETAINLayer
        >>> input = torch.randn(3, 128, 64)  # [batch size, sequence len, feature_size]
        >>> layer = RETAINLayer(64)
        >>> c = layer(input)
        >>> c.shape
        torch.Size([3, 64])
    """

    def __init__(
        self,
        feature_size: int,
        dropout: float = 0.5,
    ):
        super(RETAINLayer, self).__init__()
        self.feature_size = feature_size
        self.dropout = dropout
        self.dropout_layer = nn.Dropout(p=self.dropout)

        self.alpha_gru = nn.GRU(feature_size, feature_size, batch_first=True)
        self.beta_gru = nn.GRU(feature_size, feature_size, batch_first=True)

        self.alpha_li = nn.Linear(feature_size, 1)
        self.beta_li = nn.Linear(feature_size, feature_size)

    @staticmethod
    def reverse_x(input, lengths):
        """Reverses each sequence along time, honoring its valid length."""
        reversed_input = input.new(input.size())
        for i, length in enumerate(lengths):
            reversed_input[i, :length] = input[i, :length].flip(dims=[0])
        return reversed_input

    def compute_alpha(self, rx, lengths):
        """Computes the scalar (visit-level) attention weights."""
        rx = rnn_utils.pack_padded_sequence(
            rx, lengths, batch_first=True, enforce_sorted=False
        )
        g, _ = self.alpha_gru(rx)
        g, _ = rnn_utils.pad_packed_sequence(g, batch_first=True)
        # softmax over the time dimension
        attn_alpha = torch.softmax(self.alpha_li(g), dim=1)
        return attn_alpha

    def compute_beta(self, rx, lengths):
        """Computes the per-feature attention weights."""
        rx = rnn_utils.pack_padded_sequence(
            rx, lengths, batch_first=True, enforce_sorted=False
        )
        h, _ = self.beta_gru(rx)
        h, _ = rnn_utils.pad_packed_sequence(h, batch_first=True)
        attn_beta = torch.tanh(self.beta_li(h))
        return attn_beta

    def forward(
        self,
        x: torch.Tensor,
        mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Forward propagation.

        Args:
            x: a tensor of shape [batch size, sequence len, feature_size].
            mask: an optional tensor of shape [batch size, sequence len], where
                1 indicates valid and 0 indicates invalid.

        Returns:
            c: a tensor of shape [batch size, feature_size] representing the
                context vector.

        Note:
            The original annotation declared a ``Tuple[torch.tensor, torch.tensor]``
            return, but a single tensor is returned; the annotation is fixed here
            (``torch.Tensor`` is the type, ``torch.tensor`` is the factory).
        """
        # rnn will only apply dropout between layers, so apply it to the input here
        x = self.dropout_layer(x)
        batch_size = x.size(0)
        if mask is None:
            # no mask: every sequence uses its full padded length
            lengths = torch.full(
                size=(batch_size,), fill_value=x.size(1), dtype=torch.int64
            )
        else:
            lengths = torch.sum(mask.int(), dim=-1).cpu()
        rx = self.reverse_x(x, lengths)
        attn_alpha = self.compute_alpha(rx, lengths)
        attn_beta = self.compute_beta(rx, lengths)
        c = attn_alpha * attn_beta * x  # (patient, sequence len, feature_size)
        c = torch.sum(c, dim=1)  # (patient, feature_size)
        return c
class RETAIN(BaseModel):
    """RETAIN model.

    Paper: Edward Choi et al. RETAIN: An Interpretable Predictive Model for
    Healthcare using Reverse Time Attention Mechanism. NIPS 2016.

    Note:
        We use separate Retain layers for different feature_keys.
        Currently, we automatically support different input formats:
            - code based input (need to use the embedding table later)
            - float/int based value input
        We follow the current convention for the Retain model:
            - case 1. [code1, code2, code3, ...]
                - we will assume the code follows the order; our model will encode
                each code into a vector and apply Retain on the code level
            - case 2. [[code1, code2]] or [[code1, code2], [code3, code4, code5], ...]
                - we will assume the inner bracket follows the order; our model first
                use the embedding table to encode each code into a vector and then use
                average/mean pooling to get one vector for one inner bracket; then use
                Retain one the braket level
            - case 3. [[1.5, 2.0, 0.0]] or [[1.5, 2.0, 0.0], [8, 1.2, 4.5], ...]
                - this case only makes sense when each inner bracket has the same length;
                we assume each dimension has the same meaning; we run Retain directly
                on the inner bracket level, similar to case 1 after embedding table
            - case 4. [[[1.5, 2.0, 0.0]]] or [[[1.5, 2.0, 0.0], [8, 1.2, 4.5]], ...]
                - this case only makes sense when each inner bracket has the same length;
                we assume each dimension has the same meaning; we run Retain directly
                on the inner bracket level, similar to case 2 after embedding table

    Args:
        dataset: the dataset to train the model. It is used to query certain
            information such as the set of all tokens.
        feature_keys:  list of keys in samples to use as features,
            e.g. ["conditions", "procedures"].
        label_key: key in samples to use as label (e.g., "drugs").
        mode: one of "binary", "multiclass", or "multilabel".
        embedding_dim: the embedding dimension. Default is 128.
        **kwargs: other parameters for the RETAIN layer.

    Examples:
        >>> from pyhealth.datasets import SampleEHRDataset
        >>> samples = [
        ...     {
        ...         "patient_id": "patient-0",
        ...         "visit_id": "visit-0",
        ...         "list_codes": ["505800458", "50580045810", "50580045811"],  # NDC
        ...         "list_vectors": [[1.0, 2.55, 3.4], [4.1, 5.5, 6.0]],
        ...         "list_list_codes": [["A05B", "A05C", "A06A"], ["A11D", "A11E"]],  # ATC-4
        ...         "list_list_vectors": [
        ...             [[1.8, 2.25, 3.41], [4.50, 5.9, 6.0]],
        ...             [[7.7, 8.5, 9.4]],
        ...         ],
        ...         "label": 1,
        ...     },
        ...     {
        ...         "patient_id": "patient-0",
        ...         "visit_id": "visit-1",
        ...         "list_codes": [
        ...             "55154191800",
        ...             "551541928",
        ...             "55154192800",
        ...             "705182798",
        ...             "70518279800",
        ...         ],
        ...         "list_vectors": [[1.4, 3.2, 3.5], [4.1, 5.9, 1.7], [4.5, 5.9, 1.7]],
        ...         "list_list_codes": [["A04A", "B035", "C129"]],
        ...         "list_list_vectors": [
        ...             [[1.0, 2.8, 3.3], [4.9, 5.0, 6.6], [7.7, 8.4, 1.3], [7.7, 8.4, 1.3]],
        ...         ],
        ...         "label": 0,
        ...     },
        ... ]
        >>> dataset = SampleEHRDataset(samples=samples, dataset_name="test")
        >>>
        >>> from pyhealth.models import RETAIN
        >>> model = RETAIN(
        ...         dataset=dataset,
        ...         feature_keys=[
        ...             "list_codes",
        ...             "list_vectors",
        ...             "list_list_codes",
        ...             "list_list_vectors",
        ...         ],
        ...         label_key="label",
        ...         mode="binary",
        ...     )
        >>>
        >>> from pyhealth.datasets import get_dataloader
        >>> train_loader = get_dataloader(dataset, batch_size=2, shuffle=True)
        >>> data_batch = next(iter(train_loader))
        >>>
        >>> ret = model(**data_batch)
        >>> print(ret)
        {
            'loss': tensor(0.5640, grad_fn=<BinaryCrossEntropyWithLogitsBackward0>),
            'y_prob': tensor([[0.5325],
                            [0.3922]], grad_fn=<SigmoidBackward0>),
            'y_true': tensor([[1.],
                            [0.]]),
            'logit': tensor([[ 0.1303],
                            [-0.4382]], grad_fn=<AddmmBackward0>)
        }
        >>>

    """

    def __init__(
        self,
        dataset: SampleEHRDataset,
        feature_keys: List[str],
        label_key: str,
        mode: str,
        pretrained_emb: str = None,
        embedding_dim: int = 128,
        **kwargs,
    ):
        super(RETAIN, self).__init__(
            dataset=dataset,
            feature_keys=feature_keys,
            label_key=label_key,
            mode=mode,
            pretrained_emb=pretrained_emb,
        )
        self.embedding_dim = embedding_dim

        # validate kwargs for RETAIN layer
        if "feature_size" in kwargs:
            raise ValueError("feature_size is determined by embedding_dim")

        # the key of self.feat_tokenizers only contains the code based inputs
        self.feat_tokenizers = {}
        self.label_tokenizer = self.get_label_tokenizer()
        # the key of self.embeddings only contains the code based inputs
        self.embeddings = nn.ModuleDict()
        # the key of self.linear_layers only contains the float/int based inputs
        self.linear_layers = nn.ModuleDict()

        # add feature RETAIN layers
        for feature_key in self.feature_keys:
            input_info = self.dataset.input_info[feature_key]
            # sanity check on supported input formats (see class docstring)
            if input_info["type"] not in [str, float, int]:
                raise ValueError(
                    "RETAIN only supports str code, float and int as input types"
                )
            elif (input_info["type"] == str) and (input_info["dim"] not in [2, 3]):
                raise ValueError(
                    "RETAIN only supports 2-dim or 3-dim str code as input types"
                )
            elif (input_info["type"] in [float, int]) and (
                input_info["dim"] not in [2, 3]
            ):
                raise ValueError(
                    "RETAIN only supports 2-dim or 3-dim float and int as input types"
                )
            # for code based input, we need Type
            # for float/int based input, we need Type, input_dim
            self.add_feature_transform_layer(feature_key, input_info)

        self.retain = nn.ModuleDict()
        for feature_key in feature_keys:
            self.retain[feature_key] = RETAINLayer(feature_size=embedding_dim, **kwargs)

        output_size = self.get_output_size(self.label_tokenizer)
        self.fc = nn.Linear(len(self.feature_keys) * self.embedding_dim, output_size)

    def forward(self, **kwargs) -> Dict[str, torch.Tensor]:
        """Forward propagation.

        The label `kwargs[self.label_key]` is a list of labels for each patient.

        Args:
            **kwargs: keyword arguments for the model. The keys must contain
                all the feature keys and the label key.

        Returns:
            A dictionary with the following keys:
                loss: a scalar tensor representing the loss.
                y_prob: a tensor representing the predicted probabilities.
                y_true: a tensor representing the true labels.
        """
        patient_emb = []
        for feature_key in self.feature_keys:
            input_info = self.dataset.input_info[feature_key]
            dim_, type_ = input_info["dim"], input_info["type"]

            # for case 1: [code1, code2, code3, ...]
            if (dim_ == 2) and (type_ == str):
                x = self.feat_tokenizers[feature_key].batch_encode_2d(
                    kwargs[feature_key]
                )
                # (patient, event)
                x = torch.tensor(x, dtype=torch.long, device=self.device)
                # (patient, event, embedding_dim)
                x = self.embeddings[feature_key](x)
                # (patient, event)
                mask = torch.sum(x, dim=2) != 0

            # for case 2: [[code1, code2], [code3, ...], ...]
            elif (dim_ == 3) and (type_ == str):
                x = self.feat_tokenizers[feature_key].batch_encode_3d(
                    kwargs[feature_key]
                )
                # (patient, visit, event)
                x = torch.tensor(x, dtype=torch.long, device=self.device)
                # (patient, visit, event, embedding_dim)
                x = self.embeddings[feature_key](x)
                # (patient, visit, embedding_dim)
                x = torch.sum(x, dim=2)
                # (patient, visit)
                mask = torch.sum(x, dim=2) != 0

            # for case 3: [[1.5, 2.0, 0.0], ...]
            elif (dim_ == 2) and (type_ in [float, int]):
                x, mask = self.padding2d(kwargs[feature_key])
                # (patient, event, values)
                x = torch.tensor(x, dtype=torch.float, device=self.device)
                # (patient, event, embedding_dim)
                x = self.linear_layers[feature_key](x)
                # (patient, event)
                mask = mask.bool().to(self.device)

            # for case 4: [[[1.5, 2.0, 0.0], [1.8, 2.4, 6.0]], ...]
            elif (dim_ == 3) and (type_ in [float, int]):
                x, mask = self.padding3d(kwargs[feature_key])
                # (patient, visit, event, values)
                x = torch.tensor(x, dtype=torch.float, device=self.device)
                # (patient, visit, values)
                x = torch.sum(x, dim=2)
                # (patient, visit, embedding_dim)
                x = self.linear_layers[feature_key](x)
                # (patient, visit)  -- the original comment wrongly said (patient, event)
                mask = mask[:, :, 0]
                mask = mask.bool().to(self.device)

            else:
                raise NotImplementedError

            # transform x to (patient, event, embedding_dim)
            # NOTE(review): for float/int features this applies linear_layers a
            # second time; presumably pretrained embeddings are only used with
            # str-code features -- confirm upstream.
            if self.pretrained_emb is not None:
                x = self.linear_layers[feature_key](x)

            x = self.retain[feature_key](x, mask)
            patient_emb.append(x)

        patient_emb = torch.cat(patient_emb, dim=1)
        # (patient, label_size)
        logits = self.fc(patient_emb)
        # obtain y_true, loss, y_prob
        y_true = self.prepare_labels(kwargs[self.label_key], self.label_tokenizer)
        loss = self.get_loss_function()(logits, y_true)
        y_prob = self.prepare_y_prob(logits)
        results = {
            "loss": loss,
            "y_prob": y_prob,
            "y_true": y_true,
            "logit": logits,
        }
        if kwargs.get("embed", False):
            results["embed"] = patient_emb
        return results
if __name__ == "__main__":
    # Smoke test: build a two-sample toy dataset covering all four supported
    # input formats, run one batch through RETAIN, and backprop the loss.
    from pyhealth.datasets import SampleEHRDataset

    samples = [
        {
            "patient_id": "patient-0",
            "visit_id": "visit-0",
            # "single_vector": [1, 2, 3],
            "list_codes": ["505800458", "50580045810", "50580045811"],  # NDC
            "list_vectors": [[1.0, 2.55, 3.4], [4.1, 5.5, 6.0]],
            "list_list_codes": [["A05B", "A05C", "A06A"], ["A11D", "A11E"]],  # ATC-4
            "list_list_vectors": [
                [[1.8, 2.25, 3.41], [4.50, 5.9, 6.0]],
                [[7.7, 8.5, 9.4]],
            ],
            "label": 1,
        },
        {
            "patient_id": "patient-0",
            "visit_id": "visit-1",
            # "single_vector": [1, 5, 8],
            "list_codes": [
                "55154191800",
                "551541928",
                "55154192800",
                "705182798",
                "70518279800",
            ],
            "list_vectors": [[1.4, 3.2, 3.5], [4.1, 5.9, 1.7], [4.5, 5.9, 1.7]],
            "list_list_codes": [["A04A", "B035", "C129"]],
            "list_list_vectors": [
                [[1.0, 2.8, 3.3], [4.9, 5.0, 6.6], [7.7, 8.4, 1.3], [7.7, 8.4, 1.3]],
            ],
            "label": 0,
        },
    ]

    # dataset
    dataset = SampleEHRDataset(samples=samples, dataset_name="test")

    # data loader
    from pyhealth.datasets import get_dataloader

    train_loader = get_dataloader(dataset, batch_size=2, shuffle=True)

    # model
    model = RETAIN(
        dataset=dataset,
        feature_keys=[
            "list_codes",
            "list_vectors",
            "list_list_codes",
            # "list_list_vectors",
        ],
        label_key="label",
        mode="binary",
    )

    # data batch
    data_batch = next(iter(train_loader))

    # try the model
    ret = model(**data_batch)
    print(ret)

    # try loss backward
    ret["loss"].backward()
| sunlabuiuc/PyHealth | pyhealth/models/retain.py | retain.py | py | 17,019 | python | en | code | 778 | github-code | 13 |
16852688736 | # Class and Object Basics
'''
Python is an object oriented programming language.
Almost everything in Python is an object, with its properties and methods.
'''
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# class
'''
A Class is like an object constructor, or a "blueprint" for creating objects.
'''
class MyClass:
x=10
# class named MyClass, with a property named x
obj1 = MyClass() # creating object for class
print("class property:",obj1.x)
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# __init__() :
'''
__init__() is a Build-in function in python.
All class calls __init__(),which is always executed when the class is being initiated.
__init__() is assign defualt values to the object properties.
Note: The __init__() function is called automatically every time the class is being used to create a new object.
'''
print("\n __int__() basics:")

class Person:
    """Person record whose fields are assigned in __init__."""

    def __init__(self, name, age):
        # Tuple assignment sets both attributes in one statement.
        self.name, self.age = name, age

p1 = Person("aswanth", 12)
for attribute_value in (p1.name, p1.age):
    print(attribute_value)
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# __str__()
'''
defines how the object is converted to a string (used by print() and str()).
'''
print("\n __str__() basics:")

class Person2:
    """Person with a human-readable string form via __str__."""

    def __init__(self, name, age):
        self.name = name
        self.age = age

    def __str__(self):
        # NB: original format has no space after "age:" -- keep it exactly.
        return f"name: {self.name} age:{self.age}"

p1 = Person2("Ajay", 23)
print(str(p1))
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# Object Methods
print("\n Object Methods")

class Person2:
    """Redefines Person2 to demonstrate a plain instance method."""

    def __init__(self, name, age):
        self.name, self.age = name, age

    def myAge(self):
        # Prints the same text as before: "my age is : <age>"
        print("my age is : " + str(self.age))

p1 = Person2("aswanth", 12)
p1.myAge()
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# deleting object property
print("\n delete Object and object property.")

class Person3:
    """Person used to demonstrate `del` on attributes and on whole objects."""

    def __init__(self, name, age):
        self.name = name
        self.age = age

p3 = Person3("aswanht", 21)
# BUGFIX: the original deleted the object (`del p3`) BEFORE deleting
# `p3.age`, so `del p3.age` raised NameError and the script crashed.
# Delete the attribute first, while the object still exists.
del p3.age                       # remove one object property
print("has 'age' after delete:", hasattr(p3, "age"))   # False
del p3                           # now delete the object; the name p3 is gone
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# pass
class Person:
    # Intentionally empty: `pass` keeps the class definition syntactically valid.
    pass
# having an empty class definition like this, would raise an error without the pass statement
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
24237793306 | from API.LL_API import LL_API
import datetime
from UI.airplaneUI import AirplaneUI
from UI.crewUI import CrewUI
from UI.EditMenus.extra_crewmember_menu import AddExtraCrewmemberMenu
import datetime
class VoyageUI:
    """Console UI helpers for creating, displaying and editing voyages."""
    # Sentinel used by the data layer for an unassigned crew/plane slot.
    EMPTY = 'empty'
    # Character used to draw horizontal separator lines.
    SEPERATOR = '-'
    def getDateInput(self):
        '''Gets a date input from the user and returns a datetime object
        (midnight on the entered day).'''
        year_str = input('Year: ').strip()
        month_str = input('Month: ').strip()
        day_str = input('Day: ').strip()
        # check if date is valid
        # NOTE(review): assumes verifyDate returns validated ints (and handles
        # bad input itself) -- confirm in LL_API.
        year_int,month_int,day_int = LL_API().verifyDate(year_str,month_str,day_str)
        # turn date input into datetime object
        datetime_input = datetime.datetime(year_int,month_int,day_int,0,0,0)
        return datetime_input
def getDateWithTime(self):
'''Gets a date and time input from the user and returns a datetime object or a string
if the user wants to quit'''
while True:
print('Enter departure time\n')
year_str = input('Year: ').strip()
month_str = input('Month: ').strip()
day_str = input('Day: ').strip()
# Check if date is valid
year_int, month_int, day_int = LL_API().verifyDate(year_str, month_str, day_str)
hour_str = input('Hour: ').strip()
minutes_str = input('Minute: ').strip()
print()
# check if time is valid
hour_int, minutes_int = LL_API().verifyTime(hour_str, minutes_str)
# get date and time now
time_now = datetime.datetime.now()
year_now = time_now.year
month_now = time_now.month
day_now = time_now.day
hour_now = time_now.hour
minutes_now = time_now.minute
# check if date has passed
if year_now <= year_int:
# if it is the same year but input months are in the future
if year_now == year_int\
and month_now <= month_int \
and day_now <= day_int:
if day_now == day_int and month_now == month_int and year_int == year_now:
if hour_now <= hour_int:
if hour_now == hour_int:
if minutes_now <= minutes_int:
return datetime.datetime(year_int, month_int, day_int, hour_int, minutes_int, 0)
else:
print('\nDate has already passed!\n')
else:
return datetime.datetime(year_int, month_int, day_int, hour_int, minutes_int, 0)
else:
print('\nDate has already passed!\n')
else:
return datetime.datetime(year_int, month_int, day_int, hour_int, minutes_int, 0)
# if input is earlier this year
elif year_now == year_int\
and month_now > month_int:
print('\nDate has already passed!\n')
else:
# if year_int is bigger than year now
return datetime.datetime(year_int, month_int, day_int, hour_int, minutes_int, 0)
else:
print('\nDate has already passed!\n')
def seperateDatetimeString(self, datetime_str):
'''Seperates a datetime string and returns the split parts'''
# date time
return datetime_str[:10],datetime_str[-8:]
    def prettyprint(self,voyage,voyage_staffed,aircraft_ID,voyage_duration_hrs,\
        flight_no_out, flight_no_home, voyage_duration_min, voyage_state):
        '''Prints one voyage as a boxed summary: destination, status,
        flight numbers, seats sold (when a plane is assigned), duration,
        staffing state and voyage ID.'''
        print('\n'+'-'*50)
        # destination header: name, airport, date and HH:MM of departure
        print('To {}, {} on {} at {}'.format(voyage.getDestination().getDestinationName(),\
            voyage.getDestination().getDestinationAirport(),\
            voyage.getDepartureTime()[:10] ,voyage.getDepartureTime()[-8:-3]))
        print('-'*50)
        # status
        print('\t Status: {}'.format(voyage_state))
        print('\t Flight numbers: {} - {}'.format(flight_no_out, flight_no_home))
        # if aircraft is assigned ('EMPTY'/'empty' is the unassigned sentinel)
        if aircraft_ID != 'EMPTY' and aircraft_ID != 'empty':
            airplane = LL_API().getAirplanebyInsignia(aircraft_ID)
            aircraft_type = airplane.get_planeTypeID()
            total_seats = airplane.get_planeCapacity()
            sold_seats_out,sold_seats_home = voyage.getSeatsSold()
            print('\t Seats sold on flight {}: {}/{}'.format(flight_no_out,\
                sold_seats_out,total_seats))
            print('\t Seats sold on flight {}: {}/{}'.format(flight_no_home,\
                sold_seats_home,total_seats))
            print('\t Aircraft: {}, type {}'.format(aircraft_ID,aircraft_type))
        elif aircraft_ID == 'EMPTY' or aircraft_ID == 'empty':
            print('\t Aircraft: No aircraft assigned to voyage')
        print('\t Total time: {} hrs {} min'.format(voyage_duration_hrs,\
            voyage_duration_min))
        print('\t Status on staff: {}'.format(voyage_staffed))
        print('\t Voyage ID: {}'.format(voyage.getVoyageID()))
        print('-'*50)
    def checkVoyagesInRange(self):
        '''Checks if the voyages in a date range are completed.
        Returns a voyage chosen by the user when at least one voyage is not
        completed; returns None if they're all completed or if there are no
        voyages on the dates.'''
        # Gets a tuple with a list of all voyages
        # in a date range and a list of all completed
        # voyages in a date range
        voyages_tuple = self.showAllVoyagesInRange()
        if voyages_tuple:
            voyages_on_date, completed_voyages_in_range = voyages_tuple
            # All voyages are completed if the lists are equally long
            if len(completed_voyages_in_range) < len(voyages_on_date):
                voyage = self.checkCompleted()
                return voyage
            else:
                print('-'*45+'\n')
                print('{:^45}'.format('All voyages in range are completed'))
                print('{:^45}'.format('only possible to change seats'))
                print('\n'+'-'*45)
                return None
        else:
            print('-'*45+'\n')
            print('No voyages on these dates.')
            print('\n'+'-'*45)
            return None
    def checkCompleted(self):
        '''Gets a voyage id as an input and checks if the voyage is completed.
        Loops until a valid ID is entered; returns the voyage instance
        (after printing a notice when it is already completed).'''
        while True:
            voyage_id = input("Enter voyage ID to select: ").strip()
            # class instance of voyage with inputted ID (None when not found)
            voyage = LL_API().getOneVoyage(voyage_id)
            if voyage:
                # get status of voyage (completed, not departed, in air)
                voyage_state = LL_API().get_status_of_voyage(voyage)
                if voyage_state == 'Completed':
                    print('-'*45+'\n')
                    print('{:^45}'.format('Voyage is completed'))
                    print('\n'+'-'*45)
                    return voyage
                else:
                    return voyage
            else:
                print('\nNo voyage with this ID\n')
    def changeSoldSeats(self,voyage,a_str):
        '''Changes sold seats on a given flight route in a voyage.
        `a_str` selects which leg of the voyage is updated; 'm' aborts.'''
        while True:
            print('\nEnter number of seats sold')
            print('m - to go back\n')
            new_seats_str = input('Enter your input: ').strip()
            if new_seats_str == 'm':
                return
            elif new_seats_str.isdigit():
                LL_API().changeSoldSeats(voyage,a_str,new_seats_str)
                print('-'*45+'\n')
                print('{:^45}'.format('Number of sold seats successfully changed!'))
                print('\n'+'-'*45)
                return
            else:
                print('\nInvalid input!')
    def checkRank(self, crew_member,voyage,airplane_type_on_voyage):
        '''Tries to add the crewmember to the voyage; an exception is raised
        (and shown to the user) if the crewmember does not have the right
        rank, role or licence for the voyage position. On success a
        confirmation banner with the assigned position is printed.'''
        success = True
        try:
            self.addCrewMember(crew_member,voyage,airplane_type_on_voyage)
        # exception if pilot does not have License for assigned airplane,
        # or the slot is already filled / the member works that day
        except Exception as e:
            success = False
            print(e)
            input('Press any key to try continue editing voyage ').strip()
        if success:
            position = CrewUI().checkRank(crew_member)
            # a non-captain pilot is presented as "Copilot"
            if position == 'Pilot':
                position = 'Copilot'
            print('\n'+'~'*45)
            a_str = '{} - {}, {},'.format(crew_member.getName(),crew_member.getRole(),position)
            b_str = 'was added to voyage {}'.format(voyage.getVoyageID())
            print('{:^45}'.format(a_str))
            print('{:^45}'.format(b_str))
            print('~'*45+'\n')
def addCrewMember(self, crew_member, voyage,airplane_type_on_voyage):
'''Takes in crew member instance, voyage instance and airplane type.
Adds crew member to voyage'''
# get role of crew member, where role is pilot or cabincrew
role = crew_member.getRole()
if role == 'Pilot':
# if crew member is captain
if crew_member.getBool():
# if there is no captain assigned to voyage
if voyage.getCaptain() == 'empty':
# check if crew member is working on another voyage that day
if AddExtraCrewmemberMenu().checkIfCrewmemberWorking(voyage,crew_member):
a_str = '\nCaptain is assigned to another voyage on the same date\n\
Please chose another captain\n'
raise Exception(a_str)
voyage.setCaptain(crew_member,airplane_type_on_voyage)
# if there is already a captain
else:
raise Exception('A captain is already assigned to the voyage\n')
# if crewmember is copilot
else:
# if there is no assigned copilot
if voyage.getCopilot() == 'empty':
# check if pilot is working on the same day
if AddExtraCrewmemberMenu().checkIfCrewmemberWorking(voyage,crew_member):
a_str = 'pilot is assigned to another voyage on the same date\n\
Please chose another pilot\n'
raise Exception(a_str)
voyage.setCopilot(crew_member,airplane_type_on_voyage)
# if there is already a copilot
else:
raise Exception('A copilot is already assigned to the voyage\n')
elif role == 'Cabincrew':
# if crew_member.getBool():
if voyage.getHeadFlightAtt() == 'empty':
if AddExtraCrewmemberMenu().checkIfCrewmemberWorking(voyage,crew_member):
a_str = 'pilot is assigned to another voyage on the same date\n\
Please chose another pilot\n'
raise Exception(a_str)
voyage.setHeadFlightAtt(crew_member)
else:
raise Exception('\nA head flight attendant is already assigned to voyage\n')
# elif crew_member.getBool() == False:
# raise Exception('You must add a Head Flight Attendant first\n')
    def addCrewToVoyage(self,voyage):
        '''Adds crew to a voyage: first the mandatory captain, copilot and
        head flight attendant, then optional extra flight attendants.'''
        airplane = LL_API().getAirplanebyInsignia(voyage.getAircraftID())
        airplane_type_on_voyage = airplane.get_planeTypeID()
        # crew list layout: first three slots are captain/copilot/head
        # flight attendant; 'empty' marks an unfilled slot
        crew_on_voyage_list = voyage.getCrewOnVoyage()
        if 'empty' in crew_on_voyage_list[0:3]:
            print()
            CrewUI().showQualifiedCrew(voyage.getDepartureTime(), voyage.getAircraftID())
            print('You must add 1 captain and 1 copilot with license for {} and 1 head flight attendant'\
                .format(airplane_type_on_voyage))
            print(95*'-')
            print()
            while 'empty' in crew_on_voyage_list[0:3]:
                # Captain, copilot and head flight attendant must be added added at the same time
                # Voyage must have captain, copilot and head flight attendant
                crew_member = CrewUI().queryShowNotWorkingCrew()
                if crew_member:
                    self.checkRank(crew_member,voyage,airplane_type_on_voyage)
                    crew_on_voyage_list = voyage.getCrewOnVoyage()
                else:
                    break
            # NOTE(review): crew_member is defined because the loop body runs
            # at least once (the outer `if` guarantees the loop condition)
            if crew_member:
                LL_API().change_voyage(voyage)
                print('~'*70)
                print('A captain, pilot and head flight attendant have been added to voyage {}\n'\
                    .format(voyage.getVoyageID()))
                print('~'*70)
                AddExtraCrewmemberMenu().startAddExtraCrewMenu(voyage,crew_on_voyage_list)
            else:
                return
        elif 'empty' in crew_on_voyage_list:
            # If captain, copilot and head flight attendant are assigned to voyage the
            # option to add extra flight attendant is presented
            print()
            AddExtraCrewmemberMenu().startAddExtraCrewMenu(voyage,crew_on_voyage_list)
        else:
            print('\nVoyage is fully staffed!\n')
            # If voyage is fully staffed no more crewmembers can be added
            return
    def getStatusOfVoyage(self,voyage):
        '''Takes voyage instance and returns the status of the flight
        (completed, in air, not departed). Thin delegate to the logic layer.'''
        return LL_API().get_status_of_voyage(voyage)
def showOneVoyage(self,voyage = ''):
'''Shows one voyage by ID'''
while True:
if voyage == '':
voyage_id = input('\nEnter voyage ID: ').strip()
else:
voyage_id = voyage.getVoyageID()
voyage = LL_API().getOneVoyage(voyage_id)
if voyage != None:
voyage_duration_hrs, voyage_duration_min = \
LL_API().get_voyage_duration(voyage)
voyage_state = self.getStatusOfVoyage(voyage)
flight_no_out, flight_no_home = voyage.getFlightNumbers()
crew_on_voyage_list = voyage.getCrewOnVoyage()
if VoyageUI.EMPTY in crew_on_voyage_list[0:3]:
# not fully staffed if there is not one captain, one pilot and
# one head flight attendant
voyage_staffed = 'Voyage not fully staffed'
else:
voyage_staffed = 'Voyage fully staffed'
self.prettyprint(voyage,voyage_staffed,voyage.getAircraftID(),\
voyage_duration_hrs,flight_no_out, flight_no_home, voyage_duration_min,\
voyage_state)
return voyage
else:
return None
LL_API().change_voyage(voyage)
    def revertDatetimeStrtoDatetime(self,datetime_str):
        '''Convert a stored datetime string back into a datetime object.
        Thin delegate to the logic layer (format handled by LL_API).'''
        return LL_API().revertDatetimeStrtoDatetime(datetime_str)
    def addAircraftToVoyage(self,voyage):
        '''Adds aircraft to voyage: asks the user for a plane that is free
        between departure and arrival, stores it and persists the voyage.'''
        depart_datetime_object = self.revertDatetimeStrtoDatetime(voyage.getDepartureTime())
        arrival_datetime_object = self.revertDatetimeStrtoDatetime(voyage.getArrivalTimeHome())
        print()
        aircraft_ID = AirplaneUI().getAirplaneInput(depart_datetime_object, arrival_datetime_object)
        voyage.setAircraftID(aircraft_ID)
        print('\n'+'~'*45)
        print('Airplane has been added to voyage {}'.format(voyage.getVoyageID()))
        print('~'*45+'\n')
        return LL_API().change_voyage(voyage)
    def showAllVoyagesInRange(self, start_datetime = '', end_datetime = ''):
        '''Shows all voyages for a given time period (prompting for the
        dates when they are not supplied). Returns a tuple
        (voyages_on_date, completed_voyages_in_range), or None when the
        range holds no voyages.'''
        if start_datetime == '':
            print('\nEnter start date for time period')
            print()
            start_datetime = VoyageUI().getDateInput()
        if end_datetime == '':
            print('\nEnter end date for time period')
            print()
            end_datetime = VoyageUI().getDateInput()
        completed_voyages_in_range = LL_API().getCompletedVoyagesInRange(start_datetime,end_datetime)
        voyages_on_date = LL_API().get_all_voyages_in_date_range(start_datetime,end_datetime)
        start_date = VoyageUI().seperateDatetimeString(start_datetime.isoformat())[0]
        end_date = VoyageUI().seperateDatetimeString(end_datetime.isoformat())[0]
        if voyages_on_date != []:
            print()
            print('All voyages from {} to {}'.format(start_date,end_date))
            print(60*VoyageUI.SEPERATOR)
            for voyage in voyages_on_date:
                crew_on_voyage_list = voyage.getCrewOnVoyage()
                flight_no_out, flight_no_home = voyage.getFlightNumbers()
                voyage_duration_hrs, voyage_duration_min = \
                    LL_API().get_voyage_duration(voyage)
                voyage_state = self.getStatusOfVoyage(voyage)
                if VoyageUI.EMPTY in crew_on_voyage_list[0:3]:
                    # not fully staffed if there is not one captain, one pilot and
                    # one flight attendant
                    voyage_staffed = 'Voyage not staffed'
                elif VoyageUI.EMPTY in crew_on_voyage_list[-2:]:
                    # mandatory slots filled, extra attendant slots still open
                    voyage_staffed = 'Voyage has enough staff'
                else:
                    voyage_staffed = 'Voyage fully staffed'
                aircraft_ID = voyage.getAircraftID().upper()
                VoyageUI().prettyprint(voyage,voyage_staffed,aircraft_ID,\
                    voyage_duration_hrs, flight_no_out, flight_no_home, \
                    voyage_duration_min, voyage_state)
            return voyages_on_date,completed_voyages_in_range
        else:
            return None
    def getDest(self):
        '''Gets user input for a 3 letter destination code and returns it
        upper-cased once it passes validation.'''
        # all destinations
        destinations_class_list = LL_API().get_destinations()
        print()
        print('Please choose a destination.')
        print('Available destinations are:')
        print(45*'-')
        # print destinations with 3 letter IATA code
        # NOTE(review): the name goes into the 3-wide field and the airport
        # code into the 10-wide one -- looks swapped; confirm intended output.
        for destination in destinations_class_list:
            if destination.getDestinationAirport() != 'KEF':
                print('\t{:<3}: {:<10}'.format(destination.getDestinationName(),\
                    destination.getDestinationAirport()))
        print()
        dest = input('Your destination (3 letters): ').upper().strip()
        # check if input is valid
        check = LL_API().checkDestInput(dest)
        # while input is not valid
        while check == False:
            dest = input('Please enter a valid destination: ').upper().strip()
            check = LL_API().checkDestInput(dest)
        return dest
    def addVoyage(self):
        '''Gets input from user (destination, departure time, optional
        airplane) to add a new voyage to file.'''
        print('\n'+'-'*45)
        print('{:^45}'.format('Add new voyage'))
        print('-'*45)
        dest = self.getDest()
        # put selection as 2 so while loop is entered
        selection = '2'
        # while user chooses to redo input
        while selection == '2':
            # get datetime input
            departure_datetime = self.getDateWithTime()
            print('Are you sure you want to add this voyage?')
            print('-'*45)
            print('1 - Confirm input')
            print('2 - Redo input')
            print('3 - Cancel voyage registration')
            print()
            selection = input('Please choose one of the above: ').strip()
            print()
            # if user confirms input
            if selection == '1':
                while True:
                    if departure_datetime != None:
                        # arrival time found from departure time
                        # NOTE(review): arrival_time is NOT recomputed when the
                        # taken-date loop below replaces departure_datetime --
                        # it may be stale for the re-entered date; confirm.
                        arrival_time = LL_API().getArrivalTime(departure_datetime, dest)
                        # if date is taken by another voyage
                        while LL_API().checkIfTakenDate(departure_datetime) == True:
                            print('Another voyage is departing or arriving at that time. Please choose another date.')
                            departure_datetime = self.getDateWithTime()
                            if departure_datetime != 'c':
                                continue
                            else:
                                return
                        print('Would you like to assign an airplane to this voyage?')
                        print('(You can also do this later)')
                        selection = input('Input your selection (Y/N): ').lower().strip()
                        # while input is neither y or n
                        while selection != 'y' and selection != 'n':
                            selection = input('Please enter Y or N to make your choice: ').lower().strip()
                        # if chosen to add airplane
                        if selection == 'y':
                            plane_name = AirplaneUI().getAirplaneInput(departure_datetime, arrival_time)
                        # if chosen to add airplane later
                        else:
                            plane_name = 'empty'
                        LL_API().add_voyage(dest, departure_datetime, plane_name)
                        print()
                        print('~'*45)
                        print('{:^45}'.format('New voyage succesfully added!'))
                        print('~'*45)
                        return
                    else:
                        departure_datetime = self.getDateWithTime()
                        continue
            # user cancels registration
            elif selection == '3':
                return
            # if nothing else is chosen
            else:
                print('\nInvalid input!\n')
def countCrewmembers(self, voyage):
'''counts crewmembrs on voyage'''
crew_members_counter = 0
crew_on_voyage = voyage.getCrewOnVoyage()
for crew_member in crew_on_voyage:
if crew_member != 'empty':
crew_members_counter += 1
return crew_members_counter
    def removeCrewFromVoyage(self,voyage):
        ''' Removes all crewmembers from voyage after user confirmation.'''
        crew_members_counter = self.countCrewmembers(voyage)
        if crew_members_counter == 0:
            # if no crewmembers are assigned they can not be removed
            print('\n'+45*'-')
            print('No crewmembers are assigned to the voyage!')
            print(45*'-'+'\n')
        else:
            print('-'*45)
            print('{:^45}'.format('Are you sure you want to'))
            print('{:^45}'.format('remove all crew members?'))
            print('-'*45+'\n')
            print('1 - Yes\n2 - No (Go back)\n')
            selection = input('Please choose one of the above: ').strip()
            if selection == '1':
                # clear the crew on the voyage instance, then persist it
                voyage.removeCrewFromVoyage()
                LL_API().change_voyage(voyage)
                print('~'*45)
                print('{:^45}'.format('All crewmembers have been removed!'))
                print('~'*45+'\n')
            elif selection == '2':
                return
            else:
                print('\nInvalid selection!\n')
    def removeAirplaneFromVoyage(self,voyage):
        ''' Removes airplane from voyage; staff assigned to the voyage
        will be removed too (after confirmation when crew is present).'''
        old_airplane_type = voyage.getAircraftID()
        crew_members_counter = self.countCrewmembers(voyage)
        if crew_members_counter > 0:
            # crew present: ask for confirmation before tearing both down
            success = self.removeAirplaneFromVoyageWithStaff(voyage,crew_members_counter)
            if success:
                voyage.removeAirplaneFromVoyage()
                voyage.removeCrewFromVoyage()
                LL_API().change_voyage(voyage)
            else:
                # user cancelled -- skip the confirmation banner below
                return
        else:
            voyage.removeAirplaneFromVoyage()
            LL_API().change_voyage(voyage)
        print('\n'+'~'*45)
        a_str = 'Airplane {} has been'.format(old_airplane_type)
        b_str = 'removed from voyage {}'.format(voyage.getVoyageID())
        print('{:^45}'.format(a_str))
        print('{:^45}'.format(b_str))
        print('~'*45+'\n')
def removeAirplaneFromVoyageWithStaff(self,voyage,crew_members_counter):
''''''
while True:
print('\n'+'-'*45)
a_str = '{} crewmembers are assigned to the voyage'.format(crew_members_counter)
print('{:^45}'.format(a_str))
print('{:^45}'.format('All crewmembers will be removed'))
print('-'*45+'\n')
print('Are you sure you want to remove the airplane?')
print('1 - Yes\n2 - No (Go back)\n')
selection = input('Input your selection: ').strip()
if selection == '1':
return True
if selection == '2':
return False
else:
print('\nInvalid input!')
"""Intersections between districts and counties.
Get which districts intersect with which counties.
This will speed up the block level interpolation because we can reduce
to counties that intersect with each district rather than interpolating on
the entire state.
"""
import geopandas as gpd
import os
from download_census_data import state_fips
from county_district_interpolation import district_attribute
from county_district_interpolation import get_district_year
def main():
    """For every state, compute which districts intersect each county and
    write the result to <state>_district_county_intersection.csv."""
    fips = state_fips()
    # Iterate over each state
    for state, fips_code in fips.items():
        # Get the base path to the state folder
        base_path = 'clean_data/' + state + '/'
        # Get county and redistricting plan shapefiles
        files = os.listdir(base_path)
        files = [x for x in files if x[-4:] == '.shp']
        files = [x for x in files if 'blocks' not in x]
        counties = [x for x in files if 'county' in x]
        districts = [x for x in files if 'county' not in x]
        # Load most recent county file (lexicographic sort puts it last)
        counties.sort()
        df = gpd.read_file(base_path + counties[-1])
        # Add systematic countyfp (2000- and 2010-vintage column names)
        if 'COUNTYFP00' in df.columns:
            df['COUNTYFP'] = df['COUNTYFP00']
        if 'COUNTYFP10' in df.columns:
            df['COUNTYFP'] = df['COUNTYFP10']
        # Iterate through each district file
        keep_cols = ['COUNTYFP']
        for file in districts:
            print('INTERSECTIONS', file, '\n')
            # Load the district file
            district_path = base_path + file
            df_dist = gpd.read_file(district_path)
            # Define relevant column names and add to keep columns
            district_year = get_district_year(file)
            dist_col = district_attribute(district_year)
            keep_cols.append(district_year)
            # Detect intersections
            df = county_district_intersections(df, district_year, df_dist,
                                               dist_col)
        # Save dataframe (only the county fips + per-plan district columns)
        df = df[keep_cols]
        df.to_csv(base_path + state + '_district_county_intersection.csv',
                  index=False)
    return
def county_district_intersections(df_county, county_col, df_district,
                                  district_col):
    """Determine which districts intersect with each county.

    Arguments:
        df_county: county shapefile
        county_col: column name in which to save district information
            (a comma delimited string of district identifiers)
        df_district: district shapefile
        district_col: name of fips column in district shapefile

    Returns:
        df_county with *county_col* filled per county.
    """
    # Let the index be an integer for spatial indexing purposes
    df_district.index = df_district.index.astype(int)
    # construct r-tree spatial index
    si = df_district.sindex
    # Get centroid for each geometry in the large shapefile
    df_district['centroid'] = df_district['geometry'].centroid
    # Find appropriate matching large geometry for each small geometry
    df_county = df_county.reset_index(drop=True)
    for ix, row in df_county.iterrows():
        try:
            # Get potential matches from the r-tree bounding boxes
            county_poly = row['geometry']
            potential_matches = [df_district.index[i] for i in
                                 list(si.intersection(county_poly.bounds))]
            # Only keep matches with a real (positive-area) intersection
            matches = [m for m in potential_matches
                       if df_district.at[m, 'geometry'].intersection(
                       county_poly).area > 0]
            # Get matches values
            matches_values = df_district.loc[matches, district_col]
            matches_str = ','.join(list(matches_values))
            # Save matches
            df_county.at[ix, county_col] = matches_str
        except Exception:
            # BUGFIX: was a bare `except:`, which also swallowed SystemExit
            # and KeyboardInterrupt. Still best-effort per county.
            continue
    return df_county
# Script entry point: build the intersection CSVs for every state.
if __name__ == "__main__":
    main()
"""
stock unittest
"""
import unittest
import pandas as pd
import numpy as np
from stock import get_balance_sheet, get_profit_statement
from stock import get_annual_report, get_quarterly_results
from stock import get_basic_info, get_level0_report, get_level1_report
from stock import classifier_level_report, pct_change
from stock import get_sh_margin_details, get_sz_margin_details, get_tick_data
from stock.website import BALANCE_SHEET_INDEX, PROFIT_STATEMENT_INDEX
from stock.fundamental import ANNUAL_REPORT_INDEX, BASIC_REPORT_INDEX
from stock.fundamental import LEVEL0_REPORT_INDEX, LEVEL1_REPORT_INDEX
from stock.technical import MARGIN_COLUMNS, TICK_COLUMNS
class TestStock(unittest.TestCase):
    """
    Tests for the stock data interfaces (fundamental and technical).
    """
    def test_get_balance_sheet(self):
        """
        Test the balance sheet: index, datetime columns, float values.
        """
        balance_sheet = get_balance_sheet('002367')
        self.assertTrue(isinstance(balance_sheet, pd.DataFrame))
        self.assertEqual(balance_sheet.index.tolist(), BALANCE_SHEET_INDEX)
        self.assertEqual(balance_sheet.columns.dtype, 'datetime64[ns]')
        for column_name in balance_sheet.columns:
            self.assertEqual(balance_sheet[column_name].dtype, 'float64')
    def test_get_profit_statement(self):
        """
        Test the income (profit) statement: index, datetime columns, floats.
        """
        profit_statement = get_profit_statement('002367')
        self.assertTrue(isinstance(profit_statement, pd.DataFrame))
        self.assertEqual(profit_statement.index.tolist(),
                         PROFIT_STATEMENT_INDEX)
        self.assertEqual(profit_statement.columns.dtype, 'datetime64[ns]')
        for column_name in profit_statement.columns:
            self.assertEqual(profit_statement[column_name].dtype, 'float64')
    def test_get_annual_report(self):
        """
        Test the annual report: index, datetime columns sorted ascending.
        """
        annual_report = get_annual_report('002367')
        # YoY
        #comparisions = annual_report.pct_change(axis=1)
        self.assertTrue(isinstance(annual_report, pd.DataFrame))
        self.assertEqual(annual_report.index.tolist(),
                         ANNUAL_REPORT_INDEX['new'])
        self.assertEqual(annual_report.columns.dtype, 'datetime64[ns]')
        columns_list = annual_report.columns.tolist()
        columns_list.sort()
        self.assertEqual(annual_report.columns.tolist(), columns_list)
    def test_get_quarterly_results(self):
        """
        Test the quarterly report: index, datetime columns sorted ascending.
        """
        quarterly_results = get_quarterly_results('002367')
        # YoY
        #comparisions = quarterly_results.pct_change(axis=1)
        # QoQ
        #comparisions = quarterly_results.pct_change(periods=4, axis=1)
        self.assertTrue(isinstance(quarterly_results, pd.DataFrame))
        self.assertEqual(quarterly_results.index.tolist(),
                         ANNUAL_REPORT_INDEX['new'])
        self.assertEqual(quarterly_results.columns.dtype, 'datetime64[ns]')
        columns_list = quarterly_results.columns.tolist()
        columns_list.sort()
        self.assertEqual(quarterly_results.columns.tolist(), columns_list)
    def test_get_basic_info(self):
        """
        Test basic company info: a Series with the expected index.
        """
        basic_report = get_basic_info('002367')
        self.assertTrue(isinstance(basic_report, pd.Series))
        self.assertEqual(basic_report.index.tolist(), BASIC_REPORT_INDEX)
    def test_get_level0_report(self):
        """
        Test the level-0 analysis built from the latest annual column.
        """
        annual_report = get_annual_report('002367')
        level0_report = get_level0_report(annual_report.iloc[:, -1])
        self.assertTrue(isinstance(level0_report, pd.Series))
        self.assertEqual(level0_report.index.tolist(), LEVEL0_REPORT_INDEX)
    def test_get_level1_report(self):
        """
        Test the level-1 analysis for a given year and quarter.
        """
        level1_report = get_level1_report('002367', 2016, 4)
        self.assertTrue(isinstance(level1_report, pd.Series))
        self.assertEqual(level1_report.index.tolist(), LEVEL1_REPORT_INDEX)
    def test_classifier_level_report(self):
        """
        Test level-report classification of a level-0 report.
        """
        annual_report = get_annual_report('002367')
        level0_report = get_level0_report(annual_report.iloc[:, -1])
        level0_report2 = classifier_level_report(level0_report)
        self.assertTrue(isinstance(level0_report2, pd.Series))
    def test_pct_change(self):
        """
        Test the financial growth-rate interface against pandas pct_change.
        """
        quarterly_results = get_quarterly_results('002367')
        quarterly_results.dropna(axis=1, how='any', inplace=True)
        pct_change1 = quarterly_results.pct_change(axis=1)
        pct_change2 = pct_change(quarterly_results, axis=1)
        self.assertTrue(isinstance(pct_change1, pd.DataFrame))
        self.assertTrue(isinstance(pct_change2, pd.DataFrame))
        d1 = pct_change1.abs().round(4)
        d2 = pct_change2.abs().round(4)
        self.assertTrue(d1.equals(d2))
        self.assertFalse(d1.empty)
    def test_get_sh_margin_details(self):
        """
        Test Shanghai margin trading (financing/securities lending) details.
        """
        margin_details = get_sh_margin_details("2018-12-01",
                                               "2018-12-07")
        self.assertTrue(isinstance(margin_details, pd.DataFrame))
        self.assertEqual(margin_details.columns.tolist(), MARGIN_COLUMNS)
    def test_get_sz_margin_details(self):
        """
        Test Shenzhen margin trading (financing/securities lending) details.
        """
        margin_details = get_sz_margin_details("2018-11-07")
        self.assertTrue(isinstance(margin_details, pd.DataFrame))
        self.assertEqual(margin_details.columns.tolist(), MARGIN_COLUMNS)
    def test_get_tick_data(self):
        """
        Test tick (per-trade) data for one trading day.
        """
        tick_datas = get_tick_data('002367', "2018-12-07")
        self.assertTrue(isinstance(tick_datas, pd.DataFrame))
        self.assertEqual(tick_datas.columns.tolist(), TICK_COLUMNS)
        # self.assertEqual(tick_datas.iloc[0]['日期'], "2017-08-21")
        self.assertLessEqual(tick_datas.iloc[0]['时间'], "2018-12-07 09:30")
        # self.assertEqual(tick_datas.iloc[-1]['日期'], "2017-08-25")
        self.assertGreaterEqual(tick_datas.iloc[-1]['时间'], "2018-12-07 14:59")
10510330475 | from selenium.common.exceptions import NoSuchElementException
import math
from selenium.common.exceptions import NoAlertPresentException
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from .locators import BasePageLocators
class BasePage():
    def __init__(self, browser, url, timeout=10):
        """Bind a WebDriver and target URL; set the implicit wait (seconds)."""
        self.browser = browser
        self.url = url
        # implicit wait applies to every subsequent find_element call
        self.browser.implicitly_wait(timeout)
def open(self):
self.browser.get(self.url)
def should_be_authorized_user(self):
assert self.is_element_present(*BasePageLocators.USER_ICON), "User icon is not presented, probably unauthorised user"
def go_to_login_page(self):
link = self.browser.find_element(*BasePageLocators.LOGIN_LINK)
link.click()
def go_to_basket_page(self):
element = self.browser.find_element(*BasePageLocators.BASKET_BUTTON)
element.click()
def should_be_login_link(self):
assert self.is_element_present(*BasePageLocators.LOGIN_LINK), "Login link is not presented"
def is_element_present(self, how, what):
try:
self.browser.find_element(how, what)
except (NoSuchElementException):
return False
return True
def get_element(self, how, what):
try:
element = self.browser.find_element(how, what)
except (NoSuchElementException):
return False
return element
def send_keys_into_element(self, how, what, text):
try:
element = self.browser.find_element(how, what)
element.send_keys(text)
except (NoSuchElementException):
return False
return True
def get_element_text(self, how, what):
try:
element_text = self.browser.find_element(how, what).text
except (NoSuchElementException):
return False
return element_text
    def is_element_with_text_present(self, how, what, text):
        """True if any element matched by (how, what) has exactly *text*.

        NOTE(review): when no element's text matches, the generic Exception
        raised below is NOT caught by the NoSuchElementException handler, so
        it propagates to the caller instead of returning False -- confirm
        this is intended.
        """
        try:
            elements = self.browser.find_elements(how, what)
            for element in elements:
                if element.text == text:
                    return True
            raise Exception(f"There is No such element '{what}' with text '{text}'")
        except (NoSuchElementException):
            return False
def is_not_element_present(self, how, what, timeout=4):
try:
WebDriverWait(self.browser, timeout).until(EC.presence_of_element_located((how, what)))
except TimeoutException:
return True
return False
def is_disappeared(self, how, what, timeout=4):
try:
WebDriverWait(self.browser, timeout, 1, TimeoutException).until_not(EC.presence_of_element_located((how, what)))
except TimeoutException:
return False
return True
def click_element(self, how, what):
try:
element = self.browser.find_element(how, what)
element.click()
except (NoSuchElementException):
return False
return True
def solve_quiz_and_get_code(self):
alert = self.browser.switch_to.alert
x = alert.text.split(" ")[2]
answer = str(math.log(abs((12 * math.sin(float(x))))))
alert.send_keys(answer)
alert.accept()
try:
alert = self.browser.switch_to.alert
alert_text = alert.text
print(f"Your code: {alert_text}")
alert.accept()
except NoAlertPresentException:
print("No second alert presented") | MikalaiKryvusha/stepik-selenium-page-object | pages/base_page.py | base_page.py | py | 3,652 | python | en | code | 0 | github-code | 13 |
"""import array as arr
myArray = arr.array('d', [1.3, 2.4, 5.6])
print(myArray[0])"""
import pprint
def ThreeD(a, b, c):
    """Return a c x b x a grid (list of lists of lists) filled with '#'."""
    return [
        [['#' for _ in range(a)] for _ in range(b)]
        for _ in range(c)
    ]
# Demo: build and pretty-print a 2 x 3 x 5 grid of '#' marks.
col1 = 5
col2 = 3
row = 2
pprint.pprint(ThreeD(col1, col2, row))
# 491 - Tile Topology
# Resources: http://stackoverflow.com/questions/60208/replacements-for-switch-statement-in-python
import sys
def get_tilings(n):
    """Return, as a string, the precomputed tiling count for size n.

    Values are tabulated for n in 2..12; any other n maps to "1".
    The original wrapped every value in str() inside the dict literal;
    storing plain ints and converting once at the return is equivalent
    and less noisy.
    """
    tilings = {
        2: 1,
        3: 2,
        4: 7,
        5: 18,
        6: 60,
        7: 196,
        8: 704,
        9: 2500,
        10: 9189,
        11: 33896,
        12: 126759,
    }
    return str(tilings.get(n, 1))
def get_input():
    """Yield integers read from stdin, one per line.

    Stops cleanly at end of input or at the first non-integer line.
    Bug fixed: the original called next(sys.stdin) in a bare loop, which
    raises StopIteration at EOF; under PEP 479 (Python 3.7+) that becomes
    a RuntimeError inside a generator, crashing the consumer. Iterating
    the file object ends the generator normally at EOF instead.
    """
    for line in sys.stdin:
        try:
            yield int(line)
        except ValueError:
            # First unparsable line terminates the stream, as before.
            break
for n in get_input():
sys.stdout.write(get_tilings(n)+ "\n") | tristan-hunt/UVaProblems | tiletopology.py | tiletopology.py | py | 516 | python | en | code | 0 | github-code | 13 |
498099367 | import os
import time
import torch
import datetime
import numpy as np
from tqdm import tqdm
from models import *
from transformers import BertTokenizer
from trainer.utils import multi_acc, multi_mse, load_datasetbert_from_local, multi_f1_macro, multi_f1_micro
from models.get_optim import get_Adam_optim_v2
# Registry of supported model constructors, keyed by the config's `model` value.
# NOTE(review): "MODLES" is a typo for "MODELS"; the name is kept because other
# code may reference it.
ALL_MODLES = {
    'bert': PSBERT.BertForSequenceClassification,
}
class Trainer:
    def __init__(self, config):
        """Build tokenizer, data iterators, model, optimizer and bookkeeping state.

        config: experiment configuration object; `moniter_per_step` and
        `num_train_optimization_steps` are written back onto it here.
        """
        self.config = config
        pretrained_weights = 'bert-base-uncased'
        self.tokenizer = BertTokenizer.from_pretrained(pretrained_weights)
        # Iterators plus user/product string->index vocabularies.
        self.train_itr, self.dev_itr, self.test_itr, self.usr_stoi, self.prd_stoi = load_datasetbert_from_local(config)
        # Evaluate on dev roughly 10 times per epoch.
        self.config.moniter_per_step = len(self.train_itr) // 10
        # print(config)
        net = ALL_MODLES[config.model].from_pretrained(pretrained_weights,
                                                       num_labels=config.num_labels,
                                                       cus_config=config,
                                                       return_dict=False)
        net.bert.init_personalized()
        # Bidirectional self-distillation between enhanced/agnostic passes.
        self.KD = KD_zoo.BiSelfKD(T1=4, T2=4)
        if self.config.n_gpu > 1:
            self.net = torch.nn.DataParallel(net).to(self.config.device)
            self.optim, self.scheduler = get_Adam_optim_v2(config, self.net.module, lr_rate=config.lr_base)
        else:
            self.net = net.to(self.config.device)
            self.optim, self.scheduler = get_Adam_optim_v2(config, self.net, lr_rate=config.lr_base)
        self.early_stop = config.early_stop
        self.best_dev_acc = 0
        # Best dev macro-F1 seen so far; checkpoints are saved when it improves.
        self.eval_f1_macro = 0
        self.unimproved_iters = 0
        self.iters_not_improved = 0
        self.step_count = 0
        # Counter of CUDA out-of-memory events survived during training.
        self.oom_time = 0
        self.losses = []
        self.losses_whole = []
        self.dev_acc = []
        training_steps_per_epoch = len(self.train_itr) // (config.gradient_accumulation_steps)
        self.config.num_train_optimization_steps = self.config.max_epoch * training_steps_per_epoch
        self.log_file = self.config.log_path + '/log_run_' + self.config.dataset + '_' + self.config.version + '.txt'
def get_logging(self, loss, acc, rmse, f1_mi=None, f1_ma=None, eval='training'):
logs_metrics_format = \
'==={} phrase...'.format(eval) + "".center(60, " ") + "\n" + \
"total_loss:{:>2.4f}\ttotal_acc:{:>2.4f}\ttotal_rmse:{:>2.4f}"
logs_f1_format = "\ttotal_f1_macro:{:>2.4f}\ttotal_f1_micro:{:>2.4f}\t"
if f1_mi is not None and f1_mi is not None:
logs_format = logs_metrics_format + logs_f1_format
logs = logs_format.format(loss, acc, rmse, f1_ma, f1_mi) + "\n"
else:
logs_format = logs_metrics_format
logs = logs_format.format(loss, acc, rmse) + "\n"
return logs
def logging(self, log_file, logs):
logfile = open(
log_file, 'a+'
)
logfile.write(logs)
logfile.close()
    def eval(self, eval_itr, agnostic=False):
        """Run the model over `eval_itr` and return aggregate metrics.

        Returns (mean_loss, accuracy, rmse, f1_micro, f1_macro).
        `agnostic` is forwarded to the network's forward pass.
        NOTE(review): caller is expected to have set self.net.eval() and
        torch.no_grad() — this method does not do so itself; verify.
        """
        loss_fn = torch.nn.CrossEntropyLoss()
        metric_fn = multi_acc
        mse_fn = multi_mse
        total_loss = []
        total_label = []
        total_logit = []
        eval_oom = 0
        for step, batch in enumerate(eval_itr):
            start_time = time.time()
            input_ids, label, usr, prd = batch
            input_ids = input_ids.to(self.config.device)
            # Token id 0 is padding; mask everything else as attended.
            attention_mask = (input_ids != 0).long()
            labels = label.long().to(self.config.device)
            # Map user/product string ids to vocabulary indices.
            usr = [self.usr_stoi[x] for x in usr]
            prd = [self.prd_stoi[x] for x in prd]
            usr = torch.Tensor(usr).long().to(self.config.device)
            prd = torch.Tensor(prd).long().to(self.config.device)
            try:
                logits = self.net(input_ids=input_ids,
                                  user_ids=usr,
                                  item_ids=prd,
                                  attention_mask=attention_mask,
                                  agnostic=agnostic)[0]
                loss = loss_fn(logits, labels)
                total_loss.append(loss.item())
                total_label.extend(label.cpu().detach().tolist())
                total_logit.extend(logits.cpu().detach().tolist())
            except RuntimeError as exception:
                # Survive CUDA OOM on a batch (skip it); re-raise anything else.
                if "out of memory" in str(exception):
                    if hasattr(torch.cuda, 'empty_cache'):
                        torch.cuda.empty_cache()
                    eval_oom += 1
                else:
                    torch.cuda.empty_cache()
                    print(str(exception))
                    raise exception
            # monitoring results on every steps1
            end_time = time.time()
            # Rough ETA: time of this batch times the remaining batch count.
            span_time = (end_time - start_time) * (int(len(eval_itr)) - step)
            h = span_time // (60 * 60)
            m = (span_time % (60 * 60)) // 60
            s = (span_time % (60 * 60)) % 60 // 1
            if eval_oom > 0:
                print("out of memory times over evalation is:", eval_oom)
            print(
                "\rIteration: {:>4}/{} ({:>4.1f}%) -ETA {:>2}h-{:>2}m-{:>2}s".format(
                    step, int(len(eval_itr)),
                    100 * (step) / int(len(eval_itr)),
                    int(h), int(m), int(s)),
                end="")
        return np.array(total_loss).mean(0), \
               metric_fn(torch.tensor(total_label), torch.tensor(total_logit)), \
               mse_fn(torch.tensor(total_label), torch.tensor(total_logit)).sqrt(), \
               multi_f1_micro(torch.tensor(total_label), torch.tensor(total_logit)), \
               multi_f1_macro(torch.tensor(total_label), torch.tensor(total_logit))
    def save_state(self):
        """Save tokenizer and model weights under <ckpts_path>/<dataset>."""
        SAVED_MODEL_PATH = self.config.ckpts_path
        self.ensureDirs(os.path.join(SAVED_MODEL_PATH, self.config.dataset))
        self.tokenizer.save_pretrained(os.path.join(SAVED_MODEL_PATH, self.config.dataset))
        if self.config.n_gpu > 1:
            # DataParallel wraps the real model in .module.
            self.net.module.save_pretrained(os.path.join(SAVED_MODEL_PATH, self.config.dataset))
        else:
            self.net.save_pretrained(os.path.join(SAVED_MODEL_PATH, self.config.dataset))
    def load_state(self):
        """Reload tokenizer and model weights saved by save_state()."""
        SAVED_MODEL_PATH = self.config.ckpts_path
        self.tokenizer = BertTokenizer.from_pretrained(os.path.join(SAVED_MODEL_PATH, self.config.dataset))
        net = ALL_MODLES[self.config.model].from_pretrained(os.path.join(SAVED_MODEL_PATH, self.config.dataset), cus_config=self.config)
        if self.config.n_gpu > 1:
            self.net = torch.nn.DataParallel(net).to(self.config.device)
        else:
            self.net = net.to(self.config.device)
def ensureDirs(self, *dir_paths):
for dir_path in dir_paths:
if not os.path.exists(dir_path):
os.makedirs(dir_path)
def run(self, run_mode):
if run_mode == 'train':
# empty log file
self.empty_log()
self.train()
self.load_state()
# do test
self.net.eval()
with torch.no_grad():
eval_loss, eval_acc, eval_rmse, eval_f1_micro, eval_f1_macro = self.eval(self.test_itr, agnostic=False)
eval_logs = self.get_logging(eval_loss, eval_acc, eval_rmse, eval_f1_micro, eval_f1_macro, "testing-enhanced")
print("\r" + eval_logs)
self.logging(
self.config.log_path + '/log_run_' + self.config.dataset + '_' + self.config.version + '.txt',
eval_logs)
eval_loss, eval_acc, eval_rmse, eval_f1_macro, eval_f1_micro = self.eval(self.test_itr, agnostic=True)
eval_logs = self.get_logging(eval_loss, eval_acc, eval_rmse, eval_f1_macro, eval_f1_micro,
"testing-agnostic")
print("\r" + eval_logs)
self.logging(
self.config.log_path + '/log_run_' + self.config.dataset + '_' + self.config.version + '.txt',
eval_logs)
if run_mode == 'test':
self.load_state()
# do test
self.net.eval()
with torch.no_grad():
eval_loss, eval_acc, eval_rmse, eval_f1_micro, eval_f1_macro = self.eval(self.test_itr, agnostic=False)
eval_logs = self.get_logging(eval_loss, eval_acc, eval_rmse, eval_f1_micro, eval_f1_macro, "testing-enhanced")
print("\r" + eval_logs)
self.logging(
self.config.log_path + '/log_run_' + self.config.dataset + '_' + self.config.version + '.txt',
eval_logs)
eval_loss, eval_acc, eval_rmse, eval_f1_macro, eval_f1_micro = self.eval(self.test_itr, agnostic=True)
eval_logs = self.get_logging(eval_loss, eval_acc, eval_rmse, eval_f1_macro, eval_f1_micro,
"testing-agnostic")
print("\r" + eval_logs)
self.logging(
self.config.log_path + '/log_run_' + self.config.dataset + '_' + self.config.version + '.txt',
eval_logs)
else:
exit(-1)
def empty_log(self):
if (os.path.exists(self.log_file)):
os.remove(self.log_file)
print('Initializing log file ........')
print('Finished!')
print('')
    def test_load_dataset(self):
        """Debug helper: iterate the train loader and print each batch's tensors."""
        for step, batch in enumerate(self.train_itr):
            self.step_count += 1
            self.net.train()
            input_ids, label, usr, prd = batch
            # input_ids = processor4baseline(text, self.tokenizer, self.config)
            usr = torch.Tensor([self.usr_stoi[x] for x in usr]).long().to(self.config.device)
            prd = torch.Tensor([self.prd_stoi[x] for x in prd]).long().to(self.config.device)
            print(input_ids)
            print(label)
            print(usr)
            print(prd)
    def train(self):
        """Main training loop with periodic dev evaluation and early stopping.

        Each step runs two forward passes (personalized and agnostic), adds a
        bidirectional self-distillation loss between the two logit sets, and
        optimizes 0.01 * Pearson-style loss + KD loss. Every
        `config.moniter_per_step` steps the dev set is evaluated; the best
        dev macro-F1 checkpoint is saved, and after `config.patience`
        non-improving evaluations the method tests and returns early.
        """
        # Save log information
        logfile = open(self.log_file, 'a+')
        logfile.write(
            'nowTime: ' +
            datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') +
            '\n' +
            'seed:' + str(self.config.seed) +
            '\n' +
            'data:' + str(self.config.dataset) +
            '\n' +
            'strategy:' + str(self.config.strategy) +
            '\n'
        )
        logfile.close()
        loss_fn = torch.nn.CrossEntropyLoss()
        acc_fn = multi_acc
        mse_fn = multi_mse
        total_loss = []
        total_acc = []
        total_mse = []
        eval_best_loss = 0.
        eval_best_acc = 0.
        eval_best_rmse = 0.
        eval_best_f1_micro = 0.
        eval_best_f1_macro = 0.
        self.optim.zero_grad()
        for epoch in range(0, self.config.max_epoch):
            epoch_tqdm = tqdm(self.train_itr)
            epoch_tqdm.set_description_str("Processing Epoch: {}".format(epoch))
            for step, batch in enumerate(epoch_tqdm):
                self.step_count += 1
                self.net.train()
                input_ids, label, usr, prd = batch
                input_ids = input_ids.to(self.config.device)
                attention_mask = (input_ids != 0).long() # id of [PAD] is 0
                labels = label.long().to(self.config.device)
                usr = torch.Tensor([self.usr_stoi[x] for x in usr]).long().to(self.config.device)
                prd = torch.Tensor([self.prd_stoi[x] for x in prd]).long().to(self.config.device)
                try:
                    # Two passes: personalized (agnostic=False) and agnostic.
                    logits1 = self.net(input_ids=input_ids,
                                       user_ids=usr,
                                       item_ids=prd,
                                       attention_mask=attention_mask,
                                       agnostic=False,
                                       )[0]
                    logits2 = self.net(input_ids=input_ids,
                                       user_ids=usr,
                                       item_ids=prd,
                                       attention_mask=attention_mask,
                                       agnostic=True,
                                       )[0]
                    kd_loss = self.KD(logits1, logits2)
                    loss = loss_fn(logits1, labels) + kd_loss
                    metric_acc = acc_fn(labels, logits1)
                    metric_mse = mse_fn(labels, logits1)
                    total_loss.append(loss.item())
                    total_acc.append(metric_acc.item())
                    total_mse.append(metric_mse.item())
                    if self.config.gradient_accumulation_steps > 1:
                        loss = loss / self.config.gradient_accumulation_steps
                    loss.backward()
                    # Optimizer step only every gradient_accumulation_steps batches.
                    if (step + 1) % self.config.gradient_accumulation_steps == 0:
                        self.optim.step()
                        self.scheduler.step()
                        self.optim.zero_grad()
                except RuntimeError as exception:
                    # Survive CUDA OOM by skipping the batch; re-raise otherwise.
                    if "out of memory" in str(exception):
                        self.oom_time += 1
                        if hasattr(torch.cuda, 'empty_cache'):
                            torch.cuda.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                        print(str(exception))
                        raise exception
                if self.step_count % self.config.moniter_per_step == 0:
                    # evaluating phase
                    self.net.eval()
                    with torch.no_grad():
                        eval_loss, eval_acc, eval_rmse, eval_f1_micro, eval_f1_macro = self.eval(self.dev_itr, agnostic=False)
                        self.dev_acc.append(eval_acc)
                    # monitoring eval metrices
                    if eval_f1_macro > eval_best_f1_macro:
                        eval_best_loss = eval_loss
                        eval_best_acc = eval_acc
                        eval_best_rmse = eval_rmse
                        eval_best_f1_micro = eval_f1_micro
                        eval_best_f1_macro = eval_f1_macro
                    if eval_f1_macro > self.eval_f1_macro:
                        # saving models
                        self.eval_f1_macro = eval_f1_macro
                        self.save_state()
                        self.unimproved_iters = 0
                    else:
                        self.unimproved_iters += 1
                        if self.unimproved_iters >= self.config.patience and self.early_stop == True:
                            early_stop_logs = self.log_file + "\n" + \
                                              "Early Stopping. Epoch: {}, Best Dev Acc: {}".format(epoch,
                                                                                                   self.best_dev_acc)
                            print(early_stop_logs)
                            self.logging(self.log_file, early_stop_logs)
                            # load best model on dev datasets
                            self.load_state()
                            # logging test logs
                            self.net.eval()
                            with torch.no_grad():
                                eval_loss, eval_acc, eval_rmse, eval_f1_micro, eval_f1_macro = self.eval(self.test_itr)
                                eval_logs = self.get_logging(eval_loss, eval_acc, eval_rmse, eval_f1_micro, eval_f1_macro,
                                                             eval="testing")
                                print("\r" + eval_logs)
                                # logging testt logs
                                self.logging(self.log_file, eval_logs)
                                return
                            # exit()
            # monitoring stats at each epoch
            train_loss, train_acc, train_rmse = \
                np.array(total_loss).mean(), np.array(total_acc).mean(), np.sqrt(np.array(total_mse).mean())
            logs = (" Epoch:{:^5} ".format(epoch)).center(85, "-") \
                   + "".center(70, " ") + '\n' + \
                   self.get_logging(train_loss, train_acc, train_rmse, eval="training")
            print("\r" + logs)
            if self.oom_time > 0:
                print("num of out of memory is: " + str(self.oom_time))
            # logging training logs
            self.logging(self.log_file, logs)
            eval_logs = self.get_logging(eval_best_loss,
                                         eval_best_acc,
                                         eval_best_rmse,
                                         eval_best_f1_micro,
                                         eval_best_f1_macro,
                                         eval="evaluating")
            print("\r" + eval_logs)
            # logging evaluating logs
            self.logging(self.log_file, eval_logs)
            # reset monitors
            total_loss = []
            total_acc = []
            total_mse = []
            eval_best_loss = 0.
            eval_best_acc = 0.
            eval_best_rmse = 0.
            eval_best_f1_micro = 0.
            eval_best_f1_macro = 0.
| yoyo-yun/DG_RRR | trainer/trainer_bert.py | trainer_bert.py | py | 17,326 | python | en | code | 0 | github-code | 13 |
42415745375 | from torch import nn, optim
from torch.autograd import Variable
import torch
from torch.nn import functional as F
class ModelWrapperWGAN:
    def __init__(self, d, g,
                 clamp_lower, clamp_upper,
                 opt_params,
                 input_, noise,
                 meters, loggers):
        """Wrap a WGAN critic `d` and generator `g` with their Adam optimizers.

        clamp_lower/clamp_upper bound the critic's weights after each update.
        input_ and noise are preallocated tensors reused across batches.
        NOTE(review): meters and loggers are stored but not used in this class's
        visible code — presumably consumed externally; verify.
        """
        self.d = d
        self.g = g
        lrD = opt_params["lrD"]
        lrG = opt_params["lrG"]
        beta = opt_params["beta"]
        self.opt_d = optim.Adam(d.parameters(),
                                lr=lrD, betas=(beta, 0.999))
        self.opt_g = optim.Adam(g.parameters(),
                                lr=lrG, betas=(beta, 0.999))
        self.input = input_
        # self.label = label
        self.noise = noise
        self.meters = meters
        self.loggers = loggers
        #
        self.clamp_lower = clamp_lower
        self.clamp_upper = clamp_upper
        # self.REAL_LABEL = 1
        # self.FAKE_LABEL = 0
        self.noise_dim = self.g.noise_dim
    def __call__(self, sample):
        """One WGAN step: update the critic, optionally update the generator.

        sample[0] is the real data batch; sample[-1] is a flag telling whether
        to also take a generator step this iteration. Returns a dict of losses
        and mean critic outputs, paired with None.
        """
        self.d.zero_grad()
        data = sample[0].cuda()
        batch_size = data.size(0)
        self.input.resize_as_(data).copy_(data)
        inputv = Variable(self.input)
        output = self.d(inputv)
        # Mean critic score on real data.
        D_x = torch.mean(output)
        # train with fake
        self.noise.resize_(batch_size,
                           self.noise.size(1)).normal_(0, 1)
        fake = self.g(Variable(self.noise))
        # detach(): critic update must not backprop into the generator.
        output = self.d(fake.detach())
        D_G_z1 = torch.mean(output)
        # Wasserstein critic loss: maximize D(real) - D(fake).
        D_loss = -(D_x - D_G_z1)
        D_loss.backward()
        self.opt_d.step()
        # Weight clipping enforces the critic's Lipschitz constraint.
        for p in self.d.parameters():
            p.data.clamp_(self.clamp_lower, self.clamp_upper)
        should_train_G = sample[-1]
        D_G_z2 = None
        G_loss = None
        if should_train_G:
            self.g.zero_grad()
            self.noise.resize_(batch_size,
                               self.noise.size(1)).normal_(0, 1)
            fake = self.g(Variable(self.noise))
            output = self.d(fake)
            D_G_z2 = torch.mean(output)
            # Generator maximizes the critic's score on fakes.
            G_loss = -D_G_z2
            G_loss.backward()
            self.opt_g.step()
        return {"d_loss": D_loss,
                "g_loss": G_loss,
                "real_prob": D_x,
                "fake_prob_before": D_G_z1,
                "fake_prob_after": D_G_z2}, None
def generate(self, noise):
return (1.0 + self.g(noise)) / 2.0
| festeh/GAN-thesis | WGAN/model.py | model.py | py | 2,440 | python | en | code | 0 | github-code | 13 |
35784943708 | import numpy as np
import pickle
import os
import argparse
import matplotlib.pyplot as plt
from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
from rf.model import RF_conv_decoder
from rf.proc import rotateIQ
from data.datasets import RFDataRAMVersion
from losses.NegPearsonLoss import Neg_Pearson
from losses.SNRLoss import SNRLoss_dB_Signals
from utils.eval import eval_rf_model
# Allow duplicate OpenMP runtimes to coexist (works around the common
# libiomp/MKL "library already initialized" crash).
os.environ['KMP_DUPLICATE_LIB_OK']='True'
# Argparser.
def parseArgs():
    """Parse and return the command-line configuration for the RF training run.

    Groups: data locations (--data-dir, --fitzpatrick-path, --folds-path,
    --fold), runtime (--device, --checkpoints-path, flags), dataloader
    settings, and optimizer/epoch hyperparameters.
    """
    parser = argparse.ArgumentParser(description='Configs for thr RF train script')
    parser.add_argument('-dir', '--data-dir', default="./dataset/rf_files", type=str,
                        help="Parent directory containing the folders with the Pickle files and the Vital signs.")
    parser.add_argument('-fp', '--fitzpatrick-path', type=str,
                        default="./dataset/fitzpatrick_labels.pkl",
                        help='Pickle file containing the fitzpatrick labels.')
    parser.add_argument('--folds-path', type=str,
                        default="./dataset/demo_fold.pkl",
                        help='Pickle file containing the folds.')
    parser.add_argument('--fold', type=int, default=0,
                        help='Fold Number')
    parser.add_argument('--device', type=str, default=None,
                        help="Device on which the model needs to run (input to torch.device). \
                            Don't specify for automatic selection. Will be modified inplace.")
    parser.add_argument('-ckpts','--checkpoints-path', type=str,
                        default="./ckpt/RF_IQ_Net",
                        help='Checkpoint Folder.')
    parser.add_argument('--verbose', action='store_true', help="Verbosity.")
    parser.add_argument('--viz', action='store_true', help="Visualize.")
    # Train args
    parser.add_argument('--batch-size', type=int, default=32,
                        help="Batch Size for the dataloaders.")
    parser.add_argument('--num-workers', type=int, default=2,
                        help="Number of Workers for the dataloaders.")
    parser.add_argument('--train-shuffle', action='store_true', help="Shuffle the train loader.")
    parser.add_argument('--val-shuffle', action='store_true', help="Shuffle the val loader.")
    parser.add_argument('--test-shuffle', action='store_true', help="Shuffle the test loader.")
    parser.add_argument('--train-drop', action='store_true', help="Drop the final sample of the train loader.")
    parser.add_argument('--val-drop', action='store_true', help="Drop the final sample of the val loader.")
    parser.add_argument('--test-drop', action='store_true', help="Drop the final sample of the test loader.")
    parser.add_argument('-lr', '--learning-rate', type=float, default=1e-4,
                        help="Learning Rate for the optimizer.")
    parser.add_argument('-wd', '--weight-decay', type=float, default=1e-2,
                        help="Weight Decay for the optimizer.")
    parser.add_argument('--epochs', type=int, default=200, help="Number of Epochs.")
    parser.add_argument('--checkpoint-period', type=int, default=5,
                        help="Checkpoint save period.")
    parser.add_argument('--epoch-start', type=int, default=1,
                        help="Starting epoch number.")
    return parser.parse_args()
def train_model(args, model, datasets):
    """Train the RF model, checkpointing periodically and tracking best val MAE.

    args: parsed CLI namespace; datasets: dict with "train"/"val"/"test"
    dataset objects. Resumes from <ckpt>/latest_context.pth when present.
    NOTE(review): `best_loss = 1e7` below is never used — `mae_best_loss`
    tracks the best validation MAE instead.
    """
    # Instantiate the dataloaders
    train_dataloader = DataLoader(datasets["train"], batch_size=args.batch_size,
                                  shuffle=args.train_shuffle, drop_last=args.train_drop,
                                  num_workers=args.num_workers)
    val_dataloader = DataLoader(datasets["val"], batch_size=args.batch_size,
                                shuffle=args.val_shuffle, drop_last=args.val_drop,
                                num_workers=args.num_workers)
    test_dataloader = DataLoader(datasets["test"], batch_size=args.batch_size,
                                 shuffle=args.test_shuffle, drop_last=args.test_drop,
                                 num_workers=args.num_workers)
    if args.verbose:
        print(f"Number of train iterations : {len(train_dataloader)}")
        print(f"Number of val iterations : {len(val_dataloader)}")
        print(f"Number of test iterations : {len(test_dataloader)}")
    ckpt_path = args.checkpoints_path
    latest_ckpt_path = os.path.join(os.getcwd(), f"{ckpt_path}/latest_context.pth")
    # Train Essentials
    loss_fn1 = Neg_Pearson()
    loss_fn2 = SNRLoss_dB_Signals()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
    # A high number to remember the best Loss.
    best_loss = 1e7
    # Train configurations
    epochs = args.epochs
    checkpoint_period = args.checkpoint_period
    epoch_start = args.epoch_start
    if os.path.exists(latest_ckpt_path):
        print('Context checkpoint exists. Loading state dictionaries.')
        checkpoint = torch.load(latest_ckpt_path)
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        epoch_start = checkpoint['epoch']
        epoch_start+=1
    if args.verbose:
        print(f"Checkpoint Period={checkpoint_period}. Epoch start = {epoch_start}")
    mae_best_loss = np.inf
    for epoch in range(epoch_start, epochs+1):
        # Training Phase
        loss_train = 0
        r_loss = 0
        snr_loss = 0
        no_batches = 0
        # print("Starting Epoch: {}".format(epoch))
        for batch, (rf, signal) in tqdm(enumerate(train_dataloader), total=len(train_dataloader)):
            model.train()
            # Convert to the appropriate format and mount on the specified device
            # Normalize RF to [-1,1] range
            rf = rf.type(torch.float32)/1.255e5
            rf = rotateIQ(rf)
            rf = torch.reshape(rf, (rf.shape[0], -1, rf.shape[3])).to(args.device)
            signal = signal.type(torch.float32).to(args.device)
            # Predict the PPG signal and find ther loss
            pred_signal, pred_latent = model(rf)
            pred_signal = pred_signal.squeeze(1)
            loss1 = loss_fn1(pred_signal, signal)
            loss2 = loss_fn2(pred_signal, signal)
            # loss = loss3
            # Weighted combination: negative-Pearson term scaled down vs SNR term.
            loss = 0.01*loss1 + loss2
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Accumulate the total loss
            loss_train += loss.item()
            r_loss += loss1.item()
            snr_loss += loss2.item()
            no_batches+=1
        # Save the model every few epochs
        if(epoch % checkpoint_period == 0):
            torch.save(model.state_dict(), os.path.join(os.getcwd(), f"{ckpt_path}/{epoch}.pth"))
            # See if best checkpoint
            maes_val, _, _ = eval_rf_model(root_path=args.data_dir, test_files=datasets["val"].rf_file_list, model=model, device=args.device)
            current_loss = np.mean(maes_val)
            if(current_loss < mae_best_loss):
                mae_best_loss = current_loss
                torch.save(model.state_dict(), os.path.join(os.getcwd(), f"{ckpt_path}/best.pth"))
                print("Best checkpoint saved!")
            print("Saved Checkpoint!")
        print(f"Epoch: {epoch} ; Loss: {loss_train/no_batches:>7f}, Loss R : {r_loss/no_batches:>7f}, SNR Loss : {snr_loss/no_batches:>7f}")
        #SAVE CONTEXT AFTER EPOCH
        torch.save({
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
        }, latest_ckpt_path)
def main(args):
    """Load fold file lists, build datasets/model, then hand off to train_model.

    NOTE(review): fitz_labels_path is read from args but never used here —
    presumably consumed elsewhere; verify.
    """
    # Import essential info, i.e. destination folder and fitzpatrick label path
    destination_folder = args.data_dir
    fitz_labels_path = args.fitzpatrick_path
    ckpt_path = args.checkpoints_path
    with open(args.folds_path, "rb") as fp:
        files_in_fold = pickle.load(fp)
    # The [2:] slice strips a 2-character prefix from each stored path.
    train_files = files_in_fold[args.fold]["train"]
    train_files = [i[2:] for i in train_files]
    val_files = files_in_fold[args.fold]["val"]
    val_files = [i[2:] for i in val_files]
    test_files = files_in_fold[args.fold]["test"]
    test_files = [i[2:] for i in test_files]
    if args.verbose:
        print(f"There are {len(train_files)} train files. They are : {train_files}")
        print(f"There are {len(val_files)} val files. They are : {val_files}")
        print(f"There are {len(test_files)} test files. They are : {test_files}")
    # Dataset
    train_dataset = RFDataRAMVersion(datapath=destination_folder,
                                     datapaths=train_files, frame_length_ppg = 128,
                                     static_dataset_samples=15)
    val_dataset = RFDataRAMVersion(datapath=destination_folder,
                                   datapaths=val_files, frame_length_ppg = 128,
                                   static_dataset_samples=15)
    test_dataset = RFDataRAMVersion(datapath=destination_folder,
                                    datapaths=test_files, frame_length_ppg = 128,
                                    static_dataset_samples=15)
    # Select the device
    if args.device is None:
        args.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    else:
        args.device = torch.device(args.device)
    if args.verbose:
        print('Running on device: {}'.format(args.device))
    # Visualize some examples
    if args.viz:
        train_batch, train_batch_sig = train_dataset[0]
        val_batch, val_batch_sig = val_dataset[0]
        test_batch, test_batch_sig = test_dataset[0]
        if args.verbose:
            print(f"Train data and signal shapes : {train_batch.shape}, {train_batch_sig.shape}")
            print(f"Val data and signal shapes : {val_batch.shape}, {val_batch_sig.shape}")
            print(f"Test data and signal shapes : {test_batch.shape}, {test_batch_sig.shape}")
        plt.figure(); plt.imshow(np.transpose(train_batch[:,0], (1,2,0)))
        plt.figure(); plt.plot(train_batch_sig)
        plt.figure(); plt.imshow(np.transpose(val_batch[:,0], (1,2,0)))
        plt.figure(); plt.plot(val_batch_sig)
        plt.figure(); plt.imshow(np.transpose(test_batch[:,0], (1,2,0)))
        plt.figure(); plt.plot(test_batch_sig)
        plt.show()
    # Create the checkpoints folder if it does not exist
    os.makedirs(ckpt_path, exist_ok=True)
    #Check if Checkpoints exist
    all_ckpts = os.listdir(ckpt_path)
    if(len(all_ckpts) > 0):
        all_ckpts.sort()
        print(f"Checkpoints already exists at : {all_ckpts}")
    else:
        print("No checkpoints found, starting from scratch!")
    datasets = {"train" : train_dataset, "val" : val_dataset, "test" : test_dataset}
    model = RF_conv_decoder().to(args.device)
    train_model(args, model, datasets)
if __name__ == '__main__':
args = parseArgs()
main(args) | UCLA-VMG/EquiPleth | nndl/rf/train.py | train.py | py | 11,112 | python | en | code | 6 | github-code | 13 |
18002512993 | # -*- coding: utf-8 -*-
from django.contrib import admin
from translater.models import TranslatedString,OriginalString
# Admin list view for translated strings: show the text and its target language.
class TranslatedStringAdmin(admin.ModelAdmin):
    list_display = ['translated','To']
admin.site.register(TranslatedString,TranslatedStringAdmin)
# Admin list view for original strings, with a horizontal picker for the
# many-to-many `translations` field.
class OriginalStringAdmin(admin.ModelAdmin):
    list_display = ['original','alreadyTranslated']
    filter_horizontal =['translations']
admin.site.register(OriginalString,OriginalStringAdmin)
| lauro-cesar/Django-template-Tag-Translator | translater/admin.py | admin.py | py | 466 | python | en | code | 4 | github-code | 13 |
26377037128 | #!python
import sys
import numpy as np
import utils
# Metric used to compare candidate histograms against the reference one.
def chi2_distance(histA, histB, eps = 1e-10):
    """Chi-squared distance between two aligned histograms (lower = closer)."""
    # Per-bin contribution; eps keeps the denominator non-zero for empty bins.
    contributions = [((a - b) ** 2) / (a + b + eps)
                     for (a, b) in zip(histA, histB)]
    return 0.5 * np.sum(contributions)
# This function scores a byte array by its similarity to the English letter
# frequency distribution (lower score = more English-like).
def english_score(bytes):
    """Chi-squared distance between the input's byte frequencies and English.

    `bytes` is a sequence of integer byte values. The parameter name shadows
    the builtin `bytes`; kept for backward compatibility with callers.
    Performance fix: the original called `bytes.count(x)` once per element,
    an O(n^2) pass; Counter counts every byte in a single O(n) pass.
    """
    from collections import Counter
    # English letters frequency dictonary from https://www.math.cornell.edu/~mec/2003-2004/cryptography/subs/frequencies.html
    real_hist = {'e':12.02,
                 't':9.10,
                 'a':8.12,
                 'o':7.68,
                 'i':7.31,
                 'n':6.95,
                 's':6.28,
                 'r':6.02,
                 'h':5.92,
                 'd':4.32,
                 'l':3.98,
                 'u':2.88,
                 'c':2.71,
                 'm':2.61,
                 'f':2.30,
                 'y':2.11,
                 'w':2.09,
                 'g':2.03,
                 'p':1.82,
                 'b':1.49,
                 'v':1.11,
                 'k':0.69,
                 'x':0.17,
                 'q':0.11,
                 'j':0.10,
                 'z':0.07};
    # Single-pass relative frequency of each byte value.
    counts = Counter(bytes)
    freq = {value: count / len(bytes) for value, count in counts.items()}
    # Build the input histogram aligned with the reference letter order;
    # letters absent from the input keep frequency 0, as before.
    letters = [ord(x) for x in list(real_hist)];
    hist = [freq.get(code, 0) for code in letters]
    # Compare 2 histograms and calculate score (lower -> better)
    return chi2_distance(hist, real_hist.values());
def XOR_1B_guess_key(bytes):
    """Rank all 256 single-byte XOR keys by English-likeness, best first."""
    scores = {}
    # Try every possible one-byte key and score the resulting plaintext.
    for key in range(256):
        candidate = [key ^ value for value in bytes]
        scores[key] = english_score(candidate)
    # Ascending score: a lower chi-squared distance means more English-like.
    return sorted(scores, key=scores.get)
def XOR_1B_decrypt(bytes):
    """Decrypt a single-byte-XOR ciphertext using the best-ranked key."""
    best_key = XOR_1B_guess_key(bytes)[0]
    return [best_key ^ value for value in bytes]
# Main entry point: decrypt the default Cryptopals challenge-3 ciphertext.
def main(hex_str='1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736'):
    # Convert the hex string input to a list of byte values.
    bytes = utils.hexstr_bytes(hex_str)
    # Perform decryption.
    decrypted_bytes = XOR_1B_decrypt(bytes);
    # Print output.
    print(utils.bytes_string(decrypted_bytes));
if __name__ == "__main__":
main() | MarkLuk/cryptopals | challenge03.py | challenge03.py | py | 2,790 | python | en | code | 0 | github-code | 13 |
72392505619 | import cv2 as cv
import numpy as np
# 800x800 3-channel canvas filled with value 1 (near-black).
img = np.ones((800,800,3), dtype='uint8')
font = cv.FONT_HERSHEY_COMPLEX
# putText format is... image,text,position,font,font-size,color,line-size,line-type
cv.putText(img, 'Naruto Uzumaki',(150,400), font, 2, (255,255,255), 6, cv.LINE_4)
# Show the result and wait for a key press before closing.
cv.imshow('Writing Text',img)
cv.waitKey()
cv.destroyAllWindows() | Reaper-Dhan/OpenCV-Learning | Drawing/text.py | text.py | py | 350 | python | en | code | 0 | github-code | 13 |
19415005286 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mlp
from matplotlib.font_manager import fontManager
# Register a Chinese-capable font so matplotlib labels render correctly.
fontManager.addfont('ChineseFont.ttf')
mlp.rc('font', family='ChineseFont')
data = pd.read_csv('./Salary_Data.csv')
# print(data)
# Mathematically a straight line is y = w * x + b,
# i.e. monthly salary = w * years_of_experience + b.
x = data['YearsExperience']
y = data['Salary']
# x, y: observed data; (w, b): slope and intercept of the candidate line.
def computeCost(x, y, w, b):
    """Mean squared error of the line y = w*x + b over the data set."""
    residuals = y - (w * x + b)
    squared = residuals ** 2
    # Average the squared residuals over all samples.
    return squared.sum() / len(x)
# Gradient of the cost with respect to w and b.
def computeGradient(x, y, w, b):
    """Return (dC/dw, dC/db) for the squared-error cost at (w, b)."""
    error = w * x + b - y
    # mean(): average over all samples.
    return (x * error).mean(), error.mean()
# Gradient-descent driver.
#   x, y: observed data
#   learningRate: step size
#   runIter: number of update steps to run
#   pIter: print progress every pIter steps
def gradientDescent(x, y, wInit, bInit, learningRate, costFunction, gradientFunction, runIter, pIter=1000):
    """Run `runIter` gradient-descent steps; return final (w, b) plus histories."""
    w, b = wInit, bInit
    wHist, bHist, cHist = [], [], []
    for i in range(runIter):
        wGradient, bGradient = gradientFunction(x, y, w, b)
        # Step against the gradient direction.
        w -= wGradient * learningRate
        b -= bGradient * learningRate
        cost = costFunction(x, y, w, b)
        wHist.append(w)
        bHist.append(b)
        cHist.append(cost)
        if i % pIter == 0:
            print(f'Ieration: {i:5}, Cost: {cost:.2e}, w: {w:.2e}, b: {b:.2e}, w gradient: {wGradient:.2e}, b gradient: {bGradient:.2e}')
    return w, b, wHist, bHist, cHist
# Fit the line, report predictions, then visualize the cost surface in 3D.
wInit = 50
bInit = 50
learningRate = 1.0e-3
runIter = 100000
wFinal, bFinal, wHist, bHist, cHist = gradientDescent(x, y, wInit, bInit, learningRate, computeCost, computeGradient, runIter)
print(f'Final w: {wFinal:.2f}, b: {bFinal:.2f}')
print(f'年資3.5 預測薪資: {wFinal * 3.5 + bFinal:.1f}K')
print(f'年資5.9 預測薪資: {wFinal * 5.9 + bFinal:.1f}K')
# Cost over the grid w = -100..100, b = -100..100.
# arange: values from -100 through 100 inclusive.
ws = np.arange(-100, 101)
bs = np.arange(-100, 101)
# zeros: 201x201 matrix initialized to 0, one cell per (w, b) pair.
costs = np.zeros((201, 201))
# Nested loops visit every (w, b) combination.
i = 0
for w in ws:
    j = 0
    for b in bs:
        cost = computeCost(x, y, w, b)
        costs[i, j] = cost
        j += 1
    i += 1
# Plot every result as a 3D surface.
ax = plt.axes(projection='3d')
ax.xaxis.set_pane_color((0, 0, 0))
ax.yaxis.set_pane_color((0, 0, 0))
ax.zaxis.set_pane_color((0, 0, 0))
# Build the 3D grid.
bGrid, wGrid = np.meshgrid(bs, ws)
ax.plot_surface(bGrid, wGrid, costs, cmap='Spectral_r', alpha=0.7) # cmap: surface colormap, alpha: transparency
# NOTE(review): wGrid is passed twice below — the first argument should
# presumably be bGrid to match plot_surface above; verify.
ax.plot_wireframe(wGrid, wGrid, costs, color='black', alpha=0.1) # wireframe overlay
ax.set_title('w b 對應的 cost')
ax.set_xlabel('w')
ax.set_ylabel('b')
ax.set_zlabel('cost')
# Locate the minimum-cost cell.
wIndex, bIndex = np.where(costs == np.min(costs))
ax.scatter(ws[wIndex], bs[bIndex], costs[wIndex, bIndex], color='red', s=40)
ax.scatter(wHist[0], bHist[0], cHist[0], color='green', s=40)
ax.plot(wHist, bHist, cHist)
print(f'當w={ws[wIndex]}和b={bs[bIndex]}會有最小cost: {costs[wIndex, bIndex]}')
plt.show() | IOUKI/simple-linear-regression | gradientDescent.py | gradientDescent.py | py | 3,408 | python | en | code | 0 | github-code | 13 |
72570257939 | import lightning.pytorch as pl
import torch.nn as nn
import torch
import math
from model.transformer.encoder_layer import EncoderLayer
from model.transformer.positional_encoding import positional_encoding
class Encoder(nn.Module):
    def __init__(self, num_layers, d_model, num_heads, dff, input_vocab_size,
                 maximum_position_encoding, word_emb, rate=0.1):
        """Stack of Transformer encoder layers over a pretrained embedding.

        word_emb: pretrained embedding matrix loaded (frozen by default via
        Embedding.from_pretrained). rate: dropout probability.
        NOTE(review): input_vocab_size is accepted but unused — the vocabulary
        size comes from word_emb's shape; verify it can be dropped.
        """
        super(Encoder, self).__init__()
        self.d_model = d_model
        self.num_layers = num_layers
        self.word_emb = torch.FloatTensor(word_emb)
        self.embedding = nn.Embedding.from_pretrained(self.word_emb)
        self.pos_encoding = positional_encoding(maximum_position_encoding, d_model)
        self.enc_layers = nn.ModuleList([ EncoderLayer(d_model, num_heads, dff) for _ in range(num_layers)])
        self.dropout = nn.Dropout(rate)
    def forward(self, x, mask):
        """Embed token ids, add positional encoding, and run the encoder stack.

        x: (batch_size, input_seq_len) token ids; mask is forwarded to each
        encoder layer.
        NOTE(review): self.device is never assigned in __init__ (nn.Module has
        no such attribute by default) — presumably set externally; verify.
        """
        seq_len = x.shape[1]
        # adding embedding and position encoding.
        x = self.embedding(x) # (batch_size, input_seq_len, d_model)
        # Scale embeddings by sqrt(d_model) before adding positions.
        x *= math.sqrt(self.d_model)
        # pos_encoding lives on CPU, hence the .cpu() round-trip here.
        x = (x.cpu() + self.pos_encoding[:, :seq_len, :]).to(self.device)
        x = self.dropout(x)
        # for i in range(self.num_layers):
        # x = self.enc_layers[i](x,mask)
        for layer in self.enc_layers:
            x = layer(x, mask)
return x # (batch_size, input_seq_len, d_model) | Junhua9981/NCU_NLP_Assignments | Homework1_NER_WNUT2016/model/transformer/encoder.py | encoder.py | py | 1,299 | python | en | code | 0 | github-code | 13 |
25119010319 | from __future__ import print_function
from ape1_and_apeplan import ipcArgs, envArgs, APE1, APEplan
from shared.dataStructures import PlanArgs
from timer import globalTimer, SetMode
#from time import time
from state import ReinitializeState, RemoveLocksFromState
import threading
import colorama
from shared import GLOBALS
__author__ = 'patras'
domain_module = None
def GetNextAlive(lastActiveStack, numstacks, threadList):
    '''
    :param lastActiveStack: the stack which was progressed before this
    :param numstacks: total number of stacks in the Agenda
    :param threadList: list of all the threads, each running a RAE stack
    :return: the 1-based id of the next stack whose thread is alive, scanning
             circularly starting after lastActiveStack; -1 if none are alive
    '''
    candidate = lastActiveStack % numstacks + 1
    for _ in range(numstacks):
        if threadList[candidate - 1].isAlive() == True:
            return candidate
        candidate = candidate % numstacks + 1
    return -1
def GetNewTasks():
    '''
    :return: the list of new tasks that appear in the problem at the current
             time step (empty list when nothing arrives now)
    '''
    # The call counter doubles as the simulation "clock" for task arrival.
    GetNewTasks.counter += 1
    if GetNewTasks.counter not in domain_module.tasks:
        return []
    return domain_module.tasks[GetNewTasks.counter]
def InitializeDomain(domain, problem):
    '''
    Import and reset the domain module named "<problem>_<domain>".

    :param domain: code of the domain which you are running (one of the
                   two-letter codes below, or 'test')
    :param problem: id of the problem
    :return: the imported domain module (also stored in the global
             `domain_module`); exits the process with code 11 on a bad domain
    '''
    if domain in ['CR', 'SD', 'EE', 'IP', 'PD', 'SR', 'test']:
        module = problem + '_' + domain
        global domain_module
        ReinitializeState() # useful for batch runs to start with the first state
        domain_module = __import__(module)
        domain_module.ResetState()
        return domain_module
    else:
        print("Invalid domain\n", domain)
        exit(11)
def BeginFreshIteration(lastActiveStack, numstacks, threadList):
    """Return True when no stack between lastActiveStack and the wrap-around
    point (stack 1) is still alive, i.e. a fresh round-robin pass may begin."""
    idx = lastActiveStack % numstacks + 1
    while idx != 1:
        if threadList[idx - 1].isAlive():
            return False
        idx = idx % numstacks + 1
    return True
def CreateNewStack(taskInfo, raeArgs):
    """Thread target: run APE1 on one task and record its outcome.

    Stores ([task] + taskArgs, return code, retry count, per-command counts,
    efficiency) into taskInfo under this stack's id.
    """
    stackid = raeArgs.stack
    retcode, retryCount, commandCount, eff = APE1(raeArgs.task, raeArgs)
    taskInfo[stackid] = ([raeArgs.task] + raeArgs.taskArgs, retcode, retryCount, commandCount, eff)
def PrintResult(taskInfo):
    """Print one tab-separated line per stack: id, task, result, retries,
    per-command counts and efficiency."""
    for stackid, record in taskInfo.items():
        args, res, retryCount, commandCount, eff = record
        task_desc = 'Task {}{}'.format(args[0], args[1:])
        print(stackid, '\t', task_desc, '\t\t', res, '\t\t', retryCount, '\t\t', commandCount, '\t\t', eff, '\n')
def PrintResultSummary(taskInfo):
    """Print a one-line aggregate summary across all stacks:
    successes, total tasks, total retries, simulation counter,
    real-command counter, and total efficiency."""
    succ = 0
    fail = 0
    retries = 0
    cmdNet = {}
    effTotal = 0
    for stackid in taskInfo:
        args, res, retryCount, commandCount, eff = taskInfo[stackid]
        if res == 'Success':
            succ += 1
        else:
            fail += 1
        retries += retryCount
        effTotal += eff
        # Merge this stack's per-command counts into the aggregate.
        # (Bug fix: the old code iterated over cmdNet's keys, so any command
        # first seen in a later stack was silently dropped.)
        for cmd in commandCount:
            cmdNet[cmd] = cmdNet.get(cmd, 0) + commandCount[cmd]
    # Bug fix: report the accumulated `retries`, not the last stack's
    # `retryCount` (which also raised NameError when taskInfo was empty).
    print(succ, succ+fail, retries, globalTimer.GetSimulationCounter(), globalTimer.GetRealCommandExecutionCounter(), effTotal)
    #print(' '.join('-'.join([key, str(cmdNet[key])]) for key in cmdNet))
def StartEnv():
    """Environment thread: each time the main loop raises envArgs.envActive,
    fire any event scheduled for the current time step (from
    domain_module.eventsEnv, keyed by call count), then hand control back.
    Exits when envArgs.exit is set."""
    while(True):
        # Busy-wait until the main loop signals an environment turn.
        while(envArgs.envActive == False):
            pass
        envArgs.sem.acquire()
        if envArgs.exit == True:
            return
        StartEnv.counter += 1
        if StartEnv.counter in domain_module.eventsEnv:
            eventArgs = domain_module.eventsEnv[StartEnv.counter]
            event = eventArgs[0]
            eventParams = eventArgs[1]
            t = threading.Thread(target=event, args=eventParams)
            t.setDaemon(True) # Setting the environment thread to daemon because we don't want the environment running once the tasks are done
            t.start()
        envArgs.envActive = False
        envArgs.sem.release()
def raeMult():
    """Main RAE/APE loop: round-robin-progress one thread ("stack") per task,
    accept new tasks between rounds, and tick the environment once per round.
    Returns taskInfo, a dict mapping stack id -> result tuple.

    NOTE(review): this uses the module-level name `globals` (e.g.
    globals.GetShowOutputs / globals.RaeArgs) although the visible import is
    `from shared import GLOBALS` — presumably `globals` is bound elsewhere;
    verify against the rest of the file.
    """
    if globals.GetShowOutputs() == 'on':
        print("Starting APE\n", colorama.Style.RESET_ALL)
    ipcArgs.sem = threading.Semaphore(1) #the semaphore to control progress of each stack and master
    ipcArgs.nextStack = 0 #the master thread is the next in line to be executed, which adds a new stack for every new task
    ipcArgs.threadList = [] #keeps track of all the stacks in RAE Agenda
    lastActiveStack = 0 #keeps track of the last stack that was Progressed
    numstacks = 0 #keeps track of the total number of stacks
    GetNewTasks.counter = 0
    StartEnv.counter = 0
    taskInfo = {}
    envArgs.sem = threading.Semaphore(1)
    envArgs.envActive = False
    envArgs.exit = False
    envThread = threading.Thread(target=StartEnv)
    #startTime = time()
    envThread.start()
    while (True):
        # Only the master acts when no stack is scheduled or the scheduled one died.
        if ipcArgs.nextStack == 0 or ipcArgs.threadList[ipcArgs.nextStack-1].isAlive() == False:
            ipcArgs.sem.acquire()
            if numstacks == 0 or BeginFreshIteration(lastActiveStack, numstacks, ipcArgs.threadList) == True: # Check for incoming tasks after progressing all stacks
                taskParams = GetNewTasks()
                if taskParams != []:
                    # One new stack (thread) per arriving task.
                    for newTask in taskParams:
                        numstacks = numstacks + 1
                        raeArgs = globals.RaeArgs()
                        raeArgs.stack = numstacks
                        raeArgs.task = newTask[0]
                        raeArgs.taskArgs = newTask[1:]
                        ipcArgs.threadList.append(threading.Thread(target=CreateNewStack, args = (taskInfo, raeArgs)))
                        ipcArgs.threadList[numstacks-1].start()
                lastActiveStack = 0 # for the environment
                # Give the environment thread one turn, then advance the clock.
                envArgs.envActive = True
                envArgs.sem.release()
                while(envArgs.envActive == True):
                    pass
                envArgs.sem.acquire()
                globalTimer.IncrementTime()
            if numstacks > 0:
                res = GetNextAlive(lastActiveStack, numstacks, ipcArgs.threadList)
                if res != -1:
                    ipcArgs.nextStack = res
                    lastActiveStack = res
                    ipcArgs.sem.release()
                else:
                    # All stacks finished: tell the environment to exit and stop.
                    envArgs.envActive = True
                    envArgs.exit = True
                    envArgs.sem.release()
                    break
            else:
                ipcArgs.sem.release()
    if globals.GetShowOutputs() == 'on':
        print("----Done with RAE----\n")
        PrintResult(taskInfo)
    else:
        PrintResultSummary(taskInfo)
    #globalTimer.Callibrate(startTime)
    return taskInfo # for unit tests
def CreateNewStackSimulation(pArgs, queue):
    """Thread target for planning mode: run APEplan on pArgs' task and push
    the resulting (tree, planningTime) pair onto the queue."""
    tree, planningTime = APEplan(pArgs.GetTask(), pArgs)
    queue.put((tree, planningTime))
def APEPlanMain(task, taskArgs, queue, candidateMethods):
    """Run APEplan in simulation mode for a single task and deliver the
    (tree, planningTime) result via `queue`.

    :param task: task name to plan for
    :param taskArgs: arguments of the task
    :param queue: queue receiving the planner's result
    :param candidateMethods: candidate methods the planner may try
    """
    # Simulating one stack now
    # TODO: Simulate multiple stacks in future
    SetMode('Counter') #Counter mode in simulation
    globals.SetPlanningMode(True)
    RemoveLocksFromState()
    pArgs = PlanArgs()
    pArgs.SetTaskArgs(taskArgs)
    pArgs.SetStackId(1)
    pArgs.SetTask(task)
    pArgs.SetCandidates(candidateMethods)
    #ipcArgs.nextStack = 0
    #ipcArgs.sem = threading.Semaphore(1)
    # Single planning stack; join blocks until the plan is complete.
    thread = threading.Thread(target=CreateNewStackSimulation, args=[pArgs, queue])
    thread.start()
    thread.join()
    #while(True):
    #    if ipcArgs.nextStack == 0 or thread.isAlive() == False:
    #        ipcArgs.sem.acquire()
    #        globalTimer.IncrementTime()
    #        if thread.isAlive() == False:
    #            break
    #        else:
    #            ipcArgs.nextStack = 1
    #            ipcArgs.sem.release()
26388593480 | #!/usr/bin/python3
"""
module 1
"""
from sys import argv
saving = __import__("7-save_to_json_file").save_to_json_file
loading = __import__("8-load_from_json_file").load_from_json_file
# Load the existing list from add_item.json (start from an empty list if the
# file is missing or unreadable), append all command-line arguments, and save.
try:
    x = loading("add_item.json")
except Exception:
    x = []
x.extend(argv[1:])
saving(x, "add_item.json")
| nourouhichi/higher_level_programming | 0x0B-python-input_output/9-add_item.py | 9-add_item.py | py | 319 | python | en | code | 0 | github-code | 13 |
37449835905 | import sys
sys.path.insert(0, './yolov5')
import os
from pathlib import Path
import cv2
import torch
from yolov5.models.common import DetectMultiBackend
from yolov5.utils.datasets import LoadImages
from yolov5.utils.general import LOGGER, check_img_size, non_max_suppression, scale_coords, check_imshow, xyxy2xywh, \
increment_path
from yolov5.utils.torch_utils import select_device, time_sync
from yolov5.utils.plots import Annotator, colors
from reid.reid import ReID
from gooey import Gooey, GooeyParser
def track(arguments):
    """Run YOLOv5 pedestrian detection + ReID tracking over a video file.

    :param arguments: parsed Gooey/argparse namespace with video_file,
                      reid_model and save_results attributes
    Side effects: displays frames (if a display is available) and optionally
    writes an annotated .mp4 into the hard-coded 'output' directory.
    """
    # PARSED ARGS
    video_file = arguments.video_file
    reid_model = arguments.reid_model
    # NOTE(review): save_results comes from action="store_false" (default True),
    # so is_save defaults to False — confirm this inversion is intended.
    is_save = not arguments.save_results
    # HARDCODED ARGS
    yolo_model = 'yolov5/weights/yolov5m_pedestrian.pt'
    output_dir = 'output'
    image_size = [640, 640]
    is_export_yolo_stages = False
    device = select_device(0)
    if is_save and not os.path.exists(output_dir):
        os.makedirs(output_dir)
    reid = ReID(reid_model)
    model = DetectMultiBackend(yolo_model, device=device, dnn=False)
    stride = model.stride
    pt = model.pt
    jit = model.jit
    # Use fp16 inference only on GPU with a PyTorch backend.
    half = device.type != 'cpu' and pt
    if pt:
        model.model.half() if half else model.model.float()
    is_display = check_imshow()
    image_size = check_img_size(image_size, s=stride)
    dataset = LoadImages(video_file, img_size=image_size, stride=stride, auto=pt and not jit)
    path_video = write_video = [None]
    names = model.module.names if hasattr(model, 'module') else model.names
    # Warm-up pass on GPU so the first real frame isn't slowed by lazy init.
    if pt and device.type != 'cpu':
        model(torch.zeros(1, 3, *image_size).to(device).type_as(next(model.model.parameters())))
    counter = 0
    for _, (path, image, image_0s, cap, info) in enumerate(dataset):
        image = torch.from_numpy(image).to(device)
        image = image.half() if half else image.float()
        image /= 255.0
        if image.ndimension() == 3:
            image = image.unsqueeze(0)
        time_start_det = time_sync()
        is_export_yolo_stages = increment_path(
            str(Path(output_dir)) + '/' + Path(path).stem,
            mkdir=True
        ) if is_export_yolo_stages else False
        predictions = model(image, augment=False, visualize=is_export_yolo_stages)
        time_end_det = time_sync()
        predictions = non_max_suppression(
            predictions,
            0.5, 0.7,  # CONF // IOU
            0,  # CLASSES 0 (PEDESTRIAN)
            False,  # AGNOSTIC
            1000  # MAX DETECTIONS
        )
        info = info.upper()
        for i, detection in enumerate(predictions):  # detections per image
            counter += 1
            ori_image = image_0s.copy()
            proc_info = '%gx%g ' % image.shape[2:]  # print string
            save_path = str(Path(output_dir) / Path(path).name)
            annotator = Annotator(ori_image, line_width=2, pil=not ascii)
            if detection is not None and len(detection):
                # Rescale boxes from model input size back to original frame size.
                detection[:, :4] = scale_coords(image.shape[2:], detection[:, :4], ori_image.shape).round()
                bbox_xywh = xyxy2xywh(detection[:, 0:4])
                confidences = detection[:, 4]
                classes = detection[:, 5]
                for detected_class in detection[:, -1].unique():
                    amount = (detection[:, -1] == detected_class).sum()  # detections per class
                    proc_info += f"{amount} {names[int(detected_class)]}{'s' * (amount > 1)}, ".upper()  # add to string
                # Associate detections with persistent identities.
                time_start_reid = time_sync()
                results = reid.forward(bbox_xywh.cpu(), confidences.cpu(), classes.cpu(), ori_image)
                time_end_reid = time_sync()
                if len(results) > 0:
                    for j, (result, conf) in enumerate(zip(results, confidences)):
                        class_num = result[5]
                        identifier = result[4]
                        bboxes = result[0:4]
                        detected_class = int(class_num)  # integer class
                        label = f'{identifier} {names[detected_class]} {conf:.2f}'
                        annotator.box_label(bboxes, label, color=colors(int(identifier % 255), True))
            else:
                # No detections: age existing tracks so stale ones die off.
                reid.increment_ages()
            LOGGER.info(
                f'[\n'
                f'\tTIME :: {time_end_det - time_start_det + time_end_reid - time_start_reid:.3f}s\n'
                f'\tDATA :: {info}.\n'
                f'\tPROC :: {proc_info}\n'
                f']'
            )
            ori_image = annotator.result()
            if is_save:
                # Open a new writer whenever the source video changes.
                if path_video != save_path:
                    path_video = save_path
                    if isinstance(write_video, cv2.VideoWriter):
                        write_video.release()
                    if cap:
                        w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                        h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        fps = cap.get(cv2.CAP_PROP_FPS)
                    else:
                        fps = 30
                        w, h = ori_image.shape[1], ori_image.shape[0]
                    save_path += '.mp4'
                    write_video = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
                write_video.write(ori_image)
            if is_display:
                cv2.imshow(path, ori_image)
                if cv2.waitKey(1) == ord('q'):
                    raise StopIteration
    if is_save:
        LOGGER.info(f'[\n\t SAVED TO {os.getcwd() + os.sep + output_dir} \n]')
@Gooey(
    program_name="Tracker",
    default_size=(800, 600),
)
def main():
    """GUI entry point: collect tracking parameters through Gooey and run
    track() under torch.no_grad()."""
    parser = GooeyParser(description="Tracking params")
    req_group = parser.add_argument_group(
        "Main",
        "Mandatory params",
        gooey_options={
            'columns': 1
        }
    )
    req_group.add_argument(
        '--video_file',
        metavar="File",
        required=True,
        help="Pick video file for tracking from your machine",
        widget="FileChooser",
        nargs="?",
        gooey_options={
            'wildcard':
                'MP4 (*.mp4)|*.mp4',
            'message': 'Choose video to track'
        }
    )
    # The choices below are the model names supported by the ReID backend.
    req_group.add_argument(
        '--reid_model',
        metavar="ReID Model",
        required=True,
        help="Choose ReID net model from the list below",
        choices=[
            'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
            'resnet50_fc512', 'se_resnet50', 'se_resnet50_fc512', 'se_resnet101', 'se_resnext50_32x4d',
            'se_resnext101_32x4d', 'densenet121', 'densenet169', 'densenet201', 'densenet161', 'densenet121_fc512',
            'inceptionresnetv2', 'inceptionv4', 'xception', 'resnet50_ibn_a', 'resnet50_ibn_b', 'nasnsetmobile',
            'mobilenetv2_x1_0', 'mobilenetv2_x1_4', 'shufflenet', 'squeezenet1_0', 'squeezenet1_0_fc512',
            'squeezenet1_1', 'shufflenet_v2_x0_5', 'shufflenet_v2_x1_0', 'shufflenet_v2_x1_5', 'shufflenet_v2_x2_0',
            'mudeep', 'resnet50mid', 'hacnn', 'pcb_p6', 'pcb_p4', 'mlfn', 'osnet_x1_0', 'osnet_x0_75', 'osnet_x0_5',
            'osnet_x0_25', 'osnet_ibn_x1_0', 'osnet_ain_x1_0', 'osnet_ain_x0_75', 'osnet_ain_x0_5',
            'osnet_ain_x0_25'
        ]
    )
    opt_group = parser.add_argument_group("Optional params")
    opt_group.add_argument(
        '--save_results',
        metavar="Save",
        help="Save resulting video",
        action="store_false"
    )
    arguments = parser.parse_args()
    # Inference only: disable autograd for speed and lower memory use.
    with torch.no_grad():
        track(arguments)
| nk-v/Tracker | main.py | main.py | py | 7,664 | python | en | code | 0 | github-code | 13 |
10686719159 |
import random
from qlearnexamples import *
# The Q-Learning Algorithm
# EXERCISE ASSIGNMENT:
# Implement the Q-learning algorithm for MDPs.
# The Q-values are represented as a Python dictionary Q[s,a],
# which is a mapping from the state indices s=0..stateMax to
# and actions a to the Q-values.
#
# Choice of actions can be completely random, or, if you are interested,
# you could implement some scheme that prefers better actions, e.g.
# based on Multi-arm Bandit problems (find more about these in the literature:
# this is an optional addition to the programming assignment.)
# OPTIONAL FUNCTIONS:
# You can implement and use the auxiliary functions bestActionFor and execute
# if you want, as auxiliary functions for Qlearning and makePolicy and makeValues.
# bestActionFor chooses the best action for 'state', given Q values
def bestActionFor(mdp, state, Q):
    """Return the applicable action with the highest Q-value in `state`,
    or -1 when no action is applicable. Ties go to the earliest action."""
    actions = mdp.applicableActions(state)
    if not actions:
        return -1
    return max(actions, key=lambda action: Q[state, action])
# valueOfBestAction gives the value of best action for 'state'
def valueOfBestAction(mdp, state, Q):
    """Return the Q-value of the greedy action in `state`, or 0 when no
    action is applicable."""
    greedy = bestActionFor(mdp, state, Q)
    return 0 if greedy == -1 else Q[state, greedy]
# 'execute' randomly chooses a successor state for state s w.r.t. action a.
# The probability with which is given successor is chosen must respect
# to the probability given by mdp.successors(s,a).
# It returns a tuple (s2,r), where s2 is the successor state and r is
# the reward that was obtained.
def execute(mdp, s, a):
    """Randomly sample a successor of state `s` under action `a`.

    Sampling respects the exact probabilities returned by
    mdp.successors(s, a) (list of (state, probability, reward) triples).

    :return: tuple (s2, r) of sampled successor state and its reward

    Fixes vs. the previous version: probabilities were quantized via
    int(100 * p), which silently dropped successors with p < 0.005 and
    biased the distribution; the state parameter `s` was also shadowed
    by the loop variable.
    """
    succs = mdp.successors(s, a)
    states = [succ[0] for succ in succs]
    probs = [succ[1] for succ in succs]
    rewards = [succ[2] for succ in succs]
    # Sample an index so that the reward returned matches the sampled triple.
    idx = random.choices(range(len(states)), weights=probs, k=1)[0]
    return (states[idx], rewards[idx])
# OBLIGATORY FUNCTION:
# Qlearning returns the Q-value function after performing the given
# number of iterations i.e. Q-value updates.
def Qlearning(mdp, gamma, lambd, iterations):
    """Run tabular Q-learning and return the learned Q-value function.

    :param mdp: MDP with stateMax, applicableActions and successors
    :param gamma: discount factor
    :param lambd: learning rate
    :param iterations: number of Q-value updates to perform
    :return: dict Q[s, a] mapping (state, action) pairs to values

    Fixes vs. the previous version: the loop tested `a == -1` using the
    leftover loop variable from initialization (random.choice never returns
    -1), and random.choice raised IndexError on dead-end states; we now
    terminate cleanly when the current state has no applicable action.
    """
    # The Q-values are a real-valued dictionary Q[s,a] where s is a state and a is an action.
    Q = dict()
    for state in range(mdp.stateMax + 1):
        for action in mdp.applicableActions(state):
            Q[state, action] = 0
    s = 0
    for _ in range(iterations):
        applicable = mdp.applicableActions(s)
        if not applicable:
            break  # dead-end state: no further episodes possible from here
        a = random.choice(applicable)
        s2, r = execute(mdp, s, a)
        # Standard Q-learning update with learning rate lambd.
        Q[s, a] = (1 - lambd) * Q[s, a] + lambd * (r + gamma * valueOfBestAction(mdp, s2, Q))
        s = s2
    return Q
# OBLIGATORY FUNCTION:
# makePolicy constructs a policy, i.e. a mapping from state to actions,
# given a Q-value function as produced by Qlearning.
def makePolicy(mdp, Q):
    """Build the greedy policy for the given Q-values: a dict P[s] mapping
    each state s to its best action (-1 for states with no applicable action)."""
    return {s: bestActionFor(mdp, s, Q) for s in range(mdp.stateMax + 1)}
# OBLIGATORY FUNCTION:
# makeValues constructs the value function, i.e. a mapping from states to values,
# given a Q-value function as produced by Qlearning.
def makeValues(mdp, Q):
    """Build the state-value function from Q-values: a dict V[s] mapping each
    state s to the value of its best action (0 for dead-end states)."""
    return {s: valueOfBestAction(mdp, s, Q) for s in range(mdp.stateMax + 1)}
| beyzabutun/Artificial-Intelligence | qlearn/qlearn.py | qlearn.py | py | 3,531 | python | en | code | 0 | github-code | 13 |
3870210741 | import hppfcl
import numpy as np
import meshcat
import meshcat.geometry as mg
import meshcat.transformations as tf
import pinocchio as pin
from distutils.version import LooseVersion
import warnings
from typing import Any, Dict, Union, List
MsgType = Dict[str, Union[str, bytes, bool, float, 'MsgType']]
def npToTTuple(M):
    """Convert a 2-D numpy array into a tuple of row tuples."""
    return tuple(tuple(row) for row in M.tolist())
def npToTuple(M):
    """Flatten a 1-D array, a single row, or a single column into a flat
    tuple; fall back to a tuple of row tuples for general 2-D arrays."""
    if M.ndim == 1:
        return tuple(M.tolist())
    if M.shape[0] == 1:
        return tuple(M.tolist()[0])
    if M.shape[1] == 1:
        return tuple(M.T.tolist()[0])
    return npToTTuple(M)
def load_primitive(geom: hppfcl.ShapeBase):
    """Map an hppfcl primitive shape to an equivalent meshcat geometry object.

    Returns None (with a UserWarning) for unsupported shape types.
    """
    import meshcat.geometry as mg
    # Cylinders need to be rotated
    # meshcat cylinders are Y-up while hppfcl is Z-up; this transform swaps axes.
    basic_three_js_transform = np.array([[1., 0., 0., 0.],
                 [0., 0., -1., 0.],
                 [0., 1., 0., 0.],
                 [0., 0., 0., 1.]])
    RotatedCylinder = type("RotatedCylinder", (mg.Cylinder,), {"intrinsic_transform": lambda self: basic_three_js_transform })
    # Cones need to be rotated
    if isinstance(geom, hppfcl.Capsule):
        if hasattr(mg, 'TriangularMeshGeometry'):
            obj = createCapsule(2. * geom.halfLength, geom.radius)
        else:
            obj = RotatedCylinder(2. * geom.halfLength, geom.radius)
    elif isinstance(geom, hppfcl.Cylinder):
        obj = RotatedCylinder(2. * geom.halfLength, geom.radius)
    elif isinstance(geom, hppfcl.Cone):
        obj = RotatedCylinder(2. * geom.halfLength, 0, geom.radius, 0)
    elif isinstance(geom, hppfcl.Box):
        obj = mg.Box(npToTuple(2. * geom.halfSide))
    elif isinstance(geom, hppfcl.Sphere):
        obj = mg.Sphere(geom.radius)
    elif isinstance(geom, hppfcl.Plane):
        To = np.eye(4)
        To[:3, 3] = geom.d * geom.n
        TranslatedPlane = type("TranslatedPlane", (mg.Plane,), {"intrinsic_transform": lambda self: To})
        sx = 10
        sy = 10
        obj = TranslatedPlane(sx, sy)
    elif isinstance(geom, hppfcl.Ellipsoid):
        obj = mg.Ellipsoid(geom.radii)
    # NOTE(review): hppfcl.Plane is already matched by the branch above, so
    # the Plane half of this tuple is unreachable; only Halfspace reaches here.
    elif isinstance(geom, (hppfcl.Plane,hppfcl.Halfspace)):
        plane_transform : pin.SE3 = pin.SE3.Identity()
        # plane_transform.translation[:] = geom.d # Does not work
        plane_transform.rotation = pin.Quaternion.FromTwoVectors(pin.ZAxis,geom.n).toRotationMatrix()
        TransformedPlane = type("TransformedPlane", (Plane,), {"intrinsic_transform": lambda self: plane_transform.homogeneous })
        obj = TransformedPlane(1000,1000)
    elif isinstance(geom, hppfcl.ConvexBase):
        obj = loadMesh(geom)
    else:
        msg = "Unsupported geometry type for (%s)" % (type(geom) )
        warnings.warn(msg, category=UserWarning, stacklevel=2)
        obj = None
    return obj
def loadMesh(mesh):
    """Convert an hppfcl mesh (Convex or BVHModel) into a meshcat geometry.

    Falls back to a point cloud when the mesh has no triangles.
    """
    if isinstance(mesh,(hppfcl.Convex,hppfcl.BVHModelBase)):
        # The two hppfcl mesh types expose the same data under different names.
        if isinstance(mesh,hppfcl.BVHModelBase):
            num_vertices = mesh.num_vertices
            num_tris = mesh.num_tris
            call_triangles = mesh.tri_indices
            call_vertices = mesh.vertices
        elif isinstance(mesh,hppfcl.Convex):
            num_vertices = mesh.num_points
            num_tris = mesh.num_polygons
            call_triangles = mesh.polygons
            call_vertices = mesh.points
        faces = np.empty((num_tris,3),dtype=int)
        for k in range(num_tris):
            tri = call_triangles(k)
            faces[k] = [tri[i] for i in range(3)]
        # hppfcl >= 1.7.7 returns all vertices at once; older versions are per-index.
        if LooseVersion(hppfcl.__version__) >= LooseVersion("1.7.7"):
            vertices = call_vertices()
        else:
            vertices = np.empty((num_vertices,3))
            for k in range(num_vertices):
                vertices[k] = call_vertices(k)
        vertices = vertices.astype(np.float32)
        if num_tris > 0:
            mesh = mg.TriangularMeshGeometry(vertices, faces)
        else:
            # No faces available: render the raw vertices as small white points.
            mesh = mg.Points(
                mg.PointsGeometry(vertices.T, color=np.repeat(np.ones((3,1)),num_vertices,axis=1)),
                mg.PointsMaterial(size=0.002))
    return mesh
def createCapsule(length, radius, radial_resolution = 30, cap_resolution = 10):
    """Triangulate a capsule (cylinder of `length` capped by two hemispheres
    of `radius`) aligned with the Z axis, as a meshcat TriangularMeshGeometry.

    :param radial_resolution: number of segments around the axis (min 4)
    :param cap_resolution: number of latitude rings per hemisphere (min 4)
    """
    nbv = np.array([max(radial_resolution, 4), max(cap_resolution, 4)])
    h = length
    r = radius
    position = 0
    # Two hemisphere vertex rings per azimuth, plus the two pole vertices.
    vertices = np.zeros((nbv[0] * (2 * nbv[1]) + 2, 3))
    for j in range(nbv[0]):
        phi = (( 2 * np.pi * j) / nbv[0])
        for i in range(nbv[1]):
            theta = ((np.pi / 2 * i) / nbv[1])
            # Bottom-cap vertex, then its mirrored top-cap counterpart.
            vertices[position + i, :] = np.array([np.cos(theta) * np.cos(phi) * r,
                                                  np.cos(theta) * np.sin(phi) * r,
                                                  -h / 2 - np.sin(theta) * r])
            vertices[position + i + nbv[1], :] = np.array([np.cos(theta) * np.cos(phi) * r,
                                                           np.cos(theta) * np.sin(phi) * r,
                                                           h / 2 + np.sin(theta) * r])
        position += nbv[1] * 2
    vertices[-2, :] = np.array([0, 0, -h / 2 - r])
    vertices[-1, :] = np.array([0, 0, h / 2 + r])
    # Per azimuth slice: 4 triangles closing poles/side + 4 per latitude band.
    indexes = np.zeros((nbv[0] * (4 * (nbv[1] - 1) + 4), 3))
    index = 0
    stride = nbv[1] * 2
    last = nbv[0] * (2 * nbv[1]) + 1
    for j in range(nbv[0]):
        j_next = (j + 1) % nbv[0]
        indexes[index + 0] = np.array([j_next * stride + nbv[1], j_next * stride, j * stride])
        indexes[index + 1] = np.array([j * stride + nbv[1], j_next * stride + nbv[1], j * stride])
        indexes[index + 2] = np.array([j * stride + nbv[1] - 1, j_next * stride + nbv[1] - 1, last - 1])
        indexes[index + 3] = np.array([j_next * stride + 2 * nbv[1] - 1, j * stride + 2 * nbv[1] - 1, last])
        for i in range(nbv[1]-1):
            indexes[index + 4 + i * 4 + 0] = np.array([j_next * stride + i, j_next * stride + i + 1, j * stride + i])
            indexes[index + 4 + i * 4 + 1] = np.array([j_next * stride + i + 1, j * stride + i + 1, j * stride + i])
            indexes[index + 4 + i * 4 + 2] = np.array([j_next * stride + nbv[1] + i + 1, j_next * stride + nbv[1] + i, j * stride + nbv[1] + i])
            indexes[index + 4 + i * 4 + 3] = np.array([j_next * stride + nbv[1] + i + 1, j * stride + nbv[1] + i, j * stride + nbv[1] + i + 1])
        index += 4 * (nbv[1] - 1) + 4
    return mg.TriangularMeshGeometry(vertices, indexes)
class Plane(mg.Geometry):
    """A plane of the given width and height.

    Serializes to a three.js PlaneGeometry; widthSegments/heightSegments
    control the subdivision of the plane mesh.
    """
    def __init__(self, width: float, height: float, widthSegments: float = 1, heightSegments: float = 1):
        super().__init__()
        self.width = width
        self.height = height
        self.widthSegments = widthSegments
        self.heightSegments = heightSegments
    def lower(self, object_data: Any) -> MsgType:
        # Emit the three.js JSON description consumed by meshcat.
        return {
            u"uuid": self.uuid,
            u"type": u"PlaneGeometry",
            u"width": self.width,
            u"height": self.height,
            u"widthSegments": self.widthSegments,
            u"heightSegments": self.heightSegments,
        }
def meshcat_material(r, g, b, a):
    """Build a meshcat MeshPhongMaterial from RGBA components in [0, 1]."""
    material = mg.MeshPhongMaterial()
    red, green, blue = int(r * 255), int(g * 255), int(b * 255)
    # Pack the channels into a single 0xRRGGBB integer.
    material.color = (red << 16) + (green << 8) + blue
    material.opacity = a
    return material
def create_visualizer(grid: bool=False, axes: bool=False) -> meshcat.Visualizer:
    """Start a fresh meshcat visualizer, optionally hiding the grid and axes.

    :param grid: keep the background grid visible
    :param axes: keep the coordinate axes visible
    """
    # vis = meshcat.Visualizer(zmq_url="tcp://127.0.0.1:6000")
    vis = meshcat.Visualizer()
    vis.delete()  # start from an empty scene
    if not grid:
        vis["/Grid"].set_property("visible", False)
    if not axes:
        vis["/Axes"].set_property("visible", False)
    return vis
def load_convex(path: str) -> hppfcl.ConvexBase:
    """Load a mesh file and return the convex hull of its vertices."""
    shape: hppfcl.ConvexBase
    loader = hppfcl.MeshLoader()
    mesh_: hppfcl.BVHModelBase = loader.load(path)
    mesh_.buildConvexHull(True, "Qt")  # "Qt" = qhull triangulated-output option
    shape = mesh_.convex
    return shape
def rgbToHex(color):
    """Convert an RGB(A) color with components in [0, 1] into a
    ('0xRRGGBB', opacity) pair; opacity defaults to 1.0 without alpha."""
    opacity = color[3] if len(color) == 4 else 1.
    channels = tuple(int(c * 255) for c in color[:3])
    return '0x%02x%02x%02x' % channels, opacity
def renderPoint(vis: meshcat.Visualizer, point: np.ndarray, point_name: str,
                color=np.ones(4), radius_point=0.001):
    """Draw a small colored sphere at `point` under the node `point_name`."""
    hex_color, opacity = rgbToHex(color)
    vis[point_name].set_object(mg.Sphere(radius_point), mg.MeshLambertMaterial(color=hex_color, opacity=opacity))
    vis[point_name].set_transform(tf.translation_matrix(point))
def renderLine(vis: meshcat.Visualizer, pt1: np.ndarray, pt2: np.ndarray, name: str,
               linewidth=1, color=np.array([0., 0., 0., 1.])):
    """Draw a straight segment from pt1 to pt2 under the node `name`."""
    hex_color, _ = rgbToHex(color)
    # meshcat expects points as a 3x2 float32 column matrix.
    points = np.hstack([pt1.reshape(-1, 1), pt2.reshape(-1, 1)]).astype(np.float32)
    vis[name].set_object(mg.Line(mg.PointsGeometry(points), mg.MeshBasicMaterial(color=hex_color, linewidth=linewidth)))
RED_COLOR = np.array([1.0, 0., 0., 1.0])
class AgimusScene:
    """A collection of hppfcl collision objects plus a meshcat viewer to
    render them and to visualize collision separation vectors."""
    # Parallel lists: collision_objects[i] is rendered with mc_shapes[i]
    # colored by shape_colors[i]; _colres_idx counts rendered separation vectors.
    collision_objects: List[hppfcl.CollisionObject]
    viewer: meshcat.Visualizer
    mc_shapes: List[meshcat.geometry.Geometry]
    shape_colors: List[np.ndarray]
    _colres_idx: int
    def __init__(self):
        self.viewer = create_visualizer(False, False)
        self.clear_scene()
    def clear_scene(self):
        """Drop all registered objects and wipe the viewer."""
        self.mc_shapes = []
        self.collision_objects = []
        self.shape_colors = []
        self._colres_idx = 0
        self.viewer.delete()
    def register_object(self, shape: hppfcl.ShapeBase, M: pin.SE3, shape_color=np.ones(3), transparent=False):
        """Add one shape at placement M; transparent objects get opacity 0.2."""
        shape.computeLocalAABB()
        cobj = hppfcl.CollisionObject(shape, M)
        self.collision_objects.append(cobj)
        self.mc_shapes.append(load_primitive(shape))
        color = np.ones(4)
        color[:3] = shape_color
        color[3] = 1
        if transparent:
            color[3] = 0.2
        self.shape_colors.append(color)
    def render_scene(self):
        """Push every object's current placement to the viewer."""
        for s, cobj in enumerate(self.collision_objects):
            M = pin.SE3(cobj.getTransform())
            shape_name = f"shape_{s}"
            # NOTE(review): cobj is a CollisionObject, not the underlying
            # shape, so this isinstance looks like it can never match a
            # Plane/Halfspace — verify whether the offset branch is ever taken.
            if isinstance(cobj,(hppfcl.Plane, hppfcl.Halfspace)):
                T = M
                T.translation += M.rotation @ (cobj.d * cobj.n)
                T = T.homogeneous
            else:
                T = M.homogeneous
            # Update viewer configuration.
            self.viewer[shape_name].set_transform(T)
    def clear_renderer(self):
        """Alias of init_renderer: rebuild the viewer contents from scratch."""
        self.init_renderer()
    def init_renderer(self):
        """Wipe the viewer and (re)attach every registered shape with its material."""
        self.viewer.delete()
        self._colres_idx = 0
        for s, shape in enumerate(self.mc_shapes):
            shape_name = f"shape_{s}"
            self.viewer[shape_name].set_object(shape, meshcat_material(*self.shape_colors[s]))
    def visualize_separation_vector(self, colres: hppfcl.CollisionResult):
        """Render the witness points and segment of a collision result."""
        if colres.isCollision:
            contact: hppfcl.Contact = colres.getContacts()[0]
            p1 = contact.getNearestPoint1()
            p2 = contact.getNearestPoint2()
            name = f"sep_vec_{self._colres_idx}"
            renderPoint(self.viewer, p1, name + "/p1", RED_COLOR, 0.005)
            renderPoint(self.viewer, p2, name + "/p2", RED_COLOR, 0.005)
            renderLine(self.viewer, p1, p2, name + "/sep_vec", 1., RED_COLOR)
            self._colres_idx += 1
    def delete_separation_vectors(self):
        """Remove all previously rendered separation vectors from the viewer."""
        for i in range(self._colres_idx):
            name = f"sep_vec_{i}"
            self.viewer[name].delete()
        self._colres_idx = 0
def create_complex_scene():
    """Build a demo scene: 25 triples of random shapes (ellipsoid, capsule,
    convex mesh) enclosed by six transparent box walls.

    :return: (shapes, transforms, scene) — the shape list, their placements,
             and the populated AgimusScene
    """
    # Create some shapes
    scene = AgimusScene()
    shapes = []
    transforms = []
    # Fixed seeds so the "random" scene is reproducible across runs.
    pin.seed(0)
    np.random.seed(0)
    N = 25
    for _ in range(N):
        shape = hppfcl.Ellipsoid(0.05, 0.15, 0.2)
        shapes.append(shape)
        shape = hppfcl.Capsule(0.1, 0.2)
        shapes.append(shape)
        shape = load_convex("./assets/mesh.stl")
        shapes.append(shape)
    for s in range(len(shapes)):
        M = pin.SE3.Random()
        transforms.append(M)
        color = np.random.rand(3)
        scene.register_object(shapes[s], M, color)
    # Add walls
    walls_color = np.ones(3)
    wall_size = 4.0
    # X-axis
    M = pin.SE3.Identity()
    M.translation = np.array([-wall_size, 0., 0.])/2
    transforms.append(M)
    shape = hppfcl.Box(0.5, wall_size, wall_size)
    shapes.append(shape)
    scene.register_object(shapes[-1], M, walls_color, True)
    M = pin.SE3.Identity()
    M.translation = np.array([wall_size, 0., 0.])/2
    transforms.append(M)
    shape = hppfcl.Box(0.5, wall_size, wall_size)
    shapes.append(shape)
    scene.register_object(shapes[-1], M, walls_color, True)
    # Y-axis
    M = pin.SE3.Identity()
    M.translation = np.array([0., -wall_size, 0.])/2
    transforms.append(M)
    shape = hppfcl.Box(wall_size, 0.5, wall_size)
    shapes.append(shape)
    scene.register_object(shapes[-1], M, walls_color, True)
    M = pin.SE3.Identity()
    M.translation = np.array([0., wall_size, 0.])/2
    transforms.append(M)
    shape = hppfcl.Box(wall_size, 0.5, wall_size)
    shapes.append(shape)
    scene.register_object(shapes[-1], M, walls_color, True)
    # Z-axis walls (original comment said "Y-axis")
    M = pin.SE3.Identity()
    M.translation = np.array([0., 0., -wall_size])/2
    transforms.append(M)
    shape = hppfcl.Box(wall_size, wall_size, 0.5)
    shapes.append(shape)
    scene.register_object(shapes[-1], M, walls_color, True)
    M = pin.SE3.Identity()
    M.translation = np.array([0., 0., wall_size])/2
    transforms.append(M)
    shape = hppfcl.Box(wall_size, wall_size, 0.5)
    shapes.append(shape)
    scene.register_object(shapes[-1], M, walls_color, True)
    return shapes, transforms, scene
| agimus-project/winter-school-2023 | simulation/sim2_collision/utils_render.py | utils_render.py | py | 13,620 | python | en | code | 0 | github-code | 13 |
1808191237 | from abc import ABC, abstractmethod
from typing import List, NamedTuple, Union
from flask import current_app, g
from serpapi import GoogleSearch
SEARCH_PARAMS = {
"engine": "google_scholar",
"hl": "en",
"start": 0,
"num": "20",
}
class GoogleScholarAuthor(NamedTuple):
    """An author entry from a Google Scholar result: display name and profile link."""
    name: str
    link: str
class GoogleScholarResult(NamedTuple):
    """One organic Google Scholar result; link fields are None when absent
    from the SerpApi response."""
    summary: str
    link: Union[str, None]
    versions_link: Union[str, None]
    citations_link: Union[str, None]
    title: str
    total_citations: Union[int, None]
    authors: List[GoogleScholarAuthor]
class GoogleScholarSearch(NamedTuple):
    """A completed search: creation timestamp and the list of parsed results."""
    created_at: str
    results: List[GoogleScholarResult]
class GoogleScholarBase(ABC):
    """Interface for Google Scholar clients (allows test doubles via Flask config)."""
    @abstractmethod
    def find(self, search: str) -> GoogleScholarSearch:
        """Run a search query and return its parsed results."""
        ...
def get_google_scholar() -> GoogleScholarBase:
    """Return the request-scoped Google Scholar client, creating it on first
    use from the class configured under app.config['GoogleScholar']."""
    if 'google_scholar' not in g:
        g.google_scholar = current_app.config['GoogleScholar']()
    return g.google_scholar
class GoogleScholar(GoogleScholarBase):
    """Concrete client that queries Google Scholar through SerpApi."""
    def find(self, search: str) -> GoogleScholarSearch:
        # Copy the shared defaults so per-request fields don't mutate them.
        p = dict(SEARCH_PARAMS)
        p['q'] = search
        p['api_key'] = current_app.config['SERPAPI_KEY']
        return asGoogleScholarSearch(GoogleSearch(p).get_dict())
def asGoogleScholarSearch(d: dict) -> GoogleScholarSearch:
    """Parse a raw SerpApi response dict into a GoogleScholarSearch.

    :raises ValueError: when the response contains no 'organic_results'.
        (Bug fix: the previous `raise 'no results'` raised a string, which is
        a TypeError in Python 3 — exceptions must derive from BaseException.)
    """
    if 'organic_results' not in d:
        print('no results', d)
        raise ValueError('no results')
    results = []
    for r in d['organic_results']:
        authors = [
            GoogleScholarAuthor(a['name'], a['link'])
            for a in r['publication_info'].get('authors', [])
        ]
        inline = r['inline_links']
        results.append(GoogleScholarResult(
            r['publication_info']['summary'],
            r.get('link', None),
            inline.get('versions', {}).get('link', None),
            inline.get('cited_by', {}).get('link', None),
            r['title'],
            inline.get('cited_by', {}).get('total', None),
            authors
        ))
    return GoogleScholarSearch(
        d['search_metadata']['created_at'],
        results
    )
) | OHDSI/CommunityDashboard | projects/plots/plots/services/google_scholar.py | google_scholar.py | py | 2,077 | python | en | code | 5 | github-code | 13 |
72714660177 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 29 13:44:51 2016
@author: sthomp
Command Line Script to run vetter on the K2 data.
Inputs
File of this info or the info itself
EpicId
Campaign
Period (days)
epoch (bkjd)
depth (ppm)
config File
"""
import dave.pipeline.clipboard as clipboard
import numpy as np
import sys
import os
import getopt as getopt
import dave.pipeline.exporter as ex
import dave.pipeline.multiPagePlot as mpp
import datetime
import dave.pipeline.pipeline as dpp #You need this
import dave.stellar.readStellarTable as stel
def main():
    """A bare bones main program.

    Parses command-line options (-f ephem file | -1 one ephemeris, -c config,
    -o output, -l light-curve type), then vets each ephemeris with
    runOneEphem and exports the results.
    """
    # print len(sys.argv)
    if len(sys.argv) < 2:
        usage()
        sys.exit()
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hf:c:o:1:l:", ["help", "output=","config=","file=","one=","lc="])
    except getopt.GetoptError as err:
        # print help information and exit:
        usage()
        sys.exit()
    cfgFile=""
    ephemFile=""
    output=""
    detrendType="pdc"
    # One ephemeris row: [epicId, campaign, period, epoch, depth_ppm, duration].
    data=np.zeros((1,6),dtype=float)
    # print np.shape(data)
    for o, a in opts:
        if o in ("-f","--file"):
            ephemFile = a
            print("Ephemeris File is: %s" % ephemFile)
            data=loadEphemFile(ephemFile)
        elif o in ("-h", "--help"):
            usage()
            sys.exit()
        elif o in ("-o", "--output"):
            output = a
            print("Output File is %s\n" % output)
        elif o in ("-c", "--config"):
            cfgFile= a
            print("Config File is: %s\n" % cfgFile)
        elif o in ("-1", "--one"):
            # Single ephemeris passed inline as a space-separated string.
            data[0,:]=np.transpose(np.array(a.split(),dtype=float))
        elif o in ("-l", "--lc"):
            detrendType=a
        else:
            assert False, "Unhandled option"
            sys.exit()
    #Check all required inputs are sane
    if np.sum(np.fabs(data)) == 0:
        raise IOError("No empheris file loaded. Use --file or --one")
    outputPath = os.path.split( os.path.realpath(output) )[0]
    if not os.access(outputPath, os.W_OK):
        raise IOError("Can not create output file: %s" %(output))
    if not os.path.isfile(cfgFile) or not os.access(cfgFile, os.R_OK):
        raise IOError("Can not read config file: %s" %(cfgFile))
    cfg=loadConfigInput(cfgFile)
    cfg['detrendType']=detrendType
    cfg=suppConfiguration(cfg)
    #print cfg
    # Vet each ephemeris in turn; missing depth/duration columns get defaults.
    for i,epic in enumerate(data[:,0]):
        cfg['campaign']=int(data[i,1])
        try:
            dep=data[i,4]/1.0e6
        except:
            dep=.00005
        try:
            dur = data[i,5]
        except:
            dur = 3.0
        clip=runOneEphem(epic,data[i,2],data[i,3],cfg,duration= dur,depth=dep)
        # print clip.__meta__
        if ('exception' not in clip.keys()):
            outfile=runExport(clip,output)
            print('Created Outputs %s\n\n' % outfile)
        else:
            # Vetting failed: record a placeholder line so the output stays aligned.
            print("No Outputs\n")
            fid=open(output,'a')
            fid.write("%s %f 0 0 0 0 0 0 0 0 \t-1 -1 -1 -1 NO_Analysis\n" % (clip.value,clip.bls.period))
            fid.close()
            #outfile=runExport(clip,output)
            print(clip.exception)
            print(clip.backtrace)
def usage():
    """Print command-line usage information for justVet."""
    messages = (
        "justVet -f input ephem file -c config file -o output filename\n",
        "writes stuff to current directory\n\n",
        "Format of the input ephem file is\n",
        "epic campaign period_days epoch_bkjd depth_ppm",
        "To run just one, use -1 \"epic campaign period epoch depth(ppm)\"",
        "You still need -c cfg.in and -o output.txt",
        "Use -l or --lc to pick your light curve",
        "The names of the light curve choices are pdc,everest,sff,agp,varcat (not yet)",
        "Default is the PDC light curves.",
        "Chose the same top directory for onepageBasename and modshiftBasename for all images to end up in same directory.",
    )
    for line in messages:
        print(line)
def loadEphemFile(ephemFile):
    """Load a whitespace-delimited file of ephemerides.

    Columns of the returned float array:
    0=epicId  1=campaign  2=period  3=epoch  4=depth
    '#' lines are treated as comments.
    """
    return np.loadtxt(ephemFile, dtype=float, comments='#', delimiter=None)
def loadConfigInput(cfgFile):
    """Read a colon-delimited ``key:value`` config file into a dict.

    Values that parse as floats are stored as floats; everything else is
    kept as the raw string. '#' lines are treated as comments.
    """
    raw = np.loadtxt(cfgFile, dtype=str, delimiter=":", comments='#')
    cfg = {}
    for key, value in zip(raw[:, 0], raw[:, 1]):
        try:
            cfg[key] = float(value)
        except ValueError:
            cfg[key] = value
    return cfg
def suppConfiguration(cfg):
    """Load the default pipeline configuration and adjust as necessary.

    Adds the full task list, the search-only task list, a debug flag, and a
    sanitized integer ``timeout_sec`` (default 150) to ``cfg``, then returns it.
    The input dict is modified in place and also returned.
    """
    # Edit the input configuration with things specific to this task.
    cfg['debug'] = False

    tasks = """dpp.checkDirExistTask dpp.serveTask dpp.extractLightcurveTask
        dpp.computeCentroidsTask dpp.rollPhaseTask dpp.cotrendDataTask
        dpp.detrendDataTask dpp.blsTask dpp.trapezoidFitTask
        dpp.modshiftTask dpp.measureDiffImgCentroidsTask dpp.dispositionTask
        dpp.saveClip""".split()
    cfg['taskList'] = tasks

    searchTaskList = """blsTask trapezoidFitTask modshiftTask
                        measureDiffImgCentroidsTask dispositionTask""".split()
    cfg['searchTaskList'] = searchTaskList

    # Was a bare except; only coerce failures we expect (missing key or a
    # value that is not an integer) should fall back to the default.
    try:
        cfg['timeout_sec'] = int(cfg['timeout_sec'])
    except (KeyError, ValueError, TypeError):
        cfg['timeout_sec'] = 150

    return cfg
def runOneEphem(k2id, period, epoch, config, duration=3.5, depth=.0001):
    """
    Run just the vetting pipeline for a single target and return the clip.

    Inputs:
    -------------
    k2id
        (int) Epic id of the target to run on.
    period
        (float) period of the target
    epoch
        (float) Time in days
    config
        (dict) Dictionary of configuration parameters
    duration
        (float) transit duration in hours (default 3.5)
    depth
        (float) fractional transit depth (default 1e-4)
    """
    clip = clipboard.Clipboard()
    clip['config'] = config
    clip['value'] = k2id

    bls = clipboard.Clipboard()
    bls['period'] = period
    bls['epoch'] = epoch
    bls['duration_hrs'] = duration
    bls['depth'] = depth
    clip['bls'] = bls

    # Resolve every task name up front so a typo fails before any work runs.
    task_funcs = [eval(t) for t in config['taskList']]
    for func in task_funcs:
        clip = func(clip)
    return clip
def runExport(clip, output):
    """
    Run the exporters based on the input clip.

    Appends a one-line summary for this target to the ``output`` file and
    writes the multi-page vetting plots under
    ``clip.config['onepageBasename']/<epic>/``. Returns the plot basename.
    """
    per = np.round(clip.bls.period * 10)
    epoch = np.round(clip.bls.epoch)
    basedir = clip.config['onepageBasename']

    try:
        clip['config']['stellarPar'] = ['Mass', 'Rad', 'Teff', 'dis', 'rho', 'prov', 'logg']
        clip = stel.addStellarToClip(clip)
        clip = stel.estimatePlanetProp(clip)
    except Exception:  # was a bare except; stellar lookup is best-effort
        print('No Stellar Values')

    outstr, header = ex.createExportString(clip, delimiter=" ", badValue="nan")
    # 'with' guarantees the file is closed even if write() raises.
    with open(output, 'a') as fid:
        fid.write("%s\n" % outstr)

    tag = "%i-%02i-%04i-%s" % (clip.value, per, epoch, clip.config.detrendType)
    outfile = "%s/%09i/jvet%s" % (basedir, int(clip.value), tag)
    thedir = basedir + str(int(clip.value))
    try:
        os.mkdir(thedir)
    except OSError:
        pass  # directory already exists; nothing to do

    date = datetime.datetime.now()

    if 'disposition' not in clip.keys():
        # NOTE(review): a plain string is stored, then attributes are set on
        # clip.disposition -- this relies on Clipboard's attribute handling;
        # verify against the clipboard module.
        clip['disposition'] = 'No Disposition Determined'
        clip.disposition.isCandidate = 0
        clip.disposition.isSignificantEvent = 0

    mpp.plot_multipages(outfile, clip, date)
    return outfile
# Script entry point (main() is defined earlier in this file).
if __name__ == "__main__":
    main()
| exoplanetvetting/DAVE | runbackend/justVetK2.py | justVetK2.py | py | 8,284 | python | en | code | 9 | github-code | 13 |
# _*_ coding:utf-8 _*_
# Demo script: three ways to POST data with the ``requests`` library.
import json
import requests
# 1) POST a JSON-encoded body with an explicit content-type header.
url = 'https://api.github.com/some/endpoint'
payload = {'some': 'data'}
headers = {'content-type': 'application/json'}
r = requests.post(url, data=json.dumps(payload), headers=headers)
print(r.url)
print(r.text)
# 2) POST ordinary form-encoded data (requests builds the body itself).
payload = {'key': 'value1', 'key2': 'value2'}
r = requests.post("http://httpbin.org/post", data=payload)
print(r.url)
print(r.text)
# 3) POST a multipart-encoded (Multipart-Encoded) file upload.
# NOTE(review): the file handle is never closed and 'report.xls' must exist
# in the working directory for this step to run.
url = 'http://httpbin.org/post'
files = {'file': open('report.xls', 'rb')}
r = requests.post(url, files=files)
| VersionBeathon/Expension | practice_requests/create_head.py | create_head.py | py | 567 | python | en | code | 0 | github-code | 13 |
22107827089 | import pandas as pd
import logging
from decouple import config
from sqlalchemy import create_engine
def salidas():
    """Read archivo.csv and append its rows to the ``salidas`` table."""
    # NOTE(review): the path is hard-coded to one machine; consider .env
    csv_path = 'C:\\Users\\WalterPc\\Documents\\Alkemy\\archivo.csv'
    engine = create_engine(conection())
    frame = pd.read_csv(csv_path, sep=',')
    frame.to_sql('salidas', engine, if_exists='append', dtype=None, index=False, method=None)
def cantidades():
    """Read archivo.csv and append its rows to the ``cantidades`` table."""
    # NOTE(review): the path is hard-coded to one machine; consider .env
    csv_path = 'C:\\Users\\WalterPc\\Documents\\Alkemy\\archivo.csv'
    engine = create_engine(conection())
    frame = pd.read_csv(csv_path, sep=',')
    frame.to_sql('cantidades', engine, if_exists='append', dtype=None, index=False, method=None)
def cantidad1():
    """Read cantidad.csv and append its rows to the ``cines`` table."""
    # NOTE(review): the path is hard-coded to one machine; consider .env
    csv_path = 'C:\\Users\\WalterPc\\Documents\\Alkemy\\cantidad.csv'
    engine = create_engine(conection())
    frame = pd.read_csv(csv_path, sep=',')
    frame.to_sql('cines', engine, if_exists='append', dtype=None, index=False, method=None)
# Configure root logging at import time so every module emits INFO and up.
logging.basicConfig(
    level=logging.INFO
)
def conection():
    """Build the PostgreSQL connection URL from settings in the environment/.env."""
    db_user = config('USUARIO_PSQL')
    db_password = config('CONTRASEÑA_PSQL')
    db_host = config('PSQL_HOST')
    db_name = config('BASEDEDATOS_PSQL')
    return f'postgresql://{db_user}:{db_password}@{db_host}/{db_name}'
| Wquiroz2022/Analisis-de-Datos---Python | app/config.py | config.py | py | 1,378 | python | es | code | 0 | github-code | 13 |
43976370301 | import jieba
from collections import Counter
import math
# Window size 2
def combine2gram(cutword_list):
    """Join every adjacent pair of tokens into a 2-gram string."""
    if len(cutword_list) == 1:
        return []
    return [a + b for a, b in zip(cutword_list, cutword_list[1:])]
# Window size 3
def combine3gram(cutword_list):
    """Join every run of three consecutive tokens into a 3-gram string."""
    if len(cutword_list) <= 2:
        return []
    return [a + b + c
            for a, b, c in zip(cutword_list, cutword_list[1:], cutword_list[2:])]
def calculate(corpus, mode):
    """Compute and print the character-level N-gram entropy of ``corpus``.

    Parameters
    ----------
    corpus : iterable of str
        One string per paragraph; every character is treated as one token.
    mode : int
        The N of the N-gram model (1, 2 or 3). Other values print nothing.

    The original packed all three modes into one function; each mode is now
    a private helper so the shared structure is readable.
    """
    if mode == 1:
        _entropy_1gram(corpus)
    if mode == 2:
        _entropy_2gram(corpus)
    if mode == 3:
        _entropy_3gram(corpus)


def _collect_chars(corpus):
    """Flatten the corpus into a single list of characters."""
    chars = []
    for para in corpus:
        for i in range(len(para)):
            chars += para[i]
    return chars


def _entropy_1gram(corpus):
    """Unigram entropy: H(X) = -sum p(x) * log2 p(x)."""
    chars = _collect_chars(corpus)
    char_num = len(chars)
    vocab1 = Counter(chars).most_common()
    entropy_1gram = sum([-(eve[1] / char_num) * math.log((eve[1] / char_num), 2) for eve in vocab1])
    print("词库总词数:", char_num, " ", "不同词的个数:", len(vocab1))
    print("出现频率前10的1-gram词语:", vocab1[:10])
    print("entropy_1gram:", entropy_1gram)


def _entropy_2gram(corpus):
    """Bigram conditional entropy: H(Y|X) = -sum p(x,y) * log2 p(y|x)."""
    char_2gram = []
    for para in corpus:
        char_2gram += combine2gram(list(para))
    char_2gram_num = len(char_2gram)
    vocab2 = Counter(char_2gram).most_common()
    # Counts of each 2-gram's first character (the conditioning context).
    vocab_1st = dict(Counter([eve[0] for eve in char_2gram]).most_common())
    entropy_2gram = 0
    for eve in vocab2:
        p_xy = eve[1] / char_2gram_num
        first_word = eve[0][0]
        entropy_2gram += -p_xy * math.log(eve[1] / vocab_1st[first_word], 2)
    print("词库总词数:", char_2gram_num, " ", "不同词的个数:", len(vocab2))
    print("出现频率前10的2-gram词语:", vocab2[:10])
    print("entropy_2gram:", entropy_2gram)


def _entropy_3gram(corpus):
    """Trigram conditional entropy: H(Z|X,Y) = -sum p(x,y,z) * log2 p(z|x,y)."""
    char_3gram = []
    for para in corpus:
        char_3gram += combine3gram(list(para))
    char_3gram_num = len(char_3gram)
    vocab3 = Counter(char_3gram).most_common()
    # Counts of each 3-gram's first two characters (the conditioning context).
    vocab_2st = dict(Counter([eve[:2] for eve in char_3gram]).most_common())
    entropy_3gram = 0
    for eve in vocab3:
        p_xyz = eve[1] / char_3gram_num
        first_2word = eve[0][:2]
        entropy_3gram += -p_xyz * math.log(eve[1] / vocab_2st[first_2word], 2)
    print("词库总词数:", char_3gram_num, " ", "不同词的个数:", len(vocab3))
    print("出现频率前10的3-gram词语:", vocab3[:10])
    print("entropy_3gram:", entropy_3gram)
# Load the preprocessed corpus stored in the current directory.
with open("processed_content.txt", "r", encoding="utf-8") as f:
    # One stripped line per paragraph of preprocessed text.
    corpus = [eve.strip("\n") for eve in f]
# mode is the N of the N-gram model; valid values are 1, 2 or 3.
mode =3
# Run the entropy calculation and print the results.
calculate(corpus,mode)
| iluveatingmyf/DL-NLP2022-Homework | entropy_calculating_char.py | entropy_calculating_char.py | py | 3,629 | python | en | code | 0 | github-code | 13 |
1330807783 |
import csv
import os
import datetime
import re
def read_csv_file(filename):
    """Read a vendor CSV and return the rows with a non-empty FOODTYPE.

    Parameters
    ----------
    filename : str
        Path to a CSV file whose first row is a header.

    Returns
    -------
    list of dict
        One dict per kept row, keyed by column name.

    Fixes vs. the original: removes the dead row counter, opens with
    ``newline=''`` as the csv module documents, and also drops rows whose
    FOODTYPE is None (short rows), which previously slipped through.
    """
    with open(filename, mode='r', newline='') as csv_file:
        return [row for row in csv.DictReader(csv_file) if row.get("FOODTYPE")]
def get_sorted_unique_column_values(data, column_name):
    """Return the sorted distinct values of ``column_name`` across ``data``."""
    return sorted({row[column_name] for row in data})
def get_sorted_unique_column_values_and_counts(data, column_name):
    """Return a dict mapping each distinct ``column_name`` value to its count.

    Keys are in sorted order (dicts preserve insertion order in Python 3.7+).
    Replaces the hand-rolled counting loop with ``collections.Counter``.
    """
    counts = Counter(row[column_name] for row in data)
    return dict(sorted(counts.items()))
def read_template(filename):
    """Return the entire contents of ``filename`` as one string."""
    with open(filename, "r") as template_file:
        return template_file.read()
def create_page(template, data):
    """Fill the HTML template's placeholder tags with generated content."""
    replacements = {
        "<responsive-filter-buttons-here/>": create_responsive_filter_button_list(data),
        "<list-here/>": create_vendor_list_html(data),
        "<date-generated/>": datetime.datetime.now().isoformat(),
    }
    page = template
    for placeholder, content in replacements.items():
        page = page.replace(placeholder, content)
    return page
def create_responsive_filter_button_list(data):
    """Build one filter <a> element per distinct FOODTYPE value."""
    button_template = "<a id='-id-' href='#' class='list-group-item filter-button'>-foodtype-</a>\n"
    buttons = [
        button_template
        .replace("-foodtype-", foodtype)
        .replace("-id-", "filter-" + create_id_from_text(foodtype))
        for foodtype in get_sorted_unique_column_values(data, "FOODTYPE")
    ]
    return "".join(buttons)
def create_id_from_text(text):
    """Lowercase ``text`` and strip every non-alphanumeric character."""
    return re.sub(r"[^a-zA-Z0-9]", "", text.lower())
def create_vendor_list_html(data):
    """Render one <li> vendor entry per data row (missing fields become '')."""
    list_template = """<li>
            <h2 id='-id-' class='vendor'>-CONTRACT-</h2>
            <p class='foodtype'>-FOODTYPE-</p>
            <p class='product'>-PRODUCT-</p>
            <p class='location'><a href='https://www.google.com/maps/dir/?api=1&destination=-y-,-x-'>Walk</a></p>
        </li>
    """
    items = []
    for row in data:
        item = list_template
        for field in ["CONTRACT", "FOODTYPE", "PRODUCT", "y", "x"]:
            value = str(row[field]) if field in row else ""
            item = item.replace("-" + field + "-", value)
        item = item.replace("-id-", create_id_from_text(str(row["CONTRACT"])))
        items.append(item)
    return "".join(items)
def main():
    """Generate site_results/index.html from the fair CSV and print a FOODTYPE summary."""
    # Inputs are read relative to the current working directory.
    fair_data_filename = os.path.join(".", "data", "nc_state_fair_vendor_data.csv")
    html_template_filename = os.path.join(".", "template", "template.html")
    print("Data file: " + fair_data_filename)
    print("HTML template file: " + html_template_filename)
    fair_data = read_csv_file(fair_data_filename)
    print("Total fair data rows: " + str(len(fair_data)))
    html_template = read_template(html_template_filename)
    page_html = create_page(html_template, fair_data)
    # NOTE(review): assumes ./site_results already exists; open() will not create it.
    results_filename = os.path.join(".", "site_results", "index.html")
    with open(results_filename, "w") as page_file:
        page_file.write(page_html)
    # Print a per-FOODTYPE count table with a grand total.
    print()
    print("Count by FOODTYPE:")
    foodtypes = get_sorted_unique_column_values_and_counts(fair_data, "FOODTYPE")
    count = 0
    for key in sorted(foodtypes.keys()):
        count = count + foodtypes[key]
        print("\t" + key + ": " + str(foodtypes[key]))
    print("\t______")
    print("\tTotal: " + str(count))
# Script entry point.
if __name__ == '__main__':
    main()
| stevezieglerva/nc-state-fair-ride-and-food-finder | create_page.py | create_page.py | py | 3,912 | python | en | code | 0 | github-code | 13 |
# The four axial moves: up, down, left, right.
directions = [(-1, 0), (1, 0), (0, -1), (0, 1)]


def helper(r, c, count, lst, num):
    """Depth-first walk collecting every 7-digit number readable from (r, c).

    ``num`` accumulates the digits seen so far; when 7 digits have been
    consumed the completed number is added to the shared set ``lst``, which
    is also returned so the caller can rebind it.

    Bug fix: the original did ``lst.add(helper(...))``, which tries to put an
    unhashable list/set into a set and raises TypeError at runtime; completed
    numbers are now added directly and recursive calls share ``lst``.
    """
    if count == 7:
        lst.add(num)
        return lst
    for dr, dc in directions:
        nr, nc = r + dr, c + dc
        if 0 <= nr <= 3 and 0 <= nc <= 3:
            helper(nr, nc, count + 1, lst, num * 10 + int(board[nr][nc]))
    return lst
# Number of test cases, read from stdin.
T = int(input().strip())
board = []
final_list=set()
# Multiple test cases are given, so process each one.
# NOTE(review): board and final_list are never reset between test cases, so
# rows and results accumulate across cases -- confirm this is intended.
for test_case in range(1, T + 1):
    # Each test case supplies a 4x4 grid of digits.
    for i in range(4):
        board.append(input().strip().split(" "))
    # Start a 7-step walk from every cell and count the distinct numbers.
    for r in range (len(board)):
        for c in range (len(board[0])):
            final_list = helper(r, c, 1, final_list, int(board[r][c]))
    print("#{0} {1}".format(test_case, len(set(final_list))))
print("#{0} {1}".format(test_case, len(set(final_list)))) | kod4284/kod-algo-note | 삼성Expert/D4/2819-격자판의-숫자-이어붙이기/solution2.py | solution2.py | py | 815 | python | en | code | 0 | github-code | 13 |
39587358131 | # POC for ACR token creation
import adal # <= ToDo: should probably be using MSAL
import requests
import os
import json
# can use this to debug requests
import http.client
# Registry Token mgmt features are not yet available in the SDK
# can replace using env vars with KeyVault entries
# Service-principal credentials come from the environment (could be KeyVault).
tenant = os.environ['AZURE_TENANT_ID']
authority_url = 'https://login.microsoftonline.com/' + tenant
client_id = os.environ['AZURE_CLIENT_ID']
client_secret = os.environ['AZURE_CLIENT_SECRET']
# ARM management-plane endpoint used as both AAD resource and URL base.
resource = 'https://management.azure.com'
subscription_id = os.environ["AZURE_SUBSCRIPTION_ID"]
# Acquire an ARM bearer token via ADAL client-credentials flow (runs at import).
context = adal.AuthenticationContext(authority_url)
token = context.acquire_token_with_client_credentials(resource, client_id, client_secret)
headers = {'Authorization': 'Bearer ' + token['accessToken'], 'Content-Type': 'application/json'}
params = {}
# ARM scope string for the target subscription (leading slash is intentional).
scope = f'/subscriptions/{subscription_id}'
resourceGroupName = 'akstests'
registry= 'edwinspoctests' # replace with your own ACR registry
params = {}
# we need this for the preview token features
apiversion = '2021-08-01-preview'
acrTokenResourceApi = f'registries/{registry}/tokens'
acrScopeMapResourceApi = f'registries/{registry}/scopeMaps'
def decode_list_tokens_response(response):
    """Print the name and scope map id of every token in a list-tokens response."""
    if not response:
        return
    for item in response.json()['value']:
        print('Token: ', item['name'])
        print('    Scope: ', item['properties']['scopeMapId'])
def decode_list_scopemaps_response(response):
    """Print name, description and actions of every scope map in a response."""
    if not response:
        return
    for item in response.json()['value']:
        print('ScopeMap: ', item['name'])
        print('    Description: ', item['properties']['description'])
        print('    Actions: ', item['properties']['actions'])
print(' Actions: ', item['properties']['actions'])
# List token Ids
def decode_token_ids(response):
    """Return the ARM resource id of every token in a list-tokens response.

    Fixes vs. the original: a falsy response now yields an empty list instead
    of falling through and returning None (which crashed callers that index
    the result), and the copied-over "print all the token names" comment is
    gone.
    """
    if not response:
        return []
    return [item["id"] for item in response.json()["value"]]
# list ops
# GET https://management.azure.com/providers/Microsoft.ContainerRegistry/operations?api-version=2019-05-01
# url = f'{resource}providers/Microsoft.ContainerRegistry/operations?api-version=2019-05-01'
# list tokens
def get_list_token_url():
    """ARM URL that lists the registry's tokens (preview API)."""
    return (resource + '/' + scope + '/resourceGroups/' + resourceGroupName
            + '/providers/Microsoft.ContainerRegistry/' + acrTokenResourceApi
            + '?api-version=2021-08-01-preview')
# list scopemaps
def get_list_scopemap_url():
    """ARM URL that lists the registry's scope maps (preview API)."""
    return (resource + '/' + scope + '/resourceGroups/' + resourceGroupName
            + '/providers/Microsoft.ContainerRegistry/' + acrScopeMapResourceApi
            + '?api-version=2021-08-01-preview')
# generate credentials (and passwords)
def get_generate_credentials_url():
    """ARM URL for generating token credentials (passwords) on the registry."""
    url_format = ('{base}/{scope}/resourceGroups/{rg}/providers/Microsoft.ContainerRegistry'
                  '/registries/{reg}/generateCredentials?api-version={ver}')
    return url_format.format(base=resource, scope=scope, rg=resourceGroupName,
                             reg=registry, ver=createtokenapiversion)
# create token
# using TokenUpdateParameters
# https://github.com/Azure/azure-sdk-for-python/blob/eb12059c40d04c96da60455a807d347be15ccfa8/sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2020_11_01_preview/models/_models.py
# https://github.com/Azure/azure-cli/blob/7148fd2d7f76853a8cfd3232c8ee3c27fcc6d207/src/azure-cli/azure/cli/command_modules/acr/tests/latest/recordings/test_acr_connectedregistry.yaml#L1255
# ARM API version that supports token create/update operations.
createtokenapiversion='2020-11-01-preview'
def get_create_token_url(tokenName):
    """ARM URL for creating or updating the named ACR token."""
    base = f'{resource}/{scope}/resourceGroups/{resourceGroupName}'
    return (f'{base}/providers/Microsoft.ContainerRegistry/'
            f'{acrTokenResourceApi}/{tokenName}?api-version={createtokenapiversion}')
def generate_token_json():
    """Request body for token creation: push scope map, created disabled."""
    properties = {
        'scopeMapId': '/subscriptions/7fdcde88-0aa7-4342-ac7c-fceebb18912e/resourceGroups/akstests/providers/Microsoft.ContainerRegistry/registries/edwinakstests/scopeMaps/_repositories_push',
        'status': 'disabled',
    }
    return {'properties': properties}
# generate credentials
# NOTE: a password is either called "password1" or "password2", which are used as enums in the token system
# using this API spec
# https://github.com/Azure/azure-rest-api-specs/blob/main/specification/containerregistry/resource-manager/Microsoft.ContainerRegistry/preview/2021-08-01-preview/examples/TokenUpdate.json
# exmaple here:
# https://github.com/Azure/azure-rest-api-specs/blob/main/specification/containerregistry/resource-manager/Microsoft.ContainerRegistry/preview/2021-08-01-preview/examples/RegistryGenerateCredentials.json
def generate_credentials_json(tokenId):
    """Request body for generateCredentials: always regenerates password1."""
    return {'tokenId': tokenId, 'name': 'password1'}
# examples:
# list tokens
# r1 = requests.get(get_list_token_url(), headers=headers, params=params)
# decode_list_tokens_response(r1)
# print(json.dumps(r1.json(), indent=4, separators=(',', ': ')))
# list scope maps
# r2 = requests.get(get_list_scopemap_url(), headers=headers, params=params)
# decode_list_scopemaps_response(r2)
# Turn on wire-level HTTP tracing for the calls below.
http.client.HTTPConnection.debuglevel = 1
# Create a disabled token named myPyTest2 via an ARM PUT.
r3 = requests.put(get_create_token_url("myPyTest2"), headers=headers, json=generate_token_json()) # json=generate_create_token_json())
print(json.dumps(r3.json(), indent=4, separators=(',', ': ')))
# an example to generate a token password
r4 = requests.get(get_list_token_url(), headers=headers, params=params)
# we shall just choose the first token in the list of tokens returned as an example
tokenIDToUpdate=decode_token_ids(r4)[0]
print('Updating this token password:')
print(tokenIDToUpdate)
# http.client.HTTPConnection.debuglevel = 1
# Regenerate password1 for the chosen token.
r5 = requests.post(get_generate_credentials_url(), headers=headers, json=generate_credentials_json(tokenIDToUpdate)) # json=generate_create_token_json())
print(json.dumps(r5.json(), indent=4, separators=(',', ': ')))
32648513996 | from django.db import models
class RainbowEntry(models.Model):
    """One rainbow-table entry: a plaintext base string plus its hashes."""
    baseID = models.AutoField(primary_key=True)
    base = models.CharField("Base", max_length=15, null=True, blank=True)  # plaintext base value
    hashes = models.TextField("Hashes", null=True, blank=True)  # hashes for this base (serialization format not shown here)
    added = models.DateTimeField("Added", auto_now_add=True)  # stamped once, at insert time
class Client(models.Model):
    """A compute client; may prefer a particular project to work on."""
    client_id = models.AutoField(primary_key=True)
    # NOTE(review): no on_delete argument -- this model targets pre-2.0 Django.
    preferred_project = models.ForeignKey('Project', null=True, blank=True)
class Project(models.Model):
    """A distributable computation: a display name plus a URL to its code."""
    project_id = models.AutoField(primary_key=True)
    name = models.CharField("Name", max_length=50)
    code = models.URLField("Code", max_length=200)  # where the project's code lives
    def __unicode__(self):
        # Python 2 style string representation (use __str__ on Python 3).
        return self.name
class InputData(models.Model):
    """One unit of input belonging to a project, handed out to clients as jobs."""
    input_data_id = models.AutoField(primary_key=True)
    data = models.CharField(max_length=100)  # the raw input value
    project = models.ForeignKey("Project")  # NOTE(review): pre-2.0 Django (no on_delete)
    num_dealt = models.IntegerField(default=0)  # presumably how many times this input was dealt out -- verify
class Job(models.Model):
    """A client's assignment of one input, with its eventual result."""
    job_id = models.AutoField(primary_key=True)
    client = models.ForeignKey('Client')  # NOTE(review): pre-2.0 Django (no on_delete)
    input_data = models.ForeignKey('InputData')
    result = models.CharField(max_length=65, null=True, blank=True)  # null until the client reports back
    checkpoint = models.IntegerField(default=0)  # presumably progress percentage (see design notes below) -- verify
'''
For projects:
add\_input(input\_id, input, project\_id)
clients(input\_id)
result(input\_id)
kill(input\_id)
For browser:
new()
get\_input(input\_id)
return\_result(input\_id, result)
checkpoint(percentage)
project
project\_id
name
code
jobs
input
input\_id
project\_id
value
job
job\_id
input\_id
ip
last\_checkpoint\_time
checkpoint
user
user\_id
project\_preference
''' | rclmenezes/Mebro | jp/models.py | models.py | py | 1,608 | python | en | code | 1 | github-code | 13 |
31929390814 | """
utility module for azimuthal-related plots
"""
import numpy as np
from scipy.signal import gaussian
import util
###############################################################################
# Default radial search window (in planet-orbit radii) for locating the
# vortex in the outer disk: look between r = 1.1 and r = 2.3.
outer_start = 1.1
outer_end = 2.3
### Helper Methods ###
def my_searchsorted(array, target):
    """np.searchsorted, but it works all the time: index of the first
    element >= target, or len(array) if every element is smaller."""
    return next((i for i, x in enumerate(array) if target <= x), len(array))
def get_radial_peak(averagedDensity, fargo_par, start = outer_start, end = outer_end):
    """Find the peak of the azimuthally-averaged density in the outer disk
    (i.e. the vortex) between r = start and r = end.

    Returns (peak_rad, peak_density).
    """
    rad = fargo_par["rad"]

    i_start = np.searchsorted(rad, start)  # search only beyond r = start
    i_end = np.searchsorted(rad, end)      # ... and before r = end

    peak_index = i_start + np.argmax(averagedDensity[i_start : i_end])
    return rad[peak_index], averagedDensity[peak_index]
def get_radial_min(averagedDensity, peak_rad, fargo_par):
    """Find the minimum of the azimuthally-averaged density between r = 1.0
    and the vortex peak (i.e. the gap edge).

    Parameters
    ----------
    averagedDensity : 1-D array, azimuthally-averaged density per radius
    peak_rad : float, radius of the vortex peak (upper search bound)
    fargo_par : dict holding the radial grid under "rad"

    Returns
    -------
    (min_rad, min_density), or (peak_rad, 0) when the search range is empty
    (no gap has formed yet).
    """
    rad = fargo_par["rad"]

    outer_disk_start = np.searchsorted(rad, 1.0) # look for min radial density beyond r = 1.0
    outer_disk_end = np.searchsorted(rad, peak_rad)
    try:
        min_rad_outer_index = np.argmin(averagedDensity[outer_disk_start : outer_disk_end])
    except ValueError:
        # np.argmin raises ValueError on an empty slice -- no gap yet.
        # (The original used a bare except, which also hid real errors.)
        return peak_rad, 0

    min_index = outer_disk_start + min_rad_outer_index
    return rad[min_index], averagedDensity[min_index]
def get_peak(density, fargo_par, start = outer_start, end = outer_end):
    """Return (radial index, azimuthal index) of the maximum of ``density``
    in the outer disk (r between start and end). The radial index refers to
    the full grid, not the searched segment."""
    rad = fargo_par["rad"]
    theta = fargo_par["theta"]

    i_start = np.searchsorted(rad, start)  # only consider r >= start
    i_end = np.searchsorted(rad, end)      # ... and r < end

    segment = density[i_start : i_end]
    arg_r, arg_phi = np.unravel_index(np.argmax(segment), segment.shape)
    return arg_r + i_start, arg_phi
def get_azimuthal_peak(density, fargo_par, start = outer_start, end = outer_end):
    """Return the roll (in azimuthal cells) that moves the vortex peak to
    theta = pi (180 degrees)."""
    rad = fargo_par["rad"]
    theta = fargo_par["theta"]

    _, arg_phi = get_peak(density, fargo_par, start = start, end = end)

    # Index of theta = pi; the needed shift is the gap between it and the peak.
    middle = np.searchsorted(theta, np.pi)
    return int(middle - arg_phi)
def get_azimuthal_center(density, fargo_par, threshold = 0.05, start = outer_start, end = outer_end):
    """ return shift needed to shift vortex center to 180 degrees

    The "center" is the midpoint between the two azimuthal locations where a
    Gaussian-weighted radial average of the density crosses ``threshold``.
    """
    ######## Get Parameters #########
    rad = fargo_par["rad"]
    theta = fargo_par["theta"]

    scale_height = fargo_par["AspectRatio"]
    # NOTE(review): surface_density_zero is read but never used below.
    surface_density_zero = fargo_par["Sigma0"]

    ########### Method ##############
    ### Identify center using threshold ###
    # Search outer disk only
    outer_disk_start = np.searchsorted(rad, start) # look for max density beyond r = 1.1
    outer_disk_end = np.searchsorted(rad, end) # look for max density before r = 2.3
    density_segment = density[outer_disk_start : outer_disk_end]

    # Get peak in azimuthal profile
    avg_density = np.average(density_segment, axis = 1) # avg over theta
    segment_arg_peak = np.argmax(avg_density)
    arg_peak = np.searchsorted(rad, rad[outer_disk_start + segment_arg_peak])
    peak_rad = rad[arg_peak]

    # Zoom in on peak --- Average over half a scale height
    half_width = 0.25 * scale_height
    zoom_start = np.searchsorted(rad, peak_rad - half_width)
    zoom_end = np.searchsorted(rad, peak_rad + half_width)

    density_sliver = density[zoom_start : zoom_end]
    # Gaussian weights concentrate the radial average on the peak radius.
    length = len(density_sliver); std = length / 3.0
    weights = gaussian(length, std)
    avg_density_sliver = np.average(density_sliver, weights = weights, axis = 0) # avg over rad

    # Move Minimum to Zero Degrees (vortex cannot cross zero)
    arg_min = np.argmin(avg_density_sliver)
    shift_min = int(0 - arg_min)
    avg_density_sliver = np.roll(avg_density_sliver, shift_min)

    # Spot two threshold crossovers
    left_edge = my_searchsorted(avg_density_sliver, threshold)
    right_edge = len(theta) - my_searchsorted(avg_density_sliver[::-1], threshold) - 1
    # Center index, still in the min-shifted frame.
    center = (left_edge + right_edge) / 2.0

    ### Calculate shift for true center to 180 degrees ###
    middle = np.searchsorted(theta, np.pi)
    # Undo the temporary min-shift when converting back to the input frame.
    shift_c = int(middle - (center - shift_min))

    return shift_c
### Extract Values ###
def get_contrast(data, fargo_par):
    """For polar data, return (contrast, peak value, opposite value) where the
    contrast is peak / diametrically-opposite point at the same radius."""
    rad = fargo_par["rad"]
    theta = fargo_par["theta"]

    # Locate the peak in the outer disk.
    arg_r, arg_phi = get_peak(data, fargo_par)

    # Index of the azimuthally opposite point (phi + pi, wrapped to [0, 2pi)).
    opposite_phi = (theta[arg_phi] + np.pi) % (2 * np.pi)
    arg_opposite = np.searchsorted(theta, opposite_phi)

    data_peak = data[arg_r, arg_phi]
    data_opposite = data[arg_r, arg_opposite]
    return data_peak / data_opposite, data_peak, data_opposite
def get_extent(data, fargo_par, normalize = False, threshold = 0.5, sliver_width = 0.5, start = outer_start, end = outer_end):
    """ Get azimuthal extent at peak across a given threshold

    Averages ``data`` over a radial sliver (``sliver_width`` scale heights)
    centred on the vortex peak, then measures the azimuthal width of that
    profile above ``threshold`` (in radians of theta).
    """
    ######## Get Parameters #########
    rad = fargo_par["rad"]
    theta = fargo_par["theta"]

    scale_height = fargo_par["AspectRatio"]

    ########### Method ##############
    # Search outer disk only
    outer_disk_start = np.searchsorted(rad, start) # look for max density beyond r = 1.1
    outer_disk_end = np.searchsorted(rad, end) # look for max density before r = 2.3
    data_segment = data[outer_disk_start : outer_disk_end]

    # Get peak in azimuthal profile
    avg_data = np.average(data_segment, axis = 1) # avg over theta
    segment_arg_peak = np.argmax(avg_data)
    arg_peak = np.searchsorted(rad, rad[outer_disk_start + segment_arg_peak])
    peak_rad = rad[arg_peak]

    # Zoom in on peak --- Average over half a scale height
    half_width = (0.5 * sliver_width) * scale_height
    zoom_start = np.searchsorted(rad, peak_rad - half_width)
    zoom_end = np.searchsorted(rad, peak_rad + half_width)

    data_sliver = data[zoom_start : zoom_end]
    # Gaussian weights concentrate the radial average on the peak radius.
    length = len(data_sliver); std = length / 3.0
    weights = gaussian(length, std)
    azimuthal_profile = np.average(data_sliver, weights = weights, axis = 0) # avg over rad to get azimuthal profile

    if normalize:
        azimuthal_profile /= np.max(azimuthal_profile)

    # Move minimum to theta = zero
    arg_min = np.argmin(azimuthal_profile)
    shift_min = int(0 - arg_min)
    azimuthal_profile = np.roll(azimuthal_profile, shift_min)

    # Find extents with the threshold
    left_theta_i = my_searchsorted(azimuthal_profile, threshold)
    right_theta_i = len(theta) - (my_searchsorted(azimuthal_profile[::-1], threshold)) - 1

    # Convert edge indices back to theta values; extent is their difference.
    left_theta = theta[left_theta_i]
    right_theta = theta[right_theta_i]

    extent = right_theta - left_theta
    return extent
def get_radial_extent(data, fargo_par, normalize = False, threshold = 0.5, sliver_width = 10.0, start = 0.6, end = 3.0):
    """ Get radial extent at peak across a given threshold

    Averages ``data`` over a ``sliver_width``-degree azimuthal wedge centred
    on the peak, then measures the radial width of that profile above
    ``threshold``.

    Bug fixes relative to the original:
      * the azimuthal slice at the peak radius was indexed with the float
        radius value (``data_segment[peak_rad]``), which raises IndexError
        with modern numpy; it now uses the integer row ``segment_arg_peak``.
      * the right edge of the radial profile was computed with ``len(theta)``
        although the profile has one entry per radius; it now uses
        ``len(radial_profile)``.
    """
    ######## Get Parameters #########
    rad = fargo_par["rad"]
    theta = fargo_par["theta"]

    scale_height = fargo_par["AspectRatio"]

    ########### Method ##############
    # Search outer disk only
    outer_disk_start = np.searchsorted(rad, start)
    outer_disk_end = np.searchsorted(rad, end)
    rad_segment = rad[outer_disk_start : outer_disk_end]
    data_segment = data[outer_disk_start : outer_disk_end]

    # Get peak in azimuthal profile
    avg_data = np.average(data_segment, axis = 0) # avg over rad
    arg_peak = np.argmax(avg_data)
    peak_theta = theta[arg_peak]

    # Move minimum to theta = zero (first get peak in radially-averaged profile)
    avg_data = np.average(data_segment, axis = 1) # avg over theta
    segment_arg_peak = np.argmax(avg_data)

    # Azimuthal profile at the peak radius (integer row index, not the radius).
    arg_min = np.argmin(data_segment[segment_arg_peak])
    shift_min = int(0 - arg_min)
    data_segment = np.roll(data_segment, shift_min, axis = -1)

    # Zoom in on center --- Average over sliver width (degrees -> radians)
    half_width = (0.5 * sliver_width) * (np.pi / 180.0)
    zoom_start = np.searchsorted(theta, peak_theta - half_width)
    zoom_end = np.searchsorted(theta, peak_theta + half_width)

    data_sliver = data_segment[:, zoom_start : zoom_end]
    # Gaussian weights concentrate the azimuthal average on the peak angle.
    length = len(data_sliver[0]); std = length / 3.0
    weights = gaussian(length, std)
    radial_profile = np.average(data_sliver, weights = weights, axis = 1) # avg over theta to get radial profile

    if normalize:
        radial_profile /= np.max(radial_profile)

    # Find extents with the threshold
    left_rad_i = my_searchsorted(radial_profile, threshold)
    right_rad_i = len(radial_profile) - (my_searchsorted(radial_profile[::-1], threshold)) - 1

    left_rad = rad_segment[left_rad_i]
    right_rad = rad_segment[right_rad_i]

    extent = right_rad - left_rad
    return extent
### Data ###
def get_profiles(density, fargo_par, args, normalize = False, shift = None, start = outer_start, end = outer_end):
    """ Gather azimuthal radii and profiles (doesn't have to be density)

    Returns ``args.num_profiles`` azimuthal slices spread over
    ``args.num_scale_heights`` scale heights around the vortex peak radius.

    Bug fixes relative to the original:
      * ``density /= surface_density_zero`` mutated the caller's array;
        normalization now happens on a copy.
      * ``np.roll(density, shift)`` rolled the *flattened* 2-D array even
        though ``shift`` is an azimuthal index; it now rolls along axis -1.
    """
    ######## Get Parameters #########
    rad = fargo_par["rad"]
    theta = fargo_par["theta"]

    scale_height = fargo_par["AspectRatio"]
    surface_density_zero = fargo_par["Sigma0"]

    ########### Method ##############
    if normalize:
        density = density / surface_density_zero  # copy: do not mutate the caller's array
    if shift is not None:
        density = np.roll(density, shift, axis = -1)  # shift is an index along theta

    # Find Peak in Radial Profile (in Outer Disk)
    averagedDensity = np.average(density, axis = 1)
    peak_rad, peak_density = get_radial_peak(averagedDensity, fargo_par, start = start, end = end)

    # Gather Azimuthal Profiles
    num_profiles = args.num_profiles
    spread = (args.num_scale_heights / 2.0) * scale_height

    azimuthal_radii = np.linspace(peak_rad - spread, peak_rad + spread, num_profiles)
    azimuthal_indices = [np.searchsorted(rad, this_radius) for this_radius in azimuthal_radii]
    azimuthal_profiles = [density[azimuthal_index, :] for azimuthal_index in azimuthal_indices]

    return azimuthal_radii, azimuthal_profiles
###############################################################################
### Plotting ###
def get_max_y(size, taper_time):
    """Return the plot y-axis maximum for a particle size; the limits differ
    for fast tapers (taper_time < 10.1) versus slow ones."""
    if taper_time < 10.1:
        limits = {1.0: 1500, 0.3: 400, 0.1: 100, 0.03: 40, 0.01: 20, 0.0001: 4.0}
    else:
        limits = {1.0: 600, 0.3: 125, 0.1: 30, 0.03: 20, 0.01: 10, 0.0001: 2.5}
    return limits[size]
###############################################################################
### Analytic ###
def get_analytic_profile(angle, r, dr, dtheta, aspect_ratio, S, max_density = 1, scale_height = 0.06):
    """Analytic azimuthal dust density profile for a vortex of the given
    aspect ratio with S = St / delta (Lyra + Lin 13, Eq. 65 mapping).

    ``angle`` is mapped so the azimuthal edge of the vortex (angle = dtheta)
    corresponds to the radial half-width dr in units of the scale height.
    """
    # Semiminor-axis coordinate fed into the dust distribution.
    x = (dr / scale_height) * (angle / dtheta)

    # Squared scale function f^2 for this aspect ratio.
    xi = 1 + aspect_ratio ** (-2)
    vorticity = 1.5 / (aspect_ratio - 1)
    f_sq = 2.0 * vorticity * aspect_ratio - (2 * vorticity ** 2 + 3) / xi

    return max_density * (S + 1) ** 1.5 * np.exp(-(S + 1) * x ** 2 * f_sq / 2.0)
| Sportsfan77777/vortex | code_synthetic_images/archive/azimuthal.py | azimuthal.py | py | 13,155 | python | en | code | 1 | github-code | 13 |
4087258661 | import numpy as np
import matplotlib.pyplot as plt
import urllib.request
# Defines a very simple convolution layer.
class Conv:
    """Minimal 2-D convolution layer (valid padding, stride 1, single-channel input)."""
    def __init__(self, W, filters, kernel_size):
        # W: weight tensor of shape (filters, k_h, k_w)
        self.filters = filters
        self.kernel_size = kernel_size
        self.W = W  # np.random.rand(filters, kernel_size[0], kernel_size[1])
    def f_prop(self, X):
        """Forward pass: correlate each filter with every valid patch of X."""
        k_h, k_w = self.kernel_size
        # BUG FIX: the original read the module-level global ``filters`` here,
        # so the layer only worked when a matching global happened to exist.
        # Use the layer's own filter count instead.
        out = np.zeros((self.filters, X.shape[0] - k_h + 1, X.shape[1] - k_w + 1))
        for k in range(self.filters):
            for i in range(out.shape[1]):
                for j in range(out.shape[2]):
                    patch = X[i:i + k_h, j:j + k_w]
                    out[k, i, j] = np.dot(self.W[k].flatten(), patch.flatten())
        return out
# Defines a very simple pooling layer.
# Only pooling of single-channel feature maps is supported.
class Pool:
    """Minimal max-pooling layer for a single-channel feature map."""
    def __init__(self, pool_size, strides):
        self.pool_size = pool_size
        self.strides = strides
    def f_prop(self, X):
        """Slide the pooling window over X and keep the maximum of each patch."""
        win_h, win_w = self.pool_size
        step_h, step_w = self.strides
        n_rows = (X.shape[0] - win_h) // step_h + 1
        n_cols = (X.shape[1] - win_w) // step_w + 1
        out = np.zeros((n_rows, n_cols))
        for row in range(n_rows):
            top = row * step_h
            for col in range(n_cols):
                left = col * step_w
                out[row, col] = np.max(X[top:top + win_h, left:left + win_w])
        return out
# Download the demo image and pre-made convolution weights.
local_filename, headers = urllib.request.urlretrieve('https://aidemyexcontentsdata.blob.core.windows.net/data/5100_cnn/circle.npy')
X = np.load(local_filename)
local_filename_w, headers = urllib.request.urlretrieve('https://aidemyexcontentsdata.blob.core.windows.net/data/5100_cnn/weight.npy')
W = np.load(local_filename_w)
# Convolution
filters = 4
kernel_size = (3,3)
conv = Conv(W=W, filters=filters, kernel_size=kernel_size)
C = conv.f_prop(X)
# Pooling 1
pool_size = (2,2)
strides = (1,1)
pool1 = Pool(pool_size, strides)
P1 = [pool1.f_prop(C[i]) for i in range(len(C))]
# Pooling 2 (larger window, larger stride)
pool_size = (3,3)
strides = (2,2)
pool2 = Pool(pool_size, strides)
P2 = [pool2.f_prop(C[i]) for i in range(len(C))]
# --------------------------------------------------------------
# Everything below is visualization code only.
# --------------------------------------------------------------
plt.imshow(X)
plt.title('元画像', fontsize=12)
plt.show()
plt.figure(figsize=(10,1))
for i in range(filters):
    plt.subplot(1,filters,i+1)
    ax = plt.gca() # get current axis
    ax.tick_params(labelbottom="off", labelleft="off", bottom="off", left="off") # hide tick labels and ticks
    plt.imshow(C[i])
plt.suptitle('畳み込み結果', fontsize=12)
plt.show()
plt.figure(figsize=(10,1))
for i in range(filters):
    plt.subplot(1,filters,i+1)
    ax = plt.gca() # get current axis
    ax.tick_params(labelbottom="off", labelleft="off", bottom="off", left="off") # hide tick labels and ticks
    plt.imshow(P1[i])
plt.suptitle('プーリング結果', fontsize=12)
plt.show()
plt.figure(figsize=(10,1))
for i in range(filters):
    plt.subplot(1,filters,i+1)
    ax = plt.gca() # get current axis
    ax.tick_params(labelbottom="off", labelleft="off", bottom="off", left="off") # hide tick labels and ticks
    plt.imshow(P2[i])
plt.suptitle('プーリング結果', fontsize=12)
plt.show() | yasuno0327/LearnCNN | aidemy/cnn/task8.py | task8.py | py | 3,246 | python | en | code | 1 | github-code | 13 |
37862226263 | import numpy as np
def idlMod(a, b):
    """
    Emulate 'modulo' behavior of IDL.

    IDL's MOD uses truncated division, so the result carries the sign of the
    numerator and is exactly zero when *a* is an exact multiple of *b*.

    BUG FIX: the original subtracted *b* for every negative numerator, turning
    exact negative multiples (e.g. idlMod(-4, 2)) into -b instead of 0.

    Parameters
    ----------
    a : float or array
        Numerator
    b : float
        Denominator

    Returns
    -------
    IDL modulo : float or array
        The result of IDL modulo operation.
    """
    if isinstance(a, np.ndarray):
        s = np.sign(a)
        m = np.mod(a, b)
        # Shift only genuinely nonzero remainders of negative numerators.
        m[(s < 0) & (m != 0)] -= b
    else:
        m = a % b
        if a < 0 and m != 0: m -= b
    return m
26108341204 | import heapq
import sys
# Read vertex count V, edge count E, then the source vertex.
V, E = map(int, input().split())
start = int(input())
# g[u] holds outgoing edges of u as [destination, weight] pairs (1-indexed).
g = [[] for _ in range(V+1)]
for _ in range(E):
    u, v, w = map(int, input().split())
    g[u].append([v, w])
# D[v]: best known distance from start to v; sys.maxsize means unreachable.
D = [sys.maxsize] * (V+1)
def dijkstra(start):
    """Single-source shortest paths from *start*; fills the global D in place."""
    q = []
    heapq.heappush(q, (0, start))
    D[start] = 0
    while q:
        dist, now = heapq.heappop(q)
        if D[now] < dist: # stale heap entry: a shorter path to `now` was already found
            continue
        for v, w in g[now]:
            cost = D[now] + w
            if cost < D[v]:
                D[v] = cost
                heapq.heappush(q, (cost, v))
dijkstra(start)
# Print each vertex's distance, or INF if it was never reached.
for i in range(1, V+1):
    if D[i] == sys.maxsize:
        print("INF")
    else:
        print(D[i])
| necteo/CoTeStudy | boj/1753_최단경로.py | 1753_최단경로.py | py | 765 | python | en | code | 0 | github-code | 13 |
22236156146 | import unittest
from unittest import mock
import uuid
import tempfile
import os
from pathlib import Path
import numpy as np
from iblutil.io.parquet import uuid2np, np2uuid, np2str, str2np
from iblutil.io import params
import iblutil.io.jsonable as jsonable
from iblutil.numerical import intersect2d, ismember2d, ismember
class TestParquet(unittest.TestCase):
    """Tests for the parquet UUID <-> numpy/str conversion helpers."""
    def test_uuids_conversions(self):
        # A UUID maps to a pair of int64s; np2str/str2np round-trip them.
        str_uuid = 'a3df91c8-52a6-4afa-957b-3479a7d0897c'
        one_np_uuid = np.array([-411333541468446813, 8973933150224022421])
        two_np_uuid = np.tile(one_np_uuid, [2, 1])
        # array gives a list
        self.assertTrue(all(map(lambda x: x == str_uuid, np2str(two_np_uuid))))
        # single uuid gives a string
        self.assertTrue(np2str(one_np_uuid) == str_uuid)
        # list uuids with some None entries
        uuid_list = ['bc74f49f33ec0f7545ebc03f0490bdf6', 'c5779e6d02ae6d1d6772df40a1a94243',
                     None, '643371c81724378d34e04a60ef8769f4']
        assert np.all(str2np(uuid_list)[2, :] == 0)
    def test_uuids_intersections(self):
        # Build `eids` and a shuffled subset `sids` with a few extra entries,
        # then verify 2-D intersect/ismember agree with numpy's 1-D versions.
        ntotal = 500
        nsub = 17
        nadd = 3
        eids = uuid2np([uuid.uuid4() for _ in range(ntotal)])
        np.random.seed(42)
        isel = np.floor(np.argsort(np.random.random(nsub)) / nsub * ntotal).astype(np.int16)
        sids = np.r_[eids[isel, :], uuid2np([uuid.uuid4() for _ in range(nadd)])]
        np.random.shuffle(sids)
        # check the intersection
        v, i0, i1 = intersect2d(eids, sids)
        assert np.all(eids[i0, :] == sids[i1, :])
        assert np.all(np.sort(isel) == np.sort(i0))
        v_, i0_, i1_ = np.intersect1d(eids[:, 0], sids[:, 0], return_indices=True)
        assert np.setxor1d(v_, v[:, 0]).size == 0
        assert np.setxor1d(i0, i0_).size == 0
        assert np.setxor1d(i1, i1_).size == 0
        for a, b in zip(ismember2d(sids, eids), ismember(sids[:, 0], eids[:, 0])):
            assert np.all(a == b)
        # check conversion to numpy back and forth
        uuids = [uuid.uuid4() for _ in np.arange(4)]
        np_uuids = uuid2np(uuids)
        assert np2uuid(np_uuids) == uuids
class TestParams(unittest.TestCase):
    """Tests for iblutil.io.params helpers."""
    @mock.patch('sys.platform', 'linux')
    def test_set_hidden(self):
        # On Linux, hiding a file renames it with a leading dot; unhiding reverses it.
        with tempfile.TemporaryDirectory() as td:
            file = Path(td).joinpath('file')
            file.touch()
            hidden_file = params.set_hidden(file, True)
            self.assertFalse(file.exists())
            self.assertTrue(hidden_file.exists())
            self.assertEqual(hidden_file.name, '.file')
            params.set_hidden(hidden_file, False)
            self.assertFalse(hidden_file.exists())
            self.assertTrue(file.exists())
class TestsJsonable(unittest.TestCase):
    """Round-trip tests for iblutil.io.jsonable read/write/append."""
    def setUp(self) -> None:
        # delete=False so the file can be reopened by name on all platforms
        self.tfile = tempfile.NamedTemporaryFile(delete=False)
    def testReadWrite(self):
        data = [{'a': 'thisisa', 'b': 1, 'c': [1, 2, 3]},
                {'a': 'thisisb', 'b': 2, 'c': [2, 3, 4]}]
        jsonable.write(self.tfile.name, data)
        data2 = jsonable.read(self.tfile.name)
        self.assertEqual(data, data2)
        # append should concatenate records rather than overwrite
        jsonable.append(self.tfile.name, data)
        data3 = jsonable.read(self.tfile.name)
        self.assertEqual(data + data, data3)
    def tearDown(self) -> None:
        self.tfile.close()
        os.unlink(self.tfile.name)
if __name__ == "__main__":
unittest.main(exit=False, verbosity=2)
| int-brain-lab/iblutil | tests/test_io.py | test_io.py | py | 3,441 | python | en | code | 0 | github-code | 13 |
24344322593 | #!/usr/bin/python
from max6675 import MAX6675, MAX6675Error
import time
import socket#for sockets
import sys#for exit
import struct
# Make sure to use the pi's GPIO numbers of pins rather than the generic pin numbers 1-40 as they do not match.
cs_pin = 24 #(CS)
clock_pin = 23 #(SCLK/SCK)
data_pin = 22 #(SO/MOSI)
units = "c" # Leave as Celsius as Roastmaster can convert if desired.
# MAX6675 thermocouple reader, bit-banged over the GPIO pins above.
thermocouple = MAX6675(cs_pin, clock_pin, data_pin, units)
def udp_socket():
    """Multicast thermocouple readings as Roastmaster RDP datagrams over UDP.

    Loops until a thermocouple error or Ctrl-C, sending one JSON datagram
    roughly every 0.3 s to the multicast group.
    """
    # This is the IP and port we will be multicasting to via UDP protocol.
    # This way we do not have to worry about handshakes or the IP of the iOS device.
    localIP = "224.0.0.1"
    localPort = 5050
    multicastGroup = (localIP, localPort)
    # Create a datagram socket
    UDPServerSocket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
    # TTL of 1 keeps the multicast on the local network segment.
    ttl = struct.pack('b', 1)
    UDPServerSocket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)
    print("UDP server up")
    running = True
    while running:
        try:
            try:
                # This is the actual temperature from the thermocouple.
                temp = thermocouple.get()
                payload = '{"RPChannel":1,"RPEventType":3,"RPValue":%s,"RPMetaType":3000}' % (temp)
                datagram = '{"RPVersion":"RDP_1.0","RPSerial":"kaldidrum","RPEpoch":%s,"RPPayload":[%s]}' % (time.time(), payload)
                bytesToSend = str.encode(datagram)
                try:
                    # Sending data to Roastmaster
                    UDPServerSocket.sendto(bytesToSend, multicastGroup)
                    # Display this via cli for feedback/fallback.
                    print(bytesToSend)
                except OSError:
                    # BUG FIX: the original had ``except socket:`` (catching the
                    # module, not an exception class) and Python 2's
                    # ``print >>sys.stderr`` syntax. socket.error is an alias
                    # of OSError in Python 3.
                    print('could not send', file=sys.stderr)
            except MAX6675Error as e:
                temp = "Thermocouple Error: "+ e.value
                running = False
                print("tc: {}".format(temp))
            # How long in seconds to wait before sending again. Roastmaster recommends max of 1 (5 seconds is considered a broken connection).
            time.sleep(0.3)
        except KeyboardInterrupt:
            running = False
            # Cleanup when interrupted.
            UDPServerSocket.close()
# Run main function (blocks until interrupted or sensor error).
udp_socket()
# Cleanup when done: release the GPIO pins.
thermocouple.cleanup()
| sheparddw/pi-coffee-roaster-probe | sendTempToRDP.py | sendTempToRDP.py | py | 2,438 | python | en | code | 1 | github-code | 13 |
30000443028 | import json
# breadJson: sample input — one entry per bread type, each with its recipe
# (ingredient name -> quantity).
breadJson = [
    {
        "breadType": "cream",
        "recipe": {
            "flour": 100,
            "water": 100,
            "cream": 200
        }
    },
    {
        "breadType": "sugar",
        "recipe": {
            "flour": 100,
            "water": 50,
            "sugar": 200
        }
    },
    {
        "breadType": "butter",
        "recipe": {
            "flour": 100,
            "water": 100,
            "butter": 50
        }
    }
]
# Abstract base class and method definition
class bread():
    """Base bread type: numeric type codes plus the print hook subclasses override."""
    # codes identifying each concrete bread type, used by breadFactory
    CREAM = 0
    SUGAR = 1
    BUTTER = 2
    def print_bread(self):
        # intentionally a no-op; concrete bread classes print their recipe
        pass
# creambread class
class creambread(bread):
    """Cream bread: prints its type line followed by its recipe lines."""
    def print_bread(self, data):
        """Print the breadType and recipe of the JSON-encoded bread *data*."""
        info = json.loads(data)
        for field, value in info.items():
            if field == "breadType":
                print(field, ":", value)
                continue
            # first non-type field encountered: dump the recipe once, then stop
            for ingredient, amount in info['recipe'].items():
                print(ingredient, ":", amount)
            break
# sugarbread class
class sugarbread(bread):
    """Sugar bread: prints its type line followed by its recipe lines."""
    def print_bread(self, data):
        """Print the breadType and recipe of the JSON-encoded bread *data*."""
        info = json.loads(data)
        for field, value in info.items():
            if field == "breadType":
                print(field, ":", value)
                continue
            # first non-type field encountered: dump the recipe once, then stop
            for ingredient, amount in info['recipe'].items():
                print(ingredient, ":", amount)
            break
# butterbread class
class butterbread(bread):
    """Butter bread: prints its type line followed by its recipe lines."""
    def print_bread(self, data):
        """Print the breadType and recipe of the JSON-encoded bread *data*."""
        info = json.loads(data)
        for field, value in info.items():
            if field == "breadType":
                print(field, ":", value)
                continue
            # first non-type field encountered: dump the recipe once, then stop
            for ingredient, amount in info['recipe'].items():
                print(ingredient, ":", amount)
            break
# Factory class
class breadFactory():
    """Factory dispatching a bread-type code to the matching bread class."""
    def getbread(self, breadType, data):
        """Instantiate the bread class for *breadType* and print *data*."""
        makers = {
            bread.CREAM: creambread,
            bread.SUGAR: sugarbread,
            bread.BUTTER: butterbread,
        }
        maker = makers.get(breadType)
        # unknown codes fall through and return None, like the original if/elif chain
        if maker is not None:
            return maker().print_bread(data)
if __name__ == '__main__':
    breadJsondata = breadFactory() # create the factory object
    # Walk the sample JSON, check each entry's type, and dispatch to the matching class.
    for typebread in breadJson:
        if typebread["breadType"] == "cream":
            data = json.dumps(typebread)
            breadJsondata.getbread(bread.CREAM, data)
        elif typebread["breadType"] == "sugar":
            data = json.dumps(typebread)
            breadJsondata.getbread(bread.SUGAR, data)
        else:
            data = json.dumps(typebread)
            breadJsondata.getbread(bread.BUTTER, data)
| jhs3104/Test_vcanus | 1/bread.py | bread.py | py | 2,687 | python | en | code | 0 | github-code | 13 |
15147955039 | #coding=utf-8
import os
import pandas as pd
from sklearn.model_selection import train_test_split
from dataset.bd_xjtu_dataset import collate_fn, dataset
import torch
import torch.utils.data as torchdata
from torchvision import datasets, models, transforms
from torchvision.models import resnet50
import torch.optim as optim
from torch.optim import lr_scheduler
from utils.train_util import train, trainlog
from torch.nn import CrossEntropyLoss
import logging
from dataset.data_aug import *
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
'''
http://dianshi.baidu.com/gemstone/competitions/detail?raceId=17
'''
save_dir = '/media/hszc/model/detao/baidu_model/resnet50'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
logfile = '%s/trainlog.log'%save_dir
trainlog(logfile)
rawdata_root = '/media/hszc/data/detao/data/baidu/datasets/train'
all_pd = pd.read_csv("/media/hszc/data/detao/data/baidu/datasets/train.txt",sep=" ",
header=None, names=['ImageName', 'label'])
train_pd, val_pd = train_test_split(all_pd, test_size=0.15, random_state=43,
stratify=all_pd['label'])
print(val_pd.shape)
'''数据扩增'''
data_transforms = {
'train': Compose([
RandomRotate(angles=(-15,15)),
ExpandBorder(size=(368,368),resize=True),
RandomResizedCrop(size=(336, 336)),
Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]),
'val': Compose([
ExpandBorder(size=(336,336),resize=True),
Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
data_set = {}
data_set['train'] = dataset(imgroot=rawdata_root,anno_pd=train_pd,
transforms=data_transforms["train"],
)
data_set['val'] = dataset(imgroot=rawdata_root,anno_pd=val_pd,
transforms=data_transforms["val"],
)
dataloader = {}
dataloader['train']=torch.utils.data.DataLoader(data_set['train'], batch_size=4,
shuffle=True, num_workers=4,collate_fn=collate_fn)
dataloader['val']=torch.utils.data.DataLoader(data_set['val'], batch_size=4,
shuffle=True, num_workers=4,collate_fn=collate_fn)
'''model'''
model =resnet50(pretrained=True)
model.avgpool = torch.nn.AdaptiveAvgPool2d(output_size=1)
model.fc = torch.nn.Linear(model.fc.in_features,100)
base_lr =0.001
resume =None
if resume:
logging.info('resuming finetune from %s'%resume)
model.load_state_dict(torch.load(resume))
model = model.cuda()
optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9, weight_decay=1e-5)
criterion = CrossEntropyLoss()
exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=6, gamma=0.1)
train(model,
epoch_num=50,
start_epoch=0,
optimizer=optimizer,
criterion=criterion,
exp_lr_scheduler=exp_lr_scheduler,
data_set=data_set,
data_loader=dataloader,
save_dir=save_dir,
print_inter=50,
val_inter=400) | OdingdongO/pytorch_classification | 2018_bd_xjtu_train.py | 2018_bd_xjtu_train.py | py | 3,045 | python | en | code | 251 | github-code | 13 |
70520255697 |
import os
import torch
import torch.nn as nn
from einops import rearrange
# import imageio.v3 as iio
import numpy as np
import copy
import matplotlib.pyplot as plt
from torchvision import datasets, transforms
import math
import torch.nn.functional as F
import warnings
from IJEPA.video_dataset import VideoFrameDataset, ImglistToTensor
from argparse import ArgumentParser, Namespace
from IJEPA.decoders import Decoder, ATMHead
from IJEPA.models import IJEPA_base, EarlyStop
from IJEPA.atm_loss import ATMLoss
from IJEPA.eval import compute_jaccard
def parse_args() -> Namespace:
parser = ArgumentParser("Decoder")
parser.add_argument("--train-dir", help="Name of dir with training data", required=True, type=str)
parser.add_argument("--val-dir", help="Name of dir with validation data", required=True, type=str)
parser.add_argument("--output-dir", required=True, type=str, help="Name of dir to save the checkpoints to")
parser.add_argument("--run-id", help="Name of the run", required=True, type=str)
parser.add_argument("--resume", default=False, type=bool, help="In case training was not completed resume from last epoch")
return parser.parse_args()
def load_data(root, annotation_file, batch_size=2):
    """Build a shuffling DataLoader of 22-frame video clips (with masks) from *root*.

    :param root: directory containing the per-video frame folders
    :param annotation_file: path to the annotation list file
    :param batch_size: clips per batch (default 2)
    """
    preprocess = transforms.Compose([
        ImglistToTensor(), # list of PIL images to (FRAMES x CHANNELS x HEIGHT x WIDTH) tensor
        # transforms.Resize(299), # image batch, resize smaller edge to 299
        transforms.Resize((128,128)),
        # transforms.CenterCrop(299), # image batch, center crop to square 299x299
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    # One segment of 22 consecutive frames per video; mask=True also yields
    # segmentation masks (see VideoFrameDataset).
    dataset = VideoFrameDataset(
        root_path=root,
        annotationfile_path=annotation_file,
        num_segments=1,
        frames_per_segment=22,
        imagefile_template='image_{:d}.png',
        transform=preprocess,
        mask=True,
        test_mode=False
    )
    dataloader = torch.utils.data.DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=4, # arbitrarily chosen
        pin_memory=True
    )
    return dataloader
# Train the model
def train_model(epoch, decoder, encoder, criterion, optimizer, scheduler, dataloader, validationloader, num_epochs, output_dir, device, early_stop):
    """Train *decoder* on frozen *encoder* embeddings from *epoch* to *num_epochs*.

    Saves a checkpoint (model/optimizer/scheduler/early-stop state) after every
    epoch and returns a dict with the final epoch, last train loss, and model.
    NOTE(review): if epoch >= num_epochs on entry, ``train_loss`` is never
    assigned and the return statement raises NameError — confirm callers
    always start below num_epochs.
    """
    while epoch < num_epochs:
        decoder.train()
        # do we need to do encoder.eval() or something? Since we are not training it, we want to deactivate the dropouts
        train_loss = 0
        for i, data in enumerate(dataloader, 0):
            inputs, labels, target_masks = data
            inputs, labels, target_masks = inputs.to(device), labels.to(device), target_masks.to(device)
            # keep only the first 11 frames as the conditioning context
            inputs = inputs[:, :11]
            optimizer.zero_grad()
            ### forward pass through encoder to get the embeddings
            predicted_embeddings = encoder(inputs.transpose(1, 2))
            # Reshape predicted embeddings to (b t) (h w) m
            predicted_embeddings = rearrange(predicted_embeddings, 'b t n m -> (b t) n m')
            target_masks = rearrange(target_masks, 'b t n m -> (b t) n m')
            ### forward pass through decoder to get the masks
            outputs = decoder(predicted_embeddings)
            # the target_mask tensor is of shape b f h w
            ### compute the loss and step
            loss = criterion(outputs, target_masks, -1)
            train_loss += loss.item()
            loss.backward()
            optimizer.step()
            # Update the scheduler learning rate
            scheduler.step()
            if i % 50 == 0 and epoch < 5:
                print(f"Current loss: {loss.item()}")
        avg_epoch_loss = train_loss / len(dataloader)
        # # Validation loss
        # decoder.eval()
        # val_loss = 0
        # jaccard_scores = []
        # with torch.no_grad():
        #     for data in validationloader:
        #         inputs, labels, target_masks = data
        #         inputs, labels, target_masks = inputs.to(device), labels.to(device), target_masks.to(device)
        #         inputs = inputs[:, :11]
        #         ### compute predictions
        #         predicted_embeddings = encoder(inputs.transpose(1, 2))
        #         # Reshape predicted embeddings to (b t) (h w) m
        #         predicted_embeddings = rearrange(predicted_embeddings, 'b t n m -> (b t) n m')
        #         target_masks = rearrange(target_masks, 'b t n m -> (b t) n m')
        #         ### forward pass through decoder to get the masks
        #         outputs = decoder(predicted_embeddings)
        #         # compute loss
        #         val_loss += criterion(outputs, target_masks, -1)
        #         ## want to go from batch * frames x height x width x num_classes with logits to batch * frames x height x width with class predictions
        #         predicted = torch.argmax(outputs['pred_masks'], 1)
        #         jaccard_scores.append(compute_jaccard(predicted, target_masks, device))
        # # per-pixel accuracy on validation set
        # # is this a good metric? Probably not
        # avg_val_loss = val_loss / len(validationloader)
        # average_jaccard = sum(jaccard_scores) / len(jaccard_scores)
        current_lr = optimizer.param_groups[0]['lr']
        # print(f"Epoch: {epoch + 1}, Learning Rate: {current_lr:.6f}, Avg train loss: {avg_epoch_loss:.4f}, Avg val loss: {avg_val_loss:.4f}, Avg Jaccard: {average_jaccard:.4f}")
        print(f"Epoch: {epoch + 1}, Learning Rate: {current_lr:.6f}, Avg train loss: {avg_epoch_loss:.4f}")
        # # Early Stopping
        # if average_jaccard > early_stop.best_value:
        #     torch.save(decoder.module.state_dict() if torch.cuda.device_count() > 1 else decoder.state_dict(), os.path.join(output_dir, 'models/decoder/best',"best_model.pkl"))
        # early_stop.step(average_jaccard, epoch)
        # if early_stop.stop_training(epoch):
        #     print(
        #         "early stopping at epoch {} since valdiation loss didn't improve from epoch no {}. Best value {}, current value {}".format(
        #             epoch, early_stop.best_epoch, early_stop.best_value, average_jaccard
        #         ))
        #     break
        # Used this approach (while and epoch increase) so that we can get back to training the loaded model from checkpoint
        epoch += 1
        # Checkpoint
        torch.save({
            'epoch': epoch,
            'model_state_dict': decoder.module.state_dict() if torch.cuda.device_count() > 1 else decoder.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'scheduler_state_dict': scheduler.state_dict(),
            'early_stop': early_stop,
        }, os.path.join(output_dir, 'models/decoder', "checkpoint_decoder.pkl"))
    return {
        "epochs": epoch,
        "train_loss": train_loss,
        "model": decoder
    }
if __name__ == "__main__":
torch.cuda.empty_cache()
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
args = parse_args()
batch_size = 2
# Make run dir
if not os.path.exists(os.path.join(args.output_dir,args.run_id)):
os.makedirs(os.path.join(args.output_dir,args.run_id), exist_ok=True)
save_dir = os.path.join(args.output_dir,args.run_id)
os.makedirs(os.path.join(save_dir, "models/decoder"), exist_ok=True)
os.makedirs(os.path.join(save_dir, "models/decoder/best"), exist_ok=True)
# Load train data and validation data
train_data_dir = os.path.join(args.train_dir, 'data')
train_annotation_dir = os.path.join(args.train_dir, 'annotations.txt')
val_data_dir = os.path.join(args.val_dir, 'data')
val_annotation_dir = os.path.join(args.val_dir, 'annotations.txt')
print('Loading train data...')
dataloader = load_data(train_data_dir, train_annotation_dir, batch_size)
print('Loading val data...')
validationloader = load_data(val_data_dir, val_annotation_dir, batch_size)
num_epochs = 100
total_steps = num_epochs * len(dataloader)
# should these also come from global config?
div_factor = 5 # max_lr/div_factor = initial lr
final_div_factor = 10 # final lr is initial_lr/final_div_factor
patience = 10
# Used this approach so that we can getv back to training the loaded model from checkpoint
epoch = 0
# get these params from global config? to ensure that it always matches the trained IJEPA model
# load encoder
encoder = IJEPA_base(img_size=128, patch_size=8, in_chans=3, norm_layer=nn.LayerNorm, num_frames=22, attention_type='divided_space_time', dropout=0.1, mode="test", M=4, embed_dim=384,
# encoder parameters
enc_depth=10,
enc_num_heads=6,
enc_mlp_ratio=4.,
enc_qkv_bias=False,
enc_qk_scale=None,
enc_drop_rate=0.,
enc_attn_drop_rate=0.,
enc_drop_path_rate=0.1,
# predictor parameters
pred_depth=10,
pred_num_heads=6,
pred_mlp_ratio=4.,
pred_qkv_bias=False,
pred_qk_scale=None,
pred_drop_rate=0.1,
pred_attn_drop_rate=0.1,
pred_drop_path_rate=0.1,
# positional and spacial embedding parameters
pos_drop_rate=0.1,
time_drop_rate=0.1)
# for k,v in encoder.state_dict().items():
# print(k)
# load decoder
# decoder = Decoder(input_dim=768, hidden_dim=3072, num_hidden_layers=2)
decoder = ATMHead(img_size=128, H=160, W=240, in_channels=384, use_stages=1)
decoder.to(device)
criterion = ATMLoss(48, 1)
criterion.to(device)
# criterion = nn.CrossEntropyLoss() # since we will have label predictions?
# Just using same optimizer and scheduler as IJEPA, will need to change later
# probably higher lr than IJEPA
optimizer = torch.optim.AdamW(decoder.parameters(), lr=0.001, weight_decay=0.05)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=total_steps, eta_min=0.000001)
### load pretrained IJEPA model -> should this just load from the latest IJEPA checkpoint?
path_best = os.path.join(save_dir, "models/best")
if os.path.exists(path_best):
checkpoint = torch.load(os.path.join(path_best, "best_model.pkl"), map_location=device)
encoder_state_dict = checkpoint # checkpoint['model_state_dict']
# encoder_state_dict['mode'] = 'test'
encoder.load_state_dict(encoder_state_dict)
encoder.to(device)
# path_partial = os.path.join(save_dir, "models/partial")
# if os.path.exists(path_partial):
# checkpoint = torch.load(os.path.join(path_partial, "checkpoint.pkl"), map_location=device)
# encoder_state_dict = checkpoint['model_state_dict']
# # encoder_state_dict['mode'] = 'test'
# encoder.load_state_dict(encoder_state_dict)
# encoder.to(device)
early_stop = EarlyStop(patience)
if args.resume:
print("Attempting to find existing checkpoint")
path_partials = os.path.join(save_dir, "models/decoder")
if os.path.exists(path_partials):
checkpoint = torch.load(os.path.join(path_partials, "checkpoint_decoder.pkl"), map_location=device)
decoder.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
early_stop = checkpoint['early_stop']
epoch = checkpoint['epoch']
# if torch.cuda.device_count() > 1:
# print("Model training will be distributed to {} GPUs.".format(torch.cuda.device_count()))
# decoder = nn.DataParallel(decoder)
# decoder.to(device)
results = train_model(epoch, decoder, encoder, criterion, optimizer, scheduler, dataloader, validationloader, num_epochs, save_dir, device, early_stop)
# run full evaluation at this point?
print(f'Decoder training finshed at epoch {results["epochs"]}, trainig loss: {results["train_loss"]}')
| gbugli/DL_project_2023 | IJEPA/train_decoder.py | train_decoder.py | py | 12,512 | python | en | code | 0 | github-code | 13 |
2464952133 | import threading
import time
import random
class queue(object):
    """Single-slot, thread-safe hand-off buffer; -1 is the 'empty' sentinel.

    NOTE: ``lock`` is a class attribute, so every instance shares one RLock
    (fine here, where a single queue is shared by all threads).
    """
    lock = threading.RLock()
    def __init__(self):
        # -1 means "no item available"
        self.item = -1
    def add(self, n):
        """Store *n* as the current item."""
        # BUG-HARDENING: ``with`` guarantees the lock is released even if an
        # exception occurs between acquire() and release().
        with self.lock:
            self.item = n
    def remove(self):
        """Return the current item (or -1 if empty) and clear the slot."""
        with self.lock:
            saida = self.item
            self.item = -1
            return saida
def producer(queue, inputs, index):
    """Produce *inputs* random items into *queue*, one per second.

    *index* is only used to label the console output. Note this decrements
    its own local ``inputs`` counter, independent of the consumers' global.
    """
    while inputs > 0:
        item = random.randint(0,256)
        print("Producer notify: item N{0} adicionado por {1}".format(item, index))
        queue.add(item)
        time.sleep(1)
        inputs -= 1
def consumer(queue, index):
    """Consume items from *queue* until the shared global ``inputs`` hits 0.

    NOTE(review): the check-then-decrement of the global ``inputs`` is not
    atomic across consumer threads, so this demo tolerates races by design.
    """
    global inputs
    while inputs > 0:
        item = queue.remove()
        # remove() returns -1 when the slot is empty; only count real items
        if item>=0:
            print ("Consumer notify: {0} retirado por {1}".format(item, index))
            inputs -= 1
        time.sleep(0.5)
if __name__=="__main__":
inputs = 10
queue = queue()
t1 = threading.Thread(target=producer, args = (queue,inputs, 1))
t2 = threading.Thread(target=consumer, args=(queue, 2))
t3 = threading.Thread(target=consumer, args=(queue, 3))
t4 = threading.Thread(target=consumer, args=(queue, 4))
t1.start()
t2.start()
t3.start()
t4.start()
t1.join()
t2.join()
t3.join()
t4.join() | fabiomoreirafms/CES-22-Exercicios | ExercicioThread.py | ExercicioThread.py | py | 1,314 | python | en | code | 0 | github-code | 13 |
17092913894 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.PartnerVO import PartnerVO
class KoubeiRetailWmsPartnerQueryResponse(AlipayResponse):
    """Response model for the Koubei retail WMS partner query API.

    Exposes the matched partner list and the total match count parsed from
    the raw gateway response.
    """
    def __init__(self):
        super(KoubeiRetailWmsPartnerQueryResponse, self).__init__()
        self._partners = None
        self._total_count = None
    @property
    def partners(self):
        # list of PartnerVO, or None before a response is parsed
        return self._partners
    @partners.setter
    def partners(self, value):
        # Accept either PartnerVO instances or raw dicts; coerce dicts.
        if isinstance(value, list):
            self._partners = list()
            for i in value:
                if isinstance(i, PartnerVO):
                    self._partners.append(i)
                else:
                    self._partners.append(PartnerVO.from_alipay_dict(i))
    @property
    def total_count(self):
        return self._total_count
    @total_count.setter
    def total_count(self, value):
        self._total_count = value
    def parse_response_content(self, response_content):
        """Populate fields from the gateway response parsed by the superclass."""
        response = super(KoubeiRetailWmsPartnerQueryResponse, self).parse_response_content(response_content)
        if 'partners' in response:
            self.partners = response['partners']
        if 'total_count' in response:
            self.total_count = response['total_count']
| alipay/alipay-sdk-python-all | alipay/aop/api/response/KoubeiRetailWmsPartnerQueryResponse.py | KoubeiRetailWmsPartnerQueryResponse.py | py | 1,313 | python | en | code | 241 | github-code | 13 |
22191539702 | from typing import Any, Iterator
import yaml
from linkml.validator.loaders.loader import Loader
class YamlLoader(Loader):
    """A loader for instances serialized as YAML"""
    def __init__(self, source) -> None:
        """Constructor method

        :param source: Path to YAML source
        """
        super().__init__(source)
    def iter_instances(self) -> Iterator[Any]:
        """Lazily yield instances from YAML source.

        If the root of the document is an array, yield each element of the array. Otherwise,
        yield the root element itself. Repeat for each document in the YAML file.

        :return: Iterator over data instances
        :rtype: Iterator[Any]
        """
        with open(self.source) as source_file:
            # safe_load_all handles multi-document streams ("---" separators)
            for document in yaml.safe_load_all(source_file):
                if isinstance(document, list):
                    yield from document
                else:
                    yield document
| linkml/linkml | linkml/validator/loaders/yaml_loader.py | yaml_loader.py | py | 955 | python | en | code | 228 | github-code | 13 |
34884240139 | #Nishit Patel
#0946768
#LAB04
import socket
# Listen on all interfaces, port 80 (requires root/admin on most systems).
local_address = '0.0.0.0'
local_port = 80
#TCP/IP socket
npsocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
#bind ip address
npsocket.bind((local_address,local_port))
#print("Server listening on IP Address : "+ local_address +"\nPort "+ "80")
print("Server starting up on Address: {0} \nPort: {1}".format(local_address,local_port))
#listen to clients (backlog of 5 pending connections)
npsocket.listen(5)
while True:
    print("waiting for connection")
    connection,client_address = npsocket.accept()
    try:
        print("Connection from: " + client_address[0])
        # Echo loop: read up to 16 bytes at a time and send them back.
        while True:
            client_data = connection.recv(16)
            print("Data from client: "+ client_data.decode('UTF-8'))
            if client_data:
                print("Data is sent back to client.")
                connection.sendall(client_data)
            else:
                # NOTE(review): empty recv means the peer closed, but this
                # branch keeps looping instead of breaking — likely spins
                # printing this message until an exception; confirm intent.
                print("No data received from client: "+ client_address[0])
    # NOTE(review): broad except that silently swallows all errors (including
    # decode failures); at minimum the exception should be logged.
    except Exception as ex:
        pass
    finally:
        connection.close()
| nishitpatel28/security_application_lab4 | Echo_Server.py | Echo_Server.py | py | 1,095 | python | en | code | 0 | github-code | 13 |
23572265779 | a=input()
b = input()
# a (read above) and b are non-negative integers given as digit strings that
# may be too large for fixed-size types: a longer string is the larger number,
# and equal-length strings compare digit by digit (plain lexicographic order).
if len(a) != len(b):
    print('LESS' if len(a) < len(b) else 'GREATER')
elif a < b:
    print('LESS')
elif a > b:
    print('GREATER')
else:
    print('EQUAL')
| Kota28/AtCoder | ABC59_B.py | ABC59_B.py | py | 399 | python | en | code | 0 | github-code | 13 |
32664834120 | """Retrieve list of emails of people who hold access to a service."""
import json
import os
import jasmin_account_api_client as jclient
import jasmin_account_api_client.api.services as jservices
import jasmin_account_api_client.api.users as jusers
# Service whose role holders we want to email.
SERVICE_ID = 92
client = jclient.AuthenticatedClient("https://accounts.jasmin.ac.uk")
# Authenticate with the client-credentials OAuth2 flow; credentials come
# from the environment so they never live in the source.
client.client_credentials_flow(
    client_id=os.environ["JASMIN_CLIENT_ID"],
    client_secret=os.environ["JASMIN_CLIENT_SECRET"],
    scopes=[
        "jasmin.services.serviceroles.all:read",
        "jasmin.auth.users.all:read",
    ],
)
# Get all the roles which are active in a service.
service_roles = jservices.services_roles_list.sync(SERVICE_ID, client=client)
# Get their usernames.
accesses = []
for role in service_roles:
    accesses += role.accesses
# IMPROVEMENT: use a set — membership tests below were O(len(usernames))
# per user with a list, O(1) with a set.
usernames = {x.user.username for x in accesses}
# Get all the users
all_users = jusers.users_list.sync(client=client)
# Filter by service, dropping users without an email address.
emails = {x.email for x in all_users if (x.username in usernames) and x.email}
print(json.dumps(list(emails), indent=2, sort_keys=True))
| cedadev/jasmin-account-api-client | examples/emails_of_roleholders.py | emails_of_roleholders.py | py | 1,082 | python | en | code | 0 | github-code | 13 |
33451067427 | import requests
class Market:
    """A Polymarket market: its question text and its FPMM contract address."""
    def __init__(self, question, address):
        self.question = question
        self.address = address
    def __repr__(self):
        # IMPROVEMENT: readable repr for debugging/logging fetched markets
        return f"Market(question={self.question!r}, address={self.address!r})"
def market_checker():
    """Fetch the currently active, still-open Polymarket CLOB markets."""
    api_url = "https://clob.polymarket.com/markets"
    # SECURITY NOTE: verify=False disables TLS certificate validation and
    # permits man-in-the-middle attacks; kept only to preserve existing
    # behavior — TODO: remove once the certificate chain issue is resolved.
    response = requests.get(api_url, timeout=10, verify=False).json()
    return [
        Market(entry["question"], entry["fpmm"])
        for entry in response
        if entry['active'] is True and entry['closed'] is False
    ]
| udvarid/CoreWarDon | util/market_checker.py | market_checker.py | py | 480 | python | en | code | 0 | github-code | 13 |
44648342961 | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import math
import numpy as np
import datetime
from scipy import stats
def plot_cases_per_country(cummulative_data):
    """Plot cumulative case counts per country (log scale) and save the
    figure to ./rates_picture/Cumulative-Cases-over-Time.png.

    `cummulative_data` is expected to have a 'date' column plus one column
    per country name below.
    """
    countries = ['Canada', 'US', 'China', 'Taiwan*']
    # plot cumulative cases
    for country in countries:
        plt.plot(cummulative_data['date'], cummulative_data[country])
    plt.legend(countries)
    plt.title("Cumulative Cases over Time")
    plt.xlabel('Date')
    plt.ylabel('Cases')
    # Log scale keeps the smaller countries visible next to the US curve.
    plt.yscale('log')
    plt.savefig('./rates_picture/Cumulative-Cases-over-Time.png')
    # Clear the figure so later plots don't draw on top of this one.
    plt.clf()
def plot_cases_perday_country(cummulative_data):
    """Plot daily new cases over time per country and save the figure.

    The 'US per day' series is excluded from the plot (its scale dwarfs
    the other countries).
    """
    countries = ['Canada per day', 'US per day', 'China per day', 'Taiwan* per day']
    # Only series that are actually drawn may appear in the legend: pyplot
    # assigns legend labels to lines in plotting order, so the previous code
    # (which skipped 'US per day' but still listed it) attached the wrong
    # labels to the remaining lines.
    plotted = [country for country in countries if country != 'US per day']
    for country in plotted:
        plt.plot(cummulative_data['date'], cummulative_data[country])
    plt.legend(plotted)
    # The title and output file previously said "Cumulative Cases" and
    # overwrote the figure written by plot_cases_per_country.
    plt.title("New Cases per Day over Time")
    plt.xlabel('Date')
    plt.ylabel('Cases')
    plt.savefig('./rates_picture/New-Cases-per-Day-over-Time.png')
    plt.clf()
def conv_datetime(date_ranges):
    """Convert [start, end] pairs of 'YYYY-MM-DD' strings to datetime pairs.

    The previous parameter was named ``list``, shadowing the builtin; the
    only call site passes it positionally, so the rename is safe.

    Parameters
    ----------
    date_ranges : list of [str, str]
        Each element holds two ISO-formatted date strings.

    Returns
    -------
    list of [datetime, datetime]
    """
    return [
        [datetime.datetime.strptime(pair[0], '%Y-%m-%d'),
         datetime.datetime.strptime(pair[1], '%Y-%m-%d')]
        for pair in date_ranges
    ]
def four_bargraphs_range(c_data):
    """Plot per-country daily-case subplots with reopening-date markers,
    then compare day-over-day growth rates before/during closure with a
    Wilcoxon signed-rank test and box plots.

    NOTE(review): `range_openings` has only 3 ranges for 4 countries, so
    the 4th subplot (Taiwan*) always hits IndexError and is skipped —
    confirm whether that is intentional.
    """
    #countries = ['Canada per day', 'US per day', 'China per day', 'Taiwan* per day']
    countries = ['Canada', 'US', 'China', 'Taiwan*']
    latest_date = ['2020-08-10','2020-08-10']
    # [closure start, reopening date] per country (Canada, US, China).
    range_openings = [['2020-04-1','2020-05-25'],['2020-04-3','2020-06-17'],['2020-01-06','2020-04-08']]
    range_date = conv_datetime(range_openings)
    fig, axs = plt.subplots(nrows=2, ncols=2, constrained_layout=True)
    i = 0
    # Plot Cases per day
    for ax in axs.flat:
        try:
            c_str = countries[i] + ' per day'
            ax.plot(c_data['date'],c_data[c_str])
            ax.set_title(countries[i] + " New Cases per Day")
            ax.set_ylabel("New Cases")
            # NOTE(review): `max` shadows the builtin within this loop.
            max = c_data[c_str].max()
            # Red vertical lines mark the closure / reopening boundaries.
            ax.vlines(range_date[i][0], 0, max, colors='r')
            ax.vlines(range_date[i][1], 0, max, colors='r')
        except IndexError:
            break
        i += 1
    plt.savefig('./rates_picture/Cases-per-day-over-Time.png')
    plt.clf()

    # Find instataneous slope per day
    # slope = (today - yesterday) / yesterday; NaN/inf (division by zero on
    # zero-case days) are normalised to 0.
    c_data['Canada_slope'] = c_data['Canada per day'].shift(1,fill_value=0)
    c_data['Canada_slope'] = (c_data['Canada per day']-c_data['Canada_slope'])/c_data['Canada_slope']
    c_data.loc[(c_data.Canada_slope.isnull()) | (c_data.Canada_slope == np.inf), 'Canada_slope']=0
    c_data['US_slope'] = c_data['US per day'].shift(1, fill_value=0)
    c_data['US_slope'] = (c_data['US per day'] - c_data['US_slope']) / c_data['US_slope']
    c_data.loc[(c_data.US_slope.isnull()) | (c_data.US_slope == np.inf), 'US_slope'] = 0
    c_data['China_slope'] = c_data['China per day'].shift(1, fill_value=0)
    c_data['China_slope'] = (c_data['China per day'] - c_data['China_slope']) / c_data['China_slope']
    c_data.loc[(c_data.China_slope.isnull()) | (c_data.China_slope == np.inf), 'China_slope'] = 0
    c_data['Taiwan_slope'] = c_data['Taiwan* per day'].shift(1, fill_value=0)
    c_data['Taiwan_slope'] = (c_data['Taiwan* per day'] - c_data['Taiwan_slope']) / c_data['Taiwan_slope']
    c_data.loc[(c_data.Taiwan_slope.isnull()) | (c_data.Taiwan_slope == np.inf), 'Taiwan_slope'] = 0
    # Drop the first row: its slope is meaningless (no previous day).
    c_data.drop(c_data.head(1).index, inplace=True)
    #create 4 barplots
    fig, axs = plt.subplots(nrows=2, ncols=2, constrained_layout=True)
    i = 0
    for ax in axs.flat:
        try:
            # NOTE(review): for i=3 this builds "Taiwan*_slope" while the
            # column above is "Taiwan_slope"; unreachable in practice since
            # range_date[3] raises IndexError first — confirm.
            c_str = countries[i] + "_slope"
            rename_str_c = countries[i]+"_slope_closed"
            rename_str_o = countries[i] + "_slope_opened"
            # Rows inside the closure window vs rows strictly before it.
            country_iso = c_data[(c_data['date'] > range_date[i][0]) & (c_data['date'] < range_date[i][1])][['date',c_str]].rename(columns={c_str:rename_str_c})
            country_not_iso = c_data[c_data['date'] < range_date[i][0]][['date', c_str]].rename(columns={c_str: rename_str_o})
            c_iso = country_iso[rename_str_c].to_list()
            n_iso = country_not_iso[rename_str_o].to_list()
            tot_size_c = len(c_iso)
            tot_size_o = len(n_iso)
            # Wilcoxon is a paired test: truncate to the shorter sample.
            if tot_size_c > tot_size_o:
                c_iso = c_iso[:tot_size_o]
            else:
                n_iso = n_iso[:tot_size_c]
            print("p-value ",countries[i],stats.wilcoxon(c_iso,n_iso).pvalue)
            # Long format for seaborn's boxplot, one box per variable.
            country_iso = pd.melt(country_iso)
            country_not_iso = pd.melt(country_not_iso)
            country_data = pd.concat([country_iso, country_not_iso])
            country_data = country_data[country_data['variable'] != 'date']
            ax = sns.boxplot(x="value", y="variable", data=country_data,ax=ax)
        except IndexError:
            break
        except ValueError:
            break
        i += 1
    plt.clf()
def main():
    """Entry point: load the cleaned dataset and produce all figures."""
    # Seaborn styling applies globally to all subsequent matplotlib plots.
    sns.set()
    cummulative_data = pd.read_csv('cleaned-data.csv',parse_dates=['date'])
    plot_cases_per_country(cummulative_data)
    plot_cases_perday_country(cummulative_data)
    four_bargraphs_range(cummulative_data)
# Script entry point.
if __name__ == '__main__':
    main()
11628913075 | #!/usr/bin/env python
import numpy as np
import pytest
from deap import tools
from olympus import Observations, ParameterVector
from olympus.planners import Genetic
# use parametrize to test multiple configurations of the planner
@pytest.mark.parametrize(
    "pop_size, cx_prob, mut_prob, mate_args, mutate_args, select_args",
    [
        # Each tuple is one GA configuration: population size, crossover
        # probability, mutation probability, then DEAP operator kwargs for
        # mate / mutate / select.
        (
            10,
            0.5,
            0.3,
            {"function": tools.cxTwoPoint},
            {
                "function": tools.mutGaussian,
                "mu": 0,
                "sigma": 0.2,
                "indpb": 0.2,
            },
            {"function": tools.selTournament, "tournsize": 3},
        ),
        (
            15,
            0.2,
            0.8,
            {"function": tools.cxOnePoint},
            {"function": tools.mutShuffleIndexes, "indpb": 0.2},
            {"function": tools.selRoulette, "k": 5},
        ),
        (
            12,
            0.6,
            0.1,
            {"function": tools.cxUniform},
            {"function": tools.mutFlipBit, "indpb": 0.2},
            {"function": tools.selRandom, "k": 6},
        ),
        (
            16,
            0.4,
            0.2,
            {"function": tools.cxSimulatedBinary, "eta": 20},
            {
                "function": tools.mutGaussian,
                "mu": 0,
                "sigma": 0.2,
                "indpb": 0.5,
            },
            {"function": tools.selBest, "k": 4},
        ),
    ],
)
def test_planner_ask_tell(
    two_param_space,
    pop_size,
    cx_prob,
    mut_prob,
    mate_args,
    mutate_args,
    select_args,
):
    """One ask/tell round should succeed for every GA configuration.

    `two_param_space` is a fixture (presumably defined in conftest.py —
    confirm) providing a two-parameter search space.
    """
    planner = Genetic(
        pop_size=pop_size,
        cx_prob=cx_prob,
        mut_prob=mut_prob,
        mate_args=mate_args,
        mutate_args=mutate_args,
        select_args=select_args,
    )
    planner.set_param_space(param_space=two_param_space)
    param = planner.ask()
    # Report a dummy objective value back to the planner.
    value = ParameterVector().from_dict({"objective": 0.0})
    obs = Observations()
    obs.add_observation(param, value)
    planner.tell(observations=obs)
planner.tell(observations=obs)
def test_generating_new_offsprings(two_param_space):
    """The planner keeps proposing new parameters as observations accumulate
    (several generations of offspring for a pop_size-4 GA)."""
    planner = Genetic(pop_size=4)
    planner.set_param_space(param_space=two_param_space)

    obs = Observations()
    for _ in range(10):
        proposal = planner.recommend(observations=obs)
        # Sphere objective: sum of squared parameter values.
        objective = np.sum(proposal.to_array() ** 2)
        obs.add_observation(proposal, ParameterVector(dict={"objective": objective}))
def test_resetting_planner(two_param_space):
    """The planner should run a fresh campaign after reset().

    The original duplicated the campaign loop verbatim; it is factored
    into a local helper so the two runs cannot drift apart.
    """

    def run_campaign(planner, n_iter=5):
        # Drive the planner for n_iter recommend/observe rounds on the
        # sphere objective (sum of squared parameters).
        obs = Observations()
        for _ in range(n_iter):
            param = planner.recommend(observations=obs)
            obj = np.sum(param.to_array() ** 2)
            obs.add_observation(param, ParameterVector(dict={"objective": obj}))

    planner = Genetic(pop_size=3)
    planner.set_param_space(param_space=two_param_space)
    # run once
    run_campaign(planner)
    # run again from scratch
    planner.reset()
    run_campaign(planner)
| aspuru-guzik-group/olympus | tests/test_planners/test_planner_genetic.py | test_planner_genetic.py | py | 3,149 | python | en | code | 70 | github-code | 13 |
5943374798 | # -*- coding: utf-8 -*-
import os
import sqlite3 as sql
from flask import Flask
from flask import request
from flask import send_file, render_template, redirect
from urllib.parse import unquote_plus
app = Flask(__name__)
@app.route('/')
def redirige():
    """Send visitors from the site root to the main map page."""
    return redirect("/index")
@app.route('/index', methods=['GET', 'POST'])
def index():
    """Main map page.

    GET  : render the map for the zone given by the ``zone`` query argument
           (default "Versailles"), with its markers and scores.
    POST : additionally insert the submitted marker into the database and
           update the zone's scores before rendering.

    All SQL statements now use parameter binding: the previous version
    built queries by concatenating user input and was vulnerable to SQL
    injection.  The connection is also closed before returning (it was
    previously leaked on every request).
    """
    # Path to the db located next to this file, whatever the cwd is.
    dir_path = os.path.dirname(os.path.realpath(__file__))
    con = sql.connect(dir_path + '/mercimax.db')

    # Make the cursor return query results as a list of dicts
    # (one dict per row, keyed by column name).
    def dict_factory(cursor, row):
        return {col[0]: row[idx] for idx, col in enumerate(cursor.description)}

    con.row_factory = dict_factory
    cur = con.cursor()

    # unquote_plus decodes a url argument (e.g. %27 becomes an apostrophe).
    zone = unquote_plus(request.args.get("zone") or "Versailles")

    # Display information of the selected zone.
    cur.execute('select * from zones where nom = ?', (zone,))
    zoneInfo = cur.fetchall()[0]
    zoneId = zoneInfo['id']
    zoneLocation = {"longitude": zoneInfo["longitude"],
                    "latitude": zoneInfo["latitude"],
                    "zoom": zoneInfo["zoom"]}

    if request.method == 'POST':
        # Data of the form submitted with the POST request.
        markerTitle = request.form.get("markerTitle")
        markerDescription = request.form.get("markerDescription")
        markerLongitude = request.form.get("markerLongitude")
        markerLatitude = request.form.get("markerLatitude")
        markerIcon = request.form.get("markerIcon")
        print(markerTitle, markerDescription, markerLongitude, markerLatitude, markerIcon)

        # Marker description is stored as a small HTML snippet.
        description = '<strong>' + markerTitle + '</strong><p>' + markerDescription + '</p>'
        cur.execute(
            'INSERT INTO "markers" ("description", "icon", "longitude", "latitude", "zone") '
            'VALUES (?, ?, ?, ?, ?)',
            (description, markerIcon, markerLongitude, markerLatitude, zoneId))

        # Update the zone scores for every initiative matching this icon.
        cur.execute("select id,coef from initiative where nom = ?", (markerIcon,))
        initiatives = cur.fetchall()
        print(initiatives)
        for initiative in initiatives:
            # Column names cannot be bound as parameters, but the id comes
            # from our own `initiative` table, not from user input.
            score_col = "score_" + initiative["id"][:3]
            delta = initiative["coef"] * 10
            cur.execute("update zones set " + score_col + " = " + score_col +
                        " + ? where id = ?", (delta, zoneId))
            cur.execute("update zones set score_total = score_total + ? where id = ?",
                        (delta, zoneId))

        # Persist the changes: without commit() the modifications would only
        # be visible to this request and discarded on disconnect.
        con.commit()

    # ---- markers: reshape rows into the GeoJSON structure mapbox expects ---
    cur.execute("select * from markers;")
    markers = cur.fetchall()
    features = []
    for marker in markers:
        features.append({
            "type": "Feature",
            "properties": {
                "description": marker["description"],
                "icon": marker["icon"],
            },
            "geometry": {
                "type": "Point",
                "coordinates": [marker["longitude"], marker["latitude"]],
            },
        })
    markersData = {"type": "FeatureCollection", "features": features}

    # Extra icons mapbox must load for marker display.
    image_names = os.listdir(dir_path + '/icon_folder/')
    image_names = ['/icons/' + image_name for image_name in image_names]

    # ---- scores of the zone selected through the url argument --------------
    cur.execute('select * from zones where id = ?', (zoneId,))
    zoneData = cur.fetchall()
    score_total = zoneData[0]["score_total"]
    score_env = zoneData[0]["score_env"]
    score_soc = zoneData[0]["score_soc"]
    score_eco = zoneData[0]["score_eco"]

    page = render_template('index.html', markersData=markersData, icons=image_names,
                           score_total=score_total, score_env=score_env,
                           score_soc=score_soc, score_eco=score_eco,
                           zone=zone, zoneLocation=zoneLocation)
    con.close()
    return page
@app.route('/icons/<icon>')
def icon_display(icon):
    """Serve a marker icon image by file name.

    The requested name is resolved inside icon_folder and the real path is
    checked to stay within it, guarding against path traversal (e.g. names
    containing "..").  Returns None (HTTP error) when the file is missing.
    """
    # dir_path is the path of this file's parent directory, whatever the
    # execution environment.
    dir_path = os.path.dirname(os.path.realpath(__file__))
    icon_dir = os.path.realpath(os.path.join(dir_path, 'icon_folder'))
    target = os.path.realpath(os.path.join(icon_dir, icon))
    if target.startswith(icon_dir + os.sep) and os.path.exists(target):
        return send_file(target)
@app.route('/js/<script>')
def send_script(script):
    """Serve a JavaScript file from the static/js directory."""
    # Path of this file's parent directory, valid in any execution environment.
    base_dir = os.path.dirname(os.path.realpath(__file__))
    return send_file(base_dir + '/static/js/' + script)
@app.route('/css/<file>')
def send_css(file):
    """Serve a CSS file from the static/css directory."""
    # Path of this file's parent directory, valid in any execution environment.
    base_dir = os.path.dirname(os.path.realpath(__file__))
    return send_file(base_dir + '/static/css/' + file)
@app.route('/favicon.ico')
def send_icon():
    """Serve the site favicon.

    Browsers request /favicon.ico automatically to pick the tab icon,
    so this route is hit on virtually every visit.
    """
    # Path of this file's parent directory, valid in any execution environment.
    base_dir = os.path.dirname(os.path.realpath(__file__))
    return send_file(base_dir + '/mercimax.ico')
@app.route('/test_page')
def test_function():
    """Render the HTML page used for display experiments."""
    return render_template('test_html.html')
if __name__ == "__main__":
port = 5000
app.run(debug=True, host='127.0.0.1', port=port)
| AugustinCobena/Map-MerciMax | app.py | app.py | py | 7,518 | python | fr | code | 0 | github-code | 13 |
9891945104 | #
# Converted to Python by Eric Shen <ericshen@berkeley.edu>
# Sobel edge detector recognizer
#
import cv2
import numpy as np
import os
import argparse
import logging
log_format = '%(created)f:%(levelname)s:%(message)s'
logging.basicConfig(level=logging.DEBUG, format=log_format) # log to file filename='example.log',
TAG = "sobel-recog:"
def main(capture, sandbox_send, sandbox_recv, files):
    """Read frames from `capture`, compute a Sobel gradient image for each
    frame and send (frame_number, gradient) pairs over `sandbox_send`.

    `sandbox_recv` and `files` are accepted but unused in this body —
    presumably kept for interface compatibility with other recognizers;
    confirm against the caller.
    """
    logging.debug(TAG + "inside main")
    scale = 1;
    delta = 0;
    # 64-bit float output so gradient sign/precision is preserved before
    # taking absolute values.
    ddepth = cv2.CV_64F
    frame_num = 0
    while True:
        logging.debug(TAG + "before reading frame")
        retval, frame = capture.read()
        if not retval:
            break # end of video
        logging.debug(TAG + "after reading frame")
        frame_num += 1
        # Blur first to suppress noise before differentiation.
        img_blur = cv2.GaussianBlur(frame, (3, 3), 0, sigmaY=0)
        # Converts it to grayscale
        img_gray = cv2.cvtColor(img_blur, cv2.COLOR_RGB2GRAY)
        # gradient x
        grad_x = cv2.Sobel(img_gray, ddepth, dx=1, dy=0, ksize=3, scale=scale, delta=delta)
        abs_grad_x = cv2.convertScaleAbs(grad_x)
        # gradient Y
        grad_y = cv2.Sobel(img_gray, ddepth, dx=0, dy=1, ksize=3, scale=scale, delta=delta)
        abs_grad_y = cv2.convertScaleAbs(grad_y)
        # Total Gradient (approximate): equal-weight sum of |dx| and |dy|.
        grad = cv2.addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0)
        logging.debug(TAG + "sending obj:num %d" % frame_num)
        sandbox_send.send_pyobj((frame_num, grad))
| christhompson/recognizers-arch | apps/darkly/sobel/recog.py | recog.py | py | 1,460 | python | en | code | 1 | github-code | 13 |
71350187859 | from __future__ import annotations
from abc import ABCMeta
from abc import abstractmethod
from typing import List, Set, Dict, Union, Any, Optional
import numpy as np
import pandas as pd
import peperoncino as pp
class ColumnsChangedError(Exception):
    """Raised by BaseProcessing.process when a step with is_fixed_columns
    set adds or drops dataframe columns."""
    pass
class RowsChangedError(Exception):
    """Raised by BaseProcessing.process when a step with is_fixed_rows
    set adds or drops dataframe rows."""
    pass
class BaseProcessing(metaclass=ABCMeta):
    """
    Abstract base class for data processing steps.

    Subclasses implement :meth:`_process`; :meth:`process` wraps it with
    buffered logging and with checks that columns/rows were not
    unexpectedly added or dropped.
    """

    def __init__(self, is_fixed_columns: bool = True, is_fixed_rows: bool = True):
        # When True, process() raises if the column/row sets change.
        self._is_fixed_columns = is_fixed_columns
        self._is_fixed_rows = is_fixed_rows
        # Buffered (message, level) pairs, flushed after each process() call.
        self._logs: List[Any] = []
        # Optional subset of dataframe indices this step applies to; see only().
        self._indices: Optional[List[int]] = None

    @property
    def is_fixed_columns(self) -> bool:
        return self._is_fixed_columns

    @property
    def is_fixed_rows(self) -> bool:
        return self._is_fixed_rows

    def _logging(self, msg: str, level: str = "info") -> None:
        """Buffer a log message; it is emitted later by _flush_logs()."""
        if level not in ["fatal", "error", "warning", "info", "debug"]:
            raise ValueError(
                "`level` should be one of fatal, error, warning, info and debug"
            )
        self._logs.append((msg, level))

    def _flush_logs(self) -> None:
        """Emit and clear all buffered log messages via pp.logger."""
        for msg, level in self._logs:
            log_fn = getattr(pp.logger, level)
            log_fn(msg)
        self._logs.clear()

    def process(self, dfs: List[pd.DataFrame]) -> List[pd.DataFrame]:
        """Processing dataframes

        Runs :meth:`_process` (restricted by :meth:`only` if set), logs a
        summary, and verifies column/row invariants.

        Parameters
        ----------
        dfs : List[pd.DataFrame]

        Returns
        -------
        List[pd.DataFrame]

        Raises
        ------
        ColumnsChangedError
            If `is_fixed_columns` and a dataframe's column set changed.
        RowsChangedError
            If `is_fixed_rows` and a dataframe's index set changed.
        """
        # NOTE: asserts are stripped under `python -O`; kept for
        # compatibility with existing behaviour.
        assert isinstance(dfs, list)
        for df in dfs:
            assert isinstance(df, pd.DataFrame)
        self._logging(f"Applying: {self.__class__.__name__}")
        # Remember columns/rows so changes can be detected afterwards.
        orig_cols = [df.columns for df in dfs]
        orig_rows = [df.index for df in dfs]
        dfs = self._process_with_limitation(dfs)
        cols = [df.columns for df in dfs]
        rows = [df.index for df in dfs]
        self._logging_summary(cols, rows, orig_cols, orig_rows)
        self._flush_logs()
        for i, (col, orig_col) in enumerate(zip(cols, orig_cols)):
            col, orig_col = set(col), set(orig_col)
            # Equal sets iff union and intersection have the same size.
            if self.is_fixed_columns and len(col | orig_col) != len(col & orig_col):
                raise ColumnsChangedError(
                    f"Number of columns are changed in df[{i}]."
                    f"Please refer to logs by setting proc.set_log_level(logging.DEBUG)"
                )
        for i, (row, orig_row) in enumerate(zip(rows, orig_rows)):
            if self.is_fixed_rows and len(row | orig_row) != len(row & orig_row):
                raise RowsChangedError(
                    f"Number of rows are changed in df[{i}]."
                    f"Please refer to logs by setting proc.set_log_level(logging.DEBUG)"
                )
        return dfs

    @abstractmethod
    def _process(self, dfs: List[pd.DataFrame]) -> List[pd.DataFrame]:
        """Abstract method of processing

        Parameters
        ----------
        dfs : List[pd.DataFrame]

        Returns
        -------
        List[pd.DataFrame]
        """
        pass

    def _process_with_limitation(self, dfs: List[pd.DataFrame]) -> List[pd.DataFrame]:
        """Apply _process to the dataframes selected by only() (all by default)."""
        indices = self._indices
        if indices is None:
            indices = list(range(len(dfs)))
        _dfs = [df for i, df in enumerate(dfs) if i in indices]
        _dfs = self._process(_dfs)
        # Splice the processed frames back into their original positions.
        for i in range(len(dfs)):
            if i in indices:
                dfs[i] = _dfs.pop(0)
        return dfs

    def _logging_summary(
        self,
        cols: List[pd.Index],
        rows: List[pd.Index],
        orig_cols: List[pd.Index],
        orig_rows: List[pd.Index],
    ) -> None:
        """Buffer a per-dataframe summary of column/row changes."""
        for i, (col, row, orig_col, orig_row) in enumerate(
            zip(cols, rows, orig_cols, orig_rows)
        ):
            added_cols = set(col) - set(orig_col)
            dropped_cols = set(orig_col) - set(col)
            self._logging(f"df[{i}]")
            self._logging(f"#cols: {len(orig_col)} ---> {len(col)}")
            self._logging(f"+cols: {added_cols}", level="debug")
            self._logging(f"-cols: {dropped_cols}", level="debug")
            self._logging(f"#rows: {len(orig_row)} ---> {len(row)}")

    def only(self, indices: Union[int, List[int]]) -> BaseProcessing:
        """Limit the scope of processing

        Parameters
        ----------
        indices : Union[int, List[int]]
            Index (or list of indices) of the dataframes to process.

        Returns
        -------
        BaseProcessing
            self
        """
        # BUGFIX: the previous condition
        # `not isinstance(indices, int) or isinstance(indices, list)`
        # raised ValueError for every list argument (documented as valid).
        if not isinstance(indices, (int, list)):
            raise ValueError("indices must be int or list of ints")
        if isinstance(indices, int):
            indices = [indices]
        self._indices = indices
        return self
class SeparatedProcessing(BaseProcessing):
    """
    Base class for steps that transform each dataframe independently.

    Subclasses implement :meth:`sep_process`; :meth:`_process` simply maps
    it over the input list.
    """

    def _process(self, dfs: List[pd.DataFrame]) -> List[pd.DataFrame]:
        return list(map(self.sep_process, dfs))

    @abstractmethod
    def sep_process(self, df: pd.DataFrame) -> pd.DataFrame:
        """Transform a single dataframe.

        Parameters
        ----------
        df : pd.DataFrame

        Returns
        -------
        pd.DataFrame
            The processed dataframe.
        """
        pass
class MergedProcessing(BaseProcessing):
    """
    Concatenate all dataframes, apply :meth:`simul_process` once on the
    merged frame, then split the result back into the original dataframes
    (restoring their indices and integer dtypes).
    """

    def _process(self, dfs: List[pd.DataFrame]) -> List[pd.DataFrame]:
        orig_dtypes = self._gather_dtypes(dfs)
        # Integer columns become nullable (Int*) so NaN padding is possible.
        dfs = [self._make_int_cols_nullable(df) for df in dfs]
        dtypes = self._gather_dtypes(dfs)
        df_indices = [df.index for df in dfs]
        # Per-dataframe list of columns that were added for the merge and
        # must be dropped again when splitting.
        xcols: List[List[str]] = []
        # set cols with valid dtype
        for i, df in enumerate(dfs):
            xcol = list(set(dtypes.keys()) - set(df.columns))
            df = df.assign(**{c: np.nan for c in xcol})
            df = df.astype(dtypes)
            dfs[i] = df
            xcols.append(xcol)
        # preserve dataframe ids and merge dataframes
        dfs = [df.assign(__DFID__=i) for i, df in enumerate(dfs)]
        merged_df = pd.concat(dfs, axis=0, sort=False).reset_index(drop=True)
        indices = [merged_df.query("__DFID__ == @i").index for i in range(len(dfs))]
        merged_df = merged_df.drop(columns="__DFID__")
        merged_df = self.simul_process(merged_df)
        # Split the merged frame back and restore each original index.
        for i, (index, df_index, xcol) in enumerate(zip(indices, df_indices, xcols)):
            df = merged_df.loc[index]
            df = df.drop(columns=xcol)
            df.index = df_index
            dfs[i] = df
        # Restore original dtype (only integers)
        restore_int = {}
        for c, dtype in dtypes.items():
            if not dtype.startswith("Int"):
                continue
            restore_int[c] = orig_dtypes[c]
        for i, df in enumerate(dfs):
            _dtypes = {k: v for k, v in restore_int.items() if k in df.columns}
            if len(_dtypes) > 0:
                dfs[i] = df.astype(_dtypes)
        return dfs

    def _gather_dtypes(self, dfs: List[pd.DataFrame]) -> Dict[str, str]:
        """Collect a column -> dtype-string map over all dataframes.

        Raises ValueError when the same column has different dtypes in
        different dataframes.
        """
        col_set: Set[str] = set()
        for df in dfs:
            col_set |= set(df.columns)
        dtypes: Dict[str, str] = {}
        for c in col_set:
            dtype_set: Set[str] = set()
            for df in dfs:
                if c not in df.columns:
                    continue
                dtype_set.add(str(df.dtypes[c]))
            if len(dtype_set) >= 2:
                # NOTE(review): the two f-string fragments concatenate
                # without a space ("dtypesacross") — likely unintended.
                raise ValueError(
                    f"Column {c} has different dtypes"
                    f"across given dataframes: {dtypes}"
                )
            dtypes[c] = dtype_set.pop()
        return dtypes

    # NOTE(review): name-mangled `__process` is never called from this
    # class and appears to be a legacy version of `_process` (it even
    # contains a stray debug print) — candidate for removal; confirm no
    # external use before deleting.
    def __process(self, dfs: List[pd.DataFrame]) -> List[pd.DataFrame]:
        # make integer columns nullable
        dfs = [self._make_int_cols_nullable(df) for df in dfs]
        # preserve original index
        orig_indices = [df.index for df in dfs]
        # assign id and concat
        dfs = [df.assign(__DFID__=i) for i, df in enumerate(dfs)]
        merged_df = pd.concat(dfs, axis=0, sort=False).reset_index(drop=True)
        # preserve which indices are belonging to which df
        indices = [merged_df.query("__DFID__ == @i").index for i in range(len(dfs))]
        merged_df = merged_df.drop(columns="__DFID__")
        print(merged_df.dtypes)
        dropped_cols = []
        for df in dfs:
            # columns added by merging operation will be dropped
            dropped_cols.append(set(merged_df.columns) - set(df.columns))
        merged_df = self.simul_process(merged_df)
        for i, (df, index, orig_index) in enumerate(zip(dfs, indices, orig_indices)):
            # columns dropped in processing will be dropped too
            dcols = set(df.columns) - set(merged_df.columns)
            dcols |= dropped_cols[i]
            cols = [c for c in merged_df.columns if c not in dcols]
            df = merged_df.loc[index].get(cols)
            df.index = orig_index
            dfs[i] = df
        return dfs

    def _make_int_cols_nullable(self, df: pd.DataFrame) -> pd.DataFrame:
        """Return `df` with plain int columns cast to pandas nullable Int types."""
        dtypes = df.dtypes.astype(str)
        # select int
        dtypes = dtypes[dtypes.str.startswith("int")]
        # capitalized(e.g. Int64) int type accepts NaN
        dtypes = dtypes.str.capitalize()
        return df.astype(dtypes)

    @abstractmethod
    def simul_process(self, df: pd.DataFrame) -> pd.DataFrame:
        """Processing for the merged dataframe

        Parameters
        ----------
        df : pd.DataFrame
            The concatenation of all input dataframes.

        Returns
        -------
        pd.DataFrame
        """
        pass
| cafeal/peperoncino | peperoncino/processing.py | processing.py | py | 9,825 | python | en | code | 2 | github-code | 13 |
73019818579 | '''
1. List all interface
Confirm the interface, save selection for next running
2. Start to listen data in the interface
3. Ping device in 5s(may be need configure)
Send ping command
4. Collect response
Return the mac address
'''
from typing import Dict
from app.bootstrap import Bootstrap
from app.decorator import (receive_args,platform_setup)
from app.device import collect_devices
from scapy.all import conf, resolve_iface, NetworkInterface
from terminal_layout import *
from terminal_layout.extensions.choice import *
import signal
import os
import json
import time
def kill_app(signal_int, call_back):
    '''Kill the main thread by sending SIGTERM to our own process.

    Matches the (signum, frame) signature required of signal handlers —
    presumably installed via signal.signal elsewhere; confirm.
    '''
    os.kill(os.getpid(), signal.SIGTERM)
def select_ethernet_interface():
    '''Return the scapy NetworkInterface to use for sniffing.

    The selection is persisted in ``config.json`` (in the cwd): a
    previously saved interface is reused when still present, otherwise
    the user is prompted to pick one and the choice is saved.

    Returns None when the configuration cannot be read or written, or
    when the user makes no choice.
    '''
    app_conf = {}
    config_file_path = os.path.join(os.getcwd(), 'config.json')
    try:
        with open(config_file_path) as json_data:
            app_conf = json.load(json_data)
    except Exception:
        # Was a bare `except:`; Exception keeps the best-effort behaviour
        # without swallowing KeyboardInterrupt/SystemExit.
        print('Read configuration failed')
        return None

    # Descriptions of every interface that actually has a MAC address.
    description_list = [
        conf.ifaces[item].description for item in conf.ifaces if conf.ifaces[item].mac]
    if 'local' in app_conf:
        # The stored 'name' is the interface *description*; reuse it if
        # that interface is still available and a MAC was recorded.
        saved_name = app_conf['local']['name']
        if saved_name in description_list and app_conf['local']['mac']:
            return resolve_iface(saved_name)

    ethernet_list = [
        conf.ifaces[item].name for item in conf.ifaces if conf.ifaces[item].mac]
    c = Choice('Which ehternet interface you are using?',
               ethernet_list,
               icon_style=StringStyle(fore=Fore.green),
               selected_style=StringStyle(fore=Fore.green), default_index=0)
    choice = c.get_choice()
    if not choice:
        return None

    _, value = choice
    network_interface = resolve_iface(value)
    # Save the selection to config.json for the next run.
    app_conf['local'] = {
        'name': network_interface.description,
        'mac': network_interface.mac
    }
    try:
        with open(config_file_path, 'w') as outfile:
            json.dump(app_conf, outfile, indent=4, ensure_ascii=False)
        return network_interface
    except Exception:
        print('Write configuration failed')
        return None
def detect_devices(iface: NetworkInterface,args):
    '''Scan `iface` for devices and return them.

    With args.keep_detect set, rescan unattended every 15s until at least
    one device appears.  Otherwise, show how many devices were found and
    ask the user whether to start logging or rescan.
    '''
    step_next = False
    devices = []
    while not step_next:
        devices = collect_devices(iface)
        if args.keep_detect:
            # Unattended mode: poll until at least one device shows up.
            if len(devices) == 0:
                time.sleep(15)
                continue
            return devices
        c = Choice('We have find {0} device(s), start to log?'.format(len(devices)),
                   ['Yes', 'No'],
                   icon_style=StringStyle(fore=Fore.green),
                   selected_style=StringStyle(fore=Fore.green),
                   default_index=0)
        choice = c.get_choice()
        if choice:
            index, _ = choice
            if index == 0:
                step_next = True
            else:
                # User picked 'No': loop again and rescan.
                step_next = False
                print('Rescaning...')
    return devices
def prepare(args):
    '''Handle the --reset flag: drop the saved 'local' interface entry
    from config.json so the next run prompts for an interface again.

    Does nothing unless ``args.reset`` is truthy.  Always returns None;
    read/write failures are reported on stdout, as before.
    '''
    if not args.reset:
        return None
    config_file_path = os.path.join(os.getcwd(), 'config.json')
    try:
        with open(config_file_path) as json_data:
            app_conf = json.load(json_data)
    except Exception:
        # Was a bare `except:`; keep the best-effort behaviour.
        print('Read configuration failed')
        return None
    if 'local' not in app_conf:
        return None
    del app_conf['local']
    try:
        with open(config_file_path, 'w') as outfile:
            json.dump(app_conf, outfile, indent=4, ensure_ascii=False)
    except Exception:
        print('Write configuration failed')
    return None
@platform_setup
@receive_args
def main(**kwargs):
    """Program entry point: handle --reset, pick an interface, detect
    devices, then start logging.

    `kwargs['options']` is presumably the parsed CLI namespace injected
    by @receive_args — confirm against the decorator.
    """
    prepare(kwargs['options'])
    iface = select_ethernet_interface()
    devices = detect_devices(iface,kwargs['options'])
    Bootstrap().start(iface, devices)
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C exits quietly.
        pass
    # Bootstrap().start()
| yiweisong/ins401-log | main.py | main.py | py | 4,209 | python | en | code | 0 | github-code | 13 |
9520324484 | import json
import logging
import os
import re
import subprocess
import sys
from collections import defaultdict
from datetime import datetime, timedelta
import attr
import click
from click_loglevel import LogLevel
from tools.libs.net_utils import ip_if_not_local
from tools.libs.parse_args import LoggingArgumentParser
from tools.libs.text_utils import CompareContents
SERVICE_CONFIGS = {
'dns': '/etc/bind/named.conf',
'keepalived': '/etc/keepalived/',
'flask': '/home/nicola/dev/flask/canepa/apps/main.py',
'openvpn': '/etc/openvpn/canne.conf',
'smtp': '/etc/postfix/main.cf',
'wifi': '/etc/hostapd.conf',
'www': '/etc/apache2/apache2.conf',
}
CACHE_DIR = os.path.join(os.environ['HOME'], '.service_map')
def decode_first_line(filename: str) -> dict:
    """Decode the JSON payload embedded in the first line of `filename`.

    The first line is expected to look like ``# {...}``; the leading '#'
    and surrounding whitespace are stripped before parsing.

    Raises json.decoder.JSONDecodeError (re-raised after reporting) when
    the line is not valid JSON.
    """
    with open(filename, 'r') as f:
        first_line = f.readline().lstrip('#').strip()
        try:
            return json.loads(first_line)
        except json.decoder.JSONDecodeError as e:
            # The previous message printed a literal "(unknown)" placeholder
            # and the *rest* of the file; report the file and the offending
            # line instead.
            print(f'Error while decoding {filename} ({first_line}): {e}')
            raise
def error(text: str) -> str:
    """Wrap `text` in the error marker used by ServiceCatalog listings."""
    return 'Error: ' + text
@attr.s(repr=False, hash=True)
class ServiceConfig(dict):
    '''
    This is a dict with a `host` property to identify which host contains the configuration in the dict.
    The keys of the dict are filenames, while the values are the contents of the files.
    A cache is maintained in `${HOME}/.service_map/<hostname>.json`
    '''
    host: str = attr.ib()
    service_name: str = attr.ib()
    log: logging.Logger = attr.ib(default=logging.getLogger(__name__))

    def __getitem__(self, key):
        # Lazily fetch and memoize the file contents on first access.
        try:
            return super().__getitem__(key)
        except Exception:
            self[key] = self._retrieve_config(key)
            return super().__getitem__(key)

    def cache_expired(self, filename, cache_life=timedelta(days=1)) -> bool:
        """True when `filename` is missing or older than `cache_life`."""
        if os.path.exists(filename):
            curr_time = datetime.now()
            cache_time = os.stat(filename).st_mtime
            return curr_time > datetime.fromtimestamp(cache_time) + cache_life
        return True

    def _retrieve_config(self, filename):
        """Fetch `filename` from the host (or locally), backed by the
        on-disk JSON cache in ~/.service_map/<host>.json."""
        if not os.path.isdir(CACHE_DIR):
            os.mkdir(CACHE_DIR)
        cache_file_name = os.path.join(CACHE_DIR, f'{self.host}.json')
        cache_data = None
        all_data = {}
        self.log.debug(f'Checking if {self.host} is local')
        # '*' prefixes mark backup hosts in listings; strip them before
        # resolving the host.
        if self.host and ip_if_not_local(self.host.strip('*')):
            # Remote host: try the cache file first.
            try:
                with open(cache_file_name, 'r') as cache_file:
                    all_data = json.load(cache_file)
                cache_data = all_data[filename]
            except Exception as e:
                self.log.debug(f"Cache file {cache_file_name} does't contain useful data ({e})")
        else:
            # Local host: mark as such so remote_command runs locally.
            self.host = None
        if (cache_data is None) or self.cache_expired(filename):
            try:
                cache_data = remote_command(self.host.strip('*') if self.host else None, ['cat', filename])
                all_data[filename] = cache_data
                with open(cache_file_name, 'w') as cache_file:
                    json.dump(all_data, cache_file)
            except Exception as e:
                self.log.debug(f'Unable to read (unknown) from {self.host or "local"}: ({e})')
                cache_data = f'(unknown): {e}'
        return cache_data

    def __repr__(self):
        self.log.debug(f'repr {self.host}:{self.service_name}')
        # For single-file services, repr() is the contents of the service's
        # main config file (keepalived has a directory, hence the exclusion).
        if self.service_name != 'keepalived' and self.service_name in SERVICE_CONFIGS:
            return self[SERVICE_CONFIGS[self.service_name]]
        return super().__repr__()

    def __str__(self):
        self.log.debug(f'str {self.host}:{self.service_name}')
        return super().__str__()
class KAService(object):
    """A keepalived-managed service described by a single .conf file.

    The first line of the file is a JSON comment (see decode_first_line)
    whose 'vrrp' key lists the hosts running the service, master first.
    All properties are computed lazily and memoized.
    """

    def __init__(self, filename: str):
        self._filename = filename
        self._service_dict = None
        self._service_name = None
        # BUGFIX: _hosts was never initialised, so the `hosts` property
        # raised AttributeError on first access.
        self._hosts = None

    @property
    def filename(self):
        """Path of the keepalived configuration file."""
        return self._filename

    @property
    def hosts(self):
        """Hosts running this service, as listed in the vrrp entry."""
        if self._hosts is None:
            self._hosts = self.service_dict.get('vrrp', [])
        return self._hosts

    @property
    def service_dict(self):
        """Decoded JSON metadata from the file's first line."""
        if self._service_dict is None:
            self._service_dict = decode_first_line(self.filename)
        return self._service_dict

    @property
    def service_name(self):
        """Service name derived from the file name (without .conf)."""
        if self._service_name is None:
            # TODO: replace with vrrp service name from conf
            self._service_name = re.sub(r'\.conf$', '', self.filename)
        return self._service_name

    def __repr__(self):
        # BUGFIX: the previous debug print interpolated `self` inside
        # __repr__, recursing infinitely.  Backup hosts are prefixed with
        # one '*' per backup rank.
        repr_hosts = [f'{i * "*"}{h}' for i, h in enumerate(self.hosts)]
        return f'{self.service_name}: {", ".join(repr_hosts)}'
class ServiceCatalog(object):
    """Walks a directory of keepalived .conf files and builds two indexes:
    `services` (service name -> host list, master first) and `hosts`
    (host -> set of ServiceConfig).  Backup hosts are prefixed with '*'s;
    hosts whose configuration differs from the master's are wrapped in an
    error marker.
    """
    def __init__(self, ka_dir: str, log: logging.Logger):
        self.log = log
        # cache for the `ServiceConfig`s
        self.service_configs = dict()
        # {service: [host1, host2, ...]}
        self.services = {'keepalived': []}
        # {host: [service1, service2, ...]}
        self.hosts = defaultdict(set)
        for dirpath, dirnames, filenames in os.walk(ka_dir):
            for filename in filenames:
                # TODO: replace with vrrp service name from conf
                service_name = re.sub(r'\.conf$', '', filename)
                service_dict = decode_first_line(os.path.join(dirpath, filename))
                self.services[service_name] = service_dict['vrrp']
                for i, host in enumerate(service_dict['vrrp']):
                    # Prefix backup hosts with i '*'s (master gets none).
                    service_dict['vrrp'][i] = f"{i * '*'}{service_dict['vrrp'][i]}"
                    # Mark hosts whose config disagrees with the master's.
                    if self.config_differs(service_name, [service_dict['vrrp'][0], host]):
                        service_dict['vrrp'][i] = error(service_dict['vrrp'][i])
                # if self.config_differs(service_name):
                #     for i in range(len(service_dict['vrrp'])):
                #         pass
                self.services['keepalived'].extend(
                    [h for h in service_dict['vrrp'] if h not in self.services['keepalived']]
                )
                for host in service_dict['vrrp']:
                    self.hosts[host].add(ServiceConfig(host, 'keepalived'))
                    self.hosts[host].add(ServiceConfig(host, service_name))

    def file_differs(self, filename, hosts: list, service_name: str) -> bool:
        """True when `filename` content differs between hosts[0] (the
        reference) and any other host in `hosts`."""
        # re.sub(r'\.?(canne|)$', '.canne', host)
        for host in hosts:
            if host not in self.service_configs:
                self.log.debug(f'Adding {host} to cache')
                self.service_configs[host] = ServiceConfig(host, service_name)
        for host in hosts[1:]:
            if self.service_configs[host][filename] != self.service_configs[hosts[0]][filename]:
                self.log.debug(CompareContents(self.service_configs[host][filename], self.service_configs[hosts[0]][filename]))
                return True
        return False

    def config_differs(self, service_name, hosts: list = None) -> bool:
        """True when the service's config (single file or whole directory)
        differs across `hosts` (defaults to the service's vrrp host list)."""
        if hosts is None:
            hosts = self.services[service_name]['vrrp']
        try:
            service_config = SERVICE_CONFIGS[service_name]
            if service_config.endswith('/') or os.path.isdir(service_config):
                # Directory-based config (e.g. keepalived): compare every file.
                return any(
                    [
                        self.file_differs(os.path.join(dirpath, filename), hosts, service_name)
                        for dirpath, dirnames, filenames in os.walk(service_config)
                        for filename in filenames
                    ]
                )
            elif os.path.isfile(service_config):
                return self.file_differs(service_config, hosts, service_name)
        except Exception as e:
            self.log.debug(f'Exception while comparing configs on {hosts} for {service_name} (no config file defined?): {e}')
        return False
def remote_command(host: str, cmd: list):
    """Run *cmd* locally, or over ssh on *host* when host is non-empty.

    Returns the command's stdout decoded as UTF-8.
    """
    full_cmd = (['ssh', host] + cmd) if host else cmd
    output = subprocess.check_output(full_cmd)
    return output.decode('utf-8')
def parse_args(argv: list):
    """Parse CLI options with the project's LoggingArgumentParser.

    NOTE(review): appears unused — the click-based main() below handles the CLI.
    """
    p = LoggingArgumentParser()
    p.add_argument('--ka-dir', '-k', required=True)
    # --services and --hosts are mutually exclusive output groupings.
    g = p.add_mutually_exclusive_group()
    g.add_argument('--services', '-S', action='store_true')
    g.add_argument('--hosts', '-H', action='store_true')
    return p.parse_args(argv)
@click.command()
@click.option('--ka-dir', '-k', required=True)
@click.option('--hosts', '-H', is_flag=True, default=False, help='Group by host (default is by service)')
@click.option("-l", "--log-level", type=LogLevel(), default=logging.INFO)
def main(ka_dir: str, hosts: bool, log_level: str):
    """Print the service/host catalog as JSON, grouped by host or by service."""
    logging.basicConfig(level=log_level)
    log = logging.getLogger(__name__)
    catalog = ServiceCatalog(ka_dir, log)
    if hosts:
        # catalog.hosts maps to sets of ServiceConfig objects, which json
        # cannot serialize natively — default=str stringifies them.
        click.echo(json.dumps(catalog.hosts, indent=2, default=str))
    else:
        click.echo(json.dumps(catalog.services, indent=2))
    return 0
if __name__ == '__main__':
    sys.exit(main())
| canepan/bot | src/tools/bin/service_map.py | service_map.py | py | 9,185 | python | en | code | 1 | github-code | 13 |
30630290472 | """ Python program to find factoral
using two method recursive and iterative"""
#By recursion method
def fact(n):
    """Return n! computed recursively.

    Bug fix: the original base case was ``n == 1``, so ``fact(0)`` recursed
    past zero into negative numbers until RecursionError. ``n <= 1`` handles
    0! == 1 as well.
    """
    if n <= 1:
        return 1
    else:
        return n * fact(n - 1)
# Script driver for the recursive version: read n from stdin and print n!.
a=int(input("Enter the number to find the factorial: "))
print(f"The Factorial of {a} is {fact(a)}")
#By iteration method
def fact(n):
    """Return n! computed iteratively (0! and 1! are both 1)."""
    result = 1
    for factor in range(2, n + 1):
        result *= factor
    return result
# Script driver for the iterative version (shadows the recursive fact above).
a=int(input("Enter the number to find the factorial: "))
print(f"The Factorial of {a} is {fact(a)}")
| aliasthomas/factorial | factorial.py | factorial.py | py | 516 | python | en | code | 1 | github-code | 13 |
11818122295 | class hmshow():
    def __init__(self, word, letter, guessed):
        # word: target word; letter: the current guess, normalized to upper case;
        # guessed: the current mask list (e.g. ['_', 'A', '_']) — presumably the
        # same length as word; verify against the caller.
        self.word = word
        self.letter = letter.upper()
        self.guessed = guessed
    def show(self):
        """Build the updated mask after the guess; print it when run as a script,
        otherwise return it as a list of characters."""
        _word = []
        length = len(self.guessed)
        if self.letter not in self.word:
            print("Incorrect !!!")
        # Reveal positions matching the guess; keep previously revealed letters.
        for l,x in zip(self.word, range(0,length)):
            if self.letter == l:
                _word.append(l)
            elif self.guessed[x] != "_":
                _word.append(self.guessed[x])
            else:
                _word.append("_")
        # NOTE(review): this __name__ check sits inside the method, so the
        # printing branch only triggers when this module itself is executed
        # directly; importers always get the list back.
        if __name__ == "__main__":
            print(' '.join(_word))
        else:
            return _word
| PranabBandyopadhyay/Tutorial1 | hangman_show.py | hangman_show.py | py | 673 | python | en | code | 0 | github-code | 13 |
17091318724 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.MemberWalletBalanceDetailVO import MemberWalletBalanceDetailVO
class AntMerchantMemberwalletBalancedetailsQueryResponse(AlipayResponse):
    """Auto-generated response model for the member-wallet balance-details query API.

    Holds a page of MemberWalletBalanceDetailVO records plus paging metadata.
    """
    def __init__(self):
        super(AntMerchantMemberwalletBalancedetailsQueryResponse, self).__init__()
        self._balance_detail_list = None
        self._page_no = None
        self._page_size = None
        self._row_count = None
    @property
    def balance_detail_list(self):
        return self._balance_detail_list
    @balance_detail_list.setter
    def balance_detail_list(self, value):
        # Coerce plain dicts into MemberWalletBalanceDetailVO instances;
        # non-list values are silently ignored.
        if isinstance(value, list):
            self._balance_detail_list = list()
            for i in value:
                if isinstance(i, MemberWalletBalanceDetailVO):
                    self._balance_detail_list.append(i)
                else:
                    self._balance_detail_list.append(MemberWalletBalanceDetailVO.from_alipay_dict(i))
    @property
    def page_no(self):
        return self._page_no
    @page_no.setter
    def page_no(self, value):
        self._page_no = value
    @property
    def page_size(self):
        return self._page_size
    @page_size.setter
    def page_size(self, value):
        self._page_size = value
    @property
    def row_count(self):
        return self._row_count
    @row_count.setter
    def row_count(self, value):
        self._row_count = value
    def parse_response_content(self, response_content):
        """Populate fields from the parsed response dict; missing keys are skipped."""
        response = super(AntMerchantMemberwalletBalancedetailsQueryResponse, self).parse_response_content(response_content)
        if 'balance_detail_list' in response:
            self.balance_detail_list = response['balance_detail_list']
        if 'page_no' in response:
            self.page_no = response['page_no']
        if 'page_size' in response:
            self.page_size = response['page_size']
        if 'row_count' in response:
            self.row_count = response['row_count']
| alipay/alipay-sdk-python-all | alipay/aop/api/response/AntMerchantMemberwalletBalancedetailsQueryResponse.py | AntMerchantMemberwalletBalancedetailsQueryResponse.py | py | 2,065 | python | en | code | 241 | github-code | 13 |
36549546702 | import sys
import os
import configparser
import subprocess
import json
# Prefer package-relative imports; when run outside the package, patch
# sys.path so the sibling utils modules can still be found.
try:
    from .matcher import Matcher
    from .pbar import Pbar
    from .run_sys_agent import agent_system
except ImportError as ex:
    path=os.path.abspath('.')
    if 'tools' in path.replace('\\','/').split('/'):# convenience for development/debugging
        path=path.split('tools',maxsplit=1)[0]+'Library/utils'
    else:
        path=path+'/Library/utils'
    if not path in (p.replace('\\','/') for p in sys.path):
        sys.path.append(path)
    from matcher import Matcher
    from pbar import Pbar
    from run_sys_agent import agent_system
class PY_ENV_CL:
    '''Checker/loader: determine whether a library needs Python 2 or 3 and
    resolve a usable interpreter path via env_conf.ini (asking the user when
    the stored path is missing or invalid).'''
    def __init__(self,lib_path,ver='auto'):
        if ver not in (2,3,'auto'):
            raise Exception('版本错误')
        self._py_ver=ver
        # NOTE(review): when lib_path is None, self._lib_path is never assigned
        # and the next os.path.exists call raises AttributeError — confirm callers
        # always pass a path.
        if lib_path!=None:
            self._lib_path=lib_path
        if os.path.exists(self._lib_path) and os.path.isfile(self._lib_path):
            self._mode='file'
        else:
            self._mode='dir'
        # Check existence before and after normalization; if normalization
        # broke a valid path, fall back to the raw path trimmed to a directory.
        ex=[False,False]
        ex[0]=self._check_path_exists()
        self._standard_path()
        ex[1]=self._check_path_exists()
        if sum(ex)==2:
            pass
        elif sum(ex)==0:
            raise Exception('路径不存在')
        elif ex[0]==True and ex[1]==False:
            #raise Exception('debug')
            self._lib_path=lib_path
            self._clean_to_dir()
            if not self._check_path_exists():raise Exception('debug')
        self._check_py_ver()
        self._conf_path=os.path.dirname( __file__)+'/env_conf.ini'
        #self._conf_path='D:/ctf-tool/Library/utils/env_conf.ini'
        self._config=self._read_default_conf()
        self._check_conf()
    def get_pyenv(self):
        '''Return the configured interpreter path, quoted when it contains spaces.'''
        e=self._config.get("python", "python{}_path".format(self._py_ver)).strip()
        if ' ' in e:
            return '"'+e+'"'
        else:
            return e
    def get_pyver(self):
        '''Return the detected/chosen Python major version (2 or 3).'''
        return self._py_ver
    def _read_default_conf(self):
        '''Read the config file, creating the [python] section when missing.'''
        config = configparser.ConfigParser()
        if not self._check_conf_exists():
            if not config.has_section("python"): # check that the section exists
                config.add_section("python")
        else:
            config.read(self._conf_path,encoding="utf-8")
            if not config.has_section("python"): # check that the section exists
                config.add_section("python")
        return config
    def _write_conf(self):
        '''Persist the in-memory config to env_conf.ini.'''
        self._config.write(open(self._conf_path, "w",encoding='utf-8'))
    def _user_input_config(self):
        '''Prompt the user for an interpreter path; allow keeping a suspicious one.'''
        path=input('[!]输入python{}路径:'.format(self._py_ver))
        if self._check_input_path(path):
            return path
        else:
            yn=input('[!]警告:检测到路径不可用,是否继续?(y/n)').strip().lower()
            if yn=='y':
                return path
            else:
                exit(1)
    def _check_conf(self):
        '''Validate the stored interpreter path; fall back to asking the user.'''
        if not self._config.has_option("python", "python{}_path".format(self._py_ver)):
            self._config.set("python", "python{}_path".format(self._py_ver),self._user_input_config())
        else:
            path=self._config.get("python", "python{}_path".format(self._py_ver))
            if not self._check_input_path(path,'读入'):
                print('[!]警告:读入的配置文件存在异常,路径不可用')
                self._config.set("python", "python{}_path".format(self._py_ver),self._user_input_config())
        self._write_conf()
    def _check_input_path(self,path,mode='输入'):
        '''Heuristically verify that *path* points at a usable interpreter.'''
        # 1) accept if a matching pythonN dir appears in PATH (Windows-style split)
        for p in os.environ['Path'].split(';'):
            if 'python{}'.format(self._py_ver) in p or 'Python{}'.format(self._py_ver) in p:
                return True
        # 2) try executing `path --version`
        # p = subprocess.Popen([path,'--version'], shell=True,stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        # data=[]
        # while True:
        #     d=p.stdout.readline()
        #     if not d:
        #         break
        #     data.append(d)
        # p.stdout.close()
        # p.wait()
        code,r,e=agent_system(path+' --version',' ',True)
        if code!=0:
            return False
        #os.waitpid(p.pid, 0)
        if r.startswith('Python {}'.format(self._py_ver)):
            return True
        # 3) fall back to filesystem checks (file, or directory holding python.exe)
        abs_path=os.path.abspath(path)
        if os.path.exists(path) or os.path.exists(abs_path):
            if os.path.isfile(path) or os.path.isfile(abs_path):
                pass
            else:
                print('[!]警告:检测到输入的是目录')
                if abs_path[-1] not in ('/','\\'):abs_path+='/'
                if os.path.exists(abs_path+'python.exe'):pass
                else:
                    print('[!]错误:未检测到目录下的python.exe')
                    return False
            return True
        else:
            return False
    def _clean_to_dir(self):
        '''Strip the trailing filename so _lib_path points at a directory.'''
        if not os.path.isdir(self._lib_path):
            if '/' in self._lib_path or sys.platform=='linux':
                index=self._lib_path.rfind('/')
            else:
                index=self._lib_path.rfind('\\')
            if index==-1:
                self._lib_path='./'
            else:
                self._lib_path=self._lib_path[:index]
    def _detect_py_ver(self,filename):
        '''Heuristically classify *filename* as Python 2 or 3; False when unknown.'''
        if not os.path.exists(filename):
            raise Exception('文件不存在')
        with open(filename,'r',encoding='utf-8') as f:
            data=f.read()
        # Token heuristics first (print statement vs function, xrange, raw_input),
        # then except-clause syntax as a tiebreaker.
        if 'print "' in data or "print '" in data or 'exec ' in data or 'xrange' in data or 'raw_input' in data:
            return 2
        elif 'print(' in data or 'print (' in data or 'exec(' in data:
            return 3
        else:
            M=Matcher('*except *,*:*',tuple())
            if True in (M.is_match(l) for l in data.split('\n')):
                return 2
            M.set_substr('*except * as *:*')
            if True in (M.is_match(l) for l in data.split('\n')):
                return 3
        return False
    def _check_py_ver(self):
        '''Resolve 'auto' into a concrete version by scanning the library files.'''
        if self._py_ver=='auto':
            file_list=self._scan_py()
            for file_path in file_list:
                r=self._detect_py_ver(file_path)
                if r==False:
                    continue
                self._py_ver=r
                return
            # detection failed; ask the user
            self._user_input_pyver()
    def _user_input_pyver(self):
        '''Prompt until the user enters 2 or 3.'''
        while 1:
            v=input('[!]请输入lib所用的python版本(2/3):').strip()
            if v in ('2','3'):
                break
            else:
                print('[!]只能输入(2/3)')
        self._py_ver=int(v)
    def _scan_files(self,directory,prefix=None,postfix=None):
        '''Recursively collect files, optionally filtered by prefix or suffix.'''
        files_list=[]
        for root, sub_dirs, files in os.walk(directory):
            for special_file in files:
                if postfix:
                    if special_file.endswith(postfix):
                        files_list.append(os.path.join(root,special_file))
                elif prefix:
                    if special_file.startswith(prefix):
                        files_list.append(os.path.join(root,special_file))
                else:
                    files_list.append(os.path.join(root,special_file))
        return files_list
    def _scan_py(self):
        '''Return the .py files to inspect (the whole dir, or the single file).'''
        if self._mode=='dir':
            return self._scan_files(self._lib_path,postfix=".py")
        else:
            return [self._lib_path]
    def _check_conf_exists(self):
        '''The config file lives alongside this module (utils/) for central management.'''
        return os.path.exists(self._conf_path)
    def _check_path_exists(self):
        '''Check whether the library path exists.'''
        return os.path.exists(self._lib_path)
    def _standard_path(self):
        '''Normalize _lib_path into a ./Library/... relative path.'''
        def process_subpath(p):
            if p[:2] in ('.\\','./'):
                p=p[2:]
            elif p[0] in ('\\','/'):
                p=p[1:]
            if '/' in p or sys.platform=='linux':
                p='./Library/'+p
            else:
                p='.\\Library\\'+p
            return p
        if len(self._lib_path)<10:# probably a partial path (only the part after Library)
            self._lib_path=process_subpath(self._lib_path)
        elif self._lib_path[:10] in ('.\\Library\\','./Library/'):# already a standard relative path
            pass
        elif 'Library' in self._lib_path:# probably an absolute path
            self._lib_path=self._lib_path.split('Library',maxsplit=1)[1]
            if '/' in self._lib_path or sys.platform=='linux':
                self._lib_path='./Library'+self._lib_path
            else:
                self._lib_path='.\\Library'+self._lib_path
        else:# partial path
            self._lib_path=process_subpath(self._lib_path)
class PY_PIP_CI:
    '''Checker/installer: ensure pip packages exist in a target interpreter
    (Windows-oriented: uses cmd /C and `set PYTHONIOENCODING`).'''
    def __init__(self,pyenv):
        # pyenv: path to the python executable whose site-packages we manage.
        self._pyenv=pyenv
        self._pip_list=self._get_py_piplist()
        #self._bat_path='D:/ctf-tool/Library/utils/tmp.bat'
    def ensure(self,pipname):
        '''Ensure the given pip package(s) are installed; accepts a str or a list/tuple.'''
        if isinstance(pipname,str):
            if pipname not in self._pip_list:
                fd=self._install(pipname)
                if fd==False:
                    print('[!]错误:pip软件包'+pipname+'安装失败')
                    exit(1)
        elif isinstance(pipname,tuple) or isinstance(pipname,list):
            # progress bar over the package list
            bar=Pbar(show_percent_num=False,smooth=True,allow_skip_frame=False,vsync=True)
            pnum=len(pipname)+1
            for i,p in enumerate(pipname):
                # NOTE(review): i+1//pnum*100 parses as i + (1//pnum)*100 == i;
                # (i+1)/pnum*100 was presumably intended — confirm.
                bar.set_rate(int(i+1//pnum*100),'check '+p+'...')
                if p not in self._pip_list:
                    fd=self._install(p)
                    if fd==False:
                        # NOTE(review): pipname is the whole list here, so this
                        # string concatenation raises TypeError; 'p' was likely meant.
                        bar.print('[!]错误:pip软件包'+pipname+'安装失败')
                        bar.clear(True)
                        exit(1)
            bar.set_rate(100,'all done')
            bar.clear(True)
        else:
            print('[!]错误:不支持的参数类型')
            exit(1)
    def _install(self,pipname):
        # Shell out to `pip install`; True on success, False otherwise.
        fd=os.system('set PYTHONIOENCODING=UTF-8 &&' + self._pyenv+' -m pip install '+pipname)
        if fd!=0:
            return False
        return True
    def _get_py_piplist(self):
        '''Return the target interpreter's installed-package list (JSON from a helper script).'''
        #self._create_bat()
        #s=subprocess.Popen(['cmd','/C',".\\Library\\utils\\tmp.bat"],bufsize=0,stdout=subprocess.PIPE,universal_newlines=True)
        s=subprocess.Popen(['cmd','/C',"set PYTHONIOENCODING=UTF-8&&{} {}/get_py_pip_list.py".format(self._pyenv,os.path.dirname( __file__))],bufsize=0,stdout=subprocess.PIPE,stdin=subprocess.PIPE,universal_newlines=True)
        result=''
        while True:
            nextline=s.stdout.readline().strip()
            result+=nextline
            if nextline=="" and s.poll()!=None:
                break
        # The helper wraps its JSON payload between '|' separators.
        result=result.split('|')[1]
        #print(result)
        j=json.loads(result.strip())
        #self._clean_bat()
        return j
    def _create_bat(self):
        # Legacy helper: write a temporary .bat wrapper (currently unused).
        bat='''chcp 65001
set PYTHONIOENCODING=UTF-8
{} ./Library/utils/get_py_pip_list.py'''.format(self._pyenv)
        with open('./Library/utils/tmp.bat','w',encoding='utf-8') as f:
            f.write(bat)
    def _clean_bat(self):
        # Remove the temporary .bat wrapper if present.
        if os.path.exists('./Library/utils/tmp.bat'):
            os.remove('./Library/utils/tmp.bat')
if __name__ == "__main__":
    # Manual smoke test: list the pip packages of a hard-coded interpreter.
    piplist=PY_PIP_CI('c:/Python27/python.exe')
    print(piplist._pip_list)
    #print(PY_ENV_CL(None,3).get_pyenv())
    #print(PY_ENV_CL(None,2).get_pyenv())
| ezeeo/ctf-tools | Library/utils/py_env_util.py | py_env_util.py | py | 12,214 | python | en | code | 8 | github-code | 13 |
23510532976 | from __future__ import division
import os
import numpy as np
import math
import csv
from time import localtime, strftime
from PIL import Image
import scipy.misc
import subprocess
import matplotlib.pyplot as mp
def loadDemo(data_path, resize_size):
    """Load demo screenshots (*.png) and their labels (first *.txt) from data_path.

    Returns (images, labels): images resized to resize_size (H, W), labels as a
    float matrix with one row per screenshot (one label line per image).

    NOTE(review): scipy.misc.imread/imresize were removed from modern SciPy
    (1.2+/1.3+); this code needs an old SciPy with Pillow support.
    """
    # Read human_demo.txt
    txt_name = [ss for ss in os.listdir(data_path) if ss.endswith(".txt")]
    assert len(txt_name) > 0, 'Error | Loading data: No label found!'
    demo_label_file = open(os.path.join(data_path, txt_name[0]), 'r')
    labels = demo_label_file.readlines()
    label_size = len(labels[0].split())
    # Screenshots are sorted so image i lines up with label line i.
    screenshots = sorted([ss for ss in os.listdir(data_path) if ss.endswith(".png")])
    num_demo = len(screenshots)
    assert num_demo == len(labels), 'Error | Loading data: Number of label error!'
    data_image = np.empty((num_demo, resize_size[0], resize_size[1], 3))
    data_label = np.empty((num_demo, label_size))
    for i, ss in enumerate(screenshots):
        img = scipy.misc.imread(os.path.join(data_path,ss), mode='RGB')
        data_image[i, ...] = scipy.misc.imresize(img, size=resize_size)
        data_label[i, ...] = [float(e) for e in labels[i].replace('\n', '').split()]
    return data_image, data_label
def loadActionDemo(data_path, cut=-1):
    """Load the (images, actions) arrays from <data_path>/demo.npz.

    When cut > 0, only the first *cut* entries of each array are returned.
    """
    archive = np.load(os.path.join(data_path, 'demo.npz'))
    images = archive['imgs']
    actions = archive['actions']
    if cut > 0:
        return images[:cut, ...], actions[:cut, ...]
    return images, actions
def SplitFrame(data_label, data_image, resize_size = None, num_frame = 5, split_at = 0):
    """Slice (images, labels) into overlapping windows of num_frame frames.

    Every frame is resized to resize_size first. When split_at > 0 the data is
    treated as consecutive chunks of split_at frames and windows never cross a
    chunk boundary. Returns (labels_at_window_end, windows).

    NOTE(review): relies on the removed scipy.misc.imresize (see loadDemo).
    """
    if resize_size is None:
        resize_size = [data_image.shape[1:2]]
    num_demo = data_image.shape[0]
    img_temp = np.empty([num_demo] + resize_size + [data_image.shape[3]])
    for i in range(num_demo):
        img_temp[i, ...] = scipy.misc.imresize(data_image[i, ...], size=resize_size)
    if split_at == 0 or num_demo < split_at:
        # Single segment: every position that fits a full window.
        num_gif = num_demo - num_frame + 1
        nf = np.arange(num_gif)
    else:
        # Per-chunk windows; trailing frames beyond a full chunk are dropped.
        num_gif_ori = num_demo // split_at
        nf = np.concatenate([np.arange(i*split_at, (i+1)*split_at - num_frame + 1) for i in range(num_gif_ori)])
        num_gif = len(nf)
    shape = [num_gif, num_frame] + list(img_temp.shape[1:])
    data_output = np.empty(shape)
    for i,j in enumerate(nf):
        data_output[i,...] = img_temp[j:(j+num_frame), ...]
    # Each window is labeled by the label of its last frame.
    return data_label[nf + num_frame - 1,...], data_output
def evaluate_direct(predicted_label, truth_label):
    """Return the sum of squared errors between prediction and ground truth."""
    residual = predicted_label - truth_label
    return (residual * residual).sum()
def save_config(config):
    """Append a one-line experiment summary to experiment_list.csv and dump the
    full config (one 'key : value' row per attribute) to
    <output_dir>/<category>/config.cfg."""
    # Save to all
    with open('experiment_list.csv', "a") as output:
        writer = csv.writer(output)
        dd = strftime("%Y-%m-%d %H:%M:%S", localtime())
        row = [dd, config.category, config.net_type, config.batch_size, config.lr, config.step_size,
               config.action_step_size, config.action_cold_start, config.state_cold_start]
        writer.writerow(row)
    # Save full config
    log_path = os.path.join(config.output_dir, config.category)
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    dict_config = vars(config)
    with open(log_path + '/config.cfg', "w") as output:
        writer = csv.writer(output)
        for val in dict_config:
            writer.writerow([val + " : " + str(dict_config[val])])
def final_save(samples, samples_a, category):
    """Save a tiled overview PNG of sampled frames plus a histogram of actions
    under output_total/."""
    [num_video, num_frames, image_size, _, _] = samples.shape
    if num_video > 60:
        # Large batches: keep only the first 4 clips out of every group of 30.
        idx = np.concatenate([np.arange(4) + i*30 for i in range(num_video//30)])
    else:
        idx = np.arange(num_video)
    saved_img = img2cell(np.concatenate(samples[idx, ...], axis=0))
    scipy.misc.imsave("output_total/%s_final.png" % (category), saved_img)
    mp.hist(samples_a)
    mp.savefig("output_total/%s_actions.png" % (category))
# Inherited from STGConvnet
def loadVideoToFrames(data_path, syn_path, ffmpeg_loglevel = 'quiet'):
    """Split every .avi/.mp4 in data_path into PNG frames under
    syn_path/sequence_<i>/ using ffmpeg; return the number of videos."""
    videos = [f for f in os.listdir(data_path) if f.endswith(".avi") or f.endswith(".mp4")]
    num_videos = len(videos)
    for i in range(num_videos):
        video_path = os.path.join(data_path, videos[i])
        out_dir = os.path.join(syn_path, "sequence_%d" % i)
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        # Frames are numbered 001.png, 002.png, ... by ffmpeg.
        subprocess.call('ffmpeg -loglevel {} -i {} {}/%03d.png'.format(ffmpeg_loglevel,video_path, out_dir), shell=True)
    return num_videos
# Inherited from STGConvnet
def cell2img(filename, out_dir='./final_result',image_size=224, margin=2):
    """Inverse of img2cell: split a tiled grid image back into numbered cell PNGs.

    Cell count is inferred from the image dimensions and image_size; cells are
    written row-major as 000.png, 001.png, ...
    """
    img = scipy.misc.imread(filename, mode='RGB')
    num_cols = img.shape[1] // image_size
    num_rows = img.shape[0] // image_size
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    for ir in range(num_rows):
        for ic in range(num_cols):
            temp = img[ir*(image_size+margin):image_size + ir*(image_size+margin),
                   ic*(image_size+margin):image_size + ic*(image_size+margin),:]
            scipy.misc.imsave("%s/%03d.png" % (out_dir,ir*num_cols+ic), temp)
    print(img.shape)
# Inherited from STGConvnet
def img2cell(images, col_num=10, margin=2):
    """Tile a batch of images into one grid image.

    Each cell is clipped to [0, 1] and then min-max normalized independently.

    Args:
        images: array of shape (num_images, H, W, C).
        col_num: grid columns; rows are ceil(num_images / col_num).
        margin: pixel gap (left at zero/black) between adjacent cells.

    Returns:
        float32 array holding the tiled grid.
    """
    [num_images, size_h, size_w, num_channel] = images.shape
    row_num = int(math.ceil(num_images / col_num))
    saved_img = np.zeros(((row_num * size_h + margin * (row_num - 1)),
                          (col_num * size_w + margin * (col_num - 1)),
                          num_channel), dtype=np.float32)
    for idx in range(num_images):
        ir = idx // col_num
        ic = idx % col_num
        temp = np.squeeze(images[idx])
        temp = np.maximum(0.0, np.minimum(1.0, temp))
        g_low = temp.min()
        g_high = temp.max()
        # Bug fix: the original divided by (max - min) unconditionally, which
        # produced NaNs (0/0) for constant cells; leave flat cells unscaled.
        if g_high > g_low:
            temp = (temp - g_low) / (g_high - g_low)
        saved_img[(size_h + margin) * ir:size_h + (size_h + margin) * ir,
                  (size_w + margin) * ic:size_w + (size_w + margin) * ic, :] = temp
    return saved_img
# Inherited from STGConvnet
def getTrainingData(data_path, num_frames=70, image_size=100, isColor=True, postfix='.png'):
    """Load frame sequences from data_path/sequence_<i>/ directories.

    Returns a float array of shape
    (num_videos, num_frames, image_size, image_size, channels); sequences
    shorter than num_frames leave trailing frames as zeros.
    """
    num_channel = 3
    if not isColor:
        num_channel = 1
    videos = [f for f in os.listdir(data_path) if f.startswith('sequence')]
    num_videos = len(videos)
    images = np.zeros(shape=(num_videos, num_frames, image_size, image_size, num_channel))
    for iv in range(num_videos):
        video_path = os.path.join(data_path, 'sequence_%d' % iv)
        imgList = [f for f in os.listdir(video_path) if f.endswith(postfix)]
        imgList.sort()
        imgList = imgList[:num_frames]
        for iI in range(len(imgList)):
            image = Image.open(os.path.join(video_path, imgList[iI])).resize((image_size, image_size), Image.BILINEAR)
            if isColor:
                image = np.asarray(image.convert('RGB')).astype(float)
            else:
                # Grayscale: keep an explicit channel axis of size 1.
                image = np.asarray(image.convert('L')).astype(float)
                image = image[..., np.newaxis]
            images[iv, iI, :,:,:] = image
    return images.astype(float)
# Inherited from STGConvnet
def saveSampleVideo(samples, out_dir, global_step=None, ffmpeg_loglevel='quiet', fps=25):
    """Save the sample batch as one tiled PNG per frame index.

    NOTE(review): ffmpeg_loglevel and fps are currently unused here.
    """
    [num_video, num_frames, image_size, _, _] = samples.shape
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    for ifr in range(num_frames):
        saved_img = img2cell(np.squeeze(samples[:, ifr, :, :, :]))
        scipy.misc.imsave("%s/step_%04d_%03d.png" % (out_dir, global_step, ifr), saved_img)
# Inherited from STGConvnet
def saveSampleSequence(samples, sample_dir, iter, col_num=10):
    """Save one tiled PNG per video under sample_dir/sequence_<i>/<iter>.png.

    NOTE(review): the parameter name `iter` shadows the builtin.
    """
    num_video = samples.shape[0]
    for iv in range(num_video):
        save_dir = os.path.join(sample_dir, "sequence_%d" % iv)
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        scipy.misc.imsave("%s/%04d.png" % (save_dir, iter), img2cell(samples[iv], col_num=col_num))
def xcrange(d1, d2, d3=-1, d4=-1):
    """Yield index tuples covering a 2D, 3D or 4D grid.

    Uses np.meshgrid's default 'xy' indexing and flattening, so the first
    coordinate varies fastest in the 2D case; d3/d4 == -1 mean "unused".
    """
    if d3 == -1:
        grids = np.meshgrid(range(d1), range(d2))
    elif d4 == -1:
        grids = np.meshgrid(range(d1), range(d2), range(d3))
    else:
        grids = np.meshgrid(range(d1), range(d2), range(d3), range(d4))
    return zip(*(g.flatten() for g in grids))
| fei960922/Research_STGC_IL | src/util.py | util.py | py | 8,251 | python | en | code | 1 | github-code | 13 |
23495299506 | #!/bin/python3
import math
import os
import random
import re
import sys
if __name__ == '__main__':
    # Read a 6x6 integer grid from stdin and print the maximum "hourglass" sum:
    # a top row of 3, the middle cell below it, and a bottom row of 3.
    arr = []
    for _ in range(6):
        arr.append(list(map(int, input().rstrip().split())))
    sum_a = []
    # Anchors range over the 4x4 top-left positions that fit a 3x3 hourglass.
    for index in range(0,4):
        for key in range (0,4):
            a = sum(arr[index][key:key+3]) + arr[index + 1][key + 1] + sum(arr[index + 2][key : key + 3])
            sum_a.append(a)
    max_s = sum_a[0]
    for element in sum_a:
        if element > max_s:
            max_s = element
    print(max_s)
| dvphuonguyen/pythonCoBan | BaiTapCoBan/2D_array.py | 2D_array.py | py | 562 | python | en | code | 0 | github-code | 13 |
31494025262 | # Classe TV: Faça um programa que simule um televisor criando-o como um objeto. O usuário deve ser capaz de
# informar o número do canal e aumentar ou diminuir o volume. Certifique-se de que o número do canal e o nível
# do volume permanecem dentro de faixas válidas.
class TV:
    """Simple TV model: power toggle, volume clamped to 0-60, channels 1-100."""

    def __init__(self):
        self.volume = 0
        self.canal = 1
        self.ligado = 'OFF'

    def Volume(self, v):
        """Raise ('+') or lower ('-') the volume by one, staying within 0-60."""
        if v == '+':
            if self.volume == 60:
                print('Volume Máximo\n')
            else:
                self.volume += 1
        elif v == '-':
            if self.volume == 0:
                print('Volume Mínimo\n')
            else:
                self.volume -= 1
        else:
            print('Valor inválido\n')

    def Canal(self, n):
        """Switch to channel n; valid channels are 1..100 inclusive.

        Bug fix: the old bound (0 < n < 100) rejected channel 100 even though
        the error message states that channels go from 1 to 100.
        """
        if 0 < n <= 100:
            self.canal = n
        else:
            print('Os canais vão de 1 à 100\n')

    def Ligar(self):
        """Toggle the power state between 'ON' and 'OFF'."""
        if self.ligado == 'ON':
            self.ligado = 'OFF'
        else:
            self.ligado = 'ON'
# Interactive loop: show the TV state and dispatch on the user's keystroke.
tv = TV()
while True:
    try:
        print(f' {tv.ligado} Volume {tv.volume} Canal {tv.canal} \n')
        a = input('(+,-) Volume\n (número) Canal\n (l) Liga/Desliga\n\n\n\n\n')
        if a == '+':
            tv.Volume(a)
        elif a == '-':
            tv.Volume(a)
        elif a == 'l':
            tv.Ligar()
        else:
            # Anything else is interpreted as a channel number.
            a = int(a)
            tv.Canal(a)
    except:
        # NOTE(review): this bare except is meant to ignore non-numeric input,
        # but it also swallows KeyboardInterrupt, so Ctrl+C cannot exit the
        # loop; `except ValueError: pass` would be safer.
        pass
| GuilhermeMastelini/Exercicios_documentacao_Python | Classes/Lição 6.py | Lição 6.py | py | 1,481 | python | pt | code | 0 | github-code | 13 |
18983427061 | import math
def ticket_price(age):
    """Return the ticket price (as a Russian string) for the given age.

    Children under 7 and seniors 60+ ride free; negative ages are an error.
    """
    if age < 0:
        return "Ошибка"
    if age < 7 or age >= 60:
        return "Бесплатно"
    if age < 18:
        return "100 рублей"
    if age < 25:
        return "200 рублей"
    return "300 рублей"
def double(value):
    """Return value doubled (works for anything supporting * 2, e.g. numbers and sequences)."""
    return value * 2
def sum_func(first, second):
    """Return the sum (or concatenation) of the two arguments."""
    result = first + second
    return result
class Circle:
    """Circle with a validated, non-negative numeric radius.

    Bug fix: the constructor was accidentally written as a *nested*
    ``class Circle`` holding a method named ``init`` (not ``__init__``), so
    ``Circle(r)`` rejected any argument and the getters crashed on the missing
    ``radius`` attribute. It is now a proper ``__init__``.
    """

    def __init__(self, radius):
        # Deliberately strict type check: bool is rejected too, because
        # type(True) is bool, not int.
        if type(radius) not in [int, float]:
            raise TypeError("Радиус должен быть числом (int or float)")
        if radius < 0:
            raise ValueError("Радиус должен быть положительньй")
        self.radius = radius

    def get_radius(self):
        """Return the radius."""
        return self.radius

    def get_diameter(self):
        """Return the diameter (2 * radius)."""
        return self.radius * 2

    def get_perimeter(self):
        """Return the circumference (2 * pi * radius)."""
        return 2 * self.radius * math.pi
def get_circle_square(radius):
    """Area of a circle with the given radius.

    Raises TypeError for non-numeric input (bool included) and ValueError for
    negative values.
    """
    if type(radius) not in (int, float):
        raise TypeError("Should be int or float")
    if radius < 0:
        raise ValueError("Int or float should be >0")
    return radius ** 2 * math.pi
def get_verbal_grade(grade):
    """Map an integer grade in [2, 5] to its verbal description.

    Raises TypeError for non-int input and ValueError outside [2, 5].
    """
    if type(grade) != int:
        raise TypeError("Grade should be between 2 and 5 and integer")
    if grade < 2 or grade > 5:
        raise ValueError("Grade should be between 2 and 5")
    descriptions = {
        2: "Bad",
        3: "not such bad but still bad",
        4: "Good",
        5: "Very good!",
    }
    return descriptions[grade]
| tonyglaz/small_projects | py_tests/utils.py | utils.py | py | 1,632 | python | en | code | 0 | github-code | 13 |
27216166598 | #!/usr/bin/python3
'''
这个写出来是为了测试main包好不好使的
这个基本上是一个test的例子, 以后基本上就按照这个文件写
'''
import sys
sys.path.append("..")
import insummer
from insummer.query_expansion import EntityFinder
from insummer.read_conf import config
from insummer.query_expansion1.semantic_complement import add
def test1():
    """Smoke-test EntityFinder on the first ~21 question titles (prints matches).

    NOTE(review): the file handle is never closed — a `with open(...)` block
    would be safer.
    """
    conf = config("../../conf/question.conf")
    f = open(conf["title_pos"])
    titles = f.readlines()
    indx = 0
    for title in titles:
        if indx > 20:
            break
        naive_finder = EntityFinder(title)
        naive_finder.find(display=True)
        indx += 1
def test2():
    """Exercise semantic_complement.add with a trivial input."""
    return add(1,1)
if __name__ == '__main__':
    print(test2())
| lavizhao/insummer | code/test/test_main.py | test_main.py | py | 769 | python | en | code | 7 | github-code | 13 |
74593193616 | from os import environ
import os
# oTree session defaults: payment parameters shared by all session configs.
SESSION_CONFIG_DEFAULTS = dict(real_world_currency_per_point=1,
                               participation_fee=0,
                               fixed_payment=25,
                               additional_payment=50,
                               variable_payment=1,
                               var_ratio_16=1.5,
                               var_ratio_25=3.5,
                               var_ratio_34=5.5,
                               use_browser_bots=False)
# Two treatments of the same app: Control (treatment=0) and BTS (treatment=1).
SESSION_CONFIGS = [
    dict(
        name="Control",
        num_demo_participants=1,
        app_sequence=["main"],
        treatment=0,
        # var_ratio_all=1,
    ),
    dict(
        name="BTS",
        num_demo_participants=2,
        app_sequence=["main"],
        treatment=1,
    ),
]
LANGUAGE_CODE = "sk"
REAL_WORLD_CURRENCY_CODE = "CZK"
USE_POINTS = False
DEMO_PAGE_INTRO_HTML = ""
ROOMS = [dict(
    name="stefunko",
    display_name="Stefunko",
    participant_label_file='_rooms/participant_labels.txt',
    use_secure_urls=False,
)]
# for security, best to set admin password in an environment variable
# NOTE(review): SECRET_KEY is a hard-coded placeholder; set a real secret via
# the environment before deploying.
SECRET_KEY = "blahblah"
# BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# STATICFILES_DIRS = os.path.join(BASE_DIR, "/main/static/")
# STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
# STATICFILES_DIRS = (
#     os.path.join(BASE_DIR, 'static'),
# )
# if an app is included in SESSION_CONFIGS, you don"t need to list it here
INSTALLED_APPS = ["otree"]
# environ["DATABASE_URL"] = "postgres://postgres@localhost/django_db"
# environ["REDIS_URL"] = "redis://localhost:6379"
ADMIN_USERNAME = "stefunko"
# NOTE(review): the admin password is hard-coded into the repository here,
# which contradicts the comment above — move it to the environment.
environ["OTREE_ADMIN_PASSWORD"] = "hidden"
environ["OTREE_PRODUCTION"] = "1"
environ["OTREE_AUTH_LEVEL"] = "STUDY"
# Debug mode is disabled whenever OTREE_PRODUCTION is set to a truthy value.
if environ.get("OTREE_PRODUCTION") not in {None, "", "0"}:
    DEBUG = False
else:
    DEBUG = True
| CarlMenger/DP_Stefunko | settings.py | settings.py | py | 2,190 | python | en | code | 0 | github-code | 13 |
35295874698 | from collections import defaultdict
n = int(input())
a = list(map(int, input().split()))
mod = 10**9+7
# Count occurrences of each value.
d = defaultdict(int)
for i in range(n):
    d[a[i]] += 1
# odd n: a valid arrangement needs exactly one 0 and exactly two of every
# even value 2, 4, ..., n-1; each pair can be swapped -> 2^(n//2) arrangements.
if n%2!=0:
    for key in list(d.keys()):
        if key==0 and d[key]!=1: exit(print(0))
        elif key!=0 and key%2==0 and d[key]!=2: exit(print(0))
        elif key%2!=0: exit(print(0))
    ans = pow(2, n//2, mod)
# even n: needs exactly two of every odd value 1, 3, ..., n-1 -> 2^(n/2).
else:
    for key in list(d.keys()):
        if key%2==0: exit(print(0))
        elif key%2!=0 and d[key]!=2: exit(print(0))
    ans = pow(2, n//2, mod)
print(ans) | nozomuorita/atcoder-workspace-python | abc/abc050/c.py | c.py | py | 576 | python | en | code | 0 | github-code | 13 |
10191398595 | import copy
import sys
# Deep recursion possible: the flood fill can visit up to n*n cells.
sys.setrecursionlimit(10 ** 6)
n = int(input())
arr = [list(map(int, input().split())) for _ in range(n)]
# 4-neighbour offsets for the flood fill.
dx = [1, 0, -1, 0]
dy = [0, 1, 0, -1]
def dfs(x, y):
    """Flood-fill one dry region (cells marked 1 in tmparr) starting at (x, y)."""
    global cnt
    visited[x][y] = 1
    for i in range(4):
        nx = x + dx[i]
        ny = y + dy[i]
        if 0 <= nx < n and 0 <= ny < n and visited[nx][ny] == 0 and tmparr[nx][ny] == 1:
            dfs(nx, ny)
maxarr = 0
for i in range(n):
    maxarr = max(maxarr, max(arr[i]))
maxcnt = 0
# For every possible rain level i, flood cells with height <= i and count the
# connected components that stay dry; keep the maximum component count.
for i in range(maxarr + 1):
    tmparr = copy.deepcopy(arr)
    visited = [[0] * n for _ in range(n)]
    cnt = 0
    for j in range(n):
        for k in range(n):
            if tmparr[j][k] <= i:
                tmparr[j][k] = 0
            else:
                tmparr[j][k] = 1
    for j in range(n):
        for k in range(n):
            if tmparr[j][k] == 1 and visited[j][k] == 0:
                dfs(j, k)
                cnt += 1
    maxcnt = max(maxcnt, cnt)
| Jinnie-J/Algorithm-study | baekjoon/[2468]안전영역.py | [2468]안전영역.py | py | 973 | python | en | code | 0 | github-code | 13 |
72641301139 | from flask import Flask, render_template, redirect, url_for, flash, request
from flask_bootstrap import Bootstrap
from flask_ckeditor import CKEditor
from datetime import date
from werkzeug.security import generate_password_hash, check_password_hash
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.orm import relationship
from sqlalchemy import Table, Column, Integer, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from flask_login import UserMixin, login_user, LoginManager, login_required, current_user, logout_user
from forms import CreatePostForm, RegisterForm, LoginForm, CommentForm
from flask_gravatar import Gravatar
from functools import wraps
import bleach
import os
# List of allowed HTML tags
ALLOWED_TAGS = ['p', 'b', 'i', 'u', 'em', 'strong', 'a']
def clean_html(html):
    """Sanitize user-supplied HTML, keeping only ALLOWED_TAGS and stripping the rest."""
    # Clean the HTML, allowing only the tags in ALLOWED_TAGS
    cleaned_html = bleach.clean(html, tags=ALLOWED_TAGS, strip=True)
    return cleaned_html
Base = declarative_base()


def admin_only(f):
    """Route decorator that lets only the admin (user id == 1) through.

    Bug fix: the original read ``current_user.id`` unconditionally, which
    raises AttributeError for anonymous visitors (Flask-Login's anonymous user
    has no ``id``); check ``is_authenticated`` first.
    """
    @wraps(f)
    def decorated_function(*args, **kwargs):
        if not current_user.is_authenticated or current_user.id != 1:
            # Non-admins get a plain denial response.
            return "Error 404: Access Denied"
        return f(*args, **kwargs)  # Admin: proceed as usual
    return decorated_function
# Application and extension wiring.
login_manager = LoginManager()
app = Flask(__name__)
# Secret key comes from the environment; required for sessions and CSRF.
app.config['SECRET_KEY'] = os.getenv('SECRET_KEY')
ckeditor = CKEditor(app)
Bootstrap(app)
login_manager.init_app(app)
##CONNECT TO DB
# Use DATABASE_URL when deployed; fall back to a local SQLite file.
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get("DATABASE_URL", "sqlite:///blog.db")
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
# Gravatar avatars for comment authors.
gravatar = Gravatar(app,
                    size=100,
                    rating='g',
                    default='retro',
                    force_default=False,
                    force_lower=False,
                    use_ssl=False,
                    base_url=None)
##CONFIGURE TABLES
class BlogPost(db.Model):
    """A blog post; belongs to one author (User) and owns many Comments."""
    __tablename__ = "blog_posts"
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(250), unique=True, nullable=False)
    subtitle = db.Column(db.String(250), nullable=False)
    date = db.Column(db.String(250), nullable=False)
    body = db.Column(db.Text, nullable=False)
    img_url = db.Column(db.String(250), nullable=False)
    # Author relationship: many posts -> one user.
    user_id = db.Column(db.Integer, ForeignKey('users.id'))
    user = relationship('User', back_populates='blog_posts')
    comments = relationship('Comment', back_populates="blog_post")
class User(UserMixin, db.Model):
    """A registered user; may author blog posts and comments."""
    __tablename__ = "users"
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(250), nullable=False)
    email = db.Column(db.String(250), nullable=False, unique=True)
    # Stores the werkzeug-generated salted hash, never the raw password.
    password_hash = db.Column(db.String(250), nullable=False)
    blog_posts = relationship('BlogPost', back_populates="user")
    comments = relationship('Comment', back_populates='user')
class Comment(db.Model):
    """A comment; belongs to one author (User) and one BlogPost."""
    __tablename__ = "comments"
    id = db.Column(db.Integer, primary_key=True)
    text = db.Column(db.Text, nullable=False)
    user_id = db.Column(db.Integer, ForeignKey('users.id'))
    user = relationship('User', back_populates='comments')
    blogpost_id = Column(Integer, ForeignKey('blog_posts.id'))
    blog_post = relationship('BlogPost', back_populates='comments')
db.create_all()
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login hook: load a user by primary key to restore a session."""
    return User.query.get(user_id)
@app.route('/')
def get_all_posts():
    """Home page: list every blog post; pass the viewer's id when logged in.

    Bug fix: removed a stray debug ``print(user_id)`` left in the handler.
    """
    posts = BlogPost.query.all()
    if current_user.is_authenticated:
        # logged_in is always True on this branch; the template uses user_id
        # to decide whether to show per-user controls.
        return render_template("index.html", all_posts=posts,
                               logged_in=True, user_id=current_user.id)
    return render_template("index.html", all_posts=posts)
@app.route('/register', methods=['POST', 'GET'])
def register():
    """Create a new account, or redirect already-registered emails to login."""
    form = RegisterForm()
    user = User()
    if request.method == 'POST':
        existing_user = User.query.filter_by(email=form.email.data).first()
        if existing_user:
            flash('You have already signed up with this email, login instead!')
            return redirect(url_for('login'))
        else:
            user.name = form.name.data
            user.email = form.email.data
            # Never store the plain password; werkzeug salts and hashes it.
            user.password_hash = generate_password_hash(form.password.data, salt_length=8)
            db.session.add(user)
            db.session.commit()
            # Log the new user straight in after registration.
            login_user(user)
            return redirect(url_for('get_all_posts'))
    return render_template("register.html", form=form)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user by email and password.

    On bad email or password the user is flashed a message and sent
    back to the login form; on success they are redirected home.
    """
    form = LoginForm()
    if request.method == 'POST':
        # Query only on POST: on GET form.email.data is None, and the
        # original code issued a pointless filter_by(email=None) query
        # on every render of the login page.
        user = User.query.filter_by(email=form.email.data).first()
        if not user:
            flash('Invalid username.')
            return redirect(url_for('login'))
        elif not check_password_hash(user.password_hash, form.password.data):
            flash('Invalid password.')
            return redirect(url_for('login'))
        else:
            login_user(user)
            return redirect(url_for('get_all_posts'))
    return render_template("login.html", logged_in=current_user.is_authenticated, form=form)
@app.route('/logout')
def logout():
    """End the current session and return to the home page."""
    logout_user()
    return redirect(url_for('get_all_posts'))
@app.route("/post/<int:post_id>", methods=['GET', 'POST'])
def show_post(post_id):
requested_post = BlogPost.query.get(post_id)
comment_form = CommentForm()
if current_user.is_authenticated and comment_form.validate_on_submit():
new_comment = Comment(
user_id=current_user.id,
text=clean_html(comment_form.comment.text),
blogpost_id=post_id
)
db.session.add(new_comment)
db.session.commit()
return redirect(url_for('show_post', post_id=post_id))
elif not current_user.is_authenticated and request.method == 'POST':
flash('You need to log in to leave a comment')
return redirect(url_for('login'))
return render_template("post.html",
post=requested_post,
logged_in=current_user.is_authenticated,
user_id=current_user.id if current_user.is_authenticated else None,
form=comment_form)
@app.route("/about")
def about():
return render_template("about.html", logged_in=current_user.is_authenticated)
@app.route("/contact")
def contact():
return render_template("contact.html", logged_in=current_user.is_authenticated)
@app.route("/new-post", methods=["GET", "POST"])
@admin_only
def add_new_post():
form = CreatePostForm()
if form.validate_on_submit():
new_post = BlogPost(
user_id=current_user.id,
title=form.title.data,
subtitle=form.subtitle.data,
body=clean_html(form.body.data),
img_url=form.img_url.data,
date=date.today().strftime("%B %d, %Y")
)
db.session.add(new_post)
db.session.commit()
return redirect(url_for("get_all_posts"))
return render_template("make-post.html", form=form, logged_in=current_user.is_authenticated)
@app.route("/edit-post/<int:post_id>")
@admin_only
def edit_post(post_id):
post = BlogPost.query.get(post_id)
edit_form = CreatePostForm(
title=post.title,
subtitle=post.subtitle,
img_url=post.img_url,
author=post.author,
body=post.body
)
if edit_form.validate_on_submit():
post.title = edit_form.title.data
post.subtitle = edit_form.subtitle.data
post.img_url = edit_form.img_url.data
post.author = edit_form.author.data
post.body = edit_form.body.data
db.session.commit()
return redirect(url_for("show_post", post_id=post.id))
return render_template("make-post.html", form=edit_form, logged_in=current_user.is_authenticated)
@app.route("/delete/<int:post_id>")
@admin_only
def delete_post(post_id):
post_to_delete = BlogPost.query.get(post_id)
db.session.delete(post_to_delete)
db.session.commit()
return redirect(url_for('get_all_posts'))
@app.route("/delete-comment/<int:post_id>/<int:comment_id>", methods=["GET", "POST"])
@login_required
def delete_comment(post_id, comment_id):
comment_to_delete = Comment.query.get(comment_id)
db.session.delete(comment_to_delete)
db.session.commit()
return redirect(url_for('show_post', post_id=post_id))
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5002, debug=True)
| wuwen6937/blog | main.py | main.py | py | 8,613 | python | en | code | 0 | github-code | 13 |
17039339854 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayDigitalmgmtHrcominsuInsuclaimQueryModel(object):
    """Request model for the Alipay HR com-insu claim query API.

    Holds a single `data_key` field and supports round-tripping to and
    from the Alipay wire-format dict.
    """

    def __init__(self):
        self._data_key = None

    @property
    def data_key(self):
        return self._data_key

    @data_key.setter
    def data_key(self, value):
        self._data_key = value

    def to_alipay_dict(self):
        """Serialize to a dict, recursing into nested API models."""
        params = dict()
        value = self.data_key
        if value:
            # Nested model objects know how to serialize themselves.
            if hasattr(value, 'to_alipay_dict'):
                params['data_key'] = value.to_alipay_dict()
            else:
                params['data_key'] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; None for an empty input."""
        if not d:
            return None
        model = AlipayDigitalmgmtHrcominsuInsuclaimQueryModel()
        if 'data_key' in d:
            model.data_key = d['data_key']
        return model
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AlipayDigitalmgmtHrcominsuInsuclaimQueryModel.py | AlipayDigitalmgmtHrcominsuInsuclaimQueryModel.py | py | 917 | python | en | code | 241 | github-code | 13 |
35205512329 | from collections import Counter
from util import aoc
def parse(input):
    """Parse one joltage per line, returned in ascending order."""
    return sorted(map(int, input.splitlines()))
def part_one(model):
hist = Counter()
hist[3] = 1 # last adapter -> device
prev = 0
for j in model:
hist[j - prev] += 1
prev = j
return hist[1] * hist[3]
def part_two(model):
    """Count the distinct adapter arrangements reaching the device.

    Dynamic programming over the sorted joltages: the number of ways to
    reach an adapter is the sum of the ways to reach any adapter 1, 2
    or 3 jolts below it.
    """
    ways = {0: 1}  # one way to be at the charging outlet
    for adapter in model:
        ways[adapter] = sum(ways.get(adapter - step, 0) for step in (1, 2, 3))
    return ways[model[-1]]
if __name__ == "__main__":
aoc.solve(
__file__,
parse,
part_one,
part_two,
)
| barneyb/aoc-2023 | python/aoc2020/day10/adapter_array.py | adapter_array.py | py | 659 | python | en | code | 0 | github-code | 13 |
import time

# Simple interactive calculator (prompts are in Portuguese):
# reads two numbers, then loops over a menu until the user picks 5 (quit).
n1 = int(input('Digite um valor: '))
n2 = int(input('Digite outro valor: '))
usuario = 0  # last menu choice; 5 ends the loop
while usuario != 5:
    print('=-='*10)
    # Menu: 1 add, 2 multiply, 3 larger of the two, 4 new numbers, 5 quit.
    print('[1] SOMAR\n[2] MULTIPLICAR\n[3] MAIOR\n[4] NOVOS NÚMEROS\n[5] SAIR DO PROGRAMA')
    usuario = int(input('Sua escolha: '))
    print('=-='*10)
    if usuario == 1:
        print(f'A soma entre {n1} e {n2} é {n1+n2}')
    elif usuario == 2:
        print(f'A multiplicação entre {n1} e {n2} é {n1*n2}')
    elif usuario == 3:
        maior = max(n1, n2)
        print(f'O maior número entre {n1} e {n2} é {maior}')
    elif usuario == 4:
        # Replace both operands.
        n1 = int(input('Digite um valor: '))
        n2 = int(input('Digite outro valor: '))
    elif usuario == 5:
        print('Finalizando...')
        time.sleep(2)
    else:
        # Anything outside 1-5: ask again.
        print('Digite uma opção válida.')
        time.sleep(1)
print('Fim do programa!')
| uRexxy/Python-Ex.-CEV | Exercícios/Exercício 059.py | Exercício 059.py | py | 865 | python | pt | code | 1 | github-code | 13 |
13382115386 | # Run Speed Up Data(Prepare Text File)
import os
from extractTextFile import OutputExtractor
# 首先從 Input 拿到不要執行的 Phase
# Read the phase numbers to skip (space separated); empty input means run all phases.
inputf = input("Put the Phase that u not want to execute.(seperate by space) ").split(" ")
if not(len(inputf) == 1 and inputf[0] == ""):
    notExecute = list(map(lambda x: int(x), inputf))
else:
    notExecute = []
OUTPUTDIR = "logs"
# Strict26
# Mandelbrot arguments: output image, iterations, real/imag bounds, resolution.
DATASET = "out.png 10000 -0.19985997516420825 -0.19673850548118335 -1.0994739550641088 -1.1010040371755099 7680 4320"
# SEQUENCIALVIERSION = f"{DATASET['index']}-Sequencial.txt"
def getOutputPath(fileName: str) -> str:
    """Prefix a log file name with the output directory."""
    return "/".join((OUTPUTDIR, fileName))

def getSingleCoreFileName(i: int) -> str:
    """Log name for a single-node multi-thread run."""
    return "Strict26-SingleCore-{}.txt".format(i)

def getSingleCoreFileNameFull(i: int) -> str:
    """Log name for a single-node multi-thread run (full timing)."""
    return "Strict26-SingleCore-{}-Full.txt".format(i)

def getMultipleCoreFileName(i: int) -> str:
    """Log name for a multi-node run."""
    return "Strict26-MultiCore-{}.txt".format(i)

def getMultipleCoreFileNameFull(i: int) -> str:
    """Log name for a multi-node run (full timing)."""
    return "Strict26-MultiCore-{}-Full.txt".format(i)
print("Program Start.......")
if (os.path.isdir(f"./{OUTPUTDIR}")):
os.system(f"rm -rf ./{OUTPUTDIR}")
print(f"{OUTPUTDIR} deleted.")
os.mkdir(OUTPUTDIR)
print(f"{OUTPUTDIR} created.")
# Get Single Nodes Performance
# os.system(f"./seq {DATASET['n']} {DATASET['fIn']} {DATASET['fOut']} > {getOutputPath(SEQUENCIALVIERSION)}")
# print("Sequencial Version Completed.")
if 0 not in notExecute:
print("Single Node, Multiple Threads Start.")
# 12 Cores perNode
for i in range(12):
os.system(f"srun -n1 -c{i+1} ./hw2a-time-full {DATASET} > {getOutputPath(getSingleCoreFileNameFull(i))}")
print(f"{i+1} ncpus completed.")
print("----------------------------------------------------------")
if 1 not in notExecute:
# 4 & 8 Thread 平均分佈
os.system(f"srun -n1 -c4 ./hw2a-time {DATASET} > {getOutputPath(getSingleCoreFileName(4))}")
os.system(f"srun -n1 -c8 ./hw2a-time {DATASET} > {getOutputPath(getSingleCoreFileName(8))}")
print(f"4, 8 average cases per thread completed.")
print("----------------------------------------------------------")
if 2 not in notExecute:
# 12 Thread per Process, one Process Per Node
for i in range(4):
os.system(f"srun -N{i+1} -n{i+1} -c12 ./hw2b-time {DATASET} > {getOutputPath(getMultipleCoreFileName(i))}")
print(f"{i+1} multi-cores Full completed.")
print("----------------------------------------------------------")
if 3 not in notExecute:
# 12 Thread per Process, one Process Per Node
for i in range(4):
os.system(f"srun -N{i+1} -n{i+1} -c12 ./hw2b-time-full {DATASET} > {getOutputPath(getMultipleCoreFileNameFull(i))}")
print(f"{i+1} multi-cores completed.")
print("----------------------------------------------------------")
if 4 not in notExecute:
for i in range(1000):
os.system(f"srun -n1 -c12 ./hw2a-time-full {DATASET} {i} > {getOutputPath('blocksize-test.txt')}")
print(f"i={i} completed.") | Elven9/NTHU-2020PP-Mandelbrot-Set-Calculation | Script/runSpeedup.py | runSpeedup.py | py | 2,947 | python | en | code | 2 | github-code | 13 |
10348362786 | import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from twitterAPI import *
global BLFile
BLFile = 'defaultList.txt'
global username
def window():
    """Build and run the main Qt window: a username field, a blacklist
    file field, and a 'Check' button wired to result()."""
    app = QApplication(sys.argv)
    win = QWidget()
    flo = QFormLayout()
    userIn = QLineEdit()
    userIn.textChanged.connect(userchanged)  # keeps global `username` in sync
    flo.addRow("Twitter user name without the @ ",userIn)
    BLIn = QLineEdit()
    BLIn.setText('defaultList.txt')
    BLIn.textChanged.connect(blackchanged)  # keeps global `BLFile` in sync
    flo.addRow("Blacklist File", BLIn)
    button = QPushButton()
    button.setText('Check')
    flo.addRow("", button)
    button.clicked.connect(result)
    win.setLayout(flo)
    win.setWindowTitle("Scold or Troll")
    win.show()
    sys.exit(app.exec_())
def result():
    """Check the selected Twitter user's followees against the blacklist
    and show the matches in a modal HTML dialog."""
    #app = QApplication(sys.argv)
    global BLFile
    global username
    letext = '<h1>Checking user ' + username + ' for potential ill intent...</h1>'
    BL = Blacklist(BLFile)
    che = Checker(username)
    num_fol = che.numFol
    num_hits = che.checkNum(BL)
    letext += '<p>They follow ' + str(num_fol) + " people, " + str(num_hits) + ' of them are in the blacklist</p>'
    letext += '<p>Those include:</p>'
    formlist = listformat(che.listMatch(BL))
    letext += formlist
    # Render the report in a read-only browser widget inside a modal dialog.
    d = QDialog()
    textBrowser = QTextBrowser()
    textBrowser.setHtml(letext)
    vbox = QVBoxLayout()
    vbox.addWidget(textBrowser)
    d.setLayout(vbox)
    d.resize(400,300)
    d.exec_()
    return
def userchanged(text):
    # Qt textChanged slot: mirror the username field into a module global.
    global username
    username = text
def blackchanged(text):
    # Qt textChanged slot: mirror the blacklist-file field into a module global.
    global BLFile
    BLFile = text
def listformat(ls):
    """Format items as HTML '<p>- item</p>' bullets, preceded by a newline.

    Uses str.join instead of the previous += loop, which was quadratic
    in the number of items.
    """
    return '\n' + ''.join('<p>- ' + str(item) + '</p>' for item in ls)
if __name__ == '__main__':
    # Launch the GUI; result() runs when the Check button is pressed.
    window()
    #result(1)
| Pilotman/scold-or-troll | main.py | main.py | py | 1,787 | python | en | code | 0 | github-code | 13 |
42166261240 | import subprocess
from selenium.common.exceptions import StaleElementReferenceException,\
MoveTargetOutOfBoundsException
import string
try:
import win32com.client
except:
pass
import enums
from base import *
import clsTestService
from general import General
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
import re
import random
class Kea(Base):
    driver = None      # Selenium WebDriver shared by the whole test suite
    clsCommon = None   # back-reference to the common page-object aggregator

    def __init__(self, clsCommon, driver):
        # Page object for the Kaltura Editor App (KEA): editor, quiz and hotspot tabs.
        self.driver = driver
        self.clsCommon = clsCommon
#=============================================================================================================
#Upload locators:
#=============================================================================================================
KEA_ADD_NEW_MULTIPLE_QUESTION_BUTTON = ('xpath', "//button[contains(@class,'multiple-options-question-type')]")
KEA_ADD_NEW_MULTIPLE_QUESTION_BUTTON_ACTIVE = ('xpath', "//button[contains(@class,'multiple-options-question-type ng-star-inserted active')]")
KEA_ADD_NEW_MULTIPLE_QUESTION_BUTTON_DEFAULT = ('xpath', "//button[contains(@class,'multiple-options-question-type active ng-star-inserted')]")
KEA_ADD_NEW_REFLECTION_POINT_BUTTON = ('xpath', "//button[contains(@class,'reflection-point-question-type')]")
KEA_ADD_NEW_REFLECTION_POINT_BUTTON_ACTIVE = ('xpath', "//button[contains(@class,'reflection-point-question-type ng-star-inserted active')]")
KEA_ADD_NEW_TRUE_FALSE_QUESTION_BUTTON = ('xpath', "//button[contains(@class,'true-false-question-type')]")
KEA_ADD_NEW_TRUE_FALSE_QUESTION_BUTTON_ACTIVE = ('xpath', "//button[contains(@class,'true-false-question-type ng-star-inserted active')]")
KEA_ADD_NEW_ADD_QUESTION_TRUE_ANSWER_FIELD = ('xpath', "//textarea[@placeholder='Add the CORRECT Answer Here']")
KEA_ADD_NEW_QUESTION_FALSE_ANSWER_FIELD = ('xpath', "//textarea[@placeholder='Add Additional Answer Here']")
KEA_ADD_NEW_QUESTION_HINT_AND_WHY_TOGGLE_MENU_BUTTON = ('xpath', "//button[@class='menu-button unbutton']")
KEA_ADD_NEW_QUESTION_HINT_BUTTON = ('xpath', "//button[@class='unbutton menu-item' and contains(text(),'Hint')]")
KEA_ADD_NEW_QUESTION_WHY_BUTTON = ('xpath', "//button[@class='unbutton menu-item' and contains(text(),'Why')]")
KEA_ADD_NEW_QUESTION_NUMBER = ('xpath', "//span[@class='question-number']")
KEA_SELECT_VIDEO_FOR_EDIT = ('xpath', '//a[@class="btn btn-small btn-primary btn-select-media"]')
KEA_VIDEO_EDITOR_TAB = ('xpath', "//a[@class='nav-button' and @aria-label='Video Editor']")
KEA_VIDEO_EDITOR_TAB_ACTIVE = ('xpath', "//a[@class='nav-button active' and @aria-label='Video Editor']")
KEA_LAUNCH = ('xpath', "//i[@class='icon-editor']")
KEA_COLLAPSE_PANEL_BUTTON = ('xpath', "//button[contains(@class,'show-hide-button show-hide-button--hide') and @aria-label='Collapse Panel']")
KEA_EXPAND_PANEL_BUTTON = ('xpath', "//button[contains(@class,'show') and @aria-label='Expand Panel']")
KEA_COLLAPSED_PLAYER_CONTAINER = ('xpath', "//div[@class='player-components__container']")
KEA_EXPANDED_PLAYER_CONTAINER = ('xpath', "//div[contains(@class,'player-components-container__expanded')]")
KEA_EXIT_BUTTON = ('xpath', "//button[@class='nav-button' and @aria-label='Exit']")
KEA_MAIN_CONTAINER = ('xpath', "//div[@class='kea-main-container']")
KEA_MAIN_CONFIRMATION_POP_UP = ('xpath', "//div[contains(@class,'ui-widget-content ui-corner-all ui-shadow')]")
KEA_MAIN_CONFIRMATION_POP_UP_SURE_BUTTON = ('xpath', "//span[@class='ui-button-text ui-clickable' and contains(text(),'m sure')]")
KEA_MAIN_CONFIRMATION_POP_UP_CANCEL_BUTTON = ('xpath', "//button[contains(@class,'link--cancel') and contains(text(),'Cancel')]")
KEA_APP_DISPLAY = ('id', 'kea-anchor')
KEA_TAB_TITLE = ('xpath', "//h1[contains(@class,'title') and contains(text(),'TAB_NAME')]")
KEA_IFRAME = ('xpath', '//iframe[@class="span12 hostedEnabled kea-frame kea-iframe-js"]')
KEA_QUIZ_PLAYER = ('id', 'quiz-player_ifp')
KEA_LOADING_SPINNER = ('class_name', 'spinner')
KEA_LOADING_CONTAINER = ('xpath', "//div[contains(@class,'loading__container')]")
KEA_MEDIA_IS_BEING_PROCESSED = ('xpath', "//div[@class='kErrorMessageText' and contains(text(),'media is processing')]")
KEA_QUIZ_QUESTION_FIELD = ('id', 'questionTxt')
KEA_QUIZ_ANSWER = ('id', 'ANSWER_NUMBER')
KEA_QUIZ_ANSWER_GENERAL = ('xpath', "//textarea[contains(@id,'answer-text')]")
KEA_EDITOR_TAB = ('xpath', "//a[@aria-label='Video Editor']")
KEA_QUIZ_TAB = ('xpath', "//a[@class='nav-button' and @aria-label='Quiz']")
KEA_QUIZ_TAB_ACTIVE = ('xpath', "//a[@class='nav-button active' and @aria-label='Quiz']")
KEA_QUIZ_ADD_ANSWER_BUTTON = ('xpath', '//div[@class="add-answer-btn"]')
KEA_QUIZ_BUTTON = ('xpath', '//span[@class="ui-button-text ui-clickable" and text()="BUTTON_NAME"]')
KEA_QUIZ_SHUFFLE_BUTTON = ('xpath', '//div[@class="shuffle-answers"]')
KEA_QUIZ_LOADING_CONTAINER = ('xpath', '//div[@class="loading-backdrop show ng-star-inserted"]')
EDITOR_TABLE = ('xpath', '//table[@class="table table-condensed table-hover mymediaTable mediaTable full"]')
EDITOR_TABLE_SIZE = ('xpath', '//table[@class="table table-condensed table-hover mymediaTable mediaTable full"]/tbody/tr')
EDITOR_NO_MORE_MEDIA_FOUND_MSG = ('xpath', '//div[@id="quizMyMedia_scroller_alert" and text()="There are no more media items."]')
EDITOR_TIMELINE = ('xpath', '//div[@class="kea-timeline-playhead" and @style="transform: translateX(PIXELpx);"]')
EDITOR_TIME_PICKER = ('xpath', "//input[@class='ui-inputtext ui-corner-all ui-state-default ui-widget ui-state-filled']")
EDITOR_TIME_PICKER_HIGHLIGHTED_CONTAINER = ('xpath', "//p-inputmask[@id='jump-to__input' and contains(@class,'focus')]")
EDITOR_REALTIME_MARKER = ('xpath', "//span[@class='realtime-marker__head-box-time']")
EDITOR_REALTIME_MARKER_CONTAINER = ('xpath', "//div[contains(@class,'realtime-marker realtime-marker--sticky realtime-marker--no-box')]")
EDITOR_TIMELINE_OPTION_RESET = ('xpath', "//button[@aria-label='Reset']")
EDITOR_TIMELINE_OPTION_UNDO = ('xpath', "//button[@aria-label='Undo']")
EDITOR_TIMELINE_OPTION_REDO = ('xpath', "//button[@aria-label='Redo']")
EDITOR_TIMELINE_SET_IN = ('xpath', "//i[contains(@class,'kicon-pin_left')]")
EDITOR_TIMELINE_SET_OUT = ('xpath', "//i[contains(@class,'kicon-pin_right')]")
EDITOR_TIMELINE_SPLIT_ICON = ('xpath', "//button[@aria-label='Split']")
EDITOR_TIMELINE_DELETE_BUTTON = ('xpath', "//button[@aria-label='Delete']")
EDITOR_SAVE_BUTTON = ('xpath', "//button[@class='button--save ui-button-secondary default-button button--editor ui-button ui-widget ui-state-default ui-corner-all ui-button-text-only']")
EDITOR_SAVE_A_COPY_BUTTON = ('xpath', "//button[@class='save-as-button branded-button button--editor ui-button ui-widget ui-state-default ui-corner-all ui-button-text-only ng-star-inserted']")
EDITOR_SAVE_BUTTON_CONF = ('xpath', "//button[@class='button modal-footer-buttons__save branded-button ui-button ui-widget ui-state-default ui-corner-all ui-button-text-only']")
EDITOR_SAVED_MSG = ('xpath', "//strong[contains(.,'Media was successfully saved.')]")
EDITOR_SAVED_OK_MSG = ('xpath', "//button[contains(.,'OK')]")
EDITOR_CREATE_BUTTON = ('xpath', "//button[contains(.,'Create')]")
EDITOR_SUCCESS_MSG = ('xpath', "//p-header[contains(.,'Success')]")
EDITOR_TOTAL_TIME = ('xpath', "//span[@class='total-time']")
EDITOR_TOTAL_TIME_TOOLBAR = ('xpath', "//span[contains(@class,'toolbar__total-time')]")
EDITOR_GO_TO_MEDIA_PAGE_BUTTON = ('xpath', "//a[contains(.,'Media Page')]")
EDITOR_SEARCH_X_BUTTON = ('xpath', "//i[@class='v2ui-close-icon']")
KEA_EDITOR_MEDIA_DETAILS_SECTION = ('xpath', "//div[@class='media-details-pane']")
KEA_EDITOR_MEDIA_DETAILS_CONTAINER = ('xpath', "//div[contains(@class,'media-details-container')]")
KEA_ENTRY_NAME = ('xpath', "//span[@class='entry-name']")
KEA_TOGGLE_MENU_OPTION = ('xpath', "//span[contains(text(),'OPTION_NAME')]")
KEA_OPTION_NORMAL = ('xpath', "//label[contains(@class,'ng-star-inserted') and text()='OPTION_NAME']")
KEA_OPTION_ACTIVE = ('xpath', "//label[contains(@class,'ui-label-active') and text()='OPTION_NAME']")
KEA_OPTION_GRAYED_OUT = ('xpath', "//label[contains(@class,'ui-label-disabled') and text()='OPTION_NAME']")
KEA_OPTION_INPUT_FIELD = ('xpath', "//input[@id='FIELD_NAME']")
KEA_OPTION_TEXTAREA_FIELD = ('xpath', "//textarea[@id='FIELD_NAME']")
KEA_PREVIEW_ICON = ('xpath', "//i[@class='kicon-preview']")
KEA_LOADING_SPINNER_CONTAINER = ('xpath', "//div[@class='spinner-container']")
KEA_LOADING_SPINNER_QUIZ_PLAYER = ('xpath', "//div[@id='loadingSpinner_quiz-player']")
KEA_PREVIEW_PLAY_BUTTON = ('xpath', "//a[@class='icon-play comp largePlayBtn largePlayBtnBorder']")
KEA_PREVIEW_CLOSE_BUTTON = ('xpath', '//i[contains(@class,"kCloseBtn")]')
KEA_IFRAME_PREVIEW_PLAYER = ('xpath', "//iframe[@class='ng-star-inserted' and contains(@src,'iframeembed=true&playerId=kaltura_player')]")
KEA_IFRAME_BLANK = ('xpath', "//iframe[@title='Kaltura Editor Application']")
KEA_QUIZ_OPTIONS_REVERT_TO_DEFAULT_BUTTON = ('xpath', "//button[@class='link-button pull-right ui-button ui-widget ui-state-default ui-corner-all ui-button-text-only']")
KEA_TIMELINE_SECTION_CONTAINER = ('xpath', "//div[@class='markers-container']")
KEA_TIMELINE_SECTION_QUESTION_BUBBLE_CONTAINER = ('xpath', "//div[@class='kea-timeline-stacked-item kea-timeline-cuepoint']")
KEA_TIMELINE_SECTION_QUESTION_BUBBLE = ('xpath', "//i[@class='kicon-quiz-cuepoint-inner']")
KEA_TIMELINE_SECTION_QUESTION_BUBBLE_TITLE = ('xpath', "//p[@class='question-tooltip__content']")
KEA_TIMELINE_SECTION_QUESTION_BUBBLE_QUESTION_NUMBER = ('xpath', "//span[@class='question-tooltip__header__content']")
KEA_TIMELINE_SECTION_QUESTION_BUBBLE_QUESTION_TIMESTAMP = ('xpath', "//span[@class='question-tooltip__header__duration']")
KEA_TIMELINE_SECTION_TOTAL_QUESTION_NUMBER = ('xpath', "//span[@class='ng-tns-c14-1 ng-star-inserted' and contains(text(),'Total Q: QUESTION_NUMBER')]")
KEA_TIMELINE_PRESENTED_SECTIONS = ('xpath', "//div[contains(@class,'kea-timeline-stacked-item') and contains(@style,'background-image')]")
KEA_TIMELINE_SECTION_DRAG_HAND = ('xpath', "//div[@class='answer-drag-handle']")
KEA_PLAYER_CONTROLS_PLAY_BUTTON = ('xpath', "//button[@class='player-control player-control__play-pause' and @aria-label='Play']")
KEA_PLAYER_CONTROLS_PAUSE_BUTTON = ('xpath', "//button[@class='player-control player-control__play-pause' and @aria-label='Pause']")
KEA_PLAYER_CONTROLS_NEXT_ARROW_BUTTON = ('xpath', "//span[@class='arrows arrow-next']")
KEA_PLAYER_CONTROLS_PREVIOUS_ARROW_BUTTON = ('xpath', "//span[@class='arrows arrow-back']")
KEA_TIMELINE_CONTROLS_ZOOM_LEVEL_POINTER = ('xpath', "//span[@class='ui-slider-handle ui-state-default ui-corner-all ui-clickable ng-star-inserted']")
KEA_TIMELINE_CONTROLS_ZOOM_LEVEL_POINTER_VALUE = ('xpath', "//span[@class='ui-slider-handle ui-state-default ui-corner-all ui-clickable ng-star-inserted' and @style='left: VALUE%;']")
KEA_TIMELINE_CONTROLS_ZOOM_OUT_BUTTON = ('xpath', "//button[contains(@class,'zoom_button') and @aria-label='Zoom out']")
KEA_TIMELINE_CONTROLS_ZOOM_IN_BUTTON = ('xpath', "//button[contains(@class,'zoom_button') and @aria-label='Zoom in']")
KEA_TIMELINE_CONTROLS_ZOOM_IN_TOOLTIP = ('xpath', "//div[@class='ui-tooltip-text ui-shadow ui-corner-all' and text()='Zoom in']")
KEA_TIMELINE_CONTROLS_PLAY_BUTTON = ('xpath', "//div[contains(@class,'ui-tooltip-text ui-shadow ui-corner-all')]")
KEA_CONFIRMATION_POP_UP_CONTINUE = ('xpath', "//button[contains(@class,'button') and text()='Continue']")
KEA_CONFIRMATION_POP_UP_TITLE = ('xpath', "//div[@class='kErrorMessageTitle']")
KEA_CONFIRMATION_POP_UP_CONTAINER = ('xpath', "//div[@class='kErrorMessage']")
KEA_CONFIRMATION_POP_UP_CANCEL_BUTTON = ('xpath', "//button[contains(@class,'button--cancel') and text()='Cancel']")
KEA_CONFIRMATION_POP_UP_OK_BUTTON = ('xpath', "//button[contains(@class,'button--ok') and text()='OK']")
KEA_HOTSPOTS_URL_INPUT_ERROR = ('xpath', '//div[contains(@class,"url-input")]')
KEA_HOTSPOTS_TAB = ('xpath', "//a[@class='nav-button' and @aria-label='Hotspots']")
KEA_HOTSPOTS_TAB_ACTIVE = ('xpath', "//a[@class='nav-button active' and @aria-label='Hotspots']")
KEA_HOTSPOTS_ADD_NEW_BUTTON = ('xpath', '//span[@class="ui-button-text ui-clickable" and text()="Add Hotspot"]')
KEA_HOTSPOTS_DONE_BUTTON_ADVANCED_SETTINGS = ('xpath', '//span[@class="ui-button-text ui-clickable" and text()="Done"]')
KEA_HOTSPOTS_DONE_BUTTON_NORMAL = ('xpath', '//button[contains(@class,"btn btn-save pull-right")]')
KEA_HOTSPOTS_SAVE_BUTTON = ('xpath', '//span[@class="ui-button-text ui-clickable" and text()="Save"]')
KEA_HOTSPOTS_SAVE_BUTTON_PARENT = ('xpath', '//button[contains(@class,"ui-button-text-only")]')
KEA_HOTSPOTS_CANCEL_BUTTON = ('xpath', '//span[@class="ui-button-text ui-clickable" and text()="Cancel"]')
KEA_HOTSPOTS_ADVANCED_SETTINGS = ('xpath', '//button[@class="form-button" and text()="Advanced Settings"]')
KEA_HOTSPOTS_TOOL_TIP_CREATION_CANCEL_BUTTON = ('xpath', '//button[contains(@class,"form-button") and text()="Cancel"]')
KEA_HOTSPOTS_TOOL_TIP_CREATION_CONTAINER = ('xpath', '//div[@class="form-horizontal"]')
KEA_HOTSPOTS_FORM_TEXT_INPUT_FIELD = ('xpath', '//input[@id="inputText"]')
KEA_HOTSPOTS_FORM_LINK_INPUT_FIELD = ('xpath', '//input[@id="inputUrl"]')
KEA_HOTSPOTS_FORM_LINK_INPUT_FIELD_TIME = ('xpath', '//input[@id="jumpTo"]')
KEA_HOTSPOTS_FORM_LINK_TYPE_URL = ('xpath', "//label[contains(@class,'click-type__label') and text()='URL']")
KEA_HOTSPOTS_FORM_LINK_TYPE_TIME = ('xpath', "//label[contains(@class,'click-type__label') and text()='Time in this video']")
KEA_HOTSPOTS_FORM_TEXT_STYLE = ('xpath', '//label[contains(@class,"ui-dropdown-label ui-inputtext")]')
KEA_HOTSPOTS_FORM_TEXT_STYLE_VALUE = ('xpath', '//span[contains(@class,"ng-star-inserted") and text()="TEXT_STYLE"]')
KEA_HOTSPOTS_FORM_COLOR = ('xpath', '//div[@class="sp-preview-inner"]')
KEA_HOTSPOTS_FORM_COLOR_VALUE = ('xpath', '//input[@class="sp-input"]')
KEA_HOTSPOTS_FORM_TEXT_SIZE = ('xpath', '//input[@id="textSize"]')
KEA_HOTSPOTS_FORM_ROUNDNESS = ('xpath', '//input[@id="roundness"]')
KEA_HOTSPOTS_FORM_LOCATION_X = ('xpath', '//input[@id="position-x"]')
KEA_HOTSPOTS_FORM_LOCATION_Y = ('xpath', '//input[@id="position-y"]')
KEA_HOTSPOTS_FORM_SIZE_WIDTH = ('xpath', '//input[@id="size-width"]')
KEA_HOTSPOTS_FORM_SIZE_HEIGHT = ('xpath', '//input[@id="size-height"]')
KEA_HOTSPOTS_FORM_START_TIME = ('xpath', '//input[@id="startTime"]')
KEA_HOTSPOTS_FORM_END_TIME = ('xpath', '//input[@id="endTime"]')
KEA_HOTSPOTS_LIST_HEADER = ('xpath', "//div[@class='panel__header']")
KEA_HOTSPOTS_LIST_CONTENT = ('xpath', "//div[@class='panel__content']")
KEA_HOTSPOTS_LIST_PANEL_HOTSPOT = ('xpath', "//kea-hotspots-list-item[contains(@class,'ng-star-inserted')]")
KEA_HOTSPOTS_PLAYER_BUTTON = ('xpath', "//div[@class='hotspot__button']")
KEA_HOTSPOTS_PLAYER_HOTSPOT_CONTAINER = ('xpath', "//div[contains(@class,'hotspot__container ui-draggable ui-draggable-handle')]")
KEA_HOTSPOTS_PLAYER_HOTSPOT_CONTAINER_SELECTED = ('xpath', "//div[contains(@class,'selected ui-resizable')]")
KEA_HOTSPOTS_PLAYER_ADD_HOTSPOT_TOOLTIP = ('xpath', "//span[@class='message__text']")
KEA_HOTSPOTS_PANEL_ITEM_TITLE = ('xpath', "//div[contains(@class,'panel-item__title')]")
KEA_HOTSPOTS_PANEL_ITEM_LINK = ('xpath', "//div[contains(@class,'panel-item__link')]")
KEA_HOTSPOTS_PANEL_MORE_HAMBURGER_MENU = ('xpath', "//i[@class='kicon-more']")
KEA_HOTSPOTS_PANEL_ACTION_MENU_CONTAINER = ('xpath', "//div[contains(@class,'hotspot-action ui-menu ui-widget')]")
KEA_HOTSPOTS_PANEL_ACTION_MENU_DUPLICATE = ('xpath', "//span[@class='ui-menuitem-text' and text()='Duplicate']")
KEA_HOTSPOTS_PANEL_ACTION_MENU_EDIT = ('xpath', "//span[@class='ui-menuitem-text' and text()='Edit']")
KEA_HOTSPOTS_PANEL_ACTION_MENU_DELETE = ('xpath', "//span[@class='ui-menuitem-text' and text()='Delete']")
KEA_HOTSPOTS_DELETE_POP_UP_CONFIRMATION_BUTTON = ('xpath', "//button[contains(@class,'ng-star-inserted') and text()='Delete Hotspot']")
KEA_TIMELINE_SECTION_HOTSPOT_CONTAINER = ('xpath', '//div[contains(@class,"kea-timeline-stacked-item kea-timeline-stacked-item--audio-disabled")]')
KEA_TIMELINE_SECTION_HOTSPOT_DRAG_CONTAINER_RIGHT = ('xpath', '//div[contains(@class,"handle--right content-item__handle--selected")]')
KEA_TIMELINE_SECTION_HOTSPOT_DRAG_CONTAINER_LEFT = ('xpath', '//div[contains(@class,"handle--left content-item__handle--selected")]')
KEA_TIMELINE_SECTION_HOTSPOT_TRIM_EDGE_BUTTONS = ('xpath', '//i[contains(@class,"kicon-trim_handle content-item__handle-icon")]')
KEA_PLAYER_CONTAINER = ('xpath', '//div[@class="player-container"]')
KEA_ADD_NEW_OPEN_QUESTION_BUTTON = ('xpath', "//button[contains(@class,'open-question-question-type')]")
KEA_ADD_NEW_OPEN_QUESTION_BUTTON_ACTIVE = ('xpath', "//button[contains(@class,'open-question-question-type ng-star-inserted active')]")
KEA_ALLOW_MULTIPLE_ATTEMPTS_OPTION_GRAYED_OUT = ('xpath', '//label[@class="ui-chkbox-label ng-star-inserted" and text()="Allow Multiple Attempts"]')
KEA_NUMBER_OF_ALLOW_ATTEMPTS = ('xpath', '//input[@name="attemptsAllowed"]')
KEA_SCORE_TYPE_DROP_DOWN = ('xpath', '//label[contains(@class,"ui-dropdown-label ui-inputtext ui-corner-all ng-star-inserted")]')
KEA_SCORE_TYPE_OPTION = ('xpath', '//span[contains(@class, "ng-star-inserted") and text()="SCORE_TYPE"]')
#============================================================================================================
# @Author: Inbar Willman
    def navigateToEditorMediaSelection(self, forceNavigate = False):
        """Open KMS's media-selection page (the entry point to the editor).

        When forceNavigate is False and the browser is already on the
        media-selection URL, nothing is done. Otherwise navigate via the
        Add New -> Video Quiz flow. Returns True on success.
        """
        # Check if we are already in my media selection page
        if forceNavigate == False:
            if self.verifyUrl(localSettings.LOCAL_SETTINGS_KMS_MEDIA_SELECTION_URL, False, 1) == True:
                writeToLog("INFO","Success Already in media selection page")
                return True
        # Click on add new drop down
        if self.clsCommon.upload.addNewVideoQuiz() == False:
            writeToLog("INFO","Failed to navigate to media selection page")
            return False
        return True
# @Author: Inbar Willman
# This method search for entry in media selection page and then opening KEA for the selected entry
    def searchAndSelectEntryInMediaSelection(self, entryName, forceNavigate=True):
        """Search `entryName` (exact, quoted match) in the media-selection
        page and open it in KEA.

        Retries the search once after clearing it if the entry isn't
        found on the first attempt, then waits for the KEA app and its
        loading overlay. Returns True on success, False otherwise.
        """
        # Navigate to media selection page
        if self.navigateToEditorMediaSelection(forceNavigate) == False:
            return False
        # Click on search bar and search for entry (quoted for an exact match)
        self.clsCommon.myMedia.getSearchBarElement().click()
        self.clsCommon.myMedia.getSearchBarElement().send_keys('"' + entryName + '"')
        self.clsCommon.general.waitForLoaderToDisappear()
        sleep(6)
        # First attempt failed: the entry may still be indexing, so clear and retry once.
        if self.wait_element(self.clsCommon.myMedia.MY_MEDIA_NO_ENTRIES_FOUND, 1, True) != False:
            writeToLog("INFO", "FAILED to find the " + entryName + " within the first try...")
            sleep(10)
            if self.click(self.EDITOR_SEARCH_X_BUTTON, 1, True) == False:
                writeToLog("INFO", "FAILED to click on the X button in order to clear the search")
                return False
            self.clsCommon.general.waitForLoaderToDisappear()
            self.clsCommon.myMedia.getSearchBarElement().click()
            self.clsCommon.myMedia.getSearchBarElement().send_keys('"' + entryName + '"')
            self.clsCommon.general.waitForLoaderToDisappear()
            if self.wait_element(self.clsCommon.myMedia.MY_MEDIA_NO_ENTRIES_FOUND, 1, True) != False:
                writeToLog("INFO", "FAILED, the " + entryName + " couldn't be found inside the Editor after two tries")
                return False
        # Click on select button in order to open KEA
        if self.click(self.KEA_SELECT_VIDEO_FOR_EDIT) == False:
            writeToLog("INFO","FAILED to select entry and open KEA")
            return False
        sleep(4)
        # Verify that we are in KEA page and app is displayed
        if self.wait_visible(self.KEA_APP_DISPLAY, 40) == False:
            writeToLog("INFO","FAILED to display KEA page")
            return False
        if self.wait_while_not_visible(self.KEA_LOADING_CONTAINER, 60) == False:
            writeToLog("INFO", "FAILED to wait until the KEA Loading container disappeared")
            return False
        return True
# @Author: Inbar Willman
    def startQuiz(self):
        """Switch into the KEA iframe and click Start to begin quiz creation."""
        self.switchToKeaIframe()
        # Click start button to start quiz
        if self.keaQuizClickButton(enums.KeaQuizButtons.START) == False:
            writeToLog("INFO","FAILED to click start quiz")
            return False
        # Wait for the loading spinner to clear before the caller interacts further.
        if self.wait_while_not_visible(self.KEA_LOADING_SPINNER, 50) == False:
            writeToLog("INFO","FAILED to wait until spinner isn't visible")
            return False
        return True
# @Author: Inbar Willman
    def addQuizQuestion(self, questionText, answerText, additionalAnswerList):
        """Create a 'Multiple Choice' question with the given texts and save it."""
        # if self.startQuiz() == False:
        # writeToLog("INFO","FAILED to click start quiz")
        # return False
        # HACK: fixed wait for the quiz UI to become ready — consider an explicit wait.
        sleep(60)
        self.switchToKeaIframe()
        # Enter in 'Multiple Choice' KEA Quiz Question screen
        if self.selectQuestionType(enums.QuizQuestionType.Multiple) == False:
            writeToLog("INFO", "FAILED to enter in " + enums.QuizQuestionType.Multiple.value + " Quiz Question screen")
            return False
        # Add question fields
        if self.fillQuizFields(questionText, answerText, additionalAnswerList) == False:
            writeToLog("INFO","FAILED to fill question fields")
            return False
        # Save Question
        if self.saveQuizChanges() == False:
            writeToLog("INFO", "FAILED to save the changes")
            return False
        return True
# @Author: Inbar Willman
def switchToKeaIframe(self):
if localSettings.TEST_CURRENT_IFRAME_ENUM == enums.IframeName.KEA:
return True
else:
localSettings.TEST_CURRENT_IFRAME_ENUM = enums.IframeName.KEA
self.swith_to_iframe(self.KEA_IFRAME)
return True
# @Author: Inbar Willman
def switchToKeaQuizPlayer(self):
if localSettings.TEST_CURRENT_IFRAME_ENUM == enums.IframeName.KEA_QUIZ_PLAYER:
return True
else:
localSettings.TEST_CURRENT_IFRAME_ENUM = enums.IframeName.KEA_QUIZ_PLAYER
self.swith_to_iframe(self.KEA_QUIZ_PLAYER)
return True
# @Author: Inbar Willman
def keaQuizClickButton(self, buttonName):
tmpButton = (self.KEA_QUIZ_BUTTON[0], self.KEA_QUIZ_BUTTON[1].replace('BUTTON_NAME', buttonName.value))
if self.wait_visible(tmpButton, 60, True) == False:
writeToLog("INFO", "FAILED to display " + buttonName.value + " button")
return False
if self.click(tmpButton) == False:
writeToLog("INFO","FAILED to click on " + buttonName.value + " button")
return False
return True
# @Author: Inbar Willman
def fillQuizFields(self, questionText, answerText, additionalAnswerList=''):
# Fill question text
if self.click(self.KEA_QUIZ_QUESTION_FIELD) == False:
writeToLog("INFO","FAILED to click on question text field")
return False
if self.send_keys(self.KEA_QUIZ_QUESTION_FIELD, questionText) == False:
writeToLog("INFO","FAILED to fill question field")
return False
# Fill First answer
tmpFirstAnswer = (self.KEA_QUIZ_ANSWER[0], self.KEA_QUIZ_ANSWER[1].replace('ANSWER_NUMBER', 'answer-text0'))
if self.click(tmpFirstAnswer) == False:
writeToLog("INFO","FAILED to click on first answer text field")
return False
if self.send_keys(tmpFirstAnswer, answerText) == False:
writeToLog("INFO","FAILED to fill first answer text field")
return False
# We verify if we want to user additional answers
if additionalAnswerList != '':
# Fill second answer if there are just two answers
if len(additionalAnswerList) == 1:
tmpSecondAnswer = (self.KEA_QUIZ_ANSWER[0], self.KEA_QUIZ_ANSWER[1].replace('ANSWER_NUMBER', 'answer-text1'))
if self.click(tmpSecondAnswer) == False:
writeToLog("INFO","FAILED to click on second answer text field")
return False
if self.send_keys(tmpSecondAnswer, additionalAnswerList[0]) == False:
writeToLog("INFO","FAILED to fill second answer text field")
return False
else:
i = 1
for answer in additionalAnswerList:
tmpAnswer = (self.KEA_QUIZ_ANSWER[0], self.KEA_QUIZ_ANSWER[1].replace('ANSWER_NUMBER', 'answer-text' + str(i)))
if self.click(tmpAnswer) == False:
writeToLog("INFO","FAILED to click on " + i +"th answer text field")
return False
if self.send_keys(tmpAnswer, answer) == False:
writeToLog("INFO","FAILED to fill " + i +"th answer text field")
return False
# Check if in the last answer, if not click add quiz button
if len(additionalAnswerList) != i:
if self.click(self.KEA_QUIZ_ADD_ANSWER_BUTTON) == False:
writeToLog("INFO","FAILED click add answer button")
return False
i = i + 1
return True
# @Author: Inbar Willman
# After creating quiz, click done.
# After that there are two options - Click 'Go to media page' or 'Edit Quiz'.
# Default value is 'Go To Media page'
def clickDone(self, doneOption=enums.KeaQuizButtons.GO_TO_MEDIA_PAGE):
if self.keaQuizClickButton(enums.KeaQuizButtons.DONE) == False:
writeToLog("INFO","FAILED to click Done button")
return False
# if self.wait_while_not_visible(self.KEA_LOADING_SPINNER, 30) == False:
# writeToLog("INFO","FAILED to wait until spinner isn't visible")
# return False
# Until we catch the locator of the overlay we are going to use sleep
sleep(5)
if doneOption == enums.KeaQuizButtons.GO_TO_MEDIA_PAGE:
if self.keaQuizClickButton(enums.KeaQuizButtons.GO_TO_MEDIA_PAGE) == False:
writeToLog("INFO","FAILED to click go to media page button")
return False
self.switch_to_default_content()
elif doneOption == enums.KeaQuizButtons.EDIT_QUIZ:
if self.keaQuizClickButton(enums.KeaQuizButtons.EDIT_QUIZ) == False:
writeToLog("INFO","FAILED to click edit quiz button")
return False
sleep(3)
else:
writeToLog("INFO","FAILED, unknown doneoption: '" + doneOption + "'")
return False
sleep (8)
return True
# @Author: Inbar Willman
# The function check and verify that the entries sort in my media are in the correct order
def verifySortInEditor(self, sortBy, entriesList):
if self.clsCommon.myMedia.SortAndFilter(enums.SortAndFilter.SORT_BY,sortBy) == False:
writeToLog("INFO","FAILED to sort entries")
return False
if self.clsCommon.myMedia.showAllEntries(searchIn=enums.Location.EDITOR_PAGE) == False:
writeToLog("INFO","FAILED to show all entries in editor page")
return False
sleep(10)
try:
entriesInMyMedia = self.wait_visible(self.EDITOR_TABLE).text.lower()
except NoSuchElementException:
writeToLog("INFO","FAILED to get entries list in galley")
return False
entriesInMyMedia = entriesInMyMedia.split("\n")
# run over the list and delete tab before the entry name
for idx, entry in enumerate(entriesInMyMedia):
entriesInMyMedia[idx] = entry.lstrip()
if self.clsCommon.myMedia.verifySortOrder(entriesList, entriesInMyMedia) == False:
writeToLog("INFO","FAILED ,sort by '" + sortBy.value + "' isn't correct")
return False
writeToLog("INFO","Success, My media sort by '" + sortBy.value + "' was successful")
return True
# @Author: Inbar Willman
# The function check and verify that the entries sort in my media are in the correct order
def verifyFiltersInEditor(self, entriesDict, noEntriesExpected=False):
if noEntriesExpected == True:
if self.wait_element(self.clsCommon.myMedia.ENTRY_NO_MEDIA_FOUND_MESSAGE, 1, multipleElements=True) != False:
writeToLog("INFO", "PASSED, no entries are displayed")
return True
else:
writeToLog("INFO", "Some entries are present, we will verify the dictionaries")
if self.clsCommon.myMedia.showAllEntries(searchIn=enums.Location.EDITOR_PAGE) == False:
writeToLog("INFO","FAILED to show all entries in editor page")
return False
sleep(10)
try:
entriesInEditor = self.wait_visible(self.EDITOR_TABLE).text.lower()
except NoSuchElementException:
writeToLog("INFO","FAILED to get entries list in galley")
return False
for entry in entriesDict:
#if entry[1] == True:
if entriesDict[entry] == True:
#if entry[0].lower() in entriesInAddToChannel == False:
if (entry.lower() in entriesInEditor) == False:
writeToLog("INFO","FAILED, entry '" + entry + "' wasn't found in editor page although he need to be found")
return False
#elif entry[1] == False:
if entriesDict[entry] == False:
# if entry[0].lower() in entriesInAddToChannel == True:
if (entry.lower() in entriesInEditor) == True:
writeToLog("INFO","FAILED, entry '" + entry + "' was found in editor page although he doesn't need to be found")
return False
writeToLog("INFO","Success, Only the correct media display in channel - pending tab")
return True
# @Author: Tzachi guetta
def launchKEA(self, entryName, navigateTo, navigateFrom, isCreateClippingPermissionIsOn=False):
if isCreateClippingPermissionIsOn == False:
if self.clsCommon.navigateTo(navigateTo, navigateFrom, entryName) == False:
return False
if navigateTo == enums.Location.EDIT_ENTRY_PAGE:
if self.click(self.KEA_LAUNCH) == False:
writeToLog("INFO","FAILED to click on KEA launch button")
return False
elif navigateTo == enums.Location.ENTRY_PAGE:
if self.clsCommon.entryPage.waitTillMediaIsBeingProcessed() == False:
writeToLog("INFO", "FAILED to wait until the " + entryName + " has been processed")
return False
self.click(self.clsCommon.entryPage.ENTRY_PAGE_DETAILS_BUTTON)
self.get_body_element().send_keys(Keys.PAGE_DOWN)
sleep(4)
if self.click(self.clsCommon.entryPage.ENTRY_PAGE_ACTIONS_DROPDOWNLIST) == False:
writeToLog("INFO","FAILED to click on Actions button (at entry page)")
return False
sleep(3.5)
if isCreateClippingPermissionIsOn == True:
if self.click(self.clsCommon.entryPage.ENTRY_PAGE_ACTIONS_DROPDOWNLIST_CREATE_CLIP_BUTTON) == False:
writeToLog("INFO","FAILED to click on Actions -> 'Create clip' button (at entry page)")
else:
if self.click(self.clsCommon.entryPage.ENTRY_PAGE_ACTIONS_DROPDOWNLIST_KEA_BUTTON) == False:
writeToLog("INFO","FAILED to click on Actions -> Launch KEA button (at entry page)")
return False
# Verify that we are in KEA page and app is displayed
if self.wait_visible(self.KEA_APP_DISPLAY, 40) == False:
writeToLog("INFO","FAILED to display KEA page")
return False
#sleeping two seconds in order to make sure that the loading screen is no longer present
sleep(2)
if isCreateClippingPermissionIsOn == True:
if self.verifyEditorForClippingPermission() == False:
writeToLog("INFO","FAILED to display just relevant editor buttons")
return False
sleep(3)
# We wait until the KEA page is successfully loaded
if self.wait_while_not_visible(self.KEA_LOADING_SPINNER, 45) == False:
writeToLog("INFO","FAILED to wait until spinner isn't visible")
return False
sleep(5)
if self.wait_while_not_visible(self.KEA_MEDIA_IS_BEING_PROCESSED, 120) == False:
writeToLog("INFO", "FAILED to wait until the " + entryName + " has been processed during the launch kea")
return False
writeToLog("INFO","Success, KEA has been launched for: " + entryName)
return True
    # @Author: Tzachi guetta
    # interface to KEA's timeline functionalities: split, (Fade IN\out - TBD)
    def editorTimelineActions(self, startTime, endTime='', openEditorTab=False, timelineAction=None):
        """Drive the KEA editor timeline.

        startTime/endTime: time strings (e.g. "00:10") handed to setEditorStartTime.
        openEditorTab: when True, click the Editor tab before acting on the timeline.
        timelineAction: one of enums.KeaEditorTimelineOptions —
            DELETE  — split at startTime and at endTime, then delete the isolated section
            SPLIT   — split once at startTime
            SET_IN  — mark startTime as the clip's in point
            SET_OUT — mark startTime as the clip's out point
        Returns True on success, False (after logging) otherwise.
        """
        self.switchToKeaIframe()
        if openEditorTab == True:
            if self.click(self.KEA_EDITOR_TAB, 45) == False:
                writeToLog("INFO","FAILED to click on Editor Tab")
                return False
        # Move the playhead to the first edit point
        if self.setEditorStartTime(startTime) == False:
            writeToLog("INFO", "FAILED to select the split start point")
            return False
        sleep(1)
        if timelineAction == enums.KeaEditorTimelineOptions.DELETE:
            if self.click(self.EDITOR_TIMELINE_SPLIT_ICON) == False:
                writeToLog("INFO","FAILED to click Split icon (time-line)")
                return False
            sleep(1)
            # Move to the second edit point and split again so the middle section is isolated
            if self.setEditorStartTime(endTime) == False:
                return False
            sleep(1)
            if self.click(self.EDITOR_TIMELINE_SPLIT_ICON) == False:
                writeToLog("INFO","FAILED to click Split icon (time-line)")
                return False
            sleep(1)
            # Delete the section currently selected between the two split points
            if self.click(self.EDITOR_TIMELINE_DELETE_BUTTON) == False:
                writeToLog("INFO","FAILED to click delete icon (time-line)")
                return False
        elif timelineAction == enums.KeaEditorTimelineOptions.SPLIT:
            if self.click(self.EDITOR_TIMELINE_SPLIT_ICON) == False:
                writeToLog("INFO","FAILED to click Split icon (time-line)")
                return False
            sleep(1)
        elif timelineAction == enums.KeaEditorTimelineOptions.SET_IN:
            if self.click(self.EDITOR_TIMELINE_SET_IN, 1, True) == False:
                writeToLog("INFO", "FAILED to click on the Set In option")
                return False
        elif timelineAction == enums.KeaEditorTimelineOptions.SET_OUT:
            if self.click(self.EDITOR_TIMELINE_SET_OUT, 1, True) == False:
                writeToLog("INFO", "FAILED to click on the Set Out option")
                return False
        else:
            writeToLog("INFO", "FAILED, please make sure that you used a supported timeline action")
            return False
        writeToLog("INFO", "Timeline section has been successfully " + timelineAction.value + " edited")
        return True
# @Author: Oleg Sigalov
# Helper method to set the start time in KEA editor, under the left bottom player corner
# splitStartTime: String to set the time, example: "00:10" represents 10 seconds
def setEditorStartTime(self, splitStartTime):
if self.click(self.EDITOR_TIME_PICKER) == False:
writeToLog("INFO","FAILED to click on input field")
return False
# send_keys doesn't work, use instead:
self.driver.execute_script("arguments[0].value='" + splitStartTime + "'", self.wait_element(self.EDITOR_TIME_PICKER))
if self.send_keys(self.EDITOR_TIME_PICKER, Keys.ENTER) == False:
writeToLog("INFO","FAILED to send Enter to input field")
return False
# Verify marker moved correctly
markerElement = self.wait_element(self.EDITOR_REALTIME_MARKER)
if markerElement == False:
writeToLog("INFO","FAILED to get the marker element")
return False
if markerElement.text != splitStartTime + ".00":
writeToLog("INFO","FAILED to set marker to:" + splitStartTime)
return False
return True
    # @Author: Tzachi guetta
    # Currently support split only
    # expectedEntryDuration = the duration of the new entry
    def trimEntry(self, entryName, startTime, endTime, expectedEntryDuration, navigateTo, navigateFrom, openEditorTab=False, isCreateClippingPermissionIsOn=False, timelineAction=enums.KeaEditorTimelineOptions.DELETE):
        """Trim entryName in place via the KEA timeline and verify the resulting duration.

        Launches KEA, applies timelineAction between startTime and endTime, saves WITHOUT
        creating a copy (saveCopy=False), and checks that the editor's total-time label
        contains expectedEntryDuration. Returns True on success, False otherwise.
        """
        if self.launchKEA(entryName, navigateTo, navigateFrom, isCreateClippingPermissionIsOn) == False:
            writeToLog("INFO","Failed to launch KEA for: " + entryName)
            return False
        sleep(2)
        # KAF-based applications expose their own refresh button; MediaSpace uses a browser refresh
        if localSettings.LOCAL_SETTINGS_APPLICATION_UNDER_TEST != enums.Application.MEDIA_SPACE:
            self.click(self.clsCommon.kafGeneric.KAF_REFRSH_BUTTON)
        else:
            self.refresh()
        sleep(3)
        self.switchToKeaIframe()
        sleep(6)
        if self.editorTimelineActions(startTime, endTime, openEditorTab, timelineAction) == False:
            writeToLog("INFO","FAILED to split the entry: " + str(entryName))
            return False
        sleep(1)
        # saveCopy=False => modify the original entry rather than creating a clip
        if self.saveEditorChanges(saveCopy=False)== False:
            writeToLog("INFO","FAILED to save the entry changes from KEA Editor Timeline")
            return False
        # Read the trimmed duration from the editor before leaving the iframe
        entryDuration = self.get_element_text(self.EDITOR_TOTAL_TIME, 10)
        self.switch_to_default_content()
        if expectedEntryDuration in entryDuration:
            writeToLog("INFO","Success, Entry: " + entryName +", was trimmed, the new entry Duration is: " + expectedEntryDuration)
            return True
        writeToLog("INFO","FAILED, Entry: " + entryName +", was trimmed, but the new entry Duration is not as expected : " + entryDuration + " instead of :" + expectedEntryDuration)
        return False
    # @Author: Horia Cus
    # Show all entries in quiz page
    def showAllEntriesInAddQuizPage(self, timeOut=60):
        """Scroll the add-quiz media page until every entry is loaded.

        Repeatedly sends END to trigger lazy loading until the 'no more results'
        alert appears or timeOut seconds elapse. Returns True when everything is
        displayed, False on timeout or lookup failure.
        """
        # Get all entries in results
        try:
            tmpResultsList = self.get_elements(self.clsCommon.globalSearch.GLOBAL_SEARCH_ENTRY_RESUTLT_ROW)
        except NoSuchElementException:
            writeToLog("INFO","FAILED to get entries in results")
            return False
        # Fewer than 4 rows fit on screen without scrolling, so nothing more to load
        if len(tmpResultsList) < 4:
            writeToLog("INFO","Success, All media in global page are displayed")
            return True
        else:
            self.clsCommon.sendKeysToBodyElement(Keys.END)
            # Poll for the end-of-results alert until the deadline passes
            wait_until = datetime.datetime.now() + datetime.timedelta(seconds=timeOut)
            while wait_until > datetime.datetime.now():
                if self.is_present(self.clsCommon.globalSearch.GLOBAL_SEARCH_NO_RESULTS_ALERT_QUIZ, 2) == True:
                    writeToLog("INFO","Success, All media in global page are displayed")
                    sleep(1)
                    # go back to the top of the page
                    self.clsCommon.sendKeysToBodyElement(Keys.HOME)
                    return True
                self.clsCommon.sendKeysToBodyElement(Keys.END)
            writeToLog("INFO","FAILED to show all media")
            return False
    # @Author: Horia Cus
    # The function checks that the entries in my media are filtered correctly
    def verifyFiltersInAddQuizPage(self, entriesDict, noEntriesExpected=False):
        """Verify that the add-quiz media page shows exactly the expected entries.

        entriesDict: maps an entry name to True (must appear) / False (must not appear).
        noEntriesExpected: when True, an empty-results message satisfies the check.
        Returns True when the results match the expectations, False otherwise.
        """
        if noEntriesExpected == True:
            if self.wait_element(self.clsCommon.myMedia.ENTRY_NO_MEDIA_FOUND_MESSAGE, 1, multipleElements=True) != False:
                writeToLog("INFO", "PASSED, no entries are displayed")
                return True
            else:
                writeToLog("INFO", "Some entries are present, we will verify the dictionaries")
        # Lazy-load all results so the comparison sees the full list
        if self.showAllEntriesInAddQuizPage() == False:
            writeToLog("INFO","FAILED to show all entries in global page")
            return False
        try:
            # Get list of all entries element in results
            entriesInGlobalPage = self.get_elements(self.clsCommon.globalSearch.GLOBAL_SEARCH_ENTRY_RESUTLT_NAME)
            listOfEntriesInResults = []
            # Get text of each entry element and add to a new list
            for entry in entriesInGlobalPage:
                # NOTE(review): the next line is a no-op (result discarded) — left as-is
                entry.text.lower()
                listOfEntriesInResults.append(entry.text.lower())
        except NoSuchElementException:
            writeToLog("INFO","FAILED to get entries list")
            return False
        for entry in entriesDict:
            #if entry[1] == True:
            if entriesDict[entry] == True:
                #if entry[0].lower() in entriesInMyMedia == False:
                if (entry.lower() in listOfEntriesInResults) == False:
                    writeToLog("INFO","FAILED, entry '" + entry + "' wasn't found in global page results although he need to be found")
                    return False
            #elif entry[1] == False:
            if entriesDict[entry] == False:
                # if entry[0].lower() in entriesInMyMedia == True:
                if (entry.lower() in listOfEntriesInResults) == True:
                    writeToLog("INFO","FAILED, entry '" + entry + "' was found in global page results although he doesn't need to be found")
                    return False
        writeToLog("INFO","Success, Only the correct media display in global page")
        return True
    # @Author: Tzachi guetta
    # Currently support split only
    # expectedEntryDuration = the duration of the new entry
    def clipEntry(self, entryName, startTime, endTime, expectedEntryDuration, navigateTo, navigateFrom, openEditorTab=False, isCreateClippingPermissionIsOn=False, timelineAction=enums.KeaEditorTimelineOptions.DELETE):
        """Create a clip from entryName via the KEA timeline and verify the clip's duration.

        Unlike trimEntry, this saves a COPY (saveCopy=True), navigates to the new media
        page and reads the duration from the player. Returns True when the player duration
        contains expectedEntryDuration, False otherwise.
        """
        if self.launchKEA(entryName, navigateTo, navigateFrom, isCreateClippingPermissionIsOn) == False:
            writeToLog("INFO","Failed to launch KEA for: " + entryName)
            return False
        sleep(2)
        # KAF-based applications expose their own refresh button; MediaSpace uses a browser refresh
        if localSettings.LOCAL_SETTINGS_APPLICATION_UNDER_TEST != enums.Application.MEDIA_SPACE:
            self.click(self.clsCommon.kafGeneric.KAF_REFRSH_BUTTON)
        else:
            self.refresh()
        sleep(3)
        self.switchToKeaIframe()
        sleep(6)
        if self.editorTimelineActions(startTime, endTime, openEditorTab, timelineAction) == False:
            return False
        sleep(1)
        # saveCopy=True => keep the original and create a new clipped entry
        if self.saveEditorChanges(saveCopy=True) == False:
            writeToLog("INFO","FAILED to save a copy based on the changes that were performed to the KEA Editor timeline")
            return False
        sleep(1)
        if self.click(self.EDITOR_GO_TO_MEDIA_PAGE_BUTTON) == False:
            writeToLog("INFO","FAILED to click on 'Go to Media Page' button")
            return False
        if localSettings.LOCAL_SETTINGS_APPLICATION_UNDER_TEST != enums.Application.MEDIA_SPACE:
            # Set iframe is player, to make sure switchToKAFIframeGeneric() will switch to default and then to KAF iframe
            localSettings.TEST_CURRENT_IFRAME_ENUM = enums.IframeName.PLAYER
            self.clsCommon.kafGeneric.switchToKAFIframeGeneric()
        else:
            self.switch_to_default_content()
        if self.clsCommon.entryPage.waitTillMediaIsBeingProcessed() == False:
            writeToLog("INFO","FAILED to wait Till Media Is Being Processed")
            return False
        #If the entry is Quiz, So openEditorTab = True, Then - wait 10sec refresh and wait again
        if openEditorTab == True:
            sleep(10)
            self.refresh()
            sleep(5)
            # NOTE(review): switchToPlayerIframe only runs on this quiz path; confirm the
            # non-quiz path intentionally reads the duration without this extra switch
            self.clsCommon.player.switchToPlayerIframe()
        entryDuration = self.get_element(self.clsCommon.player.PLAYER_TOTAL_VIDEO_LENGTH).text
        self.switch_to_default_content()
        if expectedEntryDuration in entryDuration:
            writeToLog("INFO","Success, Entry: " + entryName +", was clipped, the new entry Duration is: " + expectedEntryDuration)
            return True
        writeToLog("INFO","FAILED, Entry: " + entryName +", was clipped, but the new entry Duration is not as expected : " + entryDuration + " instead of :" + expectedEntryDuration)
        return False
# def quizCreation(self, entryName, dictQuestions, dictDetails='', dictScores='', dictExperience='', timeout=15):
# sleep(25)
# if self.searchAndSelectEntryInMediaSelection(entryName) == False:
# writeToLog("INFO", "FAILED to navigate to " + entryName)
# return False
# sleep(timeout)
#
# # We create the locator for the KEA Quiz Question title field area (used only in the "Reflection Point" and "True and False" Quiz Questions)
# questionField = (self.KEA_OPTION_TEXTAREA_FIELD[0], self.KEA_OPTION_TEXTAREA_FIELD[1].replace('FIELD_NAME', 'questionTxt'))
#
# for questionNumber in dictQuestions:
# questionDetails = dictQuestions[questionNumber]
#
# self.switchToKeaIframe()
# if self.wait_while_not_visible(self.KEA_LOADING_SPINNER, 30) == False:
# writeToLog("INFO","FAILED to wait until spinner isn't visible")
# return False
#
# # Specifying the time stamp, where the Quiz Question should be placed within the entry
# # click on the editor in order to higlight the timeline field and select all the text
# if self.click(self.EDITOR_TIME_PICKER, 1, True) == False:
# writeToLog("INFO", "FAILED to click on the kea timeline field")
# return False
#
# timestamp = questionDetails[0]
#
# # replace the text present in the timestamp field with the new one
# if self.send_keys(self.EDITOR_TIME_PICKER, timestamp + Keys.ENTER) == False:
# writeToLog("INFO", "FAILED to select the timeline field text")
# return False
#
# # Creating the variable for the Quiz Question Type
# qestionType = questionDetails[1]
# if qestionType == enums.QuizQuestionType.Multiple:
# # We enter in the KEA Quiz Question Type screen
# if self.selectQuestionType(qestionType) == False:
# writeToLog("INFO", "FAILED to enter in the " + qestionType.value + " Question screen")
# return False
#
# # Add question fields
# # We verify if we have only one question
# if questionDetails[4] != '':
# QuizQuestion1AdditionalAnswers = [questionDetails[4]]
#
# if questionDetails[5] != '':
# QuizQuestion1AdditionalAnswers.append(questionDetails[5])
#
# if questionDetails[6] != '':
# QuizQuestion1AdditionalAnswers.append(questionDetails[6])
#
# if len(QuizQuestion1AdditionalAnswers) >= 1:
# if self.fillQuizFields(questionDetails[2], questionDetails[3], QuizQuestion1AdditionalAnswers) == False:
# writeToLog("INFO","FAILED to fill question fields")
# return False
# else:
# writeToLog("INFO", "Please make sure that you supply at least two question answers")
# return False
#
# # we verify if the value for the 'Hint' is present in the list
# if len(questionDetails) >= 8:
# # we verify if we want to create a Hint for the current Quiz Question
# if questionDetails[7] != '':
# if self.createHintAndWhy(questionDetails[7], whyText='') == False:
# writeToLog("INFO", "FAILED to create a Hint for the " + questionDetails[2] + " Quiz Question")
# return False
# else:
# writeToLog("INFO", "No hint was given for the " + questionDetails[2] + " Quiz Question")
#
# # we verify if the value for the 'Why' is present in the list
# if len(questionDetails) >= 9:
# # we verify if we want to create a Why for the current Quiz Question
# if questionDetails[8] != '':
# sleep(2)
# if self.createHintAndWhy(hintText='', whyText=questionDetails[8]) == False:
# writeToLog("INFO", "FAILED to create a Why for the " + questionDetails[2] + " Quiz Question")
# return False
# else:
# writeToLog("INFO", "No hint was given for the " + questionDetails[2] + " Quiz Question")
# else:
# writeToLog("INFO", "No 'Hint' or 'Why' will be created for the " + questionDetails[2] + " Quiz Question")
#
# elif qestionType == enums.QuizQuestionType.REFLECTION:
# # We enter in the KEA Quiz Question Type screen
# if self.selectQuestionType(qestionType) == False:
# writeToLog("INFO", "FAILED to enter in the " + qestionType.value + " Question screen")
# return False
#
# # We select the KEA Quiz Question title field
# if self.click(questionField, 2, True) == False:
# writeToLog("INFO", "FAILED to select the reflection point text area")
# return False
#
# # We insert the title for the KEA Quiz Question type
# if self.send_keys(questionField, questionDetails[2], True) == False:
# writeToLog("INFO", "FAILED to insert the " + questionDetails[2] + " reflection point")
# return False
#
# # We make sure that no 'Hint' or 'Why' are trying to be created for 'Reflection Point' Quiz Question
# if len(questionDetails) >= 4:
# writeToLog("INFO", "Hint and Why are not supported for the Reflection Point Quiz Question")
# return False
#
# elif qestionType == enums.QuizQuestionType.TRUE_FALSE:
# # We enter in the KEA Quiz Question Type screen
# if self.selectQuestionType(qestionType) == False:
# writeToLog("INFO", "FAILED to enter in the " + qestionType.value + " Question screen")
# return False
#
# # We select the KEA Quiz Question title field
# if self.click(questionField, 2, True) == False:
# writeToLog("INFO", "FAILED to select the reflection point text area")
# return False
#
# #we insert the Question title inside the Question text area
# if self.send_keys(questionField, questionDetails[2], True) == False:
# writeToLog("INFO", "FAILED to insert the " + questionDetails[2] + " reflection point")
# return False
#
# # We insert the title for the KEA Quiz Question type
# if questionDetails[3] and questionDetails[4] != '':
# if self.click(self.KEA_ADD_NEW_ADD_QUESTION_TRUE_ANSWER_FIELD, 3, True) == False:
# writeToLog("INFO", "FAILED to select the 'True' text area field")
# return False
#
# if self.clear_and_send_keys(self.KEA_ADD_NEW_ADD_QUESTION_TRUE_ANSWER_FIELD, questionDetails[3], True)== False:
# writeToLog("INFO", "FAILED to insert the " + questionDetails[3] + " text within the 'True' field")
# return False
#
# if self.click(self.KEA_ADD_NEW_QUESTION_FALSE_ANSWER_FIELD, 3, True) == False:
# writeToLog("INFO", "FAILED to select the 'False' text area field")
# return False
#
# if self.clear_and_send_keys(self.KEA_ADD_NEW_QUESTION_FALSE_ANSWER_FIELD, questionDetails[4], True)== False:
# writeToLog("INFO", "FAILED to insert the " + questionDetails[4] + " text within the 'False' field")
# return False
#
# # we verify if the value for the 'Hint' is present in the list
# if len(questionDetails) >= 6:
# # we verify if we want to create a Hint for the current Quiz Question
# if questionDetails[5] != '':
# if self.createHintAndWhy(questionDetails[5], whyText='') == False:
# writeToLog("INFO", "FAILED to create a Hint for the " + questionDetails[2] + " Quiz Question")
# return False
# else:
# writeToLog("INFO", "No hint was given for the " + questionDetails[2] + " Quiz Question")
#
# # we verify if the value for the 'Why' is present in the list
# if len(questionDetails) == 7:
# # we verify if we want to create a Why for the current Quiz Question
# if questionDetails[6] != '':
# sleep(2)
# if self.createHintAndWhy(hintText='', whyText=questionDetails[6]) == False:
# writeToLog("INFO", "FAILED to create a Why for the " + questionDetails[2] + " Quiz Question")
# return False
# else:
# writeToLog("INFO", "No hint was given for the " + questionDetails[2] + " Quiz Question")
# else:
# writeToLog("INFO", "No 'Hint' or 'Why' will be created for the " + questionDetails[2] + " Quiz Question")
#
# # We verify that the KEA Quiz Question type is supported
# else:
# writeToLog("INFO", "FAILED, please make sure that you're using a support KEA Quiz Question type, using enums(e.g enums.QuizQuestionType.type)")
# return False
#
# # Save Question
# if self.saveQuizChanges() == False:
# writeToLog("INFO", "FAILED to save the changes")
# return False
#
# # Edit the KEA Quiz Section if necessary by enabling or disabling any KEA option from the KEA Details, Scores and Experience sections
# if dictDetails != '' or dictScores != '' or dictExperience != '':
# # We verify if we modify more than one option for the same KEA Section
# if type(dictDetails) is list:
# for option in dictDetails:
# if self.editQuizOptions(enums.KEAQuizSection.DETAILS, option, saveChanges=False, resumeEditing=False) == False:
# writeToLog("INFO", "FAILED to change the " + enums.KEAQuizSection.DETAILS.value + " KEA Section options")
# return False
#
# else:
# # We modify only one option for this specific KEA section
# if dictDetails != '':
# if self.editQuizOptions(enums.KEAQuizSection.DETAILS, option, saveChanges=False, resumeEditing=False) == False:
# writeToLog("INFO", "FAILED to change the " + enums.KEAQuizSection.DETAILS.value + " KEA Section options")
# return False
#
# # We verify if we modify more than one option for the same KEA Section
# if type(dictScores) is list:
# for option in dictScores:
# if self.editQuizOptions(enums.KEAQuizSection.SCORES, option, saveChanges=False, resumeEditing=False) == False:
# writeToLog("INFO", "FAILED to change the " + enums.KEAQuizSection.SCORES.value + " KEA Section options")
# return False
#
# else:
# # We modify only one option for this specific KEA section
# if dictScores != '':
# if self.editQuizOptions(enums.KEAQuizSection.SCORES, option, saveChanges=False, resumeEditing=False) == False:
# writeToLog("INFO", "FAILED to change the " + enums.KEAQuizSection.SCORES.value + " KEA Section options")
# return False
#
# # We verify if we modify more than one option for the same KEA Section
# if type(dictExperience) is list:
# for option in dictExperience:
# if self.editQuizOptions(enums.KEAQuizSection.EXPERIENCE, option, saveChanges=False, resumeEditing=False) == False:
# writeToLog("INFO", "FAILED to change the " + enums.KEAQuizSection.EXPERIENCE.value + " KEA Section options")
# return False
#
# else:
# # We modify only one option for this specific KEA section
# if dictExperience != '':
# if self.editQuizOptions(enums.KEAQuizSection.EXPERIENCE, option, saveChanges=False, resumeEditing=False) == False:
# writeToLog("INFO", "FAILED to change the " + enums.KEAQuizSection.EXPERIENCE.value + " KEA Section options")
# return False
#
# # We save all the changes performed from each KEA Section
# if self.saveKeaChanges(resumeEditing=True) == False:
# writeToLog("INFO", "FAILED to save the changes performed in the KEA Section")
# return False
# else:
# writeToLog("INFO", "No changes for the KEA Sections was performed")
#
# # Save the KEA Quiz entry and navigate to the entry page
# self.switchToKeaIframe()
# self.clickDone()
# return True
# @Author: Tzachi guetta & Horia Cus
# the following function will create a Quiz (within the given dictQuestions)
# Please follow the individual list structure for each Quiz Question type
# questionMultiple = ['00:10', enums.QuizQuestionType.Multiple, 'Question Title for Multiple Choice', 'question #1 option #1', 'question #1 option #2', 'question #1 option #3', 'question #1 option #4', 'Hint Text', 'Why Text']
# questionTrueAndFalse = ['00:15', enums.QuizQuestionType.TRUE_FALSE, 'Question Title for True and False', 'True text', 'False text', 'Hint Text', 'Why Text']
# questionReflection = ['00:20', enums.QuizQuestionType.REFLECTION, 'Question Title for Reflection Point', 'Hint Text', 'Why Text']
# dictQuestions = {'1':questionMultiple,'2':questionTrueAndFalse,'3':questionReflection}
# questionOpen = ['0:25', enums.QuizQuestionType.OPEN_QUESTION, 'Question title for Open-Q']
# If you want to change the answer order you can use this function: changeAnswerOrder
def quizCreation(self, entryName, dictQuestions, dictDetails='', dictScores='', dictExperience='', timeout=25):
sleep(25)
# Need this step in order to workaround an issue that may fail a test case after uploading an entry
if self.wait_element(self.clsCommon.upload.UPLOAD_PAGE_TITLE, 0.5, True) != False:
sleep(2)
if self.clsCommon.navigateTo(enums.Location.HOME) == False:
writeToLog("INFO", "FAILED to navigate to home page")
return False
if self.searchAndSelectEntryInMediaSelection(entryName) == False:
writeToLog("INFO", "FAILED to navigate to " + entryName)
return False
sleep(timeout)
if self.wait_while_not_visible(self.KEA_MEDIA_IS_BEING_PROCESSED, 120) == False:
writeToLog("INFO", "FAILED to process the " + entryName + " during the launch")
return False
self.switchToKeaIframe()
if self.wait_while_not_visible(self.KEA_QUIZ_LOADING_CONTAINER, 120) == False:
writeToLog("INFO", "FAILED to load the quiz screen")
return False
# We create the locator for the KEA Quiz Question title field area (used only in the "Reflection Point" and "True and False" and "Open-Q" Quiz Questions)
questionField = (self.KEA_OPTION_TEXTAREA_FIELD[0], self.KEA_OPTION_TEXTAREA_FIELD[1].replace('FIELD_NAME', 'questionTxt'))
for questionNumber in dictQuestions:
questionDetails = dictQuestions[questionNumber]
self.switchToKeaIframe()
if self.wait_while_not_visible(self.KEA_LOADING_SPINNER, 45) == False:
writeToLog("INFO","FAILED to wait until spinner isn't visible")
return False
# Because D2L application doesn't properly display the entire Quiz screen we need to scroll down in order to select the time stamp field
if localSettings.LOCAL_SETTINGS_APPLICATION_UNDER_TEST == enums.Application.D2L:
self.clsCommon.sendKeysToBodyElement(Keys.PAGE_DOWN)
sleep(2)
# Set the time position of the current quiz inside the timeline section
if self.setRealTimeMarkerToTime(questionDetails[0]) == False:
writeToLog("INFO", "FAILED to set the question " + questionDetails[2] + " at time location: " + questionDetails[0] )
return False
# Scroll back up if using D2L application
if localSettings.LOCAL_SETTINGS_APPLICATION_UNDER_TEST == enums.Application.D2L:
self.clsCommon.sendKeysToBodyElement(Keys.ARROW_UP, 4)
sleep(2)
# Creating the variable for the Quiz Question Type
qestionType = questionDetails[1]
if qestionType == enums.QuizQuestionType.Multiple:
# We enter in the KEA Quiz Question Type screen
if self.selectQuestionType(qestionType) == False:
writeToLog("INFO", "FAILED to enter in the " + qestionType.value + " Question screen")
return False
# Add question fields
# We verify if we have only one question
if questionDetails[4] != '':
QuizQuestion1AdditionalAnswers = [questionDetails[4]]
if questionDetails[5] != '':
QuizQuestion1AdditionalAnswers.append(questionDetails[5])
if questionDetails[6] != '':
QuizQuestion1AdditionalAnswers.append(questionDetails[6])
if len(QuizQuestion1AdditionalAnswers) >= 1:
if self.fillQuizFields(questionDetails[2], questionDetails[3], QuizQuestion1AdditionalAnswers) == False:
writeToLog("INFO","FAILED to fill question fields")
return False
else:
writeToLog("INFO", "Please make sure that you supply at least two question answers")
return False
# we verify if the value for the 'Hint' is present in the list
if len(questionDetails) >= 8:
# we verify if we want to create a Hint for the current Quiz Question
if questionDetails[7] != '':
if self.createHintAndWhy(questionDetails[7], whyText='') == False:
writeToLog("INFO", "FAILED to create a Hint for the " + questionDetails[2] + " Quiz Question")
return False
else:
writeToLog("INFO", "No hint was given for the " + questionDetails[2] + " Quiz Question")
# we verify if the value for the 'Why' is present in the list
if len(questionDetails) >= 9:
# we verify if we want to create a Why for the current Quiz Question
if questionDetails[8] != '':
sleep(2)
if self.createHintAndWhy(hintText='', whyText=questionDetails[8]) == False:
writeToLog("INFO", "FAILED to create a Why for the " + questionDetails[2] + " Quiz Question")
return False
else:
writeToLog("INFO", "No hint was given for the " + questionDetails[2] + " Quiz Question")
else:
writeToLog("INFO", "No 'Hint' or 'Why' will be created for the " + questionDetails[2] + " Quiz Question")
elif qestionType == enums.QuizQuestionType.REFLECTION:
# We enter in the KEA Quiz Question Type screen
if self.selectQuestionType(qestionType) == False:
writeToLog("INFO", "FAILED to enter in the " + qestionType.value + " Question screen")
return False
# We select the KEA Quiz Question title field
if self.click(questionField, 2, True) == False:
writeToLog("INFO", "FAILED to select the reflection point text area")
return False
# We insert the title for the KEA Quiz Question type
if self.send_keys(questionField, questionDetails[2], True) == False:
writeToLog("INFO", "FAILED to insert the " + questionDetails[2] + " reflection point")
return False
# We make sure that no 'Hint' or 'Why' are trying to be created for 'Reflection Point' Quiz Question
if len(questionDetails) >= 4:
writeToLog("INFO", "Hint and Why are not supported for the Reflection Point Quiz Question")
return False
elif qestionType == enums.QuizQuestionType.TRUE_FALSE:
# We enter in the KEA Quiz Question Type screen
if self.selectQuestionType(qestionType) == False:
writeToLog("INFO", "FAILED to enter in the " + qestionType.value + " Question screen")
return False
# We select the KEA Quiz Question title field
if self.click(questionField, 2, True) == False:
writeToLog("INFO", "FAILED to select the reflection point text area")
return False
#we insert the Question title inside the Question text area
if self.send_keys(questionField, questionDetails[2], True) == False:
writeToLog("INFO", "FAILED to insert the " + questionDetails[2] + " reflection point")
return False
# We insert the title for the KEA Quiz Question type
if questionDetails[3] and questionDetails[4] != '':
if self.click(self.KEA_ADD_NEW_ADD_QUESTION_TRUE_ANSWER_FIELD, 3, True) == False:
writeToLog("INFO", "FAILED to select the 'True' text area field")
return False
if self.clear_and_send_keys(self.KEA_ADD_NEW_ADD_QUESTION_TRUE_ANSWER_FIELD, questionDetails[3], True)== False:
writeToLog("INFO", "FAILED to insert the " + questionDetails[3] + " text within the 'True' field")
return False
if self.click(self.KEA_ADD_NEW_QUESTION_FALSE_ANSWER_FIELD, 3, True) == False:
writeToLog("INFO", "FAILED to select the 'False' text area field")
return False
if self.clear_and_send_keys(self.KEA_ADD_NEW_QUESTION_FALSE_ANSWER_FIELD, questionDetails[4], True)== False:
writeToLog("INFO", "FAILED to insert the " + questionDetails[4] + " text within the 'False' field")
return False
# we verify if the value for the 'Hint' is present in the list
if len(questionDetails) >= 6:
# we verify if we want to create a Hint for the current Quiz Question
if questionDetails[5] != '':
if self.createHintAndWhy(questionDetails[5], whyText='') == False:
writeToLog("INFO", "FAILED to create a Hint for the " + questionDetails[2] + " Quiz Question")
return False
else:
writeToLog("INFO", "No hint was given for the " + questionDetails[2] + " Quiz Question")
# we verify if the value for the 'Why' is present in the list
if len(questionDetails) == 7:
# we verify if we want to create a Why for the current Quiz Question
if questionDetails[6] != '':
sleep(2)
if self.createHintAndWhy(hintText='', whyText=questionDetails[6]) == False:
writeToLog("INFO", "FAILED to create a Why for the " + questionDetails[2] + " Quiz Question")
return False
else:
writeToLog("INFO", "No hint was given for the " + questionDetails[2] + " Quiz Question")
else:
writeToLog("INFO", "No 'Hint' or 'Why' will be created for the " + questionDetails[2] + " Quiz Question")
elif qestionType == enums.QuizQuestionType.OPEN_QUESTION:
# We enter in the KEA Quiz Question Type screen
if self.selectQuestionType(qestionType) == False:
writeToLog("INFO", "FAILED to enter in the " + qestionType.value + " Question screen")
return False
# We select the KEA Quiz Question title field
if self.click(questionField, 2, True) == False:
writeToLog("INFO", "FAILED to select the reflection point text area")
return False
# We insert the title for the KEA Quiz Question type
if self.send_keys(questionField, questionDetails[2], True) == False:
writeToLog("INFO", "FAILED to insert the " + questionDetails[2] + " open-Q")
return False
# we verify if the value for the 'Hint' is present in the list
if len(questionDetails) >= 4:
# we verify if we want to create a Hint for the current Quiz Question
if questionDetails[3] != '':
if self.createHintAndWhy(questionDetails[3], whyText='') == False:
writeToLog("INFO", "FAILED to create a Hint for the " + questionDetails[2] + " Quiz Question")
return False
else:
writeToLog("INFO", "No hint was given for the " + questionDetails[2] + " Quiz Question")
# we verify if the value for the 'Why' is present in the list
if len(questionDetails) == 5:
# we verify if we want to create a Why for the current Quiz Question
if questionDetails[4] != '':
sleep(2)
if self.createHintAndWhy(hintText='', whyText=questionDetails[4]) == False:
writeToLog("INFO", "FAILED to create a Why for the " + questionDetails[2] + " Quiz Question")
return False
else:
writeToLog("INFO", "No hint was given for the " + questionDetails[2] + " Quiz Question")
else:
writeToLog("INFO", "No 'Hint' or 'Why' will be created for the " + questionDetails[2] + " Quiz Question")
# We verify that the KEA Quiz Question type is supported
else:
writeToLog("INFO", "FAILED, please make sure that you're using a support KEA Quiz Question type, using enums(e.g enums.QuizQuestionType.type)")
return False
# Save Question
if self.saveQuizChanges() == False:
writeToLog("INFO", "FAILED to save the changes")
return False
# Edit the KEA Quiz Section if necessary by enabling or disabling any KEA option from the KEA Details, Scores and Experience sections
if dictDetails != '' or dictScores != '' or dictExperience != '':
# We verify if we modify more than one option for the same KEA Section
if type(dictDetails) is list:
for option in dictDetails:
if self.editQuizOptions(enums.KEAQuizSection.DETAILS, option, saveChanges=False, resumeEditing=False) == False:
writeToLog("INFO", "FAILED to change the " + enums.KEAQuizSection.DETAILS.value + " KEA Section options")
return False
else:
# We modify only one option for this specific KEA section
if dictDetails != '':
if self.editQuizOptions(enums.KEAQuizSection.DETAILS, option, saveChanges=False, resumeEditing=False) == False:
writeToLog("INFO", "FAILED to change the " + enums.KEAQuizSection.DETAILS.value + " KEA Section options")
return False
# We verify if we modify more than one option for the same KEA Section
if type(dictScores) is list:
for option in dictScores:
if self.editQuizOptions(enums.KEAQuizSection.SCORES, option, saveChanges=False, resumeEditing=False) == False:
writeToLog("INFO", "FAILED to change the " + enums.KEAQuizSection.SCORES.value + " KEA Section options")
return False
else:
# We modify only one option for this specific KEA section
if dictScores != '':
if self.editQuizOptions(enums.KEAQuizSection.SCORES, option, saveChanges=False, resumeEditing=False) == False:
writeToLog("INFO", "FAILED to change the " + enums.KEAQuizSection.SCORES.value + " KEA Section options")
return False
# We verify if we modify more than one option for the same KEA Section
if type(dictExperience) is list:
for option in dictExperience:
if self.editQuizOptions(enums.KEAQuizSection.EXPERIENCE, option, saveChanges=False, resumeEditing=False) == False:
writeToLog("INFO", "FAILED to change the " + enums.KEAQuizSection.EXPERIENCE.value + " KEA Section options")
return False
else:
# We modify only one option for this specific KEA section
if dictExperience != '':
if self.editQuizOptions(enums.KEAQuizSection.EXPERIENCE, option, saveChanges=False, resumeEditing=False) == False:
writeToLog("INFO", "FAILED to change the " + enums.KEAQuizSection.EXPERIENCE.value + " KEA Section options")
return False
# We save all the changes performed from each KEA Section
if self.saveKeaChanges(resumeEditing=True) == False:
writeToLog("INFO", "FAILED to save the changes performed in the KEA Section")
return False
else:
writeToLog("INFO", "No changes for the KEA Sections was performed")
# Save the KEA Quiz entry and navigate to the entry page
self.switchToKeaIframe()
self.clickDone()
return True
# @Author: Horia Cus
# This function will move the real time marker to the desired time location
# The real time marker location will be moved by clicking directly on the time line section
# If the real time marker couldn't be set properly by clicking on the time line section, the Editor Time option will be used
# timeLocation must be string using mm:ss format ( e.g str(10:00) )
    def setRealTimeMarkerToTime(self, timeLocation):
        """Move the editor's real-time marker to *timeLocation* ('mm:ss' string).

        Primary strategy: compute the pixel offset on the timeline section that
        corresponds to the requested time and click it with an ActionChain.
        Fallbacks, in order: a second ActionChain click (after a PAGE_DOWN nudge),
        a pixel-corrected click for a one-second drift, and finally typing the
        time into the Editor Time Picker input field.

        Returns True when the marker text matches *timeLocation*, False otherwise.
        """
        self.switchToKeaIframe()
        # Take the details from the kea time line section
        keaTimelineSection = self.wait_element(self.KEA_TIMELINE_PRESENTED_SECTIONS, 60, True)
        # Verify that the time line section is available
        if keaTimelineSection == False:
            writeToLog("INFO", "FAILED to take the KEA timeline section element")
            return False
        # Take the length needed in order to set by the correct pixels the time location, based on the length of the timeline section, entry time and time location
        # NOTE(review): [1:] drops the first char of the total-time text — presumably a
        # leading separator such as '/'; confirm against the editor's time label format
        entryTotalTime = self.wait_element(self.EDITOR_TOTAL_TIME, 1, True).text.replace(' ', '')[1:]
        m, s = entryTotalTime.split(':')
        entryTotalTimeSeconds = int(m) * 60 + int(s)
        m, s = timeLocation.split(':')
        quizTimeLocationInSeconds = float(m) * 60 + float(s)
        keaTimelineSectionWidth = keaTimelineSection.size['width']
        widthSizeForOneSecond = keaTimelineSectionWidth/entryTotalTimeSeconds
        widthSizeInOrderToReachDesiredStartTime = widthSizeForOneSecond * quizTimeLocationInSeconds
        actionSetQuizLocation = ActionChains(self.driver)
        # Set the time line location using action chain
        # Time marker is moved based on the clicked spot from the timeline section element
        # NOTE(review): the +2.5 px and -10 px offsets appear to be empirical tuning values
        try:
            actionSetQuizLocation.move_to_element_with_offset(keaTimelineSection, widthSizeInOrderToReachDesiredStartTime+2.5, -10).pause(1).click().perform()
        except Exception:
            writeToLog("INFO", "FAILED to click on the timeline section during the first time in order to set the desired time marker location")
            timeLineSectionMarker = self.wait_element(self.EDITOR_REALTIME_MARKER, 3, True)
            # Second attempt: scroll the marker into view, then repeat the offset click
            try:
                ActionChains(self.driver).move_to_element(timeLineSectionMarker).send_keys(Keys.PAGE_DOWN).pause(5).perform()
                ActionChains(self.driver).move_to_element_with_offset(keaTimelineSection, widthSizeInOrderToReachDesiredStartTime+2.5, -10).pause(1).click().perform()
            except Exception:
                # Verify if the real time marker already matches with our time location
                if self.wait_element(self.EDITOR_REALTIME_MARKER, 3, True).text[:5] != timeLocation:
                    writeToLog("INFO", "FAILED to set the start time to during the second try " + str(timeLocation) + " using action chain")
                    # we continue to try to change the real marker time using input field ( not action chains )
                else:
                    writeToLog("INFO", "PASSED, the real time marker has been successfully set to the " + timeLocation + " time location, using Action Chain")
                    return True
        # First 5 chars of the marker text are the 'mm:ss' part
        timeLineSectionMarker = self.wait_element(self.EDITOR_REALTIME_MARKER, 3, True).text[:5]
        # Verify that the Time Marker matches with our desired time location
        if timeLineSectionMarker != timeLocation:
            # text[4:] keeps the seconds-and-milliseconds tail used for drift correction below
            timeLineSectionMarkerUpdated = self.wait_element(self.EDITOR_REALTIME_MARKER, 3, True).text[4:]
            # Verify if there's a gap of one second between the action
            if int(timeLineSectionMarkerUpdated[0]) + 1 == int(timeLocation[-1]):
                # Browser-specific pixel corrections; the thresholds below are empirical
                if self.driver.capabilities['browserName'] == 'firefox':
                    # If the ml seconds are higher than 49 we need a less difference in px
                    if int(timeLineSectionMarkerUpdated[2:]) >= 49:
                        differencePx = 7
                    # If the ml seconds are less than 49 we need a higher difference in px
                    else:
                        differencePx = 22
                else:
                    # If the ml seconds are higher than 49 we need a less difference in px
                    if int(timeLineSectionMarkerUpdated[2:]) >= 49:
                        differencePx = 10
                    # If the ml seconds are less than 49 we need a higher difference in px
                    else:
                        differencePx = 30
                actionSetQuizLocationSecond = ActionChains(self.driver)
                try:
                    actionSetQuizLocationSecond.move_to_element_with_offset(keaTimelineSection, widthSizeInOrderToReachDesiredStartTime+2.5+differencePx, -10).pause(1).click().perform()
                except Exception:
                    writeToLog("INFO", "FAILED to set the start time to during the second try " + str(timeLocation))
                    return False
            else:
                writeToLog("INFO", "Couldn't set the " + timeLocation + " using action chains, but " + timeLineSectionMarker + " time location has been set")
            timeLineSectionMarkerUpdated = self.wait_element(self.EDITOR_REALTIME_MARKER, 3, True).text[:5]
            # Verify that the Time Marker matches with our desired time location after the second try of using Action Chain
            if timeLineSectionMarkerUpdated != timeLocation:
                # As a redundancy, if we are unable to set the desired time location by Action Chain, we are going to use Editor Time Picker
                # Select the time stamp input field
                if self.click(self.EDITOR_TIME_PICKER, 1, True) == False:
                    writeToLog("INFO", "FAILED to click on the kea timeline field")
                    return False
                sleep(1)
                timePickerHighlighted = self.wait_element(self.EDITOR_TIME_PICKER_HIGHLIGHTED_CONTAINER, 3, True)
                # Verify that the input time field is highlighted
                if timePickerHighlighted == False:
                    writeToLog("INFO", "Time picker input field couldn't be highlighted during the first try")
                    if self.click(self.EDITOR_TIME_PICKER, 1, True) == False:
                        writeToLog("INFO", "FAILED to click on the kea timeline field")
                        return False
                    sleep(1)
                    timePickerHighlighted = self.wait_element(self.EDITOR_TIME_PICKER_HIGHLIGHTED_CONTAINER, 3, True)
                    if timePickerHighlighted == False:
                        writeToLog("INFO", "FAILED to highlight the time picker input field during the second time")
                        return False
                # Clear first the current Editor Time Location
                if self.clear_and_send_keys(self.EDITOR_TIME_PICKER, timeLocation) == False:
                    writeToLog("INFO", "FAILED to select the timeline field text")
                    return False
                sleep(2)
                # Put the desired time location inside the Editor Time input field
                if self.clear_and_send_keys(self.EDITOR_TIME_PICKER, timeLocation) == False:
                    writeToLog("INFO", "FAILED to select the timeline field text")
                    return False
                # Move the real time maker to the desired time stamp
                sleep(2)
                self.clsCommon.sendKeysToBodyElement(Keys.ENTER)
                sleep(2)
                timeLineSectionMarkerUpdated = self.wait_element(self.EDITOR_REALTIME_MARKER, 1, True).text[:5]
                # Verify that the Time Marker matches with our desired time location
                if timeLineSectionMarkerUpdated != timeLocation:
                    writeToLog("INFO", "FAILED to set the real time marker to the " + timeLocation + " time location using Editor Time Picker")
                    return False
                else:
                    writeToLog("INFO", "PASSED, the real time marker has been successfully set to the " + timeLocation + " time location using, Editor Time Picker")
                    return True
        writeToLog("INFO", "PASSED, the real time marker has been successfully set to the " + timeLocation + " time location, using Action Chain")
        return True
# @Author: Horia Cus
# This function can navigate to a specific entry and initiate the KEA Quiz option
# This function work for both entries that have Quiz created or not
# entryName must be inserted in order to verify that the KEA page has been successfully opened and loaded
    def initiateQuizFlow(self, entryName, navigateToEntry=False, timeOut=40):
        """Open the KEA editor for *entryName* and activate its Quiz tab.

        Works both for entries that already have a quiz and entries that don't
        (when the 'Start' button is present it is clicked to begin quiz creation).

        entryName       - entry name, used to verify the KEA page loaded correctly
        navigateToEntry - when True, first launch KEA from My Media -> entry page
        timeOut         - seconds to sleep before launching KEA (processing time)

        Returns True when the Quiz tab is active, False otherwise.
        """
        self.switch_to_default_content()
        if navigateToEntry == True:
            sleep(timeOut)
            if self.launchKEA(entryName, navigateTo=enums.Location.ENTRY_PAGE, navigateFrom=enums.Location.MY_MEDIA) == False:
                writeToLog("INFO","Failed to launch KEA for: " + entryName)
                return False
        self.switchToKeaIframe()
        if self.verifyKeaEntryName(entryName, 60) == False:
            writeToLog("INFO", "FAILED to load the page until the " + entryName + " was present")
            return False
        # Click the Quiz tab only when it is not already the active one
        if self.wait_element(self.KEA_QUIZ_TAB_ACTIVE, 5, True) == False:
            if self.click(self.KEA_QUIZ_TAB, 5, True) == False:
                writeToLog("INFO","FAILED to click on the KEA Quiz tab menu")
                return False
        # The 'Start' button only exists for entries without an existing quiz
        start_button = (self.KEA_QUIZ_BUTTON[0], self.KEA_QUIZ_BUTTON[1].replace('BUTTON_NAME', enums.KeaQuizButtons.START.value))
        if self.wait_element(start_button, 2, True) != False:
            if self.click(start_button, 1, True) == False:
                writeToLog("INFO","FAILED to click on the Quiz Start button")
                return False
        sleep(3)
        if self.wait_while_not_visible(self.KEA_LOADING_SPINNER_CONTAINER, 60) == False:
            writeToLog("INFO","FAILED, the loading spinner remained in infinite loading")
            return False
        sleep(1)
        # Final sanity check: the Quiz tab must be the active tab
        if self.wait_element(self.KEA_QUIZ_TAB_ACTIVE, 5, True) == False:
            writeToLog("INFO", "FAILED, KEA Quiz tab is not active")
            return False
        return True
# @Author: Horia Cus
# This function verifies that the KEA entry name is present and that it matches with the desired one
def verifyKeaEntryName(self, entryName, timeout=60):
self.switchToKeaIframe()
wait_until = datetime.datetime.now() + datetime.timedelta(seconds=timeout)
self.setImplicitlyWait(0)
while True:
try:
el = self.wait_element(self.KEA_ENTRY_NAME, 60, multipleElements=True)
if el.text == entryName:
self.setImplicitlyWaitToDefault()
writeToLog("INFO", "The " + entryName + " has been found in KEA page")
break
else:
writeToLog("INFO", "The KEA entry-name doesn't matches with " + entryName + " entry")
return False
except:
if wait_until < datetime.datetime.now():
self.setImplicitlyWaitToDefault()
writeToLog("INFO", "FAILED to find the " + entryName + " within the " + str(timeout) + " seconds")
return False
pass
if self.wait_while_not_visible(self.KEA_LOADING_CONTAINER, 60) == False:
writeToLog("INFO", "FAILED to wait until the KEA page has been successfully loaded")
return False
writeToLog("INFO", "KEA Page is active for the " + entryName + " entry")
return True
# @Author: Horia Cus
# This function triggers a specific KEA Section and it can enable / disable or add an input for any available KEA option
# keaCategory = must be enum
# keaOption must be enum and have a map
# If saveChanges = True, all the KEA changes will be saved by clicking on the done button and waiting for the spinner to disappear
def editQuizOptions(self, keaSection, keaOptionDict, saveChanges=False, resumeEditing=False):
if keaSection != '':
tmpKEASection = (self.KEA_TOGGLE_MENU_OPTION[0], self.KEA_TOGGLE_MENU_OPTION[1].replace('OPTION_NAME', keaSection.value))
else:
writeToLog("INFO", "Please specify in which KEA section we should enable or disable the options")
return False
self.switchToKeaIframe()
sleep(1)
if self.click(tmpKEASection, 5, True) == False:
writeToLog("INFO", "Failed to click on the " + keaSection.value + " KEA section drop down menu")
return False
for options in keaOptionDict:
if keaOptionDict[options] == True:
if options == enums.KEAQuizOptions.NO_SEEKING_FORWARD:
options = enums.KEAQuizOptions.DO_NOT_ALLOW_SKIP
if self.changeKEAOptionState(options, True) == False:
writeToLog("INFO", "FAILED to enable the " + options.value + " in order to enable the: " + enums.KEAQuizOptions.NO_SEEKING_FORWARD.value)
return False
sleep(1)
options = enums.KEAQuizOptions.NO_SEEKING_FORWARD
if self.changeKEAOptionState(options, True) == False:
writeToLog("INFO", "FAILED to enable the " + options.value)
return False
elif self.changeKEAOptionState(options, True) == False:
return False
elif options == enums.KEAQuizOptions.ALLOW_MULTUPLE_ATTEMPTS:
if self.changeKEAOptionState(options, True) == False:
writeToLog("INFO", "FAILED to enable the " + options.value)
return False
elif keaOptionDict[options] == False:
if options == enums.KEAQuizOptions.DO_NOT_SHOW_SCORES:
options = enums.KEAQuizOptions.SHOW_SCORES
if self.changeKEAOptionState(options, True) == False:
writeToLog("INFO", "FAILED to disable the " + options.value + " by enabling the dependency option: " + enums.KEAQuizOptions.SHOW_SCORES.value)
return False
elif options == enums.KEAQuizOptions.NO_SEEKING_FORWARD:
options = enums.KEAQuizOptions.DO_NOT_ALLOW_SKIP
if self.changeKEAOptionState(options, True) == False:
writeToLog("INFO", "FAILED to enable the " + options.value + " in order to enable the: " + enums.KEAQuizOptions.NO_SEEKING_FORWARD.value)
return False
sleep(1)
options = enums.KEAQuizOptions.NO_SEEKING_FORWARD
if self.changeKEAOptionState(options, False) == False:
writeToLog("INFO", "FAILED to enable the " + options.value)
return False
elif options == enums.KEAQuizOptions.SHOW_SCORES:
options = enums.KEAQuizOptions.DO_NOT_SHOW_SCORES
if self.changeKEAOptionState(options, True) == False:
writeToLog("INFO", "FAILED to disable the " + options.value + " by enabling the dependency option: " + enums.KEAQuizOptions.DO_NOT_SHOW_SCORES.value)
return False
elif options == enums.KEAQuizOptions.ALLOW_SKIP:
options = enums.KEAQuizOptions.DO_NOT_ALLOW_SKIP
if self.changeKEAOptionState(options, True) == False:
writeToLog("INFO", "FAILED to disable the " + options.value + " by enabling the dependency option: " + enums.KEAQuizOptions.DO_NOT_ALLOW_SKIP.value)
return False
elif options == enums.KEAQuizOptions.DO_NOT_ALLOW_SKIP:
options = enums.KEAQuizOptions.ALLOW_SKIP
if self.changeKEAOptionState(options, True) == False:
writeToLog("INFO", "FAILED to disable the " + options.value + " by enabling the dependency option: " + enums.KEAQuizOptions.ALLOW_SKIP.value)
return False
else:
if self.changeKEAOptionState(options, False) == False:
return False
elif keaOptionDict[options] != '':
if options == enums.KEAQuizOptions.SET_NUMBER_OF_ATTEMPTS:
if self.clear_and_send_keys(self.KEA_NUMBER_OF_ALLOW_ATTEMPTS, keaOptionDict[options]) == False:
writeToLog("INFO", "FAILED to insert number of allow attempts")
return False
elif options == enums.KEAQuizOptions.QUIZ_SCORE_TYPE:
if self.click(self.KEA_SCORE_TYPE_DROP_DOWN) == False:
writeToLog("INFO", "FAILED to click on score type dropdown")
return False
tmpScoreType = (self.KEA_SCORE_TYPE_OPTION[0], self.KEA_SCORE_TYPE_OPTION[1].replace('SCORE_TYPE', keaOptionDict[options]))
if self.click(tmpScoreType) == False:
writeToLog("INFO", "FAILED to click on " + options.value + " score type")
return False
else:
if options == enums.KEAQuizOptions.QUIZ_NAME:
tmpKEAInputField = (self.KEA_OPTION_INPUT_FIELD[0], self.KEA_OPTION_INPUT_FIELD[1].replace('FIELD_NAME', 'quizName'))
elif options == enums.KEAQuizOptions.SHOW_WELCOME_PAGE:
tmpKEAInputField = (self.KEA_OPTION_TEXTAREA_FIELD[0], self.KEA_OPTION_TEXTAREA_FIELD[1].replace('FIELD_NAME', 'welcomeMessage'))
if self.click(tmpKEAInputField, 5, True) == False:
writeToLog("INFO", "FAILED to select the " + options.value + " option")
return False
if self.clear_and_send_keys(tmpKEAInputField, keaOptionDict[options], True) == False:
writeToLog("INFO", "FAILED to clear and add " + keaOptionDict[options] + " text to the " + keaOptionDict.value)
return False
sleep(3)
sleep(1)
if self.click(tmpKEASection, 5, True) == False:
writeToLog("INFO", "Failed to collapse the " + keaSection.value)
return False
if saveChanges == True:
if self.saveKeaChanges(resumeEditing) == False:
writeToLog("INFO", "FAILED to save the KEA changes")
return False
return True
# @Author: Horia Cus
# This function changes the status of any KEA Option to enable or disable
# If stateEnabled=True, it will verify if the specific KEA Option is enabled, if not, it will enable it
# If stateEnabled=False, it will verify if the specific KEA Option is disabled, if not, it will disable it
# keaOption must be enum and have a map
# stateEnabled must be Boolean
def changeKEAOptionState(self, keaOption, stateEnabled):
self.switchToKeaIframe()
if stateEnabled == True:
tmpKEAOption = (self.KEA_OPTION_ACTIVE[0], self.KEA_OPTION_ACTIVE[1].replace('OPTION_NAME', keaOption.value))
if self.wait_element(tmpKEAOption, 1, True) != False:
writeToLog("INFO", "The " + keaOption.value + " is already enabled")
return True
else:
if keaOption.value == enums.KEAQuizOptions.ALLOW_MULTUPLE_ATTEMPTS:
if self.click(self.KEA_ALLOW_MULTIPLE_ATTEMPTS_OPTION_GRAYED_OUT) == False:
writeToLog("INFO", "FAILED to enable " + keaOption.value + " option")
return False
else:
tmpKEAOption = (self.KEA_OPTION_NORMAL[0], self.KEA_OPTION_NORMAL[1].replace('OPTION_NAME', keaOption.value))
if self.click(tmpKEAOption, 5, True) == False:
writeToLog("INFO", "FAILED to enable " + keaOption.value + " option")
return False
elif stateEnabled == False:
if keaOption == enums.KEAQuizOptions.ALLOW_DOWNLOAD or keaOption == enums.KEAQuizOptions.INSTRUCTIONS:
if self.verifyKEAOptionState(enums.KEAQuizOptions.SHOW_WELCOME_PAGE, False) == True:
writeToLog("INFO", "The " + keaOption.value + " is already disabled due to the dependent option, " + enums.KEAQuizOptions.SHOW_WELCOME_PAGE.value)
return True
tmpKEAOptionActive = (self.KEA_OPTION_ACTIVE[0], self.KEA_OPTION_ACTIVE[1].replace('OPTION_NAME', keaOption.value))
if keaOption.value == enums.KEAQuizOptions.ALLOW_MULTUPLE_ATTEMPTS:
tmpKEAOptionNormal = self.KEA_ALLOW_MULTIPLE_ATTEMPTS_OPTION_GRAYED_OUT
else:
tmpKEAOptionNormal = (self.KEA_OPTION_NORMAL[0], self.KEA_OPTION_NORMAL[1].replace('OPTION_NAME', keaOption.value))
if self.wait_element(tmpKEAOptionActive, 1, True) == False and self.wait_element(tmpKEAOptionNormal, 1, True) != False:
writeToLog("INFO", "The " + keaOption.value + " is already disabled")
return True
else:
if self.click(tmpKEAOptionNormal, 5, True) == False:
writeToLog("INFO", "FAILED to enable " + keaOption.value + " option")
return False
else:
writeToLog("INFO", "Make sure that you use boolean")
return False
return True
# @Author: Horia Cus
# This function verifies if the status of any KEA Option is enabled or disabled
# If expectedState=True, it will verify if the specific KEA Option is enabled
# If expectedState=False, it will verify if the specific KEA Option is disabled
# keaOption must be enum and have a map with boolean
def verifyKEAOptionState(self, keaOption, expectedState):
self.switchToKeaIframe()
if expectedState == True:
tmpKEAOptionActive = (self.KEA_OPTION_ACTIVE[0], self.KEA_OPTION_ACTIVE[1].replace('OPTION_NAME', keaOption.value))
if self.wait_element(tmpKEAOptionActive, 1, True) == False:
writeToLog("INFO", "The " + keaOption.value + " is not enabled")
return False
elif expectedState == False:
tmpKEAOptionActive = (self.KEA_OPTION_ACTIVE[0], self.KEA_OPTION_ACTIVE[1].replace('OPTION_NAME', keaOption.value))
tmpKEAOptionNormal = (self.KEA_OPTION_NORMAL[0], self.KEA_OPTION_NORMAL[1].replace('OPTION_NAME', keaOption.value))
if self.wait_element(tmpKEAOptionActive, 1, True) == False and self.wait_element(tmpKEAOptionNormal, 1, True) != False:
writeToLog("INFO", "The " + keaOption.value + " is disabled")
return True
else:
writeToLog("INFO", "The " + keaOption.value + " is not disabled")
return False
else:
writeToLog("INFO", "Make sure that you use boolean")
return False
return True
# @Author: Horia Cus
# This function verifies if the status of any KEA Option is enabled or disabled or that a specific element is present or not
# keaSection must be enum
# keaOption must be enum and have a map
    def verifyQuizOptionsInKEA(self, keaSection, keaOption):
        """Expand a KEA section and verify the state of each option in *keaOption*.

        keaSection - enums.KEAQuizSection member (which section to expand/collapse)
        keaOption  - dict mapping enums.KEAQuizOptions members to the expected state
                     (True/False), or to a value for special options like QUIZ_NAME

        Returns True when every option matches its expected state, False otherwise.
        """
        self.switchToKeaIframe()
        if keaSection != '':
            tmpKEASection = (self.KEA_TOGGLE_MENU_OPTION[0], self.KEA_TOGGLE_MENU_OPTION[1].replace('OPTION_NAME', keaSection.value))
        else:
            writeToLog("INFO", "Please specify in which KEA section we should verify the state of the options")
            return False
        sleep(1)
        # Expand the section so its options become visible
        if self.click(tmpKEASection, 5, True) == False:
            writeToLog("INFO", "Failed to click on the " + keaSection.value)
            return False
        for options in keaOption:
            if options == enums.KEAQuizOptions.QUIZ_NAME:
                # Quiz name is verified against the KEA page title, not a toggle
                if self.verifyKeaEntryName(keaOption[options], 5) == False:
                    writeToLog("INFO", "The KEA entry name doesn't match with " + keaOption[options] + " name")
                    return False
            else:
                if keaOption[options] == True:
                    if self.verifyKEAOptionState(options, True) == False:
                        return False
                elif keaOption[options] == False:
                    if self.verifyKEAOptionState(options, False) == False:
                        return False
                elif keaOption[options] != '':
                    # Non-boolean option values are not verified yet
                    writeToLog("INFO", "Work in progress")
                    # if self.clsCommon.player.verifyQuizElementsInPlayer() == False: WIP
        sleep(1)
        # Collapse the section again to restore the initial screen state
        if self.click(tmpKEASection, 5, True) == False:
            writeToLog("INFO", "Failed to collapse the " + keaSection.value)
            return False
        return True
# @Author: Horia Cus
# This function switches to the KEA Preview Player Iframe
def switchToKEAPreviewPlayer(self):
if localSettings.TEST_CURRENT_IFRAME_ENUM == enums.IframeName.KEA_QUIZ_PLAYER:
return True
else:
localSettings.TEST_CURRENT_IFRAME_ENUM = enums.IframeName.KEA_QUIZ_PLAYER
if self.swith_to_iframe(self.KEA_IFRAME_PREVIEW_PLAYER) == False:
writeToLog("INFO", "FAILED to switch to KEA preview player")
return False
else:
return True
# @Author: Horia Cus
# This function switches to the KEA BLANK Iframe
def switchToKEABlank(self):
self.switch_to_default_content()
if localSettings.TEST_CURRENT_IFRAME_ENUM == enums.IframeName.KEA_QUIZ_BLANK:
return True
else:
localSettings.TEST_CURRENT_IFRAME_ENUM = enums.IframeName.KEA_QUIZ_BLANK
if self.swith_to_iframe(self.KEA_IFRAME_BLANK) == False:
writeToLog("INFO", "FAILED to switch to KEA preview player")
return False
else:
return True
# @Author: Horia Cus
# This function opens the KEA preview screen
def openKEAPreviewScreen(self):
self.switchToKeaIframe()
if self.click(self.KEA_PREVIEW_ICON, 5, True) == False:
writeToLog("INFO", "FAILED to click on the preview icon")
return False
self.switchToKEAPreviewPlayer()
if self.wait_visible(self.KEA_PREVIEW_PLAY_BUTTON, 30, True) == False:
writeToLog("INFO", "FAILED to load the preview screen")
return False
return True
# @Author: Horia Cus
# This function closes the KEA Preview screen
def closeKEAPreviewScreen(self):
self.switchToKEABlank()
if self.click(self.KEA_PREVIEW_CLOSE_BUTTON, 5, True) == False:
writeToLog("INFO", "FAILED to close the KEA preview screen")
return False
self.switch_to_default_content()
self.switchToKeaIframe()
return True
# @Author: Horia Cus
# This function verifies that the default options are displayed after using the revert option
# keaSection = must use enums.KEAQuizSection
    def revertToDefaultInKEA(self, keaSection):
        """Click 'Revert to default' inside a KEA options section and verify defaults.

        keaSection: enums.KEAQuizSection member (DETAILS / SCORES / EXPERIENCE) -
                    the section to expand, revert and verify.
        Returns True when every option shows its expected default state, False otherwise.
        """
        self.switchToKeaIframe()
        # Locator of the section toggle and of the 'no seeking forward' option
        tmpKEASection = (self.KEA_TOGGLE_MENU_OPTION[0], self.KEA_TOGGLE_MENU_OPTION[1].replace('OPTION_NAME', keaSection.value))
        tmpKEAOptionSeeking = (self.KEA_OPTION_NORMAL[0], self.KEA_OPTION_NORMAL[1].replace('OPTION_NAME', enums.KEAQuizOptions.NO_SEEKING_FORWARD.value))
        sleep(1)
        # Expand the requested section
        if self.click(tmpKEASection, 5, True) == False:
            writeToLog("INFO", "Failed to click on the " + keaSection.value)
            return False
        sleep(1)
        # Trigger the revert-to-default action
        if self.click(self.KEA_QUIZ_OPTIONS_REVERT_TO_DEFAULT_BUTTON, 5, True) == False:
            writeToLog("INFO", "FAILED to click on the revert to default button")
            return False
        # Verify the default state of each option expected per section
        if keaSection == enums.KEAQuizSection.DETAILS:
            if self.verifyKEAOptionState(enums.KEAQuizOptions.SHOW_WELCOME_PAGE, True) == False:
                return False
            if self.verifyKEAOptionState(enums.KEAQuizOptions.ALLOW_DOWNLOAD, True) == False:
                return False
            if self.verifyKEAOptionState(enums.KEAQuizOptions.INSTRUCTIONS, True) == False:
                return False
        elif keaSection == enums.KEAQuizSection.SCORES:
            if self.verifyKEAOptionState(enums.KEAQuizOptions.DO_NOT_SHOW_SCORES, False) == False:
                return False
            if self.verifyKEAOptionState(enums.KEAQuizOptions.SHOW_SCORES, True) == False:
                return False
            if self.verifyKEAOptionState(enums.KEAQuizOptions.INCLUDE_ANSWERS, True) == False:
                return False
        elif keaSection == enums.KEAQuizSection.EXPERIENCE:
            if self.verifyKEAOptionState(enums.KEAQuizOptions.ALLOW_ANSWER_CHANGE, True) == False:
                return False
            if self.verifyKEAOptionState(enums.KEAQuizOptions.ALLOW_SKIP, True) == False:
                return False
            if self.verifyKEAOptionState(enums.KEAQuizOptions.DO_NOT_ALLOW_SKIP, False) == False:
                return False
            # 'No seeking forward' is only verified when the option is rendered at all
            if self.wait_element(tmpKEAOptionSeeking, 1, False) != False:
                if self.verifyKEAOptionState(enums.KEAQuizOptions.NO_SEEKING_FORWARD, False) == False:
                    return False
            else:
                writeToLog("INFO", "AS EXPECTED, no seeking forward option was found as disabled")
        sleep(1)
        # Collapse the section again so the page returns to its initial state
        if self.click(tmpKEASection, 5, True) == False:
            writeToLog("INFO", "Failed to collapse the " + keaSection.value)
            return False
        return True
# @Author: Horia Cus
# This function can navigate to the Entry page while being in the KEA Page
# entryName = entry that you want to navigate to
def navigateToEntryPageFromKEA(self, entryName):
self.switch_to_default_content()
if self.clsCommon.entryPage.verifyEntryNamePresent(entryName, 3) == True:
writeToLog("INFO","You are already in the " + entryName + " entry page")
return True
if self.saveKeaChanges() == False:
writeToLog("INFO", "FAILED to save the KEA changes")
return False
tmp_button = (self.KEA_QUIZ_BUTTON[0], self.KEA_QUIZ_BUTTON[1].replace('BUTTON_NAME', enums.KeaQuizButtons.GO_TO_MEDIA_PAGE.value))
sleep(1)
if self.wait_element(tmp_button, 60, True) == False:
writeToLog("INFO", "FAILED to find the go to media button")
return False
sleep(1)
if self.click(tmp_button, 5, True) == False:
writeToLog("INFO", "FAILED to click on the Go To Media button")
return False
self.switch_to_default_content()
sleep(3)
if self.clsCommon.entryPage.verifyEntryNamePresent(entryName, 60)== False:
writeToLog("INFO","FAILED to load the entry page for " + entryName + " entry")
return False
sleep(2)
return True
# @Author: Horia Cus
# This function saves any change performed within the KEA page by clicking on the done button and waiting for the changes to be performed
# if resumeEditing=True, we will click on the "Edit" button and wait for the kea options to be displayed
def saveKeaChanges(self, resumeEditing=False):
self.switchToKeaIframe()
tmp_button_done = (self.KEA_QUIZ_BUTTON[0], self.KEA_QUIZ_BUTTON[1].replace('BUTTON_NAME', enums.KeaQuizButtons.DONE.value))
if self.wait_element(tmp_button_done, 3, True) != False:
if self.click(tmp_button_done, 5, True) == False:
writeToLog("INFO", "FAILED to click on the done button")
return False
sleep(0.5)
if self.wait_while_not_visible(self.KEA_LOADING_SPINNER, 75) == False:
writeToLog("INFO","FAILED to save the changes")
return False
sleep(2)
else:
writeToLog("INFO", "FAILED to find the 'done' button in order to save the changes")
return False
if resumeEditing == True:
sleep(3)
tmp_button_edit = (self.KEA_QUIZ_BUTTON[0], self.KEA_QUIZ_BUTTON[1].replace('BUTTON_NAME', enums.KeaQuizButtons.EDIT_QUIZ.value))
if self.wait_element(tmp_button_edit, 30, True) == False:
writeToLog("INFO", "FAILED to find the edit quiz button")
return False
if self.click(tmp_button_edit, 3, True) == False:
writeToLog("INFO", "FAILED to click on the edit quiz button")
return False
if self.wait_element(tmp_button_done, 30, True) == False:
writeToLog("INFO", "FAILED to load the edit quiz page")
return False
sleep(1)
return True
# @Author: Horia Cus
# This function creates a hint and why while being in the 'Multiple Choices' or 'True and False' KEA Quiz Question type screen
# hintText = is the text that you want to be displayed in the hint screen ( use only str)
# whyText = is the text that you want to be displayed in the why screen ( use only str)
# You can create a hint without specifying a why, leaving the whyText as = ''
def createHintAndWhy(self, hintText='', whyText=''):
self.switchToKeaIframe()
# we verify that we are in a KEA Quiz Question screen
saveButton = (self.KEA_QUIZ_BUTTON[0], self.KEA_QUIZ_BUTTON[1].replace('BUTTON_NAME', enums.KeaQuizButtons.SAVE.value))
if self.wait_element(saveButton, 3, True) == False:
writeToLog("INFO", "FAILED, please make sure that you're in 'Multiple Choices' or 'True and False' KEA Quiz Question type screen")
return False
# we used this locator in order to save the hint any why changes
applyButton = (self.KEA_QUIZ_BUTTON[0], self.KEA_QUIZ_BUTTON[1].replace('BUTTON_NAME', enums.KeaQuizButtons.APPLY.value))
if hintText != '':
if self.click(self.KEA_ADD_NEW_QUESTION_HINT_AND_WHY_TOGGLE_MENU_BUTTON, 3, True) == False:
writeToLog("INFO", "FAILED to trigger the Hint and Why toggle menu")
return False
sleep(1)
# access the Hint option
if self.click(self.KEA_ADD_NEW_QUESTION_HINT_BUTTON, 3, True) == False:
writeToLog("INFO", "FAILED to select the 'Hint' option from the Hint and Why toggle menu")
return False
# leave time for input field to be properly displayed
sleep(1)
# We use action chains in order to insert text within the fields, input text area being already active
action = ActionChains(self.driver)
try:
action.send_keys(hintText).perform()
except Exception:
writeToLog("INFO", "FAILED to insert " + hintText + " in the hint text field")
return False
# We wait one second in order to make sure that all the text was properly inserted
sleep(1)
# We save the changes
if self.click(applyButton, 3, True) == False:
writeToLog("INFO", "FAILED to click on the 'Apply button' in order to save the Hint changes")
return False
if whyText != '':
if self.click(self.KEA_ADD_NEW_QUESTION_HINT_AND_WHY_TOGGLE_MENU_BUTTON, 3, True) == False:
writeToLog("INFO", "FAILED to trigger the Hint and Why toggle menu")
return False
sleep(1)
# access the Why option
if self.click(self.KEA_ADD_NEW_QUESTION_WHY_BUTTON, 3, True) == False:
writeToLog("INFO", "FAILED to select the 'Why' option from the Hint and Why toggle menu")
return False
# leave time for the input field to be properly displayed
sleep(1)
# We use action chains in order to insert text within the fields, because the fields are already clicked
action = ActionChains(self.driver)
try:
action.send_keys(whyText).perform()
except Exception:
writeToLog("INFO", "FAILED to insert " + whyText + " in the 'why' text field")
return False
# We wait one second in order to make sure that all the text was properly inserted
sleep(1)
# We save the changes
if self.click(applyButton, 3, True) == False:
writeToLog("INFO", "FAILED to click on the 'Apply button' in order to save the Why changes")
return False
return True
# @Author: Horia Cus
# This function enters in any Quiz Question Type screen
# questionType must be enum ( e.g enums.QuizQuestionType.Multiple )
# we support 'Multiple Choice', 'True and False' and 'Reflection Point'
    def selectQuestionType(self, qestionType):
        """Enter the editing screen of a given KEA quiz question type.

        qestionType: enums.QuizQuestionType member - Multiple, TRUE_FALSE,
                     REFLECTION or OPEN_QUESTION are supported.
        Returns True when the question type was activated, False otherwise.
        NOTE(review): the Multiple branch also checks the *_DEFAULT locator and
        clicks the plain button once instead of the highlight-then-activate flow
        used by the other branches - presumably the Multiple tile behaves
        differently in the UI; confirm before unifying.
        """
        self.switchToKeaIframe()
        if qestionType == enums.QuizQuestionType.Multiple:
            # Verify if the KEA Quiz Question type is already highlighted
            if self.wait_element(self.KEA_ADD_NEW_MULTIPLE_QUESTION_BUTTON_ACTIVE, 2, True) != False or self.wait_element(self.KEA_ADD_NEW_MULTIPLE_QUESTION_BUTTON_DEFAULT, 2, True) != False:
                if self.click(self.KEA_ADD_NEW_MULTIPLE_QUESTION_BUTTON) == False:
                    writeToLog("INFO","FAILED to activate the 'ADD NEW MULTIPLE' quiz type")
                    return False
            # We highlight the KEA Quiz Question type and then access it
            else:
                if self.click(self.KEA_ADD_NEW_MULTIPLE_QUESTION_BUTTON) == False:
                    writeToLog("INFO","FAILED to highlight the 'ADD NEW MULTIPLE' quiz type")
                    return False
                sleep(1)
                if self.click(self.KEA_ADD_NEW_MULTIPLE_QUESTION_BUTTON_ACTIVE) == False:
                    writeToLog("INFO","FAILED to activate the 'ADD NEW MULTIPLE' quiz type")
                    return False
        elif qestionType == enums.QuizQuestionType.REFLECTION:
            # Verify if the KEA Quiz Question type is already highlighted
            if self.wait_element(self.KEA_ADD_NEW_REFLECTION_POINT_BUTTON_ACTIVE, 2, True) != False:
                if self.click(self.KEA_ADD_NEW_REFLECTION_POINT_BUTTON_ACTIVE) == False:
                    writeToLog("INFO","FAILED to activate the 'ADD NEW MULTIPLE' quiz type")
                    return False
            # We highlight the KEA Quiz Question type and then access it
            else:
                if self.click(self.KEA_ADD_NEW_REFLECTION_POINT_BUTTON) == False:
                    writeToLog("INFO","FAILED to highlight the 'Reflection Point' quiz type")
                    return False
                sleep(1)
                if self.click(self.KEA_ADD_NEW_REFLECTION_POINT_BUTTON_ACTIVE) == False:
                    writeToLog("INFO","FAILED to activate the 'Reflection Point' quiz type")
                    return False
        elif qestionType == enums.QuizQuestionType.TRUE_FALSE:
            # Verify if the KEA Quiz Question type is already highlighted
            if self.wait_element(self.KEA_ADD_NEW_TRUE_FALSE_QUESTION_BUTTON_ACTIVE, 2, True) != False:
                if self.click(self.KEA_ADD_NEW_TRUE_FALSE_QUESTION_BUTTON_ACTIVE) == False:
                    writeToLog("INFO","FAILED to activate the 'True and False' quiz type")
                    return False
            # We highlight the KEA Quiz Question type and then access it
            else:
                if self.click(self.KEA_ADD_NEW_TRUE_FALSE_QUESTION_BUTTON) == False:
                    writeToLog("INFO","FAILED to highlight the 'True and False' quiz type")
                    return False
                sleep(1)
                if self.click(self.KEA_ADD_NEW_TRUE_FALSE_QUESTION_BUTTON_ACTIVE) == False:
                    writeToLog("INFO","FAILED to activate the 'True and False' quiz type")
                    return False
        elif qestionType == enums.QuizQuestionType.OPEN_QUESTION:
            # Verify if the KEA Quiz Question type is already highlighted
            if self.wait_element(self.KEA_ADD_NEW_OPEN_QUESTION_BUTTON_ACTIVE, 2, True) != False:
                if self.click(self.KEA_ADD_NEW_OPEN_QUESTION_BUTTON_ACTIVE) == False:
                    writeToLog("INFO","FAILED to activate the 'open-Q' quiz type")
                    return False
            # We highlight the KEA Quiz Question type and then access it
            else:
                if self.click(self.KEA_ADD_NEW_OPEN_QUESTION_BUTTON) == False:
                    writeToLog("INFO","FAILED to highlight the 'open-Q' quiz type")
                    return False
                sleep(1)
                if self.click(self.KEA_ADD_NEW_OPEN_QUESTION_BUTTON_ACTIVE) == False:
                    writeToLog("INFO","FAILED to activate the 'open-Q' quiz type")
                    return False
        # We verify that a supported KEA Quiz Question type has been used
        else:
            writeToLog("INFO", "FAILED, please make sure that you used a supported KEA Quiz Question type and that the value used was enum")
            return False
        return True
# @Author: Inbar Willman
# Check editor when user is able just to create clip in editor
def verifyEditorForClippingPermission(self):
# Verify that quiz tab isn't displayed
if self.wait_element(self.KEA_QUIZ_TAB, timeout=3) != False:
writeToLog("INFO","FAILED: Quiz tab is displayed in editor")
return False
# Verify that 'Save' button for trim isn't displayed
if self.wait_element(self.EDITOR_SAVE_BUTTON, timeout=3) != False:
writeToLog("INFO","FAILED: 'Save' button is displayed in editor")
return False
writeToLog("INFO","Success: 'Save' button and Quiz tab aren't displayed in editor")
return True
# @Author: Horia Cus
# This function will verify the quiz question number, timestamp and title in the KEA Timeline section
# questionDict must have the following structure {'NUMBER OF QUESTION':questionDetailsList}
# questionDetailsList must contain ['timestamp', enums.QuizQuestionType.Type, 'Question title']
    def keaTimelineVerification(self, questionDict):
        """Verify quiz question number, timestamp and title in the KEA timeline.

        questionDict: {'question number (1-based, as str)': [timestamp, question
                      type enum, question title]} - one entry per expected cue point.
        Returns True when every timeline cue point matches its dictionary entry,
        False otherwise. Includes a retry path (tab switch + re-hover) because the
        hover tooltip is flaky.
        """
        self.switchToKeaIframe()
        if self.wait_element(self.KEA_TIMELINE_SECTION_QUESTION_BUBBLE, 5, True) == False:
            writeToLog("INFO", "FAILED to find any quiz question pointer in the time line section")
            return False
        # We take all the available quiz question pointers from the timeline KEA section
        presentedQuestionsInTimeline = self.wait_elements(self.KEA_TIMELINE_SECTION_QUESTION_BUBBLE, 1)
        # We verify that the number of the available quiz questions from the timeline, matches with the number of quiz questions given in the questionDict
        if len(presentedQuestionsInTimeline) != len(questionDict):
            writeToLog("INFO", "FAILED, in timeline section were found " + str(len(presentedQuestionsInTimeline)) + " questions, and in the dictionary were given " + str(len(questionDict)) + " questions")
            return False
        # The total-questions label must agree with the number of cue points found
        totalQuestionNumber = (self.KEA_TIMELINE_SECTION_TOTAL_QUESTION_NUMBER[0], self.KEA_TIMELINE_SECTION_TOTAL_QUESTION_NUMBER[1].replace('QUESTION_NUMBER', str(len(presentedQuestionsInTimeline))))
        if self.wait_element(totalQuestionNumber, 1, True) == False:
            writeToLog("INFO", "FAILED, the total number of question text doesn't match with the total number of questions from the KEA timeline section")
            return False
        actions = {}
        # We verify all the available quiz question pointers, by verifying the quiz number,time stamp and quiz title
        for x in range(0, len(presentedQuestionsInTimeline)):
            # We take the locator element for the current quiz number
            currentQuestion = presentedQuestionsInTimeline[x]
            # One ActionChains instance per cue point, kept in a dict keyed by index
            actions["action{0}".format(x)]= ActionChains(self.driver)
            # We hover over the current quiz number, in order to verify the elements
            try:
                actions["action{0}".format(x)].move_to_element(currentQuestion).pause(2).perform()
            except Exception:
                writeToLog("INFO", "FAILED to hover over the quiz number " + str(x+1) + " during the first try")
                # Add redundancy step if unable to select the element during the first try
                if self.clickElement(currentQuestion, True) == False:
                    writeToLog("INFO", "FAILED to click on the quiz number " + str(x+1) + " in order to hover on it during the second try")
                    return False
                if self.setRealTimeMarkerToTime('00:00') == False:
                    writeToLog("INFO", "FAILED to resume the real time marker to second zero in order to hover on the quiz cue point during the second try")
                    return False
                try:
                    ActionChains(self.driver).move_to_element(currentQuestion).pause(2).perform()
                except Exception:
                    writeToLog("INFO", "FAILED to hover over the quiz number " + str(x+1) + " after two tries")
                    return False
            # We take the quiz title and time stamp for the current quiz number
            currentQuestionDetails = questionDict[str(x+1)]
            # We take the presented quiz number, title and time stamp
            try:
                questionNumberPresented = self.wait_element(self.KEA_TIMELINE_SECTION_QUESTION_BUBBLE_QUESTION_NUMBER, 2, True).text
                questionTitlePresented = self.wait_element(self.KEA_TIMELINE_SECTION_QUESTION_BUBBLE_TITLE, 2, True).text
                questionTimestampPresented = self.wait_element(self.KEA_TIMELINE_SECTION_QUESTION_BUBBLE_QUESTION_TIMESTAMP, 2, True).text
            except Exception:
                writeToLog("INFO", "FAILED to find the question details while hovering over the question: " + currentQuestionDetails[2] + " during the first time")
                # Recovery path: switch KEA tabs to force a re-render, then retry the hover
                try:
                    # Take the entry name in order to switch between the KEA Tabs
                    try:
                        entryName = self.wait_element(self.KEA_ENTRY_NAME, 60, multipleElements=True).text
                    except Exception:
                        writeToLog("INFO", "FAILED to take the Entry Name while trying to verify the timeline section")
                        return False
                    # Switch between the kea tabs in order to refresh the elements
                    if self.launchKEATab(entryName, enums.keaTab.VIDEO_EDITOR, False, 1) == False:
                        writeToLog("INFO", "FAILED to navigate to the Video Editor in order to try to perform a switch between the tabs and take the Cue Point details")
                        return False
                    sleep(3)
                    if self.launchKEATab(entryName, enums.keaTab.QUIZ, False, 1) == False:
                        writeToLog("INFO", "FAILED to navigate to the Video Editor in order to try to perform a switch between the tabs and take the Cue Point details")
                        return False
                    sleep(7)
                    # Take the questions from the timeline after switching between KEA Tabs
                    presentedQuestionsInTimelineSecondTry = self.wait_elements(self.KEA_TIMELINE_SECTION_QUESTION_BUBBLE, 1)
                    currentQuestionSecondTry = presentedQuestionsInTimelineSecondTry[x]
                    ActionChains(self.driver).move_to_element(currentQuestionSecondTry).pause(2).perform()
                    questionNumberPresented = self.wait_element(self.KEA_TIMELINE_SECTION_QUESTION_BUBBLE_QUESTION_NUMBER, 2, True).text
                    questionTitlePresented = self.wait_element(self.KEA_TIMELINE_SECTION_QUESTION_BUBBLE_TITLE, 2, True).text
                    questionTimestampPresented = self.wait_element(self.KEA_TIMELINE_SECTION_QUESTION_BUBBLE_QUESTION_TIMESTAMP, 2, True).text
                except Exception:
                    writeToLog("INFO", "FAILED to find the question details while hovering over the question: " + currentQuestionDetails[2] + " after two tries")
                    return False
            # We verify that the quiz number, matches with the desired order from the questionDict
            # First we verify the question number and then we verify the 'Question' text that its presented
            if questionNumberPresented.count(str(x+1)) != 1 and questionNumberPresented.count('Question') != 1:
                writeToLog("INFO", "FAILED, the question " + currentQuestionDetails[2] + " was not found at the number " + str(x+1))
                return False
            # We verify that the presented title, matches with the desired one from the questionDict
            if questionTitlePresented != currentQuestionDetails[2]:
                writeToLog("INFO", "FAILED, the following question title was presented: " + questionTitlePresented + " instead of " + currentQuestionDetails[2] + " title that has been given in the dictionary")
                return False
            # We verify that the presented time stamp, matches with the desired one from the questionDict
            if questionTimestampPresented != currentQuestionDetails[0]:
                writeToLog("INFO", "FAILED, the question " + currentQuestionDetails[2] + " has been found at timestamp : " + questionTimestampPresented + " instead of " + currentQuestionDetails[0])
                return False
        return True
# @Author: Horia Cus
# This function can change the answer order by drag and drop or shuffle
# changeAnswerOrderDict must contain a list for each question that needs to be modified
# changeAnswerOrderDict = {'1':answerOrderOne}
# This list is used in order to change the answer order using drag and drop
# index 0 = question title that must be found while hovering over the quiz question bubble
# index 1 = question answer that we want to move to a different location
# index 2 = question location where we want to move index 1
# answerOrderOne = ['question #1 Title', 4, 1] ( answer from place four will be moved to the 1st place )
# This list is used in order to verify that the answer options are displayed in the desired order
# answerListOrderOne = ['question #1 option #4', 'question #1 option #1', 'question #1 option #2', 'question #1 option #3']
# This dictionary is used in order to verify the answer list order : verifyAnswerOrderDict = {'1':answerListOrderOne}
# If shuffle == True, there's no need to have an expectedAnswerListDict
def changeAnswerOrder(self, changeAnswerOrderDict, expectedAnswerListDict, shuffle=False, tries=3):
self.switchToKeaIframe()
# Verify that we are in the KEA editor
if self.wait_element(self.KEA_TIMELINE_SECTION_QUESTION_BUBBLE, 15, True) == False:
writeToLog("INFO", "FAILED to find any quiz question pointer in the time line section")
return False
# Take all the available quiz question pointers from the timeline KEA section
quizCuePoint = self.wait_elements(self.KEA_TIMELINE_SECTION_QUESTION_BUBBLE, 1)
# Iterate each available question
for questionNumber in changeAnswerOrderDict:
# Take the details for the current question
questionDetails = changeAnswerOrderDict[questionNumber]
# Create the locator for the current question
questionCuePoint = quizCuePoint[int(questionNumber) - 1]
action = ActionChains(self.driver)
# Hover over the current question
try:
action.move_to_element(questionCuePoint).pause(1).perform()
except Exception:
writeToLog("INFO", "FAILED to hover over the quiz " + questionDetails[0])
return False
# Take the presented title from the hovered question
questionTitlePresented = self.wait_element(self.KEA_TIMELINE_SECTION_QUESTION_BUBBLE_TITLE, 5, True)
# Verify that the question title was presented
if questionTitlePresented == False:
writeToLog("INFO", "FAILED to take the question title")
return False
else:
questionTitlePresented = questionTitlePresented.text
# Verify that the presented title is present also in our question details list
if questionTitlePresented in questionDetails:
# Enter in the quiz question editing screen
if self.clickElement(questionCuePoint) == False:
writeToLog("INFO", "FAILED to select the question cue point for " + questionDetails[0])
return False
numberOfAnswers = str(len(self.wait_elements(self.KEA_TIMELINE_SECTION_DRAG_HAND, 3)))
availableAnswers = self.wait_elements(self.KEA_TIMELINE_SECTION_DRAG_HAND, 3)
if shuffle == True:
count = 0
while True or count <= tries:
# Take the current answer list presented, in order to verify that after we use the shuffle option, the list will change
answerListPresentedFirst = self.extractAnswersListPresented()
# Trigger the shuffle option
if self.click(self.KEA_QUIZ_SHUFFLE_BUTTON, 1, True) == False:
writeToLog("INFO", "FAILED to click on the shuffle button")
# Verify that the answer order is changed
if self.verifyAnswersOrder(answerListPresentedFirst, shuffle=True) == False:
count += 1
writeToLog("INFO", "During the " + str(count) + " try, the same answer order has been displayed, going to retry")
if count > tries:
writeToLog("INFO", "FAILED, the shuffle option has no functionality")
return False
else:
break
elif shuffle == False:
# Verify that the answer number is available in the presented answer list
if questionDetails[2] > int(numberOfAnswers):
writeToLog("INFO", "FAILED, only " + numberOfAnswers + " number of answers are available, instead of " + str(questionDetails[2]))
return False
# Create the element for the answer that we want to move
answerToBeMoved = availableAnswers[questionDetails[1] - 1]
# Create the element for the place where we want to move our answer
# If we want to move the answer lower, we must specify only the answer that it should replace
if questionDetails[1] < questionDetails[2]:
placeToBeMoved = availableAnswers[questionDetails[2] - 1]
# If we want to move the answer higher, we must specify two location upper than the one where it would be placed
elif questionDetails[1] > questionDetails[2]:
# If the answer should be moved to the first position, we will use quiz question title as pointer
if questionDetails[2] == 1:
placeToBeMoved = self.wait_element(self.KEA_QUIZ_QUESTION_FIELD, 1, True)
else:
placeToBeMoved = availableAnswers[questionDetails[2] - 2]
# Move the answer to the desired new location
try:
action.move_to_element(answerToBeMoved).pause(1).drag_and_drop(answerToBeMoved, placeToBeMoved).perform()
action.reset_actions()
except Exception:
writeToLog("INFO", "FAILED to hover over the quiz " + questionDetails[1])
return False
# Create the list attribute in order to verify that the answer order has been changed successfully
answerList = expectedAnswerListDict[questionNumber]
# Verify that the answer order has been changed successfully
if self.verifyAnswersOrder(answerList) == False:
return False
# Save the changes
if self.saveQuizChanges() == False:
writeToLog("INFO", "FAILED to save the Quiz changes for " + questionDetails[1] + " question")
return False
else:
writeToLog("INFO", "FAILED to find the " + questionDetails[0] + " because the " + questionTitlePresented + " was presented")
return False
writeToLog("INFO", "Quiz answer's order has been changed successfully")
return True
# @Author: Horia Cus
# This function will click on the save button and wait until the changes are saved
def saveQuizChanges(self):
# We save the KEA Quiz Question
if self.keaQuizClickButton(enums.KeaQuizButtons.SAVE) == False:
writeToLog("INFO","FAILED to click on the save button")
return False
# We wait until the changes were successfully saved
if self.wait_while_not_visible(self.KEA_LOADING_SPINNER, 35) == False:
writeToLog("INFO","FAILED to wait until spinner isn't visible")
return False
sleep(1)
return True
# @Author: Horia Cus
# This function will verify that the answer that are presented matches with the answerList
# answerList = contains a list with all the available answers and the correct order
def verifyAnswersOrder(self, answerList, shuffle=False):
# Take the answer list presented
answerListPresented = self.extractAnswersListPresented()
# Verify that the answer list that was first presented, no longer matches with the answer list that is now presented
if shuffle == True:
if answerListPresented == answerList:
writeToLog("INFO", "The shuffle option kept the same structure")
return False
# Verify that the answer list presented, matches with our desired answer list
else:
if answerListPresented != answerList:
writeToLog("INFO", "FAILED, the answer order doesn't match with the answer dictionary")
return False
writeToLog("INFO", "Answer order is properly displayed")
return True
# @Author: Horia Cus
# This function will iterate through each answer field and return a list with all the available answers in the order that they were found
    def extractAnswersListPresented(self):
        """Return the answer texts currently shown, in on-screen order.

        Reads each answer input field by selecting its text and copying it to the
        clipboard (the fields are inputs, so .text is not usable).
        Returns a list of answer strings, or False when no answer fields exist.
        """
        answerFields = self.wait_elements(self.KEA_QUIZ_ANSWER_GENERAL, 1)
        answerListPresented = []
        if answerFields == False:
            writeToLog("INFO", "FAILED to take the elements for the answers")
            return False
        # We iterate through each available answer field
        for x in range(0, len(answerFields)):
            # Select the answer input field
            if self.clickElement(answerFields[x]) == False:
                writeToLog("INFO", "FAILED to click on the answer field")
                return False
            # Copy the answer text in clipboard
            self.send_keys_to_element(answerFields[x], Keys.CONTROL + 'a')
            self.send_keys_to_element(answerFields[x], Keys.CONTROL + 'c')
            # Take the answer text from clipboard
            answerText = self.clsCommon.base.paste_from_clipboard()
            # Add the answer text to answer list
            answerListPresented.append(answerText)
        return answerListPresented
# @Author: Horia Cus
# This function can change the question order from the KEA timeline section by moving in forward and / or backwards
# changeTimelineOrderDict = is a dictionary that contains as key the Quiz Number and as value the amount of seconds that we want to move the question forward and / or backwards
# e.g changeTimelineOrderDict = {'1':3, '2':2, '3':1}, question one will be moved by three seconds
# This function will not change the timeline properly if the zoom in / zoom out has been used
    def changeQuestionOrderInTimeline(self, changeTimelineOrderDict):
        """Move quiz cue points along the KEA timeline by a number of seconds.

        changeTimelineOrderDict: {'question number (str)': seconds to move the
                                  question forward (positive) or backwards (negative)}.
        Returns True when every question was moved and saved, False otherwise.
        Does not work correctly when the timeline zoom in/out has been used.
        """
        self.switchToKeaIframe()
        # Verify that we are in the KEA editor
        if self.wait_element(self.KEA_TIMELINE_SECTION_QUESTION_BUBBLE, 15, True) == False:
            writeToLog("INFO", "FAILED to find any quiz question pointer in the time line section")
            return False
        sleep(5)
        # Iterate each available question
        for questionNumber in changeTimelineOrderDict:
            # Take all the available quiz question pointers from the timeline KEA section
            # (re-fetched every iteration because saving re-renders the timeline)
            quizCuePoint = self.wait_elements(self.KEA_TIMELINE_SECTION_QUESTION_BUBBLE, 15)
            # Take the details for the current question ( number of seconds that the quiz should be moved by )
            questionDetails = changeTimelineOrderDict[questionNumber]
            # Create the locator for the current question
            questionCuePoint = quizCuePoint[int(questionNumber) - 1]
            # Move the quiz number to a new timeline location.
            # NOTE(review): 35.7 is presumably the pixels-per-second scale of the
            # timeline at default zoom, and the None source makes the drag start
            # from the already-held cue point - confirm against the timeline markup.
            try:
                ActionChains(self.driver).move_to_element(questionCuePoint).click().pause(2).drag_and_drop_by_offset(None,35.7*questionDetails, 0).perform()
            except Exception:
                writeToLog("INFO", "FAILED to move question number " + str(questionNumber) + " by " + str(questionDetails) + " seconds")
                return False
            # Save the new timeline location
            if self.saveQuizChanges() == False:
                writeToLog("INFO", "FAILED to save the new timeline location for " + str(questionNumber) + " question number")
                return False
            if self.clickDone(enums.KeaQuizButtons.EDIT_QUIZ) == False:
                writeToLog("INFO", "FAILED to save the new timeline location for " + str(questionNumber) + " question number")
                return False
        return True
# @Author: Horia Cus
# This function will verify that the entry can be played in KEA Editor and KEA Quiz page
# Verify that the entire entry can be watched from the beginning till the end
# Increment tries by one for each 15 seconds of the entry ( e.g Entry = 30 seconds, tries=2 )
    def verifyPlayingProcess(self, tries=2):
        """Play the entry in the KEA editor from start to end and verify playback.

        Polls the timeline cursor roughly once per second and compares it with
        the expected wall-clock time; small drifts are tolerated up to *tries*
        mismatches.

        :param tries: allowed number of cursor/clock mismatches; increment by
            one for each 15 seconds of entry length (e.g. 30s entry -> tries=2).
        :return: True when the whole entry played through, False otherwise.
        """
        self.switchToKeaIframe()
        # Verify that we are in the KEA editor
        if self.wait_element(self.KEA_PLAYER_CONTROLS_PLAY_BUTTON, 15, True) == False:
            writeToLog("INFO", "FAILED to find the KEA player play button")
            return False
        # Take the entry time
        # NOTE(review): the slicing below assumes the label renders as '/ MM:SS'
        # (under one hour) -- TODO confirm for longer entries.
        entryTotalTime = self.wait_element(self.EDITOR_TOTAL_TIME, 1, True).text.replace(" ", "")[1:]
        # Because the video resumes back to zero before the last second to be displayed, we have to issue this variable
        if entryTotalTime[3:] == '00':
            # with changes to the mm
            entryTotalTimeVerify = str(int(entryTotalTime[:2])-1) + ':59'
        else:
            # with changes to ss
            entryTotalTimeVerify = entryTotalTime[:3] + str(int(entryTotalTime[3:])-1)
        # Time presented inside the timeline cursor
        realTimeMarker = self.wait_element(self.EDITOR_REALTIME_MARKER, 1, True).text[:5]
        # Verify if we are at the beginning of the entry
        if self.resumeFromBeginningKEA(forceResume=False) == False:
            writeToLog("INFO", "FAILED to start the entry from the beginning")
            return False
        # Trigger the playing process
        if self.click(self.KEA_PLAYER_CONTROLS_PLAY_BUTTON, 2, True) == False:
            writeToLog("INFO", "FAILED to click on the KEA play button")
            return False
        sleep(1)
        # Wait until the loading spinner is no longer present
        if self.wait_while_not_visible(self.KEA_LOADING_SPINNER_QUIZ_PLAYER, 30) == False:
            writeToLog("INFO", "FAILED to load the KEA entry video playing process")
            return False
        # Set the real time to second one
        x = 1
        realtTimeMakerElement = self.wait_element(self.EDITOR_REALTIME_MARKER, 1, True)
        # Wait until the timeline cursor reached the first second
        # NOTE(review): busy-wait with no timeout -- loops forever if playback
        # never advances past 00:00; consider bounding it.
        startTimeLine = '00:00'
        while startTimeLine == realTimeMarker:
            startTimeLine = realtTimeMakerElement.text[:5]
        # Because the speed for the playing process is higher than the loop run, we increment the number of tries by one for each 15 seconds
        attempt = 0
        # We let the playing process to run until we reach the end of the entry
        while realTimeMarker != entryTotalTimeVerify:
            # We take the presented time from timeline cursor
            realTimeMarkerUpdated = realtTimeMakerElement.text[:5]
            # We take the real time based on 1 second of sleep and number of iteration from X
            # NOTE(review): str(timedelta)[2:] yields 'MM:SS' only for entries under
            # one hour -- TODO confirm no entry exceeds 59:59.
            realTime = str(datetime.timedelta(seconds=x))[2:]
            writeToLog("INFO", "AS Expected, Current time present in the marker " + str(realTimeMarkerUpdated) + " real time expected" + realTime)
            # Verify that the presented time from the timeline cursor, matches with the expected time
            if realTimeMarkerUpdated != realTime:
                attempt += 1
                writeToLog("INFO", "AS Expected, Presented time " + realTimeMarkerUpdated + " expected" + realTime + " during the " + str(attempt) + " attempt")
                # For each 15 seconds, 1 try should be passed, if the number of attempts is higher than tries, will return false
                if attempt > tries:
                    writeToLog("INFO", "Timeline time was: " + realTimeMarkerUpdated + " and " + realTime + " was expected")
                    return False
                # Take the presented time from the timeline cursor in order to compare it with entry total time
                realTimeMarker = realtTimeMakerElement.text[:5]
            else:
                # Take the presented time from the timeline cursor in order to compare it with entry total time
                realTimeMarker = realtTimeMakerElement.text[:5]
            sleep(1)
            # Increment the real time by one for each run
            x += 1
        writeToLog("INFO", "The entire entry has been successfully watched")
        return True
# @Author: Horia Cus
# This function will refresh the page if the playing process is already started or if the user is at a different time within the timeline than zero
def resumeFromBeginningKEA(self, forceResume=False):
self.switchToKeaIframe()
# Verify if any of the elements that indicates if the user is at the beginning of entry is present or not
if self.wait_element(self.EDITOR_REALTIME_MARKER, 1, True).text != '00:00.00' or self.wait_element(self.KEA_PLAYER_CONTROLS_PAUSE_BUTTON, 1) != False or forceResume == True:
# Refresh the page
self.driver.refresh()
# Change the iframe to default in order to be able to resume to KEA iframe
self.clsCommon.base.switch_to_default_content()
self.switchToKeaIframe()
# Verify that the KEA page has been loaded successful
if self.wait_element(self.KEA_PLAYER_CONTROLS_PLAY_BUTTON, 15, True) == False:
writeToLog("INFO", "FAILED to find the KEA player play button")
return False
return True
# @Author: Horia Cus
# This function verifies that the user is able to navigate forward and backwards to each quiz question using navigation buttons
# Verify that the proper question number and time stamp are displayed
def verifyKEANavigation(self):
self.switchToKeaIframe()
# Verify that we are in the KEA Page
if self.wait_element(self.KEA_TIMELINE_SECTION_QUESTION_BUBBLE, 30, True) == False:
writeToLog("INFO", "FAILED to find any quiz question pointer in the time line section")
return False
# Verify if we are at the beginning of the entry
if self.resumeFromBeginningKEA(forceResume=False) == False:
writeToLog("INFO", "FAILED to start the entry from the beginning")
return False
# We take all the available quiz question pointers from the timeline KEA section
presentedQuestionsInTimeline = self.wait_elements(self.KEA_TIMELINE_SECTION_QUESTION_BUBBLE, 1)
presentedQuestionDict = {}
# Navigate to each Quiz Question using next option
for x in range(0, len(presentedQuestionsInTimeline)):
# Navigate to the next available question
if self.click(self.KEA_PLAYER_CONTROLS_NEXT_ARROW_BUTTON, 15, True) == False:
writeToLog("INFO", "FAILED to navigate to the " + str(x+1) + " question field")
return False
# Take the details for the current question
try:
presentedQuestionNumber = self.wait_element(self.KEA_ADD_NEW_QUESTION_NUMBER, 5, True).text
presentedQuestionTime = self.wait_element(self.EDITOR_REALTIME_MARKER, 5, True).text
except Exception:
writeToLog("INFO", "FAILED to take the details after returning back to the first question")
return False
# Verify that the expected question number is displayed
if presentedQuestionNumber.count(str(x+1)) == 0:
writeToLog("INFO", "FAILED, question number " + str(x+1) + " was expected, instead " + presentedQuestionNumber + " is presented")
return False
# Add the Question details inside the dictionary
presentedQuestionDict.update({presentedQuestionNumber:presentedQuestionTime})
# Navigate back to the first question, using next arrow at the end of the last available question
if self.click(self.KEA_PLAYER_CONTROLS_NEXT_ARROW_BUTTON, 15, True) == False:
writeToLog("INFO", "FAILED to navigate to the " + str(x+1) + " question field")
return False
# Take the details back from the first question
try:
presentedQuestionNumber = self.wait_element(self.KEA_ADD_NEW_QUESTION_NUMBER, 5, True).text
presentedQuestionTime = self.wait_element(self.EDITOR_REALTIME_MARKER, 5, True).text
except Exception:
writeToLog("INFO", "FAILED to take the details after returning back to the first question")
return False
# Verify that the user was resumed back to the first question
if presentedQuestionNumber.count('1') != 1:
writeToLog("INFO", "FAILED, to resume to the first question after iterating all of the available Quiz Questions")
return False
# Verify that the first time stamp that was presented matches with the current one
if presentedQuestionDict[presentedQuestionNumber] != presentedQuestionTime:
writeToLog("INFO", "FAILED, at first the " + presentedQuestionDict[presentedQuestionNumber] + " was present and now " + presentedQuestionTime + " time is presented")
return False
presentedQuestionDictPrevious = {}
i = len(presentedQuestionDict)
for x in range(0, len(presentedQuestionDict)):
# Navigate to the previous question
if self.click(self.KEA_PLAYER_CONTROLS_PREVIOUS_ARROW_BUTTON, 5, True) == False:
writeToLog("INFO", "FAILED to navigate using previous arrow")
return False
# Take the details for the current question
try:
presentedQuestionNumber = self.wait_element(self.KEA_ADD_NEW_QUESTION_NUMBER, 5, True).text
presentedQuestionTime = self.wait_element(self.EDITOR_REALTIME_MARKER, 5, True).text
except Exception:
writeToLog("INFO", "FAILED to take the details after returning back to the first question")
return False
# Verify that the expected question number is displayed
if presentedQuestionNumber.count(str(i)) == 0:
writeToLog("INFO", "FAILED, question number " + str(x+1) + " was expected, instead " + presentedQuestionNumber + " is presented")
return False
# Add the Question details inside the dictionary
presentedQuestionDictPrevious.update({presentedQuestionNumber:presentedQuestionTime})
i -= 1
# Verify that all the available Quiz Question were navigated forward and backward
if len(presentedQuestionDict) != len(presentedQuestionsInTimeline) or len(presentedQuestionDictPrevious) != len(presentedQuestionsInTimeline):
writeToLog("INFO", "FAILED " + str(len(presentedQuestionDict)) + " questions were found while navigating forward, " + str(len(presentedQuestionDictPrevious)) + " were found while navigating backwards, and " + len(presentedQuestionsInTimeline) + " were presented" )
return False
writeToLog("INFO", "PASSED, all the quiz questions were properly navigated forward and backward")
return True
# @Author: Horia Cus
# This function will verify the KEA Timeline section and Navigation while using the zoom option
# Zoom option will be used by using the Zoom Level pointer forward and backwards
# Question Cue Point distance is verified after each zoom in call
# KEA Timline Container size is verified after each zoom in call
# KEA Zoom Level Pointer is verified after each zoom in call
    def verifyZoomLevelInTimeline(self):
        """Verify the KEA timeline zoom behavior by dragging the zoom pointer.

        Zooms in step by step via drag-and-drop of the zoom level pointer,
        checking after each step that question cue points spread apart, that
        the timeline container grows, and that the pointer moved; finally
        zooms fully out and checks the timeline returned to its initial state.

        :return: True when all zoom checks passed, False otherwise.
        """
        self.switchToKeaIframe()
        # Verify that we are in the KEA Page
        if self.wait_element(self.KEA_TIMELINE_SECTION_CONTAINER, 30, True) == False:
            writeToLog("INFO", "FAILED to find the KEA Timeline section")
            return False
        # Taking the default size of the timeline container
        containerDefaultSize = self.wait_element(self.KEA_TIMELINE_SECTION_CONTAINER, 30, True).size['width']
        # Taking the zoom level elements
        # (locator template: 'VALUE' is replaced with 0 == pointer at the leftmost position)
        zoomLevelDefault = (self.KEA_TIMELINE_CONTROLS_ZOOM_LEVEL_POINTER_VALUE[0], self.KEA_TIMELINE_CONTROLS_ZOOM_LEVEL_POINTER_VALUE[1].replace('VALUE', str(0)))
        # zoomInButtonElement = self.wait_element(self.KEA_TIMELINE_CONTROLS_ZOOM_IN_BUTTON, 5, True)
        zoomOutButtonElement = self.wait_element(self.KEA_TIMELINE_CONTROLS_ZOOM_OUT_BUTTON , 5, True)
        zoomLevelPointer = self.wait_element(self.KEA_TIMELINE_CONTROLS_ZOOM_LEVEL_POINTER, 5, True)
        # Verify that we are at the beginning of the timeline section
        if self.wait_element(zoomLevelDefault, 5, True) == False:
            writeToLog("INFO", "FAILED to find the KEA Timeline Zoom Level Pointer at the initial location ( beginning )")
            return False
        action = ActionChains(self.driver)
        # Create a list that will be used in order to compare the initial size of a Question with the updated one after using the zoom option
        questionListPresented = []
        # Use the zoom option until reaching the maximum length available
        # NOTE(review): 15 steps of 5px presumably spans the whole zoom bar -- TODO confirm
        for x in range(0,15):
            # Incrementing the zoom in option
            zoomInPosition = x*5
            # Taking the Zoom Pointer location before using the zoom option
            pointerInitial = self.wait_element(self.KEA_TIMELINE_CONTROLS_ZOOM_LEVEL_POINTER, 1, True).location['x']
            # Taking the Timeline Container size before using the zoom option
            containerInitialSize = int(self.wait_element(self.KEA_TIMELINE_SECTION_CONTAINER, 30, True).size['width'])
            # Verify if Questions are created within the timeline section
            if self.wait_elements(self.KEA_TIMELINE_SECTION_QUESTION_BUBBLE_CONTAINER, 1) != False:
                # Take all the available questions
                presentedQuestions = self.wait_elements(self.KEA_TIMELINE_SECTION_QUESTION_BUBBLE_CONTAINER, 1)
                # Add question's position inside a list
                for i in range (0, len(presentedQuestions)):
                    # Take the location attribute from the active question
                    # NOTE(review): assumes the second token of the inline style holds the
                    # horizontal position (e.g. 'left: 123px;') -- TODO confirm
                    presentedQuestion = presentedQuestions[i].get_attribute('style').split()[1]
                    # Convert the location attribute in integer
                    presentedQuestionPosition = int(re.search(r'\d+', presentedQuestion).group())
                    # Add the question location inside the question List
                    questionListPresented.append(presentedQuestionPosition)
                # Verify that at least one time the Zoom option has been used
                if len(questionListPresented) > len(presentedQuestions):
                    # Comparing the last two sets of Questions ( before zoom option / after zoom option) and verify that the second set has a higher location
                    # Take the last before / after zoom in list elements
                    compareList = questionListPresented[-len(presentedQuestions)*2:]
                    for k in range(0,len(presentedQuestions)-1):
                        # Verify that the list that was created after zoom option has a higher location than the list that was created before that
                        if compareList[k] > compareList[k+len(presentedQuestions)]:
                            writeToLog("INFO", "FAILED, question number " + str(k+1) + " has been found at " + str(questionListPresented[k]) + " before zoom in, and at " + str(questionListPresented[k+len(presentedQuestions)]) + " after zoom in")
                            return False
            # Use the zoom in option, by drag and drop of Zoom Level Pointer element
            # NOTE(review): 45 is presumably the pixel distance from the zoom-out button
            # to the start of the zoom bar -- TODO confirm against the current UI layout
            try:
                action.move_to_element(zoomLevelPointer).click_and_hold(zoomLevelPointer).move_to_element_with_offset(zoomOutButtonElement, 45+zoomInPosition, 0).release().pause(1).perform()
                sleep(2)
            except Exception:
                writeToLog("INFO", "FAILED to use zoom in option properly at the " + str(x+1) + " try")
                return False
            # Take the Location for Zoom option Pointer after using the zoom in option
            pointerUpdated = self.wait_element(self.KEA_TIMELINE_CONTROLS_ZOOM_LEVEL_POINTER, 1, True).location['x']
            # Take the Timeline Container size after using the zoom in option
            containerUpdatedSize = int(self.wait_element(self.KEA_TIMELINE_SECTION_CONTAINER, 30, True).size['width'])
            # Verify that the Timeline pointer location has been changed after using the zoom in option
            if pointerInitial >= pointerUpdated:
                writeToLog("INFO", "FAILED, Zoom Level pointer was set at " + str(pointerInitial) + " and we expected " + str(pointerUpdated) + " or higher value")
                return False
            # Verify that the Timeline container size is bigger after using the zoom in option
            if containerInitialSize >= containerUpdatedSize:
                writeToLog("INFO", "FAILED, Zoom Level container was set at " + str(pointerInitial) + " and we expected " + str(pointerUpdated))
                return False
        # Use the zoom out option, by drag and drop of Zoom Level Pointer element, in order to reach zero state
        try:
            action.move_to_element(zoomLevelPointer).click_and_hold(zoomLevelPointer).move_to_element(zoomOutButtonElement).release(zoomLevelPointer).perform()
        except Exception:
            writeToLog("INFO", "FAILED to use zoom out option till the end")
            return False
        sleep(2)
        containerZoomedOutSize = int(self.wait_element(self.KEA_TIMELINE_SECTION_CONTAINER, 30, True).size['width'])
        # Verify that the Initial Timeline Container size is the same after using the zoom in and zoom out option
        if containerDefaultSize != containerZoomedOutSize:
            writeToLog("INFO", "FAILED, the default Timeline size container was " + str(containerDefaultSize) + " and after we moved back to the initial position using zoom out option, it was " + str(containerZoomedOutSize))
            return False
        # Verify that the Zoom Level pointer is displayed back at the beginning of the bar
        if self.wait_element(zoomLevelDefault, 5, True) == False:
            writeToLog("INFO", "FAILED, the Zoom Level Pointer was not set back to the original position")
            return False
        writeToLog("INFO", "Zoom Level has been successfully verified inside the KEA timeline section")
        return True
# @Author: Horia Cus
# This function can delete any available Question displayed in the KEA Timeline section
# questionDeleteList must contain only the string of the Question Title
# E.g questionDeleteList = ['Quesion Title 1', 'Question Title 5']
def deleteQuestions(self, questionDeleteList):
self.switchToKeaIframe()
# Verify that we are in the KEA Page
if self.wait_element(self.KEA_TIMELINE_SECTION_CONTAINER, 30, True) == False:
writeToLog("INFO", "FAILED to find the KEA Timeline section")
return False
# Take all the available quiz question cue points from the KEA Timline section
presentedInitialCuePointsInTimeline = self.wait_elements(self.KEA_TIMELINE_SECTION_QUESTION_BUBBLE, 1)
presentedUpdatedCuePointsInTimeline = self.wait_elements(self.KEA_TIMELINE_SECTION_QUESTION_BUBBLE, 1)
i = 0
# Iterate through each Question that needs to be deleted
for questionToBeDeleted in questionDeleteList:
# Used in order to verify the number of Questions in Timeline section after we delete them
i += 1
# Iterate through each Question Cue Point until finding the desired Question and delete it
for x in range(0, len(presentedUpdatedCuePointsInTimeline)):
# Create the element for the current Question Cue Point
questionCuePoint = presentedUpdatedCuePointsInTimeline[x]
# Hover over the current Question Cue Point
try:
ActionChains(self.driver).move_to_element(questionCuePoint).pause(2).perform()
except Exception:
writeToLog("INFO", "FAILED to hover over the quiz " + str(x+1) + " question number Cue Point during the first try")
return False
# Take the presented title from the hovered Cue Point
questionTitlePresented = self.wait_element(self.KEA_TIMELINE_SECTION_QUESTION_BUBBLE_TITLE, 5, True)
# Verify that the Question Title is presented while hovering over the Cue Point
if questionTitlePresented == False:
for x in range(0,5):
# Add a redundancy step for action chain
try:
ActionChains(self.driver).move_to_element(questionCuePoint).pause(2).perform()
except Exception:
writeToLog("INFO", "FAILED to hover over the quiz " + str(x+1) + " question number Cue Point during the second try")
return False
questionTitlePresented = self.wait_element(self.KEA_TIMELINE_SECTION_QUESTION_BUBBLE_TITLE, 5, True)
if questionTitlePresented == False:
writeToLog("INFO", "FAILED to take the question title from the question cue point number " + str(x+1))
return False
else:
break
else:
try:
# Take the Question Title presented on the hovered Cue Point
questionTitlePresented = questionTitlePresented.text
except Exception:
writeToLog("INFO", "FAILED to take the Question Title from the cue point number " + str(x+1) + " while using the element")
return False
# Verify if the Question Title Presented matches with Question Title from given List
if questionTitlePresented == questionToBeDeleted:
# Access the Question Editing page
if self.clickElement(questionCuePoint) == False:
writeToLog("INFO", "FAILED to select the question cue point number " + str(x+1))
return False
# Delete the Question
deleteButton = (self.KEA_QUIZ_BUTTON[0], self.KEA_QUIZ_BUTTON[1].replace('BUTTON_NAME', 'Delete'))
if self.click(deleteButton, 5, True) == False:
writeToLog("INFO", "FAILED to click on the Question delete button")
return False
# Wait for two seconds in order to give time for the Cue Point to disappear from KEA Timeline section and break from the loop
sleep(2)
break
else:
# Verify if the Question that needs to be deleted was found within the maximum amount of tries
if x+1 == len(presentedUpdatedCuePointsInTimeline):
writeToLog("INFO", "FAILED to find the question: " + questionToBeDeleted + " inside the KEA Timeline section")
return False
# Take the current number of Question Cue Points
presentedUpdatedCuePointsInTimeline = self.wait_elements(self.KEA_TIMELINE_SECTION_QUESTION_BUBBLE, 1)
# Verify that the KEA Timeline section has been updated properly with the correct number of Question Cue Points
if len(presentedUpdatedCuePointsInTimeline)+i != len(presentedInitialCuePointsInTimeline):
writeToLog("INFO", "FAILED, after we deleted question " + questionToBeDeleted + " we expected " + len(presentedUpdatedCuePointsInTimeline) + " question numbers in timeline, but " + len(presentedInitialCuePointsInTimeline) + " are displayed")
return False
questionsDeleted = ", ".join(questionDeleteList)
writeToLog("INFO","The following Questions were deleted: " + questionsDeleted)
return True
# @Author: Horia Cus
# This function can use any action option available for the KEA Editor ( Undo, Redo and Reset)
# It will verify that the KEA Section is presented with the proper time length, size and number of sections based on the action selected
# actionToBePerformed must contain enums ( e.g enums.KeaEditorTimelineOptions.REDO )
def timeLineUndoRedoReset(self, actionToBePerformed):
self.switchToKeaIframe()
# Verify that we are in the KEA Page
if self.wait_element(self.KEA_TIMELINE_SECTION_CONTAINER, 30, True) == False:
writeToLog("INFO", "FAILED to find the KEA Timeline section")
return False
# Take the KEA Timeline section details before performing any action
presentedSectionsInitial = self.wait_elements(self.KEA_TIMELINE_PRESENTED_SECTIONS, 5)
sectionListSizesInitial = []
sectionListTotalTimeInitial = []
presentedTotalTimeInitial = self.wait_element(self.EDITOR_TOTAL_TIME_TOOLBAR, 2).text.replace('Total:', '')[1:6]
# Take the size of section's container and the time length displayed within each presented section
for x in range(0, len(presentedSectionsInitial)):
sectionListSizesInitial.append(int(presentedSectionsInitial[x].size['width']))
sectionListTotalTimeInitial.append(presentedSectionsInitial[x].text)
# Verify the action that needs to be performed
if actionToBePerformed == enums.KeaEditorTimelineOptions.RESET:
# Verify that there are elements that can be reset
if len(presentedSectionsInitial) > 1 or sectionListSizesInitial[x] < 1065:
# Click on the 'Reset' action button
if self.click(self.EDITOR_TIMELINE_OPTION_RESET, 3, True) == False:
writeToLog("INFO", "FAILED to click on the Reset button")
return False
# Verify if the confirmation pop up is displayed
if self.wait_element(self.KEA_CONFIRMATION_POP_UP_TITLE, 3, True) != False:
# Confirm the Reset action
if self.click(self.KEA_CONFIRMATION_POP_UP_CONTINUE, 1, True) == False:
writeToLog("INFO", "FAILED to confirm the Reset option changes")
return False
else:
writeToLog("INFO", "No confirmation pop up was presented for the reset action")
# Take the elements for the presented sections
presentedSectionsUpdated = self.wait_elements(self.KEA_TIMELINE_PRESENTED_SECTIONS, 5)
presentedTotalTimeUpdatedForSection = presentedSectionsUpdated[0].text[:5]
presentedTotalTimeUpdated = self.wait_element(self.EDITOR_TOTAL_TIME_TOOLBAR, 2).text.replace('Total:', '')[1:6]
# Verify that only one section is presented
if len(presentedSectionsUpdated) != 1:
writeToLog("INFO", "FAILED, " + len(presentedSectionsUpdated) + " sections are displayed, instead of only one, after using the reset option")
return False
# Verify that the present section's time matches with the total time of the entry
if presentedTotalTimeUpdated != presentedTotalTimeUpdatedForSection:
writeToLog("INFO", "FAILED, in the timeline section, ")
return False
# Return false if no 'Reset' action can be performed
else:
writeToLog("INFO", "FAIFAILED, there's nothing to reset because no changes were performed within the timeline section")
return False
# Verify the action that needs to be performed
elif actionToBePerformed == enums.KeaEditorTimelineOptions.REDO:
# Click on the 'Redo' action button
if self.click(self.EDITOR_TIMELINE_OPTION_REDO, 3, True) == False:
writeToLog("INFO", "FAILED to click on the Redo button")
return False
# Wait one second to make sure that the UI is updated
sleep(1)
# Take the KEA timeline section details after using the 'Redo' option
presentedSectionsUpdated = self.wait_elements(self.KEA_TIMELINE_PRESENTED_SECTIONS, 5)
presentedTotalTimeUpdated = self.wait_element(self.EDITOR_TOTAL_TIME_TOOLBAR, 2).text.replace('Total:', '')[1:6]
sectionListSizesUpdated = []
sectionListTotalTimeUpdated = []
# Take the size of section's container and the time length displayed from each presented section
for x in range(0, len(presentedSectionsUpdated)):
sectionListSizesUpdated.append(int(presentedSectionsUpdated[x].size['width']))
sectionListTotalTimeUpdated.append(presentedSectionsUpdated[x].text)
# Verify the 'Redo' action after a Delete
# Verify that a section has been deleted after using the 'Redo' action
if len(presentedSectionsUpdated) == len(presentedSectionsInitial) - 1:
# Verify that the updated total time for the entry is lower after a section has been deleted
if presentedTotalTimeUpdated > presentedTotalTimeInitial:
writeToLog("INFO", "FAILED, the presented total time after using the Redo option for a Delete action, is not lower than the initial total time" )
return False
# Verify the 'Redo' action after a Set In / Set Out
elif len(presentedSectionsUpdated) == len(presentedSectionsInitial):
# Verify that a section has been set out / set in by checking its size ( set out / set in section remains at 2px width only after a split )
for x in range(0, len(sectionListSizesUpdated)):
if sectionListSizesUpdated[x] < sectionListSizesInitial[x]:
break
if x + 1 == len(sectionListSizesInitial):
writeToLog("INFO", "FAILED, no section has been set out / set in after using the 'Redo' action")
return False
# Verify that the time length of the entry has been decreased after resuming a set out / set in section
if presentedTotalTimeUpdated >= presentedTotalTimeInitial:
writeToLog("INFO", "FAILED, the presented total time after using the Redo option for a Set Out / Set In action, is not higher than the initial total time" )
return False
# Verify the 'Redo' action after a Split
# Verify that the number of sections has been increased by one after using the 'Redo' action
elif len(presentedSectionsUpdated) == len(presentedSectionsInitial) + 1:
tries = 1
# Verify that the size for at least two section has been decreased after using the 'Redo' action in order to re-split the sections
for updatedSize in sectionListSizesUpdated:
# Verify that the updatedSize for the section that has been re-split has been changed
if updatedSize in sectionListSizesInitial:
if len(sectionListSizesUpdated) == tries:
writeToLog("INFO", "FAILED, section size has not been properly changed after a split performed by 'Redo'")
return False
tries += 1
tries = 1
# Verify that the time length for at least two section has been decreased after using the 'Redo' action in order to re-split the sections
for updatedTime in sectionListTotalTimeUpdated:
# Verify that the updatedTime for the section that has been re-split has been changed
if updatedTime in sectionListTotalTimeInitial:
if len(sectionListTotalTimeUpdated) == tries:
writeToLog("INFO", "FAILED, section time length has not been properly changed after a split performed by 'Redo'")
return False
tries += 1
# Verify the action that needs to be performed
elif actionToBePerformed == enums.KeaEditorTimelineOptions.UNDO:
# Verify that at least one undo input can be performed for the current state
if len(presentedSectionsInitial) > 1 or sectionListSizesInitial[x] < 1065:
# Click on the 'Undo' action button
if self.click(self.EDITOR_TIMELINE_OPTION_UNDO, 3, True) == False:
writeToLog("INFO", "FAILED to click on the Undo button")
return False
# Wait one second to make sure that the UI is updated
sleep(1)
# Take the KEA timeline section details after using the undo option
presentedSectionsUpdated = self.wait_elements(self.KEA_TIMELINE_PRESENTED_SECTIONS, 5)
sectionListSizesUpdated = []
sectionListTotalTimeUpdated = []
# Take the size of section's container and the time length displayed from each presented section
for x in range(0, len(presentedSectionsUpdated)):
sectionListSizesUpdated.append(int(presentedSectionsUpdated[x].size['width']))
sectionListTotalTimeUpdated.append(presentedSectionsUpdated[x].text)
# Verify the 'Undo' action after a SPLIT
# Verify that a section has been combined after using the 'Undo' action
if len(presentedSectionsUpdated) == len(presentedSectionsInitial) - 1:
# Verify that the section's container size has been increased after using the 'Undo' action
for x in range(0, len(presentedSectionsUpdated)):
# Compare the initial size with the updated size and verify that the updated size for at least one section's container has been increased
if sectionListSizesUpdated[x] > sectionListSizesInitial[x]:
writeToLog("INFO", "Size has been increased for a section, after using the undo option, after a split")
break
# Verify that we were able to find an increase in size within the number of presented sections
if x + 1 == len(presentedSectionsUpdated):
writeToLog("INFO", "FAILED, after using the split option and then undo, the same number of sections were displayed")
return False
# Verify that the section's time length has been increased after using the 'Undo' action
for x in range(0, len(presentedSectionsUpdated)):
# Compare the initial time length with the updated time length and verify that the time length for the updated section has been increased
if sectionListTotalTimeUpdated[x] > sectionListTotalTimeInitial[x]:
writeToLog("INFO", "Time length has been increased for a section, after using the undo option, after a split")
break
# Verify that we were able to find an increase in time length within the number of presented sections
else:
if x + 1 == len(presentedSectionsUpdated):
writeToLog("INFO", "FAILED, after splitting a section and then using the undo option, section time length didn't increased")
return False
# Verify the 'Undo' option after a Set In and / or Set Out
# Verify that the same number of the presented section remained after using the 'Undo' action
elif len(presentedSectionsUpdated) == len(presentedSectionsInitial):
# Verify that the size of at least one presented section has been increased after combining a set out / set in section
for x in range(0, len(presentedSectionsUpdated)):
if sectionListTotalTimeUpdated[x] > sectionListTotalTimeInitial[x]:
writeToLog("INFO", "Time length has been increased for a section, after using the undo option")
break
# Verify that we were able to find an increase in size for at least one presented section within the number of presented sections
else:
if x + 1 == len(presentedSectionsUpdated):
writeToLog("INFO", "FAILED, section time has not been increased for any available section, after using the undo option")
return False
# Verify the 'Undo' option after a Delete
# Verify that the number of sections has been increased by one than the initial presented section
elif len(presentedSectionsUpdated) == len(presentedSectionsInitial) + 1:
# Verify that the time length information has been presented for each presented section
if len(sectionListTotalTimeUpdated) != len(sectionListTotalTimeInitial) + 1:
writeToLog("INFO", "FAILED, after the user deleted a section and used the Undo option, the previously section was not resumed")
return False
# Return False if no criteria was match for the 'Undo' action
else:
writeToLog("INFO", "FAILED, no undo changes were performed")
return False
# Return False if no elements can be undo
else:
writeToLog("INFO", "FAILED, no undo option is available for the current state")
return False
writeToLog("INFO", "KEA Timeline option has been successfully changed using the " + actionToBePerformed.value + " option")
return True
# @Author: Horia Cus
# This function will save the current entry with the latest changes performed within the KEA Editor Timeline
# if saveCopy = False, it will save the current entry with the latest changes performed
# if saveCopy = True, it will save the changes from the entry in a new entry
    def saveEditorChanges(self, saveCopy=False):
        """Save the latest KEA editor timeline changes.

        :param saveCopy: when False, save the changes into the current entry;
            when True, save the changes as a new entry ('Save a Copy').
        :return: True when the save flow completed, False otherwise.
        """
        self.switchToKeaIframe()
        # Save the current entry with all the changes that were performed within the KEA Timeline section
        if saveCopy == False:
            if self.click(self.EDITOR_SAVE_BUTTON) == False:
                writeToLog("INFO","FAILED to click on the save Button from KEA Editor")
                return False
            if self.click(self.EDITOR_SAVE_BUTTON_CONF, 1, True) == False:
                writeToLog("INFO","FAILED to confirm the save pop up from KEA Editor")
                return False
            # Saving can take a while (transcoding); allow up to 6 minutes
            # NOTE: the doubled quotes below are adjacent string literals -- Python
            # concatenates them, so no quote characters appear in the logged message
            if self.wait_element(self.EDITOR_SAVED_MSG, 360) == False:
                writeToLog("INFO","FAILED, ""Media was successfully saved."" - was not presented within the 360 seconds")
                return False
            if self.click(self.EDITOR_SAVED_OK_MSG, multipleElements=True) == False:
                writeToLog("INFO","FAILED to dismiss the confirmation pop up by clicking on the OK button")
                return False
        # Save the changes that were performed within the KEA Timeline section as a new entry
        elif saveCopy == True:
            if self.click(self.EDITOR_SAVE_A_COPY_BUTTON) == False:
                writeToLog("INFO","FAILED to click on the Save a Copy button from KEA Editor")
                return False
            if self.click(self.EDITOR_CREATE_BUTTON, 1, True) == False:
                writeToLog("INFO","FAILED to confirm the save pop up from KEA Editor")
                return False
            # Same adjacent-string-literal concatenation as above
            if self.wait_element(self.EDITOR_SUCCESS_MSG, 360) == False:
                writeToLog("INFO","FAILED, ""Media was successfully saved."" - was not presented within the 360 seconds")
                return False
        writeToLog("INFO", "Changes from KEA Editor timeline were saved properly")
        return True
# @Author: Horia Cus
# This function will compare the entry length from Entry Page and KEA Page
def compareEntryDurationInKeaAndEntryPage(self, entryName, expectedDuration):
self.switch_to_default_content()
# Navigate to the entry page
if self.clsCommon.navigateTo(navigateTo=enums.Location.ENTRY_PAGE, navigateFrom=enums.Location.MY_MEDIA, nameValue=entryName) == False:
writeToLog("INFO", "FAILED to navigate to the Entry Page for " + entryName + " entry")
return False
# Wait if the media is still being processed
if self.clsCommon.entryPage.waitTillMediaIsBeingProcessed() == False:
writeToLog("INFO", "FAILED to wait until the " + entryName + " has been processed")
return False
self.clsCommon.player.switchToPlayerIframe()
# Take the entry time length from the player
try:
entryDurationInEntryPage = self.wait_element(self.clsCommon.player.PLAYER_TOTAL_VIDEO_LENGTH, 75).text.replace('/','').strip()
except Exception:
writeToLog("INFO", "FAILED to take the entry duration from Entry Page for " + entryName + " entry")
return False
# Going to add one more 0 at the beginning of the entry length if the video has less than 9 minutes in order to match the structure from KEA Page
if len(entryDurationInEntryPage) == 4 and entryDurationInEntryPage[0] == '0':
entryDurationInEntryPage = '0'+ entryDurationInEntryPage
self.switch_to_default_content()
# Navigate to the KEA Editor
if self.launchKEA(entryName, navigateTo=enums.Location.ENTRY_PAGE, navigateFrom=enums.Location.MY_MEDIA) == False:
writeToLog("INFO", "FAILED to navigate to the KEA page for " + entryName + " entry")
return False
self.switchToKeaIframe()
# Take the entry time length from the KEA section
try:
entryDurationInKea = self.wait_element(self.EDITOR_TOTAL_TIME, 60).text.replace('/','').strip()
except Exception:
writeToLog("INFO", "FAILED to take the entry duration from Entry Page for " + entryName + " entry")
return False
self.switch_to_default_content()
# Verify that the entry time length it's the same in both KEA Editor and Entry Page
if entryDurationInEntryPage != entryDurationInKea:
writeToLog("INFO","FAILED, " + entryDurationInEntryPage + " time has been presented in Entry Page and " + entryDurationInKea + " in KEA page" )
return False
if entryDurationInEntryPage != expectedDuration:
writeToLog("INFO","FAILED, " + entryDurationInEntryPage + " time has been presented in Entry Page and KEA but " + expectedDuration + " was expected" )
return False
writeToLog("INFO", "Entry duration matches in both Entry Page and KEA Page")
return True
# @Author: Horia Cus
# hotspotList must contain the following structure ['Hotspot Title', enums.keaLocation.Location, startTime, endTime, 'link.address', enums.textStyle.Style, 'font color code', 'background color code', text size, roundness size, container size]
# A hotspot list may contain only the hotspot title
# For the link.address we can have a web page ( e.g https://6269.qakmstest.dev.kaltura.com/ ) and also a time location ( e.g 90, which will translate into 01:30 )
# If you want to specify only the Title, Location, and Text Size you can put '' string at the options that you don't want to be changed
# hotspotOne = ['Hotspot Title One', enums.keaLocation.TOP_RIGHT, 0, 10, 'https://autoone.kaltura.com/', enums.textStyle.BOLD, '#fafafa', '#fefefe', '', '', enums.keaHotspotContainerSize.SMALL]
# hotspotTwo = ['Hotspot Title Two', enums.keaLocation.TOP_LEFT, 5, 15, '', enums.textStyle.NORMAL, '', '', 12, 12]
# hotspotThree = ['Hotspot Title Three', enums.keaLocation.CENTER, 15, 20, 'https://autothree.kaltura.com/', enums.textStyle.THIN, '', '', 12, 12]
# hotspotFour = ['Hotspot Title Four', enums.keaLocation.BOTTOM_RIGHT, 20, 25, '', enums.textStyle.THIN, '', '', 12, 16]
# hotspotFive = ['Hotspot Title Five', enums.keaLocation.BOTTOM_LEFT, 25, 30, '', enums.textStyle.BOLD, '', '', 18, 16]
# hotspotsDict = {'1':hotspotOne,'2':hotspotTwo, '3':hotspotThree, '4':hotspotFour, '5':hotspotFive}
# hotspotsDict must contain the following structure = {'1':hotspotOne,'2':hotspotTwo}
# creationType = the type of the method that will be used in order to select the start time / end time of the hotspot ( end time is supported only for Cue Point method)
    def hotspotCreation(self, hotspotsDict, openHotspotsTab=False, creationType=enums.keaHotspotCreationType.VIDEO_PAUSED):
        """Create every hotspot described in hotspotsDict inside the KEA Hotspots tab.

        hotspotsDict: dict mapping an arbitrary key to a hotspot detail list
            [title, location enum, startTime, endTime, link, text style enum,
             font color, background color, text size, roundness, container size enum]
            — only the title is mandatory; '' skips an optional field.
        openHotspotsTab: when True, navigate to the Hotspots tab first.
        creationType: how start/end times are applied (VIDEO_PAUSED uses the
            timeline/Advanced Settings, VIDEO_PLAYING plays the entry to startTime).
        Returns True when every hotspot was created and saved, False on any failure.
        """
        self.switchToKeaIframe()
        # Navigate to the Hotspot tab if needed
        if openHotspotsTab == True:
            if self.launchKEATab('', enums.keaTab.HOTSPOTS) == False:
                writeToLog("INFO", "FAILED to navigate to the KEA Hotsptos tab during the first try")
                if self.launchKEATab('', enums.keaTab.HOTSPOTS) == False:
                    writeToLog("INFO", "FAILED to navigate to the KEA Hotsptos tab during the second try")
                    return False
        # Create all the desired Hotspots
        for hotspotNumber in hotspotsDict:
            # Take the details for the current hotspot
            hotspotDetails = hotspotsDict[hotspotNumber]
            if creationType == enums.keaHotspotCreationType.VIDEO_PLAYING:
                if self.playEntryAndReturnAtTime(hotspotDetails[2]) == False:
                    return False
            # Verify if the hotspot location on the player screen should be changed
            if len(hotspotDetails) > 1 and hotspotDetails[1] != '':
                if self.hotspotLocation(hotspotDetails[1]) == False:
                    writeToLog("INFO", "FAILED to change the location for " + hotspotDetails[0] + " to " + hotspotDetails[1])
                    return False
            else:
                # Create a new hotspot
                if self.click(self.KEA_HOTSPOTS_ADD_NEW_BUTTON, 1, True) == False:
                    writeToLog("INFO", "FAILED to click on the Add new Button")
                    return False
            # Verify that the video playing process stopped after clicking on the Add New Hotspot
            if creationType == enums.keaHotspotCreationType.VIDEO_PLAYING:
                if self.wait_element(self.KEA_PLAYER_CONTROLS_PLAY_BUTTON, 1, True) == False:
                    writeToLog("INFO", "FAILED, the Video Playing process didn't stopped after clicking on ADD New Hotspot")
                    return False
            # Leave time for the Hotspot Creation tool tip to proper be displayed
            sleep(1.3)
            if self.wait_element(self.KEA_HOTSPOTS_ADVANCED_SETTINGS, 5, True) == False:
                writeToLog("INFO", "FAILED to display the Advanced Settings option within the Hotspot creation tool tip")
                return False
            if self.click(self.KEA_HOTSPOTS_ADVANCED_SETTINGS, 1, True) == False:
                writeToLog("INFO", "FAILED to activate the Advanced Settings for Hotspots")
                return False
            # Insert the text for the current hotspot
            if self.click_and_send_keys(self.KEA_HOTSPOTS_FORM_TEXT_INPUT_FIELD, hotspotDetails[0], True)== False:
                writeToLog("INFO", "FAILED to insert " + hotspotDetails[0] + " text inside the Text Field")
                return False
            # Verify if a link should be inserted
            if len(hotspotDetails) > 4:
                if hotspotDetails[4] != '':
                    # Verify if the link is to a web page
                    if type(hotspotDetails[4]) is str:
                        sleep(0.2)
                        if self.click(self.KEA_HOTSPOTS_FORM_LINK_TYPE_URL, 1, True) == False:
                            writeToLog("INFO", "FAILED to click on the URL label")
                            return False
                        if self.click_and_send_keys(self.KEA_HOTSPOTS_FORM_LINK_INPUT_FIELD, hotspotDetails[4], True) == False:
                            writeToLog("INFO", "FAILED to insert " + hotspotDetails[4] + " link inside the Link Field")
                            return False
                    # Verify if the link needs to be set to a time location of the entry
                    elif type(hotspotDetails[4]) is int:
                        if self.click(self.KEA_HOTSPOTS_FORM_LINK_TYPE_TIME, 1, True) == False:
                            writeToLog("INFO", "FAILED to click on the Time label")
                            return False
                        # time string has format mm:ss (str(timedelta) yields 'H:MM:SS'; [2:] drops the hour prefix)
                        timeString = str(datetime.timedelta(seconds=hotspotDetails[4]))[2:]
                        if self.clear_and_send_keys(self.KEA_HOTSPOTS_FORM_LINK_INPUT_FIELD_TIME, timeString, True) == False:
                            writeToLog("INFO", "FAILED to insert the " + timeString + " time inside the Link Input time field of the hotspot")
                            return False
                        sleep(0.3)
                    else:
                        writeToLog("INFO", "FAILED, invalid format used while trying to specify a LINK for the hotspot")
                        return False
            # Verify if the font style should be changed
            if len(hotspotDetails) > 5:
                if hotspotDetails[5] != '':
                    # Build the style locator by substituting the TEXT_STYLE placeholder with the enum value
                    textStyle = (self.KEA_HOTSPOTS_FORM_TEXT_STYLE_VALUE[0], self.KEA_HOTSPOTS_FORM_TEXT_STYLE_VALUE[1].replace("TEXT_STYLE", hotspotDetails[5].value))
                    if self.click(self.KEA_HOTSPOTS_FORM_TEXT_STYLE, 1, True) == False:
                        writeToLog("INFO", "FAILED to activate the Text Color drop down menu")
                        return False
                    if self.click(textStyle, 1, True)== False:
                        writeToLog("INFO", "FAILED to select the font style for " + hotspotDetails[0] + " as " + hotspotDetails[5].value)
                        return False
            # Verify if the font color should be changed
            if len(hotspotDetails) > 6:
                # Because Font Color and Background Color field have the same locators we use indexing [0] = Font color [1] = Background Color
                colorPicker = self.wait_elements(self.KEA_HOTSPOTS_FORM_COLOR, 1)
                if colorPicker == False:
                    writeToLog("INFO", "FAILED to find the color picker option")
                    return False
                if hotspotDetails[6] != '':
                    if self.clickElement(colorPicker[0]) == False:
                        writeToLog("INFO", "FAILED to click on the Hotspot Font Color button")
                        return False
                    if self.clear_and_send_keys(self.KEA_HOTSPOTS_FORM_COLOR_VALUE, hotspotDetails[6], True) == False:
                        writeToLog("INFO", "FAILED to select the font color for " + hotspotDetails[0] + " as " + hotspotDetails[6].value)
                        return False
                    if self.clsCommon.sendKeysToBodyElement(Keys.ENTER) != True:
                        writeToLog("INFO", "FAILED to save the color by clicking on the enter button")
                        return False
                    if self.clickElement(colorPicker[0]) == False:
                        writeToLog("INFO", "FAILED to collapse the Hotspot Font Color tool tip menu")
                        return False
            # Verify if the background color should be changed
            # NOTE(review): colorPicker is only assigned when len(hotspotDetails) > 6, so a
            # detail list of exactly 8 items is assumed here — confirm callers always satisfy that
            if len(hotspotDetails) > 7:
                if hotspotDetails[7] != '':
                    if self.clickElement(colorPicker[1]) == False:
                        writeToLog("INFO", "FAILED to click on the Background Color button")
                        return False
                    if self.clear_and_send_keys(self.KEA_HOTSPOTS_FORM_COLOR_VALUE, hotspotDetails[7], True) == False:
                        writeToLog("INFO", "FAILED to select the background color for " + hotspotDetails[0] + " as " + hotspotDetails[7].value)
                        return False
                    if self.clsCommon.sendKeysToBodyElement(Keys.ENTER) != True:
                        writeToLog("INFO", "FAILED to save the color by clicking on the enter button")
                        return False
                    if self.clickElement(colorPicker[1]) == False:
                        writeToLog("INFO", "FAILED to collapse the Background Color tool tip menu")
                        return False
            # Verify if the Font Size should be changed
            if len(hotspotDetails) > 8:
                if hotspotDetails[8] != '':
                    # Selecting the Text Input Field
                    if self.click(self.KEA_HOTSPOTS_FORM_TEXT_SIZE, 1, True) == False:
                        writeToLog("INFO", "FAILED to click on the form text size input field for " + hotspotDetails[0] + " hotspot")
                        return False
                    # Changing the value of the Text Size
                    if self.clear_and_send_keys(self.KEA_HOTSPOTS_FORM_TEXT_SIZE, str(hotspotDetails[8]), True) == False:
                        writeToLog("INFO", "FAILED to change the font size to " + str(hotspotDetails[8]) + " for " + hotspotDetails[0] + " hotspot")
                        return False
            # Verify if the Roundness of the Hotspot Container should be changed
            if len(hotspotDetails) > 9:
                if hotspotDetails[9] != '':
                    # Selecting the Roundness Input Field
                    if self.click(self.KEA_HOTSPOTS_FORM_ROUNDNESS, 1, True) == False:
                        writeToLog("INFO", "FAILED to click on the roundness input field for " + hotspotDetails[0] + " hotspot")
                        return False
                    # Changing the value of the Roundness Size
                    if self.clear_and_send_keys(self.KEA_HOTSPOTS_FORM_ROUNDNESS, str(hotspotDetails[9]), True) == False:
                        writeToLog("INFO", "FAILED to change the font size to " + str(hotspotDetails[9]) + " for " + hotspotDetails[0] + " hotspot")
                        return False
            # Verify if the Hotspot Container size should be changed
            if len(hotspotDetails) > 10:
                if hotspotDetails[10] != '':
                    # Create the container size specific for each class
                    if hotspotDetails[10] == enums.keaHotspotContainerSize.DEFAULT:
                        width = 128
                        height = 32
                    elif hotspotDetails[10] == enums.keaHotspotContainerSize.SMALL:
                        width = 64
                        height = 32
                    elif hotspotDetails[10] == enums.keaHotspotContainerSize.MEDIUM:
                        width = 256
                        height = 64
                    elif hotspotDetails[10] == enums.keaHotspotContainerSize.LARGE:
                        width = 364
                        height = 128
                    else:
                        writeToLog("INFO", "FAILED, the desired container size doesn't exists " + hotspotDetails[10])
                        return False
                    # Highlight the width input field
                    if self.click(self.KEA_HOTSPOTS_FORM_SIZE_WIDTH, 1, True) == False:
                        writeToLog("INFO", "FAILED to click on the width input field from the Advanced Settings")
                        return False
                    # Select the current width text from the input field
                    if self.clsCommon.sendKeysToBodyElement(Keys.CONTROL + 'a') != True:
                        writeToLog("INFO", "FAILED to select the current width from the Advanced Settings Input Field")
                        return False
                    # Replace the current width with the desired one
                    if self.send_keys(self.KEA_HOTSPOTS_FORM_SIZE_WIDTH, str(width), True) == False:
                        writeToLog("INFO", "FAILED to insert the desired width size")
                        return False
                    # Highlight the input field
                    if self.click(self.KEA_HOTSPOTS_FORM_SIZE_HEIGHT, 1, False) == False:
                        writeToLog("INFO", "FAILED to click on the height input field from the Advanced Settings")
                        return False
                    # Select the current height text from the input field
                    if self.clsCommon.sendKeysToBodyElement(Keys.CONTROL + 'a') != True:
                        writeToLog("INFO", "FAILED to select the current width from the Advanced Settings Input Field")
                        return False
                    # Replace the current height with the desired one
                    if self.send_keys(self.KEA_HOTSPOTS_FORM_SIZE_HEIGHT, str(height), True) == False:
                        writeToLog("INFO", "FAILED to insert the desired height size")
                        return False
            # Save the current hotspot
            if self.saveHotspotChanges(settingsChanges=True) == False:
                writeToLog("INFO", "FAILED to save the KEA hotspots for " + hotspotDetails[0])
                return False
            # Set the start time and end time for the hotspot
            if len(hotspotDetails) >= 3:
                if hotspotDetails[2] != None or hotspotDetails[3] != None:
                    if creationType == enums.keaHotspotCreationType.VIDEO_PAUSED:
                        # Start and End time set on Firefox driver using the timleine section
                        # NOTE(review): VIDEO_PAUSED on a non-Firefox browser sets no timestamps here — confirm intended
                        if self.driver.capabilities['browserName'] == 'firefox':
                            if self.hotspotCuePoint(hotspotDetails[0], hotspotDetails[2], hotspotDetails[3]) == False:
                                writeToLog("INFO", "FAILED to set for the " + hotspotDetails[0] + " hotspot, start time to " + hotspotDetails[2] + " and end time to " + hotspotDetails[3] + " while using Firefox Browser")
                                return False
                    else:
                        # Because the start and end time of the hotspot may not be saved properly during the first time on Chrome, we run it twice
                        # Start and end time is set with Advanced Settings
                        for x in range(0, 2):
                            if creationType == enums.keaHotspotCreationType.VIDEO_PLAYING:
                                if self.changeHotspotTimeStamp(hotspotDetails[0], '', hotspotDetails[3]) == False:
                                    writeToLog("INFO", "FAILED to set for the " + hotspotDetails[0] + " hotspot, start time to " + hotspotDetails[2] + " and end time to " + hotspotDetails[3] + " while using Chrome Browser on a played video, during the " + str(x) + " try")
                                    return False
                            else:
                                if self.changeHotspotTimeStamp(hotspotDetails[0], hotspotDetails[2], hotspotDetails[3]) == False:
                                    writeToLog("INFO", "FAILED to set for the " + hotspotDetails[0] + " hotspot, start time to " + hotspotDetails[2] + " and end time to " + hotspotDetails[3] + " while using Chrome Browser, during the " + str(x) + " try")
                                    return False
            # Move back the real time marker to the initial position
            if self.setRealTimeMarkerToTime('00:00') == False:
                writeToLog("INFO", "FAILED to set the real time marker back to the initial position after creating " + hotspotDetails[0] + " hotspot")
                return False
        # Build a readable summary of all the hotspots that were processed
        hotspotNameList = []
        for hotspotNumber in hotspotsDict:
            hotspotNameList.append(hotspotsDict[hotspotNumber][0])
        if len(hotspotNameList) > 1:
            hotspots = ", ".join(hotspotNameList)
        else:
            hotspots = hotspotNameList[0]
        sleep(2)
        writeToLog("INFO","The following hotspots were verified: " + hotspots + "")
        return True
# @Author: Horia Cus
# This function will place the desired hotspot to the desired location
# startTime and endTime must be integer
# startTime represents the place from where the hotspot will be placed
# endTime represents the place from where the hotspots will end
def hotspotCuePoint(self, hotspotName, startTime=None, endTime=None):
self.switchToKeaIframe()
# Verify that the Hotspot section is present
if self.wait_element(self.EDITOR_REALTIME_MARKER, 15, True) == False:
writeToLog("INFO", "FAILED To verify that we are in the Hotspots Section")
return False
# Take entrie's total time length and presented hotspots
presentedHotspots = self.wait_elements(self.KEA_TIMELINE_SECTION_HOTSPOT_CONTAINER, 15)
entryTotalTime = self.wait_element(self.EDITOR_TOTAL_TIME, 1, True).text.replace(' ', '')[1:]
m, s = entryTotalTime.split(':')
entryTotalTimeSeconds = int(m) * 60 + int(s)
# Verify that at least one hotspot is presented
if presentedHotspots == False:
writeToLog("INFO", "FAILED to take the presetend hotspots")
return False
# Change the start time and end time for the desired hotspotName from the presented hotspots
for x in range(0, len(presentedHotspots)):
presentedHotspot = presentedHotspots[x]
presentedHotspotTitle = presentedHotspot.text
presentedHotspotWidth = presentedHotspot.size['width']
widthSizeForOneSecond = presentedHotspotWidth/entryTotalTimeSeconds
# Verify that the hostpotName is a match with the presented Hotspots
if presentedHotspotTitle == hotspotName:
# Highlight the correct hotspot in order to activate the editing options
writeToLog("INFO", "Going to set for " + hotspotName + " start time to " + str(startTime) + " and end time to " + str(endTime))
if self.clickElement(presentedHotspot) == False:
writeToLog("INFO", "FAILED to highlight the " + presentedHotspot.text + " hotspot")
return False
# Take the element that will be used in order to set the start time
hotspotContainerRight = self.wait_element(self.KEA_TIMELINE_SECTION_HOTSPOT_DRAG_CONTAINER_RIGHT, 5, True)
# Take the element that will be used in order to set the end time
hotspotContainerLeft = self.wait_element(self.KEA_TIMELINE_SECTION_HOTSPOT_DRAG_CONTAINER_LEFT, 5, True)
# Verify that the hotspot options are available
if hotspotContainerRight == False or hotspotContainerLeft == False:
writeToLog("INFO", "FAILED to select the " + presentedHotspot + " hotspot")
return False
# The overlay element it's the real time marker that was dragged by action chain instead of the desired hotspot
try:
overlayElement = self.wait_element(self.EDITOR_REALTIME_MARKER_CONTAINER, 5)
self.driver.execute_script("arguments[0].setAttribute('style','display:none;')", overlayElement)
sleep(1)
except Exception:
writeToLog("INFO", "FAILED to dismiss the real time maker while editing the cue points")
return False
# Set the action chain for start time
actionStartTime = ActionChains(self.driver)
# Set the desired start time of the hotspot
if startTime != None:
widthSizeInOrderToReachDesiredStartTime = widthSizeForOneSecond * startTime
try:
actionStartTime.drag_and_drop_by_offset(hotspotContainerLeft, widthSizeInOrderToReachDesiredStartTime, 0).pause(1).perform()
except Exception:
writeToLog("INFO", "FAILED to set the start time for " + hotspotName + " to " + str(startTime) + " second")
return False
# Set the action chain for end time
actionEndTime = ActionChains(self.driver)
# Set the desired end time of the hotspot
if endTime != None:
# Verify that the end time its within boundaries
secondsToDecrease = 0
if endTime > entryTotalTimeSeconds:
writeToLog("INFO", "The end time of " + str(endTime) + " seconds, for " + hotspotName + " exceeds the entry total time of " + str(entryTotalTimeSeconds) + " seconds")
return False
# Take the number of seconds that we need to decrease in order to reach the desired end time
while entryTotalTimeSeconds != endTime:
entryTotalTimeSeconds -= 1
secondsToDecrease += 1
# Take the number of pixels that we need to decrease in order to reach the desired end time
widthSizeInOrderToReachDesiredEndTime = widthSizeForOneSecond * secondsToDecrease
# Verify that the end time cue point is not already placed in the end time location
if widthSizeInOrderToReachDesiredEndTime != float(0.0):
try:
actionEndTime.drag_and_drop_by_offset(hotspotContainerRight, -widthSizeInOrderToReachDesiredEndTime, 0).pause(1).perform()
except MoveTargetOutOfBoundsException:
# Because the MoveTargetOutOfBoundsException error may be trigger due to the fact that the Cue Point is not visible, we navigate to view the element itself
self.driver.execute_script("arguments[0].scrollIntoView();", hotspotContainerRight)
sleep(1)
try:
ActionChains(self.driver).drag_and_drop_by_offset(hotspotContainerRight, -widthSizeInOrderToReachDesiredEndTime, 0).pause(1).perform()
except Exception:
writeToLog("INFO", "FAILED to set the end time for " + hotspotName + " to " + str(endTime) + " second")
return False
# Re-display the real time marker after changing the hotspot size
try:
self.driver.execute_script("arguments[0].setAttribute('style','display;')", overlayElement)
sleep(1)
except Exception:
writeToLog("INFO", "FAILED to re display the real time maker after editing the hotspots cue points")
return False
# Save the new cue point changes
if self.click(self.KEA_HOTSPOTS_SAVE_BUTTON, 1, True) == False:
writeToLog("INFO", "FAILED to save the hotspot changes")
return False
if self.wait_while_not_visible(self.KEA_LOADING_SPINNER_CONTAINER, 30) == False:
writeToLog("INFO", "FAILED to wait until the hotspot changes were saved")
return False
break
# Verify that the hostpoName was a match with at least one presented hotspot
if x + 1 == len(presentedHotspots):
writeToLog("INFO", "FAILED to find the expected hostpot within the presented hotspots")
return False
writeToLog("INFO", "Hotspot: " + hotspotName + " has been successfully set to " + str(startTime) + " start time and " + str(endTime) + " end time")
return True
# @Author: Horia Cus
# This function will click on the desired location from the player screen
# location must contain enum ( e.g enums.keaLocation.CENTER )
    # For now we support five types of locations: top right / left, bottom right / left and center
def hotspotLocation(self, location):
self.switchToKeaIframe()
# Take the Hotspot Player Screen element details
hotspotScreen = self.wait_element(self.KEA_PLAYER_CONTAINER, 30, True)
# Verify that we are able to take the X, Y coordinates for the desired location
if type(self.hotspotLocationCoordinates(location)) is not list:
writeToLog("INFO", "FAILED to take the coordinates for location " + location.value)
return False
# Take the X, Y coordinates for the desired location
x, y = self.hotspotLocationCoordinates(location)
# Verify if a hotspot is already selected, if so, the hotpost will be unselected
if self.wait_element(self.KEA_HOTSPOTS_PLAYER_HOTSPOT_CONTAINER_SELECTED, 1, True) != False:
if self.clickElement(hotspotScreen) == False:
writeToLog("INFO", "FAILED to click on the hotspot player screen in order to un select the hotspot container")
return False
action = ActionChains(self.driver)
# Move the quiz number to a new timeline location
try:
# Start the location from the Top Left corner and move it to the desired place
if location != enums.keaLocation.CENTER:
action.move_to_element_with_offset(hotspotScreen, 0, 0).pause(2).move_by_offset(x, y).pause(2).click().perform()
# Start from the center of the element and move the element by negative x value in order to proper place the hotspot to the center
elif location == enums.keaLocation.CENTER:
action.move_to_element(hotspotScreen).pause(2).move_by_offset(-x, y).pause(2).click().perform()
except Exception:
writeToLog("INFO", "FAILED to set the KEA Location at " + location.value)
return False
writeToLog("INFO", "KEA Location has been successfully set at " + location.value)
return True
# @Author: Horia Cus
# This function will save the changes performed within the Hotspot section
# If settingsChanges = True, means that changes were performed within the List and it will click on the done button first
def saveHotspotChanges(self, settingsChanges=True):
self.switchToKeaIframe()
if settingsChanges == True:
# Save the settings hotspot changes
if self.click(self.KEA_HOTSPOTS_DONE_BUTTON_ADVANCED_SETTINGS, 1, True) == False:
if self.click(self.KEA_HOTSPOTS_DONE_BUTTON_NORMAL, 1, True) == False:
writeToLog("INFO", "FAILED to save the KEA hotspots setting changes")
return False
# Save the presented hotspots inside the entry
if self.click(self.KEA_HOTSPOTS_SAVE_BUTTON, 1, True) == False:
writeToLog("INFO", "FAILED to save the hotspot changes")
return False
# Verify that the changes were saved
if self.wait_while_not_visible(self.KEA_LOADING_SPINNER_CONTAINER, 30) == False:
writeToLog("INFO", "FAILED to wait until the hotspot changes were saved")
return False
return True
# @Author: Horia Cus
# This function can edit / delete and duplicate any presented hotspotName
# hotspotName = contains the string of the hotspot title
# hotspotAction must be enum ( e.g enums.keaHotspotActions.DUPLICATE )
    def hotspotActions(self, hotspotName, hotspotAction, editHotspotDict=''):
        """Perform an action (edit / duplicate / delete / cancel-delete) on the
        hotspot named hotspotName from the KEA Hotspots panel list.

        hotspotName: visible title of the hotspot in the panel.
        hotspotAction: enums.keaHotspotActions member selecting the action.
        editHotspotDict: accepted for API symmetry but not used in this body —
            NOTE(review): confirm whether it was meant to drive the EDIT branch.
        Returns True on success, False on any failed UI step.
        """
        self.switchToKeaIframe()
        hotspotIndexLocation = self.returnHotspotIndexFromList(hotspotName)
        if type(hotspotIndexLocation) is not int:
            writeToLog("INFO", "FAILED to take the hospot: " + hotspotName + " index location")
            return False
        # Create the elements for hamburger menu
        hotspotsActionMenu = self.wait_elements(self.KEA_HOTSPOTS_PANEL_MORE_HAMBURGER_MENU, 1)
        # Verify that we were able to find the hamburger menu buttons
        if hotspotsActionMenu == False:
            writeToLog("INFO", "FAILED to find the action menu for the presented hotspots")
            return False
        # Create the elements for the Hotspot Title
        presentedHotspotsTitle = self.wait_elements(self.KEA_HOTSPOTS_PANEL_ITEM_TITLE, 1)
        # Highlight the hotspotName field by clicking on its container
        if self.clickElement(presentedHotspotsTitle[hotspotIndexLocation]) == False:
            writeToLog("INFO", "FAILED to highligth the " + hotspotName + " hotspot")
            return False
        sleep(1)
        # Trigger the Action Drop Down Menu
        if self.clickElement(hotspotsActionMenu[hotspotIndexLocation]) == False:
            writeToLog("INFO", "FAILED to trigger the action menu for hotspot: " + hotspotName + " at the second try")
            return False
        sleep(1)
        if hotspotAction == enums.keaHotspotActions.DUPLICATE:
            # Duplicate the hotspotName
            if self.click(self.KEA_HOTSPOTS_PANEL_ACTION_MENU_DUPLICATE, 1, True) == False:
                writeToLog("INFO", "FAILED to click on the Duplicate button for the hotspot: " + hotspotName)
                return False
            # Add a special suffix for the duplicated hotspot in order to verify it in other function
            action = ActionChains(self.driver)
            try:
                action.send_keys(' Duplicated').perform()
            except Exception:
                writeToLog("INFO", "FAILED to add Duplicated suffix for the " + hotspotName + " hotspot")
                return False
            # Save the duplicated hotspot
            if self.saveHotspotChanges(settingsChanges=True) == False:
                writeToLog("INFO", "FAILED to save the changes for " + hotspotName + " Duplicated hotspot")
                return False
        elif hotspotAction == enums.keaHotspotActions.EDIT:
            # Edit the hotspotName
            if self.click(self.KEA_HOTSPOTS_PANEL_ACTION_MENU_EDIT, 1, True) == False:
                writeToLog("INFO", "FAILED to click on the Edit button for the hotspot: " + hotspotName)
                return False
            # Add a suffix to the edited hotspotName in order to verify it in other function
            action = ActionChains(self.driver)
            try:
                action.send_keys(' Edited').perform()
            except Exception:
                writeToLog("INFO", "FAILED to add Edited suffix for the " + hotspotName + " hotspot")
                return False
            # Save the edited hotspot
            if self.saveHotspotChanges(settingsChanges=True) == False:
                writeToLog("INFO", "FAILED to save the changes for " + hotspotName + " Edited hotspot")
                return False
        elif hotspotAction == enums.keaHotspotActions.DELETE:
            # Trigger the delete process for the hotspotName
            if self.click(self.KEA_HOTSPOTS_PANEL_ACTION_MENU_DELETE, 1, True) == False:
                writeToLog("INFO", "FAILED to click on the Delete Action Button for the hotspot: " + hotspotName)
                return False
            # Verify that the Delete Confirmation Pop up is triggered
            if self.wait_element(self.KEA_CONFIRMATION_POP_UP_CONTAINER, 3, True) == False:
                writeToLog("INFO", "FAILED to trigger the Delete Confirmation Pop up")
                return False
            # Confirm the delete process
            if self.click(self.KEA_HOTSPOTS_DELETE_POP_UP_CONFIRMATION_BUTTON, 1, True) == False:
                writeToLog("INFO", "FAILED to click on the Delete Hotspot button")
                return False
            # Verify that the confirmation pop up is no longer present
            if self.wait_while_not_visible(self.KEA_CONFIRMATION_POP_UP_CONTAINER, 10) == False:
                writeToLog("INFO", "FAILED, the confirmation pop up is still present")
                return False
            # Save the changes
            if self.saveHotspotChanges(settingsChanges=False) == False:
                writeToLog("INFO", "FAILED to save the changes after deleting the " + hotspotName + " hotspot")
                return False
            try:
                # Verify that the element is no longer present
                # Accessing .text on a deleted element should raise StaleElementReferenceException
                presentedHotspotsTitle[hotspotIndexLocation].text
                writeToLog("INFO", "FAILED, the hotspot " + hotspotName + " element is still present, although it should have been deleted")
                return False
            # If an exception its thrown, means that the element is no longer present, which is what we want, since the hotspot has been deleted
            except StaleElementReferenceException:
                writeToLog("INFO", "The hotspot " + hotspotName + " has been successfully deleted")
                return True
        elif hotspotAction == enums.keaHotspotActions.CANCEL_DELETE:
            # Trigger the delete process for the hotspotName
            if self.click(self.KEA_HOTSPOTS_PANEL_ACTION_MENU_DELETE, 1, True) == False:
                writeToLog("INFO", "FAILED to click on the Delete Action Button for the hotspot: " + hotspotName)
                return False
            # Verify that the Delete Confirmation Pop up is triggered
            if self.wait_element(self.KEA_CONFIRMATION_POP_UP_CONTAINER, 3, True) == False:
                writeToLog("INFO", "FAILED to trigger the Delete Confirmation Pop up")
                return False
            # Click on the Cancel button
            if self.click(self.KEA_CONFIRMATION_POP_UP_CANCEL_BUTTON, 1, True) == False:
                writeToLog("INFO", "FAILED to cancel the Hotspot Deletion process by clicking on the Cancel Button")
                return False
            sleep(1)
            try:
                # Verify that the element is still present
                presentedHotspotsTitle[hotspotIndexLocation].text
            # If an exception its thrown, means that the element is no longer present, which is what we want, since the hotspot has been deleted
            except StaleElementReferenceException:
                writeToLog("INFO", "The hotspot " + hotspotName + " has been deleted, although it shouldn't")
                return False
        # Verify that a valid action has been used during the function call
        else:
            writeToLog("INFO", "FAILED, please make sure that you've used a supported hotspot action")
            return False
        writeToLog("INFO", "The hotspot " + hotspotName + " has been successfully " + hotspotAction.value + "ed")
        return True
# @Author: Horia Cus
# This function can launch the KEA Editor for the desired entry name
# This function will open the specified keaTab while being in the KEA Editor
# entryName must be inserted ( if navigateToEntry = True) in order to verify that the KEA page has been successfully opened and loaded
# keaTab must contain enum ( e.g enums.keaTab.QUIZ)
# expectedConfirmation = True, it will pass the confirmation pop up during the transition, if no confirmation pop up is presented, it will return False
# If you have changes that were not saved, you should expect a confirmation pop up during the transition to another KEA Tab
def launchKEATab(self, entryName, keaTab, navigateToEntry=False, timeOut=1, expectedConfirmation=False):
self.switch_to_default_content()
if navigateToEntry == True:
sleep(timeOut)
if self.launchKEA(entryName, navigateTo=enums.Location.ENTRY_PAGE, navigateFrom=enums.Location.MY_MEDIA) == False:
writeToLog("INFO","Failed to launch KEA for: " + entryName)
return False
if self.verifyKeaEntryName(entryName, 60) == False:
writeToLog("INFO", "FAILED to load the page until the " + entryName + " was present")
return False
sleep(7)
self.switchToKeaIframe()
if keaTab == enums.keaTab.QUIZ:
if self.wait_element(self.KEA_QUIZ_TAB_ACTIVE, 1, True) != False:
writeToLog("INFO", "KEA Quiz tab is already active")
else:
if self.wait_element(self.KEA_QUIZ_TAB, 45, True) == False:
writeToLog("INFO", "FAILED to find the KEA Quiz tab")
return False
if self.click(self.KEA_QUIZ_TAB, 1, True) == False:
writeToLog("INFO", "FAILED to click on the KEA Quiz tab")
return False
if expectedConfirmation == True:
if self.wait_element(self.KEA_CONFIRMATION_POP_UP_CONTAINER, 3, True) == False:
writeToLog("INFO", "FAILED, no confirmation pop up has been displayed")
return False
if self.click(self.KEA_CONFIRMATION_POP_UP_OK_BUTTON, 1, True) == False:
writeToLog("INFO", "FAILED to click on the OK confirmation pop up")
return False
sleep(0.5)
if self.wait_while_not_visible(self.KEA_LOADING_SPINNER_CONTAINER, 60) == False:
writeToLog("INFO", "FAILED to wait until the KEA Quiz tab has been successfully loaded")
return False
if self.wait_element(self.KEA_QUIZ_TAB_ACTIVE, 5, True) == False:
writeToLog("INFO", "FAILED, the KEA Quiz tab is not displayed as being enabled")
return False
elif keaTab == enums.keaTab.VIDEO_EDITOR:
if self.wait_element(self.KEA_VIDEO_EDITOR_TAB_ACTIVE, 1, True) != False:
writeToLog("INFO", "KEA Video Editor tab is already active")
else:
if self.wait_element(self.KEA_VIDEO_EDITOR_TAB, 45, True) == False:
writeToLog("INFO", "FAILED to find the KEA Video Editor tab")
return False
if self.click(self.KEA_VIDEO_EDITOR_TAB, 1, True) == False:
writeToLog("INFO", "FAILED to click on the KEA Video Editor tab")
return False
if expectedConfirmation == True:
if self.wait_element(self.KEA_CONFIRMATION_POP_UP_CONTAINER, 3, True) == False:
writeToLog("INFO", "FAILED, no confirmation pop up has been displayed")
return False
if self.click(self.KEA_CONFIRMATION_POP_UP_OK_BUTTON, 1, True) == False:
writeToLog("INFO", "FAILED to click on the OK confirmation pop up")
return False
sleep(0.5)
if self.wait_while_not_visible(self.KEA_LOADING_SPINNER_CONTAINER, 60) == False:
writeToLog("INFO", "FAILED to wait until the KEA Video Editor tab has been successfully loaded")
return False
if self.wait_element(self.KEA_VIDEO_EDITOR_TAB_ACTIVE, 5, True) == False:
writeToLog("INFO", "FAILED, the KEA Video Editor tab is not displayed as being enabled")
return False
elif keaTab == enums.keaTab.HOTSPOTS:
if self.wait_element(self.KEA_HOTSPOTS_TAB_ACTIVE, 1, True) != False:
writeToLog("INFO", "KEA Hotspots tab is already active")
else:
if self.wait_element(self.KEA_HOTSPOTS_TAB, 45, True) == False:
writeToLog("INFO", "FAILED to find the KEA Hotspots tab")
return False
if self.click(self.KEA_HOTSPOTS_TAB, 1, True) == False:
writeToLog("INFO", "FAILED to click on the KEA Hotspots tab")
return False
if expectedConfirmation == True:
if self.wait_element(self.KEA_CONFIRMATION_POP_UP_CONTAINER, 3, True) == False:
writeToLog("INFO", "FAILED, no confirmation pop up has been displayed")
return False
if self.click(self.KEA_CONFIRMATION_POP_UP_OK_BUTTON, 1, True) == False:
writeToLog("INFO", "FAILED to click on the OK confirmation pop up")
return False
sleep(0.5)
if self.wait_while_not_visible(self.KEA_LOADING_SPINNER_CONTAINER, 60) == False:
writeToLog("INFO", "FAILED to wait until the KEA Hotspots tab has been successfully loaded")
return False
if self.wait_element(self.KEA_HOTSPOTS_TAB_ACTIVE, 5, True) == False:
writeToLog("INFO", "FAILED, the KEA Hotspots tab is not displayed as being enabled")
return False
else:
writeToLog("INFO", "FAILED, please make sure that you've used a supported KEA section")
return False
sleep(3.5)
writeToLog("INFO", "The " + keaTab.value + " has been successfully opened")
return True
# @Author: Horia Cus
# This function will verify that the expected hotspots are properly presented in the timeline section by
# Verifying the hotspot container size based on the duration
# Verifying the X location based on the start time
# Verifying the Y location based on the start and end time
# Verify the place order based on creation
# For hotspotDict structure please check hotspotCreation function
# expectedHotspotNumber = 5, will also verify that exactly five hotspots are presented
def hotspotTimelineVerification(self, hotspotsDict, expectedHotspotNumber=None):
self.switchToKeaIframe()
# Verify that we are in the Hotspot Section
if self.wait_element(self.EDITOR_REALTIME_MARKER, 15, True) == False:
writeToLog("INFO", "FAILED To verify that we are in the Hotspots Section")
return False
# Real Time marker must be at second zero in order to proper take the information needed from the Blank Hotspot
if self.setRealTimeMarkerToTime('00:00') == False:
writeToLog("INFO", "FAILED to set the real time marker at the beginning of the timeline")
return False
# Create a Blank Hotspot in order to take the properties that we need
if self.click(self.KEA_HOTSPOTS_ADD_NEW_BUTTON, 15, True) == False:
writeToLog("INFO", "FAILED to add a new hotspot in order to take its width")
return False
if self.saveHotspotChanges(settingsChanges=True) == False:
writeToLog("INFO", "FAILED to save the blank hotspot")
return False
presentedHotspots = self.wait_elements(self.KEA_TIMELINE_SECTION_HOTSPOT_CONTAINER, 15)
zeroSecondXValue = None
# Take the properties from the Blank Hotspot
for x in range(0, len(presentedHotspots)):
if presentedHotspots[x].text == '':
maximumHotspotSize = presentedHotspots[x].size['width']
zeroSecondXValue = presentedHotspots[x].location['x']
break
if x + 1 == len(presentedHotspots):
writeToLog("INFO", "FAIELD to find the blank hotspot")
return False
# Delete the Blank Hotspot
if self.hotspotActions('<Blank>', enums.keaHotspotActions.DELETE) == False:
writeToLog("INFO", "FAILED to delete the blank hotspot")
return False
# Take the list with all the presented hotspots from the Timeline section
presentedHotspots = self.wait_elements(self.KEA_TIMELINE_SECTION_HOTSPOT_CONTAINER, 15)
# Take the list with all the presented hotspots from the HS List
presetendHotspotsList = self.wait_elements(self.KEA_HOTSPOTS_LIST_PANEL_HOTSPOT, 15)
# Verify that the same number of hotspots are displayed in both Timeline section and HS list
if len(presentedHotspots) != len(presetendHotspotsList):
writeToLog("INFO", "FAILED, a number of " + len(presentedHotspots) + " hotspots were displayed in the timeline and " + len(presetendHotspotsList) + " in the HS List")
return False
# Verify that the Hotspot order list is the same in both Timeline and HS list sections
for x in range(0,len(presentedHotspots)):
try:
presentedHotspotsTitleTimeline = presentedHotspots[x].text
presentedHotspotsTitleList = presetendHotspotsList[x].text
except Exception:
writeToLog("INFO", "FAILED to take the presented hotspot title from timeline and HS list sections")
return False
if presentedHotspotsTitleList.count(presentedHotspotsTitleTimeline) != 1:
writeToLog("INFO", "FAILED to find the " + presentedHotspotsTitleTimeline + " title inside the HS list")
return False
# Take entrie's length time
entryTotalTime = self.wait_element(self.EDITOR_TOTAL_TIME, 1, True).text.replace(' ', '')[1:]
m, s = entryTotalTime.split(':')
entryTotalTimeSeconds = int(m) * 60 + int(s)
# Take the number of px needed for each second based on the entry time
widthSizeForOneSecond = maximumHotspotSize/entryTotalTimeSeconds
# Verify that we have at least one hotspot presented
if presentedHotspots == False:
writeToLog("INFO", "FAILED to find any available hotspots within the timeline section")
return False
# Verify that the expectedHostNumber matches with the number of the presentedHotspots
if expectedHotspotNumber != None:
if expectedHotspotNumber != len(presentedHotspots):
writeToLog("INFO", "FAILED, a number of " + str(expectedHotspotNumber) + " hotspots were expected but " + str(len(presentedHotspots)) + " hotspots were presented")
return False
# Create a list with the successfully verified hotspots
hotspotNameList = []
# Used in order to verify that the hotspot is displayed on the right Y location
previousYValue = -1
expectedHotspotVerified = 0
i = 1
# Iterate through each presented hotspot
for x in range(0, len(presentedHotspots)):
try:
try:
presentedHotspot = presentedHotspots[x]
presentedHotspotTitle = presentedHotspot.text
except Exception:
writeToLog("INFO", "FAILED to take the presented hotspot at the " + str(x) + " try")
return False
# Iterate through the presented hotspots until the expected one is found
for k in range(0,len(presentedHotspots)):
expectedHotspot = hotspotsDict[str(k+1)]
if presentedHotspotTitle == expectedHotspot[0]:
writeToLog("INFO", "The hotspot " + presentedHotspotTitle + " was found at place " + str(x))
break
else:
if k + 1 == len(presentedHotspots):
writeToLog("INFO", "FAILED to find the expected hotspot: " + expectedHotspot[0])
return False
# Take the presented hotspot details
presentedHotspotWidth = presentedHotspot.size['width']
presentedHotspotXValue = presentedHotspot.location['x']
presentedHotspotYValue = presentedHotspot.location['y']
presentedHotspotTime = int(presentedHotspotWidth/widthSizeForOneSecond)
expectedHotspotTime = expectedHotspot[3] - expectedHotspot[2]
expectedHotspotXValue = int(zeroSecondXValue + widthSizeForOneSecond * expectedHotspot[2])
except Exception:
writeToLog("INFO", "FAILED to take the Expected and Presented hotspot details")
# Verify that the width of the hotspot container matches with the expected duration
if presentedHotspotTime != expectedHotspotTime:
# Allow a two second inconsistency
for x in range(0,2):
if presentedHotspotTime + x == expectedHotspotTime:
break
if x == 2:
writeToLog("INFO", "FAILED, the length of " + presentedHotspotTitle + " was " + str(presentedHotspotTime) + " while we expected " + str(expectedHotspotTime))
return False
# Verify that the presented hotspot is presented at the expected X location
if presentedHotspotXValue != expectedHotspotXValue:
# Allow a five px inconsistency
# With positive value
for x in range(0,7):
if presentedHotspotXValue == expectedHotspotXValue + x:
break
if x >= 5:
if presentedHotspotXValue + 1 != expectedHotspotXValue:
writeToLog("INFO", "The x Location of " + presentedHotspotTitle + " was " + str(presentedHotspotXValue) + " while we expected " + str(expectedHotspotXValue))
# With negative value
for k in range(0,7 ):
if presentedHotspotXValue == expectedHotspotXValue - k:
break
if k >= 5:
if presentedHotspotXValue != expectedHotspotXValue - 1:
writeToLog("INFO", "FAILED, the x Location of " + presentedHotspotTitle + " was " + str(presentedHotspotXValue) + " while we expected " + str(expectedHotspotXValue))
return False
else:
break
break
# Verify that the current iterated hotspot is displayed on a higher Y value than the previous hotspot
if presentedHotspotYValue <= previousYValue:
writeToLog("INFO", "FAILED, the Y Location of " + presentedHotspotTitle + " was " + str(presentedHotspotYValue) + " while from the previous hotspot was " + str(previousYValue))
return False
else:
previousYValue = presentedHotspotYValue
i += 1
expectedHotspotVerified += 1
hotspotNameList.append(expectedHotspot[0])
writeToLog("INFO", "The following hotspot has been successfully presented in the timeline section " + expectedHotspot[0])
if len(hotspotNameList) > 1:
hotspots = "\n".join(hotspotNameList)
else:
hotspots = expectedHotspot[0]
# Verify that the expected hotspots were presented in the timeline section
if expectedHotspotVerified != len(hotspotsDict):
writeToLog("INFO", "FAILED, a number of " + str(expectedHotspotVerified) + " hotspots were found based on the hotspotDict, while we expected to verify: " + str(len(hotspotsDict)) + " number of hotspots from hotspotDict")
return False
else:
writeToLog("INFO", "ALL the " + str(len(hotspotsDict)) + " expected hotspots from the hotspotDict were properly found inside the timeline section")
writeToLog("INFO","The following hotspots were properly verified in the timeline section:\n" + hotspots)
return True
# @Author: Horia Cus
# This function will return the X, Y value for the desired location
# location must contain enum ( e.g enums.keaLocation.CENTER )
def hotspotLocationCoordinates(self, location):
self.switchToKeaIframe()
# Take the Hotspot Player Screen element details
hotspotScreen = self.wait_element(self.KEA_PLAYER_CONTAINER, 30, True)
# Verify that the Hotspot Player Screen is presented
if hotspotScreen == False:
writeToLog("INFO", "FAILED to find the Hotspot screen")
return False
# Take the width of hotspot container in order to proper align it to the center location
if location == enums.keaLocation.CENTER:
# In order to proper align the hotspot to the center we need to take container's width, if no container is presented we will divide by the default value
containerSize = self.wait_element(self.KEA_HOTSPOTS_PLAYER_HOTSPOT_CONTAINER, 1, True)
if containerSize != False:
containerSize = containerSize.size['width']
elif type(containerSize) is not int:
writeToLog("INFO", "No hotspots information that contains container size were given")
# Use the default value
containerSize = 128
else:
writeToLog("INFO", "FAILED to take the width size for the " + location.value + " location")
return False
# Set the off sets for the desired KEA Location
if location == enums.keaLocation.TOP_LEFT:
x = hotspotScreen.size['width']/500
y = hotspotScreen.size['height']/500
elif location == enums.keaLocation.TOP_RIGHT:
x = hotspotScreen.size['width']/1.20
y = hotspotScreen.size['height']/500
elif location == enums.keaLocation.BOTTOM_LEFT:
x = hotspotScreen.size['width']/500
y = hotspotScreen.size['height'] - hotspotScreen.size['height']/6.5
elif location == enums.keaLocation.BOTTOM_RIGHT:
x = hotspotScreen.size['width']/1.20
y = hotspotScreen.size['height'] - hotspotScreen.size['height']/6.5
elif location == enums.keaLocation.CENTER:
# width size of the hotspot button, divided by two in order to align it to the center properly
x = containerSize/2
y = 0
elif location == enums.keaLocation.PROTECTED_ZONE_CENTER:
# width size of the hotspot button, divided by two in order to align it to the center properly
x = hotspotScreen.size['width']/2
y = hotspotScreen.size['height']/1.08
elif location == enums.keaLocation.PROTECTED_ZONE_LEFT:
x = hotspotScreen.size['width']/500
y = hotspotScreen.size['height']/1.08
elif location == enums.keaLocation.PROTECTED_ZONE_RIGHT:
x = hotspotScreen.size['width']/1.08
y = hotspotScreen.size['height']/1.08
else:
writeToLog("INFO", "FAILED, please make sure that you've used a supported KEA Location")
return False
locationCoordinatesList = [x,y]
writeToLog("INFO", "The following coordinates were provided for " + location.value + " location, X: " + str(locationCoordinatesList[0]) + " and Y " + str(locationCoordinatesList[1]))
return locationCoordinatesList
# @Author: Horia Cus
# This function verifies that:
# 1. a proper tool tip is displayed while being in any player location, including protected zone
# 2. the tool tip disappears after exiting the player screen
# 3. the tool tip is properly displayed, based on the desired location
# 4. If expectedHotspot = True, we will verify that the Add Hotspot tool tip is not available
def hotspotToolTipVerification(self, location, expectedHotspot=False):
self.switchToKeaIframe()
# Take the Hotspot Player Screen element details
hotspotScreen = self.wait_element(self.KEA_PLAYER_CONTAINER, 30, True)
action = ActionChains(self.driver)
if expectedHotspot == True:
presentedHotspots = self.wait_elements(self.KEA_HOTSPOTS_PLAYER_HOTSPOT_CONTAINER, 10)
if len(presentedHotspots) < 1:
writeToLog("INFO", "FAILED, no hotspots were available within the player")
return False
# Verify that the Add New Hotspot tool tip is not presented for any presented hotspot
for x in range(0, len(presentedHotspots)):
try:
presentedHotspots[x]
action.move_to_element(presentedHotspots[x]).pause(2).perform()
# Verify if the Add New Hotspot tool tip is found
addHotspotToolTip = self.wait_element(self.KEA_HOTSPOTS_PLAYER_ADD_HOTSPOT_TOOLTIP, 1, True)
if addHotspotToolTip != False:
writeToLog("INFO", "FAILED, the Add New Hotspot tool tip was displayed while hovering over the " + presentedHotspots[x].text + " hotspot")
return False
except Exception:
writeToLog("INFO", "FAILED, to hover over the " + presentedHotspots[x].text + " hotspot")
return False
writeToLog("INFO", "AS EXPECTED, no Add New Hotspot tool tip has been presented while hovering over existing hotspots")
return True
else:
# Verify that we are able to take the X, Y coordinates for the desired location
if type(self.hotspotLocationCoordinates(location)) is not list:
writeToLog("INFO", "FAILED to take the coordinates for location " + location.value)
return False
# Take the X, Y coordinates for the desired location
x, y = self.hotspotLocationCoordinates(location)
# Move the quiz number to a new timeline location
try:
# Start the location from the Top Left corner and move it to the desired place
if location != enums.keaLocation.CENTER:
action.move_to_element_with_offset(hotspotScreen, 0, 0).pause(2).move_by_offset(x, y).pause(2).perform()
if self.wait_element(self.KEA_HOTSPOTS_PLAYER_ADD_HOTSPOT_TOOLTIP, 1, True) == False:
writeToLog("INFO", "FAILED to display the hotspot tool tip while being at the location: " + location.value)
return False
# Start from the center of the element and move the element by negative x value in order to proper place the hotspot to the center
elif location == enums.keaLocation.CENTER:
action.move_to_element(hotspotScreen).pause(2).move_by_offset(-x, 0).pause(2).perform()
# Take the Add Hotspot details
addHotspotToolTip = self.wait_element(self.KEA_HOTSPOTS_PLAYER_ADD_HOTSPOT_TOOLTIP, 1, True)
# Verify that the Add Hotspot tool tip was presented
if addHotspotToolTip == False:
writeToLog("INFO", "FAILED to display the hotspot tool tip while being at the location: " + location.value)
return False
if location.value.count('Protected') == 0:
# Verify the Add Hotspot tool tip text
if addHotspotToolTip.text.strip() != 'Add hotspot here':
writeToLog("INFO", "FAILED, an invalid tool tip text was presented: " + addHotspotToolTip.text.strip() + " while being in the hotspot zone")
return False
else:
if addHotspotToolTip.text.strip() != "Can't add hotspot on the protected zone":
writeToLog("INFO", "FAILED, an invalid tool tip text was presented: " + addHotspotToolTip.text.strip() + " while being in protected zone")
return False
hotspotToolTipLocationChrome = {'x': 0, 'y':0}
if location == enums.keaLocation.TOP_LEFT:
hotspotToolTipLocation = {'x': 503, 'y': 75}
hotspotToolTipLocationChrome = {'x': 503, 'y': 73}
elif location == enums.keaLocation.TOP_RIGHT:
hotspotToolTipLocation = {'x': 916, 'y': 75}
hotspotToolTipLocationChrome = {'x': 916, 'y': 73}
elif location == enums.keaLocation.CENTER:
hotspotToolTipLocation = {'x': 782, 'y': 268}
elif location == enums.keaLocation.BOTTOM_LEFT:
hotspotToolTipLocation = {'x': 503, 'y': 402}
elif location == enums.keaLocation.BOTTOM_RIGHT:
hotspotToolTipLocation = {'x': 916, 'y': 402}
elif location == enums.keaLocation.PROTECTED_ZONE_CENTER:
hotspotToolTipLocation = {'x': 846, 'y': 433}
elif location == enums.keaLocation.PROTECTED_ZONE_LEFT:
hotspotToolTipLocation = {'x': 503, 'y': 433}
elif location == enums.keaLocation.PROTECTED_ZONE_RIGHT:
hotspotToolTipLocation = {'x': 849, 'y': 433}
# Verify the Add Hotspot tool tip location
if addHotspotToolTip.location != hotspotToolTipLocation and hotspotToolTipLocationChrome != addHotspotToolTip.location:
writeToLog("INFO", "FAILED, the tool tip for " + location.value + " was displayed at X:" + str(addHotspotToolTip.location['x']) + " and Y:" + addHotspotToolTip.location['y'] + " coordinates" )
return False
playButton = self.wait_element(self.KEA_PLAYER_CONTROLS_PLAY_BUTTON, 1, True)
if playButton == False:
writeToLog("INFO", "FAILED to take the play button in order to move from player section")
return False
ActionChains(self.driver).move_to_element(playButton).pause(2).perform()
addHotspotToolTipUpdated = self.wait_element(self.KEA_HOTSPOTS_PLAYER_ADD_HOTSPOT_TOOLTIP, 1, True)
if addHotspotToolTipUpdated != False:
writeToLog("INFO", "FAILED, the tool tip is still displayed after exiting the player area")
return False
except Exception:
writeToLog("INFO", "FAILED to hover over the KEA location:" + location.value)
return False
writeToLog("INFO", "KEA Location has been successfully verified at " + location.value)
return True
# @Author: Horia Cus
# This function will move the desired hotspot using drag and drop to the new hotspot location
# hotspotName = contains the string of the Hotspot Title
# hotspotNewLocation = contains the enum of the desired new location for the hotspot ( e.g enums.keaLocation.CENTER )
    # If the desired new hotspotLocation is already taken by another hotspot, the function will return False
def changeHotspotLocationPlayer(self, hotspotName, hotspotNewLocation):
self.switchToKeaIframe()
# Take the list of the presented hotspots
presentedHotspots = self.wait_elements(self.KEA_HOTSPOTS_PLAYER_HOTSPOT_CONTAINER, 10)
# Verify that at least one hotspots has been found in the player screen
if presentedHotspots == False:
writeToLog("INFO", "FAILED, no hotspots were found within the player screen")
return False
# Take the hotspot index for our hotspotName
for x in range(0, len(presentedHotspots)):
if presentedHotspots[x].text == hotspotName:
hotspotIndex = x
break
if x + 1 == len(presentedHotspots):
writeToLog("INFO", "FAILED to find the " + hotspotName + " inside the presented hotspots")
return False
# Create a list and dictionary that will be used in order to create a new hotspot with our desired location
hotspotLocationDetailsList = [hotspotNewLocation.value, hotspotNewLocation]
hotspotDict = {'1': hotspotLocationDetailsList}
# Create a new hotspot that is created at the desired new location
if self.hotspotCreation(hotspotDict) == False:
writeToLog("INFO", "FAILED to create a new hotspot in order to take the coordinates for the desired location" + hotspotNewLocation.value)
return False
# Take the list with the updated presented hotspots
hotspotLocationElement = self.wait_elements(self.KEA_HOTSPOTS_PLAYER_HOTSPOT_CONTAINER, 10)
action = ActionChains(self.driver)
# Move the hotspotName to the desired new location
try:
action.drag_and_drop(hotspotLocationElement[hotspotIndex], hotspotLocationElement[-1]).pause(2).perform()
except Exception:
writeToLog("INFO", "FAILED to move the " + hotspotName + " to the " + hotspotNewLocation.value + " location")
return False
# Delete the hotspot that was created in order to move the hotspotName to the new location
if self.hotspotActions(hotspotNewLocation.value, enums.keaHotspotActions.DELETE) == False:
writeToLog("INFO", "FAILED to delete the new hotspot that was created in order to take the coordinates" + hotspotNewLocation.value)
return False
sleep(5)
writeToLog("INFO", "The hotspot: " + hotspotName + " has been successfully moved to the new location: " + hotspotNewLocation.value)
return True
# @Author: Horia Cus
# This function will move the desired hotspot to the new hotspot location
# hotspotName = contains the string of the Hotspot Title
# hotspotNewLocation = contains the enum of the desired new location for the hotspot ( e.g enums.keaLocation.CENTER )
    # If the desired new hotspotLocation is already taken by another hotspot, the function will return False
def changeHotspotLocationSettings(self, hotspotName, hotspotNewLocation):
self.switchToKeaIframe()
if self.openHotspotAdvancedSettings(hotspotName) == False:
writeToLog("INFO", "FAILED to enter in Hotspot Advanced Screen for: " + hotspotName + " hotspot")
return False
# Take the X,Y coordinates specific for the hotspotNewLocation
hotspotNewLocationCoordinates = self.hotspotLocationCoordinates(hotspotNewLocation)
# Verify that the X,Y coordinates were properly provided
if type(hotspotNewLocationCoordinates) is not list:
writeToLog("INFO", "FAILED to take the coordinates for the hotspot location: " + hotspotNewLocation.value)
return False
# Create the variables for the x,y locations
x,y = hotspotNewLocationCoordinates
# Add the X location to the Location X input field
if self.click(self.KEA_HOTSPOTS_FORM_LOCATION_X, 1, True) == False:
writeToLog("INFO", "FAILED to highlight the X input field location")
return False
# Select the text present inside the X input field
if self.clsCommon.sendKeysToBodyElement(Keys.CONTROL + 'a') != True:
writeToLog("INFO", "FAILED to select the text from the X input field location")
return False
# Add the new X value to the X input field
try:
ActionChains(self.driver).send_keys(str(int(x))).perform()
except Exception:
writeToLog("INFO", "FAILED to add X coordinate: " + str(int(x)) + " inside the Form list of the hotspot: " + hotspotName)
return False
# Add the Y location to the Location X input field
if self.click(self.KEA_HOTSPOTS_FORM_LOCATION_Y, 1, True) == False:
writeToLog("INFO", "FAILED to highlight the X input field location")
return False
# Select the text present inside the Y input field
if self.clsCommon.sendKeysToBodyElement(Keys.CONTROL + 'a') != True:
writeToLog("INFO", "FAILED to select the text from the X input field location")
return False
# Add the new X value to the X input field
try:
ActionChains(self.driver).send_keys(str(int(y))).perform()
except Exception:
writeToLog("INFO", "FAILED to add Y coordinate: " + str(int(y)) + " inside the Form list of the hotspot: " + hotspotName)
return False
# Save the coordinates changes
if self.saveHotspotChanges(settingsChanges=True) == False:
writeToLog("INFO", "FAILED to save the changes for " + hotspotName + " Edited hotspot")
return False
writeToLog("INFO", "Coordinates for the " + hotspotName + " hotspot were set to X: " + str(int(x)) + " and Y:" + str(int(y)) + " specific for the location " + hotspotNewLocation.value)
return True
# @Author: Horia Cus
# This function verifies the Hotspot present on the Panel from the left side of the player while being in the Hotspots tab
# Verifies that the expected hotspots are displayed with the desired configurations
def hotspotListVerification(self, hotspotDict, expectedHotspotNumber=None):
self.switchToKeaIframe()
# Take the expectedHostpotNumber based on the length of the hotspotDict if no force number was given
if expectedHotspotNumber == None:
expectedHotspotNumber = str(len(hotspotDict))
else:
expectedHotspotNumber = str(expectedHotspotNumber)
# Take the details from the HS List Header
hotspotListHeader = self.wait_element(self.KEA_HOTSPOTS_LIST_HEADER, 10, True)
# Take the details of the HS List counter
hotspotListCounter = self.get_child_element_by_type(hotspotListHeader, 'tag_name', 'span').text.split()
if hotspotListHeader == False:
writeToLog("INFO", "FAILED to take the HS List header details")
return False
# Take the details of the available HS from the list
hotspotListContent = self.wait_element(self.KEA_HOTSPOTS_LIST_CONTENT, 1, True)
if hotspotListContent == False:
writeToLog("INFO", "FAILED to take the HS List content details")
return False
if len(hotspotListCounter) != 2:
writeToLog("INFO", "FAILED, more than the expected information for HS counter were given")
return False
# Verify that the presented hotspots from the HS Panel list matches with the expected number
else:
if hotspotListCounter[0] != expectedHotspotNumber:
writeToLog("INFO", "FAILED,a total of " + hotspotListCounter + " HS were displayed in the HS list but " + expectedHotspotNumber + " were expected")
return False
if hotspotListCounter[1] != 'Hotspots':
writeToLog("INFO", "FAILED, the 'Hotspots' text placeholder was not displayed, instead " + hotspotListCounter[1] + " text was present")
return False
# Verify that a proper placeholder text is presented if no hotspots are available
if expectedHotspotNumber == str(0):
if hotspotListContent.text != 'No hotspots for this video':
writeToLog("INFO", "FAILED, we expected Zero Hotspots but the HS list is populated")
return False
else:
hotspotListPanels = self.wait_elements(self.KEA_HOTSPOTS_LIST_PANEL_HOTSPOT, 1)
# Verify that the number of expected hotspots matches with the number of HS List Panel presented
if type(hotspotListPanels) != list:
writeToLog("INFO", "FAILED, Hotspot List Panels elements couldn't be provided")
return False
else:
if str(len(hotspotListPanels)) != expectedHotspotNumber:
writeToLog("INFO", "FAILED, a number of " + expectedHotspotNumber + " were expected but, " + str(len(hotspotListPanels)) + " HS were presented")
return False
else:
# Verify the hotspot expected details while being in the Advanced Settings
if hotspotDict != '':
for x in range(0, len(hotspotDict)):
expectedHotspotDetails = hotspotDict[str(x+1)]
expectedHotspotDetailTitle = expectedHotspotDetails[0]
presentedHotspotsTitle = self.wait_elements(self.KEA_HOTSPOTS_PANEL_ITEM_TITLE, 1)
presentedHotspotsLink = self.wait_elements(self.KEA_HOTSPOTS_PANEL_ITEM_LINK, 1)
hotspotNameIndex = 0
# Verify and take the expected hotspot index number
for x in range(0, len(presentedHotspotsTitle)):
if presentedHotspotsTitle[x].text == expectedHotspotDetailTitle:
hotspotNameIndex = x
break
if x + 1 == len(presentedHotspotsTitle):
writeToLog("INFO", "FAILED to find the " + expectedHotspotDetailTitle + " hotspot inside the HS list panel")
return False
if expectedHotspotDetails[4] != '':
if type(expectedHotspotDetails[4]) is str:
if presentedHotspotsLink[hotspotNameIndex].text != expectedHotspotDetails[4]:
writeToLog("INFO", "FAILED, we expected that " + expectedHotspotDetails[4] + " link to be displayed, instead " + presentedHotspotsLink[hotspotNameIndex].text + " was presented")
return False
elif type(expectedHotspotDetails[4]) is int:
# time string has format mm:ss
expectedTimeString = str(datetime.timedelta(seconds=expectedHotspotDetails[4]))[2:]
if presentedHotspotsLink[hotspotNameIndex].text != 'Jump to time: ' + expectedTimeString:
writeToLog("INFO", "FAILED, " + presentedHotspotsLink[hotspotNameIndex].text + " time was set in the HS list, however, we expected " + expectedTimeString)
return False
else:
writeToLog("INFO", "FAILED, invalid format for the hotspot link verification in HS list")
return False
else:
try:
presentedHotspotsLink[hotspotNameIndex]
if presentedHotspotsLink[hotspotNameIndex].text != '':
writeToLog("INFO", "FAILED, we expected to have no Link, however, " + presentedHotspotsLink[hotspotNameIndex].text + " link was displayed")
return False
except TypeError:
writeToLog("INFO", "As expected, no hotspot link has been provided for " + presentedHotspotsTitle[hotspotNameIndex].text )
# Highlight the presented Hostpot from the HS Panel List
if self.clickElement(presentedHotspotsTitle[hotspotNameIndex]) == False:
writeToLog("INFO", "FAILED to highligth the " + expectedHotspotDetailTitle + " hotspot")
return False
if self.click(self.KEA_HOTSPOTS_ADVANCED_SETTINGS, 1, True) == False:
writeToLog("INFO", "FAILED to click on the Advanced Settings for " + expectedHotspotDetailTitle + " Hotspot")
return False
sleep(1)
# TO BE DEVELOPED
# title verification
# link verification
# style verification
# need pyperclip issue solved in order to proceed with the implementation
if self.click(self.KEA_HOTSPOTS_CANCEL_BUTTON, 1, True) == False:
writeToLog("INFO", "FAILED to dismiss the Advanced Settings Option of the " + expectedHotspotDetailTitle + " Hotspot")
return False
if self.wait_element(self.KEA_HOTSPOTS_LIST_CONTENT, 10, True) == False:
writeToLog("INFO", "FAILED to display back the Hotspot List after clicking on the Cancel button from the Advanced Settings")
return False
writeToLog("INFO", "Proper information has been presented inside the HS list")
return True
# @Author: Horia Cus
# This function will stop the playing process from KEA player and resume it from beginning, and start the playing process again from second zero
    def startFromBeginningPlayingProcess(self,):
        """Pause the KEA player if it is playing, rewind to 00:00 and start playback.

        Returns True once playback is running from second zero, False when any
        player control cannot be found/operated or the loading spinner never clears.
        """
        self.switchToKeaIframe()
        # Verify that the video playing process is stopped; if the play button is
        # absent the entry is currently playing, so pause it first
        if self.wait_element(self.KEA_PLAYER_CONTROLS_PLAY_BUTTON, 0.3, True) == False:
            if self.wait_element(self.KEA_PLAYER_CONTROLS_PAUSE_BUTTON, 1, True) == False:
                writeToLog("INFO", "FAILED to find both play and pause buttons")
                return False
            else:
                if self.click(self.KEA_PLAYER_CONTROLS_PAUSE_BUTTON, 1, True) == False:
                    writeToLog("INFO", "FAILED to pause the video")
                    return False
                else:
                    if self.wait_element(self.KEA_PLAYER_CONTROLS_PLAY_BUTTON, 1, True) == False:
                        writeToLog("INFO", "FAILED to find the play button after pausing the video")
                        return False
        # Take the time where the video is paused at
        realTimeMarkerCurrentTime = self.wait_element(self.EDITOR_REALTIME_MARKER, 1, True).text
        realTimeMarkerTimeUpdated = None
        # Rewind with ARROW_LEFT key presses until the marker reads 00:00.00
        # NOTE(review): this loop has no upper bound - if the marker never shows
        # exactly '00:00.00' it will spin forever; confirm the marker format
        if realTimeMarkerCurrentTime != '00:00.00':
            while realTimeMarkerTimeUpdated != '00:00.00':
                self.clsCommon.sendKeysToBodyElement(Keys.ARROW_LEFT)
                realTimeMarkerTimeUpdated = self.wait_element(self.EDITOR_REALTIME_MARKER, 1, True).text
        sleep(3)
        # Trigger the playing process
        if self.click(self.KEA_PLAYER_CONTROLS_PLAY_BUTTON, 1, True) == False:
            writeToLog("INFO", "FAILED to trigger the playing process from second zero of the entry")
            return False
        sleep(0.1)
        # Wait until the loading spinner is no longer present
        if self.wait_while_not_visible(self.KEA_LOADING_SPINNER_QUIZ_PLAYER, 30) == False:
            writeToLog("INFO", "FAILED to load the KEA entry video playing process")
            return False
        return True
# @Author: Horia Cus
# This function will play the entry and return once reaching the timeToReturn
# timeToReturn must contain the following format 'mm:ss' ( e.g '01:59')
def playEntryAndReturnAtTime(self, timeToReturn):
self.switchToKeaIframe()
# Trigger the playing process from second one
if self.startFromBeginningPlayingProcess() == False:
writeToLog("INFO", "FAILED to initiate the playing process from second zero")
return False
currentPlayTime = None
# Return when the time reaches the timeToReturn value
while timeToReturn != currentPlayTime:
currentPlayTime = self.wait_element(self.EDITOR_REALTIME_MARKER, 1, True).text[:5]
writeToLog("INFO", "The video was returned at time " + timeToReturn)
return True
# @Author: Horia Cus
# This function can perform four type of hotspot creation interrupts:
# By clicking on the Cancel Button when the Hotspot Creation Tool Tip is active
# By clicking on the Player Screen when the Hotspot Creation Tool Tip is active
# By performing a switch between the tabs after placing a blank hotspot in the Hotspot Panel
# By exiting the KEA Editor when the Hotspot Creation Tool Tip is active
# hotspotInterruptType must be enum ( e.g enums.keaHotspotCreationInterrupt.CANCEL_BUTTON)
# hotspotLocation must contain enum ( e.g enums.keaLocation.CENTER )
def hotspotCreationInterrupts(self, hotspotInterruptType, hotspotLocation, entryName):
self.switchToKeaIframe()
# Place a new Add Hotspot on the player
if self.hotspotLocation(hotspotLocation) == False:
writeToLog("INFO", "FAILED to set the Hotspot Location at " + hotspotLocation)
return False
# Verify the Hotspot Creation Interrupts while using the Cancel Button
if hotspotInterruptType == enums.keaHotspotCreationInterrupt.CANCEL_BUTTON:
if self.click(self.KEA_HOTSPOTS_TOOL_TIP_CREATION_CANCEL_BUTTON, 1, True) == False:
writeToLog("INFO", "FAILED to click on the Cancel Button from the Creation Tool Tip")
return False
# Verify the Hotspot Creation Interrupts while clicking on the player
elif hotspotInterruptType == enums.keaHotspotCreationInterrupt.CANCEL_OUTSIDE:
if self.click(self.KEA_PLAYER_CONTAINER, 1, True) == False:
writeToLog("INFO", "FAILED to click on the player container in order to dismiss the Creation Tool Tip")
return False
# Verify the Hotspot Creation Interrupts while switching between KEA Tabs
elif hotspotInterruptType == enums.keaHotspotCreationInterrupt.TAB_SWITCHING:
# Place the Blank Hotspot inside the panel, without saving it
if self.click(self.KEA_HOTSPOTS_DONE_BUTTON_NORMAL, 1, True) == False:
writeToLog("INFO", "FAILED to save the KEA hotspots setting changes")
return False
# Verify that the confirmation pop up during the transition is present when the hotspots are not saved
if self.launchKEATab(entryName, enums.keaTab.VIDEO_EDITOR, False, 0, True) == False:
writeToLog("INFO", "FAILED to switch to a second tab while having Hotspot Creation Tool Tip active")
return False
# Resume back to the Hotspots Tab
if self.launchKEATab(entryName, enums.keaTab.HOTSPOTS, False, 0, False) == False:
writeToLog("INFO", "FAILED to switch back to the Hotspot Tab")
return False
# Verify that there's no Blank Hotspot to be deleted
if self.hotspotActions('<Blank>', enums.keaHotspotActions.DELETE) != False:
writeToLog("INFO", "FAILED the Blank hotspot could be deleted from the Hotspot Panel, when it shouldn't have been presented in the first place")
return False
# Verify the Hotspot Creation Interrupts while Exiting the KEA Editor
elif hotspotInterruptType == enums.keaHotspotCreationInterrupt.EXIT_KEA:
if self.exitKeaEditor() == False:
writeToLog("INFO", "FAILED to exit the KEA Editor for " + entryName + " entry")
return False
if self.launchKEATab(entryName, enums.keaTab.HOTSPOTS, True, 0, False) == False:
writeToLog("INFO", "FAILED to re-launch the KEA Hotspots tab for " + entryName + " entry")
return False
else:
writeToLog("INFO", "FAILED, please make sure that you've selected a supported hotspot creation interrupt option")
return False
# Verify that the Add New Hotspot Creation Tool Tip is no longer present
if self.wait_element(self.KEA_HOTSPOTS_TOOL_TIP_CREATION_CONTAINER, 1, True) != False:
writeToLog("INFO", "FAILED, the Hotspot Tool Tip Creation is still displayed after perform the: " + hotspotInterruptType.value + " interrupt")
writeToLog("INFO", "The hotspot interrupt: " + hotspotInterruptType.value + " has been performed successfully on " + entryName + " entry")
return True
# @Author: Horia Cus
# This function verifies the negative and positive flow while exiting the KEA Editor page
    def exitKeaEditor(self,):
        """Exit the KEA Editor, exercising both the Cancel (negative) and the
        Sure (positive) paths of the exit confirmation dialog.

        Returns True when the editor is closed and the driver has switched back
        to the default content, False on any failed step.
        """
        self.switchToKeaIframe()
        # Verify that the KEA Editor container is present
        if self.wait_element(self.KEA_MAIN_CONTAINER, 20, True) == False:
            writeToLog("INFO", "FAILED to find the KEA Editor page")
            return False
        # Trigger the Exit Confirmation Pop Up
        if self.click(self.KEA_EXIT_BUTTON, 1, True) == False:
            writeToLog("INFO", "FAILED to click on the Exit Button from the navigation bar")
            return False
        # Verify that the Exit Confirmation Pop Up is presented
        if self.wait_element(self.KEA_MAIN_CONFIRMATION_POP_UP, 5, True) == False:
            writeToLog("INFO", "FAILED to display the Confirmation Dialog for exiting the KEA Editor")
            return False
        # Verify the negative flow: Cancel must keep us inside the editor
        if self.click(self.KEA_MAIN_CONFIRMATION_POP_UP_CANCEL_BUTTON, 1, True) == False:
            writeToLog("INFO", "FAILED to cancel the Exit Kea Editor process")
            return False
        # Verify that the KEA Exit pop up is no longer present
        if self.wait_while_not_visible(self.KEA_MAIN_CONFIRMATION_POP_UP, 15) == False:
            writeToLog("INFO", "FAILED to dismiss the KEA Exit confirmation pop up after clicking on the Cancel Button")
            return False
        # Trigger the Exit Confirmation Pop Up again for the positive flow
        if self.click(self.KEA_EXIT_BUTTON, 1, True) == False:
            writeToLog("INFO", "FAILED to click on the Exit Button from the navigation bar")
            return False
        # Confirm the Exit Confirmation Pop Up
        if self.click(self.KEA_MAIN_CONFIRMATION_POP_UP_SURE_BUTTON, 3, True) == False:
            writeToLog("INFO", "FAILED to click on the Exit Confirmation Button")
            return False
        # Verify that the KEA Editor container is no longer present
        if self.wait_while_not_visible(self.KEA_MAIN_CONTAINER, 45) == False:
            writeToLog("INFO", "FAILED, the KEA Page is still displayed")
            return False
        # Switch to default content, due to the fact that we are no longer in the KEA Editor page
        self.switch_to_default_content()
        writeToLog("INFO", "KEA Editor has been successfully exited")
        return True
# @Author: Horia Cus
# This function navigates to the desired tab that needs to be verified
# Verifies that the following elements for all the KEA Tabs are displayed
# KEA Player, timeline section, entry name, zoom level, tab name, timeline marker time set to zero,
# Verifies the specific elements for each individual KEA Tab
    def keaTabVerification(self, entryName, keaTab, expectedSaveButtonState=False, hotspotsPresented=False, verifyZoomLevelOption=False):
        """Navigate to a KEA tab and verify its common and tab-specific elements.

        Common checks: player container, entry name, timeline section, tab title,
        timeline marker at 00:00.00.
        entryName -- entry under test
        keaTab -- enums.keaTab value to open and verify
        expectedSaveButtonState -- expected Save button state (HOTSPOTS/VIDEO_EDITOR only)
        hotspotsPresented -- whether the hotspot list should be populated (HOTSPOTS only)
        verifyZoomLevelOption -- also verify the timeline zoom level options
        Returns True when every applicable verification passes, False otherwise.
        """
        self.switchToKeaIframe()
        # Navigate to the desired kea tab
        if self.launchKEATab(entryName, keaTab) == False:
            writeToLog("INFO", "FAILED to access the " + keaTab.value + " KEA Tab")
            return False
        # Verify that the KEA Player is presented
        if self.wait_element(self.KEA_PLAYER_CONTAINER, 15, True) == False:
            writeToLog("INFO", "FAILED to display the v2 Player inside the tab")
            return False
        # Verify that the expected entry name is presented in the tab
        if self.verifyKeaEntryName(entryName, 5) == False:
            writeToLog("INFO", "FAILED to display the entry name: " + entryName + " inside the " + keaTab.value)
            return False
        # Verify that the timeline section is presented
        if self.wait_element(self.KEA_TIMELINE_PRESENTED_SECTIONS, 30, True) == False:
            writeToLog("INFO", "FAILED, the timeline section for " + keaTab.value + " is not presented")
            return False
        if verifyZoomLevelOption == True:
            # Verify the zoom options
            if self.verifyZoomLevelInTimeline() == False:
                writeToLog("INFO", "FAILED, the Zoom Leave wasn't be properly presented inside the " + keaTab.value)
                return False
        # Verify that a proper tab name is presented in the title; the locator
        # template contains a TAB_NAME placeholder that is filled per tab
        tabNameLocator = (self.KEA_TAB_TITLE[0], self.KEA_TAB_TITLE[1].replace('TAB_NAME', keaTab.value))
        if self.wait_element(tabNameLocator, 1, True) == False:
            writeToLog("INFO", "FAILED to display the " + keaTab.value + " Tab Title")
            return False
        # Verify that the marker timeline starts from second zero
        try:
            markerTimeInTimeline = self.wait_element(self.EDITOR_REALTIME_MARKER, 3, True).text
        except Exception:
            writeToLog("INFO", "FAILED to take the time from the real time timeline section")
            return False
        if markerTimeInTimeline != '00:00.00':
            writeToLog("INFO", "FAILED, we expected to have the Timeline Marker at second zero but it was displayed at: " + markerTimeInTimeline)
            return False
        # Verify if the save option is enabled or disabled
        if keaTab == enums.keaTab.HOTSPOTS or keaTab == enums.keaTab.VIDEO_EDITOR:
            if self.verifySaveButtonState(expectedSaveButtonState) == False:
                return False
        # Verify the specific options from the Video Editor and Quiz KEA Tab
        if keaTab == enums.keaTab.VIDEO_EDITOR or keaTab == enums.keaTab.QUIZ:
            # Take the Media Details from the element
            try:
                mediaDetailsEntry = self.wait_element(self.KEA_EDITOR_MEDIA_DETAILS_CONTAINER, 1, True).text.splitlines()
            except Exception:
                writeToLog("INFO", "FAILED to display the details for the selected media entry inside the KEA " + keaTab.value + " Tab")
                return False
            # Verify that the presented entry name from media details matches with
            # our entry name (the second line of the details container)
            if mediaDetailsEntry[1] != entryName:
                writeToLog("INFO", "FAILED to display the " + entryName + " inside the Media Details list, instead " + mediaDetailsEntry[1] + " entry name has been presented")
                return False
            # Verify that the elements properly collapse
            if self.verifySidePanelState() == False:
                writeToLog("INFO", "FAILED to proper collapse and expand the side panel for the " + keaTab.value + " KEA Tab")
                return False
        # Verify the specific elements from Hotspots Tab
        if keaTab == enums.keaTab.HOTSPOTS:
            if self.keaHotspotsTabVerification(hotspotsPresented) == False:
                return False
        writeToLog("INFO", "The KEA Tab " + keaTab.value + " has been properly displayed in the KEA Page")
        return True
# @Author: Horia Cus
# This function verifies the specific Hotspots elements while being in KEA Page
# if hotspotsPresented=True, we will verify that the hotspot list is populated and that the specific Hotspots Action are presented
# if hotspotsPresented=False, we will verify that a proper placeholder text is displayed
def keaHotspotsTabVerification(self, hotspotsPresented):
self.switchToKeaIframe()
# Verify that the Hotspot Counter and Add Hotspot options are displayed
if self.wait_element(self.KEA_HOTSPOTS_LIST_HEADER, 1, True) == False:
writeToLog("INFO", "FAILED to display the Hotspots counter and Add Hotspot button")
return False
if hotspotsPresented == True:
# Take a list with all the available hotspots from the sidebar
if self.wait_element(self.KEA_HOTSPOTS_PANEL_ITEM_TITLE, 5, True) == False:
writeToLog("INFO", "FAILED to find any hotspots presented in the hotspots list from the side bar menu")
return False
# Verify that the action button is displayed
if self.wait_element(self.KEA_HOTSPOTS_PANEL_MORE_HAMBURGER_MENU, 1, True) == False:
writeToLog("INFO", "FAILED to find the trigger for the action menu inside the Hotspots List")
return False
# Verify that we are able to trigger the Hotspots Action drop down menu
if self.click(self.KEA_HOTSPOTS_PANEL_MORE_HAMBURGER_MENU, 1, True) == False:
writeToLog("INFO", "FAILED to trigger the Hotspots Action menu")
return False
# Verify that the Duplicate Hotspot Action is displayed
if self.wait_element(self.KEA_HOTSPOTS_PANEL_ACTION_MENU_DUPLICATE, 1, True) == False:
writeToLog("INFO", "FAILED to display the Duplicate Action inside the Hotspots Action Menu")
return False
# Verify that the Delete Hotspot Action is displayed
if self.wait_element(self.KEA_HOTSPOTS_PANEL_ACTION_MENU_DELETE, 1, True) == False:
writeToLog("INFO", "FAILED to display the Delete Action inside the Hotspots Action Menu")
return False
# Verify that the Edit Hotspot Action is displayed
if self.wait_element(self.KEA_HOTSPOTS_PANEL_ACTION_MENU_EDIT, 1, True) == False:
writeToLog("INFO", "FAILED to display the Edit Action inside the Hotspots Action Menu")
return False
if self.click(self.KEA_HOTSPOTS_PANEL_MORE_HAMBURGER_MENU, 1, True) == False:
writeToLog("INFO", "FAILED to collapse the Hotspots Action menu")
return False
else:
hotspotListContent = self.wait_element(self.KEA_HOTSPOTS_LIST_CONTENT, 1, True)
# Verify that a proper placeholder is displayed while having no Hotspots for the entry
if hotspotListContent.text != 'No hotspots for this video':
writeToLog("INFO", "FAILED, we expected Zero Hotspots but the HS list is populated")
return False
return True
# @Author: Horia Cus
# This function verifies that the Side Panel can be collapsed and expanded
# Verifies that when the Side Panel is collapsed, the player container is expanded
# Verifies that when the Side Panel is expanded, the player container is collapsed
def verifySidePanelState(self):
self.switchToKeaIframe()
# Collapse the side panel
self.click(self.KEA_COLLAPSE_PANEL_BUTTON, 1, True)
expandedPlayerContainer = self.wait_element(self.KEA_EXPANDED_PLAYER_CONTAINER, 3, True)
# Verify that the player container is now expanded
if expandedPlayerContainer == False:
writeToLog("INFO", "FAILED to expand the player container after collapsing the Side Panel")
return False
expandedPlayerContainerXLocation = expandedPlayerContainer.location['x']
# Expand the side panel
self.click(self.KEA_EXPAND_PANEL_BUTTON, 1, True)
collapsedPlayerContainer = self.wait_element(self.KEA_COLLAPSED_PLAYER_CONTAINER, 3, True)
# Verify that the player container is now collapsed
if collapsedPlayerContainer == False:
writeToLog("INFO", "FAILED to resume the player container to the initial size after re opening the Side Panel")
return False
collapsedPlayerContainerXLocation = collapsedPlayerContainer.location['x']
# Verify that the Player Container is properly displayed based on the Side Panel status
if collapsedPlayerContainerXLocation <= expandedPlayerContainerXLocation:
writeToLog("INFO", "FAILED to center the Player Container after collapsing the side panel")
return False
return True
# @Author: Horia Cus
# This function verifies if the save button is enabled or disabled
def verifySaveButtonState(self, isEnabled=True):
self.switchToKeaIframe()
# Take the save button element
saveButton = self.wait_element(self.KEA_HOTSPOTS_SAVE_BUTTON, 5, True)
if saveButton != False:
# Take the parent of the save button element
saveButtonParrent = self.wait_element(self.KEA_HOTSPOTS_SAVE_BUTTON_PARENT, 1, True)
# Take the arguments from the parent
saveButtonArgumentsDict = self.driver.execute_script('var items = {}; for (index = 0; index < arguments[0].attributes.length; ++index) { items[arguments[0].attributes[index].name] = arguments[0].attributes[index].value }; return items;', saveButtonParrent)
# Convert the arguments into a string
saveButtonArgumentsString = ', '.join("{!s}={!r}".format(key,val) for (key,val) in saveButtonArgumentsDict.items())
# Verify if disabled is presented as argument
isEnabledNumber = saveButtonArgumentsString.count('disabled')
if isEnabled == True:
if isEnabledNumber == 1:
writeToLog("INFO", "FAILED, the Save button it's displayed as being disabled, although we expected to be enabled")
else:
if isEnabledNumber == 0:
writeToLog("INFO", "FAILED, the Save button it's displayed as being enabled, although we expected to be disabled")
else:
writeToLog("INFO", "FAILED, the save button is not presented at all in the KEA Page")
return False
return True
# @Author: Horia Cus
# This function creates a Dictionary that contains a full list of details with different configurations for each Hotspot
# desiredNumberOfHotspots = represents the number of hotspots lists that you want to have in the dictionary
# entryDuration needs to be integer and cover the time of the entry, in order to proper create a start and end time of cue points
# Make sure that you used an entry that has at least 10 seconds of length
def keaGenerateHotspotsDictionary(self, desiredNumberOfHotspots, entryDuration):
# Create empty variables that will be populated within the for loop
hotspotDetailsList = None
hotspotGenereatedDict = {}
# Create and update the hotspot Dict with new Hotspots Details
for x in range(0, desiredNumberOfHotspots):
# Create a random Hotspot Title that can have at least four characters and maximum of 15
hotspotTitle = ''.join(random.choice(string.ascii_letters) for x in range(random.randint(4,15)))
# Create a list with the available pre-defined locations
hotspotLocationList = [enums.keaLocation.BOTTOM_LEFT, enums.keaLocation.BOTTOM_RIGHT, enums.keaLocation.CENTER, enums.keaLocation.TOP_LEFT, enums.keaLocation.TOP_RIGHT]
# Create an integer interval for the start time of the hotspots based on the entry duration
hotspotCuePointStartTime = random.randint(0,entryDuration-4)
# Create an integer interval for the end time of the hotspots based on the entry duration
hotspotCuePointEndTime = random.randint(hotspotCuePointStartTime+3,entryDuration)
# Create a string that contains a valid link format
hotspotLink = 'https://' + ''.join(random.choice(string.ascii_letters) for x in range(random.randint(4,15))).lower() + '.' +''.join(random.choice(string.ascii_letters) for x in range(random.randint(2,4))).lower()
# Create a list that contains the valid link string and empty
hotspotLinkListWithEmptyLink = [hotspotLink, '']
# Pick randomly if the hotspot to have or not a link
hotspotLinkRandom = random.choice(hotspotLinkListWithEmptyLink)
# Create a list with the available pre-defined text styles
hotspotTextStyleList = [enums.textStyle.BOLD, enums.textStyle.NORMAL]
# Pick randomly a text style from the available list
hotspotTextStyle = random.choice(hotspotTextStyleList)
# Font and Background color returns Fails due to KMS = TBA
hotspotFontColor = ''
hotspotBackgroundColor = ''
# Create an integer interval based on the maximum and minimum values from the Text Size option
hotspotTextSize = random.randint(12,18)
# Create an integer interval based on the maximum and minimum values from the Roundness Size option
hotspotRoundnessSize = random.randint(2,16)
# Verify if the hotspot location has been used already for an Hotspot from our Dictionary
if len(hotspotGenereatedDict) >= 1:
try:
for i in range(0, len(hotspotGenereatedDict)):
if len(hotspotGenereatedDict) == 1:
availableHotspotLocationList = [x for x in hotspotLocationList if x != hotspotGenereatedDict[str(i)][1]]
# Verify if a hotspot location is free or not
if type(availableHotspotLocationList) is list and len(availableHotspotLocationList) == 0:
break
# Create a list with the unused hotspot locations
if len(hotspotGenereatedDict) > 1:
availableHotspotLocationList = [x for x in availableHotspotLocationList if x != hotspotGenereatedDict[str(i)][1]]
except Exception:
pass
else:
availableHotspotLocationList = hotspotLocationList
# If a hotspot location is available, we will use first the pre-defined locations
if len(availableHotspotLocationList) >= 1:
hotspotLocation = availableHotspotLocationList[0]
# If no pre-defined hotspot locations are available, we will use the Add New button option and place the hotspot randomly
else:
hotspotLocation = ''
hotspotDetailsList = [hotspotTitle, hotspotLocation, hotspotCuePointStartTime, hotspotCuePointEndTime, hotspotLinkRandom, hotspotTextStyle, hotspotFontColor, hotspotBackgroundColor, hotspotTextSize, hotspotRoundnessSize]
hotspotGenereatedDict.update({str(x):hotspotDetailsList})
# Verify that the expected number of hotsptos were inserted inside the dictionary
if len(hotspotGenereatedDict) != desiredNumberOfHotspots:
writeToLog("INFO", "FAILED, we expected to have a dictionary that contains " + str(desiredNumberOfHotspots) + " number of hotspots but only " + str(len(hotspotGenereatedDict)))
return False
return hotspotGenereatedDict
# @Author: Horia Cus
# Verify that the user is unable to create a new hotspot while having an invalid URL
# Verification process is supported in both Advanced Settings and Add Hotspot Tool Tip
# hotspotCreationScreen must contain enum ( e.g enums.keaHotspotCreationScreen.ADVANCED_SETTINGS )
# invalidURLString must contain any string without . ( e.g invalidurlstring)
    def verifyHotspotsCreationWithInvalidURL(self, hotspotCreationScreen, invalidURLString):
        """Verify that a hotspot cannot be saved while its link field holds an invalid URL.

        hotspotCreationScreen -- enums.keaHotspotCreationScreen value; the check is
                                 supported in both ADVANCED_SETTINGS and the Add
                                 Hotspot tool tip
        invalidURLString -- any string that is not a valid URL (e.g. without a dot)
        Returns True when the 'Invalid URL Format' error shows up and the creation
        screen can still be dismissed, False otherwise.
        """
        self.switchToKeaIframe()
        # Trigger the Add New Hotspot tool tip in order to verify the invalid URL functionality inside the Add Hotspot Tool Tip
        if self.click(self.KEA_HOTSPOTS_ADD_NEW_BUTTON, 5, True) == False:
            writeToLog("INFO", "FAILED to click on the Add new Button")
            return False
        sleep(1)
        # Reach the Advanced Settings screen and take its specific locators
        if hotspotCreationScreen == enums.keaHotspotCreationScreen.ADVANCED_SETTINGS:
            if self.click(self.KEA_HOTSPOTS_ADVANCED_SETTINGS, 1, True) == False:
                writeToLog("INFO", "FAILED to activate the Advanced Settings for Hotspots")
                return False
            doneButtonLocator = self.KEA_HOTSPOTS_DONE_BUTTON_ADVANCED_SETTINGS
            cancelButtonLocator = self.KEA_HOTSPOTS_CANCEL_BUTTON
        # Take the locators from the Add Hotspot tool tip
        else:
            doneButtonLocator = self.KEA_HOTSPOTS_DONE_BUTTON_NORMAL
            cancelButtonLocator = self.KEA_HOTSPOTS_TOOL_TIP_CREATION_CANCEL_BUTTON
        # Insert the invalid url string inside the link field
        if self.clear_and_send_keys(self.KEA_HOTSPOTS_FORM_LINK_INPUT_FIELD, invalidURLString, True) == False:
            writeToLog("INFO", "FAILED to insert the invalid URL:" + invalidURLString + " inside Link Address from the Add Hotspots tool tip")
            return False
        # Try to save the hotspot by clicking on the done button
        if self.click(doneButtonLocator, 1, True) == False:
            writeToLog("INFO", "FAILED to click on the Done button from the Add Hotspots tool tip")
            return False
        # Take the element that needs to be triggered while having an invalid URL
        urlInputErrorElement = self.wait_element(self.KEA_HOTSPOTS_URL_INPUT_ERROR, 5, True)
        # Verify that the element for invalid URL is presented
        if urlInputErrorElement == False:
            writeToLog("INFO", "FAILED to display the Invalid URL Format error")
            return False
        else:
            # Verify that the expected error message is displayed
            if urlInputErrorElement.text != 'Invalid URL Format':
                writeToLog("INFO", "FAILED, we expected to see 'Invalid URL Format' error but " + urlInputErrorElement.text + " error was presented")
                return False
        # Verify that the Hotspot creation screen remained present and it can be dismissed
        if self.click(cancelButtonLocator, 1, True) == False:
            writeToLog("INFO", "FAILED to dismiss the " + hotspotCreationScreen.value + " Screen by clicking on the Cancel button")
            return False
        writeToLog("INFO", "The invalid url has been successfully verified while being in " + hotspotCreationScreen.value + " Screen")
        return True
# @Author: Horia Cus
# This function will return the index number for the desired hotspotName from the HS List ( Side Bar )
def returnHotspotIndexFromList(self, hotspotName):
self.switchToKeaIframe()
# Take a list with all the available hotspots from the HS List ( Side Bar )
hotspotsPanelTitle = self.wait_elements(self.KEA_HOTSPOTS_PANEL_ITEM_TITLE, 5)
# Verify that we were able to find hotspots inside the HS List
if hotspotsPanelTitle == False:
writeToLog("INFO", "FAILED to find any available hotspots in the side bar panel")
return False
# Take the hotspot index specific for the HS List ( Side Bar )
hotspotIndexLocation = None
for x in range(0, len(hotspotsPanelTitle)):
# Verify if the current iterrated hotspot matches with our desired one
if hotspotsPanelTitle[x].text == hotspotName:
hotspotIndexLocation = x
break
# Verify that we were able to find our hotspot within the available number of tries
if x + 1 == len(hotspotsPanelTitle):
writeToLog("INFO", "FAILED to find the " + hotspotName + " inside the sidebar panel")
return False
writeToLog("INFO", "Hotspot Index Location for " + hotspotName + " hotspot is at: " + str(hotspotIndexLocation))
return hotspotIndexLocation
# @Author: Horia Cus
# This function will open the Advanced Settings Screen for the desired hotspotName
def openHotspotAdvancedSettings(self, hotspotName):
self.switchToKeaIframe()
hotspotIndexLocation = self.returnHotspotIndexFromList(hotspotName)
if type(hotspotIndexLocation) is not int:
writeToLog("INFO", "FAILED to take the hospot: " + hotspotName + " index location")
return False
# Create the elements for the Hotspot Title
presentedHotspotsTitle = self.wait_elements(self.KEA_HOTSPOTS_PANEL_ITEM_TITLE, 1)
# Highlight the hotspotName field by clicking on its container
if self.clickElement(presentedHotspotsTitle[hotspotIndexLocation]) == False:
writeToLog("INFO", "FAILED to highligth the " + hotspotName + " hotspot from the Hotspot List Screen")
return False
# Trigger the Advanced Settings Screen
if self.click(self.KEA_HOTSPOTS_ADVANCED_SETTINGS, 1, True) == False:
writeToLog("INFO", "FAILED to click on the Advanced Settings button for " + hotspotName + " hotspot")
return False
writeToLog("INFO", "Hotspot Advanced Setting Screen has been successfully opened for: " + hotspotName)
return True
# @Author: Horia Cus
# This function changes the time stamp location for an existing hotspotName
# hotspotName must contain the entire name of the desired hotspot
# You may modify only the start time or endtime or even both
# startTime and endTime must have the following format mm:ss
    def changeHotspotTimeStamp(self, hotspotName, startTime, endTime):
        """Change the cue point time stamps of an existing hotspot.

        hotspotName -- full name of the hotspot to edit
        startTime / endTime -- new times as 'mm:ss' strings or integer seconds;
        pass '' to leave the corresponding boundary unchanged.
        Returns True when the changes are saved, False on any failed step.
        """
        self.switchToKeaIframe()
        # Convert the integer seconds to a mm:ss string format
        if type(startTime) is int:
            startTime = time.strftime('%M:%S', time.gmtime(startTime))
        if type(endTime) is int:
            endTime = time.strftime('%M:%S', time.gmtime(endTime))
        # Trigger the advanced settings screen for the desired hotspotName
        if self.openHotspotAdvancedSettings(hotspotName) == False:
            writeToLog("INFO", "FAILED to enter in the Hotspot Advanced Screen for: " + hotspotName + " hotspot")
            return False
        if startTime != '':
            # Select the Start Time input field
            if self.click(self.KEA_HOTSPOTS_FORM_START_TIME, 1, True) == False:
                writeToLog("INFO", "FAILED to highlight the start time input field from the Advanced Settings screen for hotspot: " + hotspotName)
                return False
            sleep(0.2)
            # NOTE(review): Firefox apparently needs an explicit Ctrl+A so the
            # typed value replaces the old text instead of being appended
            if self.driver.capabilities['browserName'] == 'firefox':
                # Select the presented Start Time text
                if self.clsCommon.sendKeysToBodyElement(Keys.CONTROL + 'a') != True:
                    writeToLog("INFO", "FAILED to select the presented start time text from the input field")
                    return False
            # Insert the new desired Start Time inside the input field
            try:
                ActionChains(self.driver).send_keys(startTime).pause(0.4).perform()
            except Exception:
                writeToLog("INFO", "FAILED to set the start time for " + hotspotName + " at: " + startTime)
                return False
        if endTime != '':
            # Select the End Time input field
            if self.click(self.KEA_HOTSPOTS_FORM_END_TIME, 1, True) == False:
                writeToLog("INFO", "FAILED to highlight the End time input field from the Advanced Settings screen for hotspot: " + hotspotName)
                return False
            sleep(0.2)
            if self.driver.capabilities['browserName'] == 'firefox':
                # Select the presented End Time text
                if self.clsCommon.sendKeysToBodyElement(Keys.CONTROL + 'a') != True:
                    writeToLog("INFO", "FAILED to select the presented End time text from the input field")
                    return False
            # Insert the new desired End Time inside the input field
            try:
                ActionChains(self.driver).send_keys(endTime).pause(0.4).perform()
            except Exception:
                writeToLog("INFO", "FAILED to set the End time for " + hotspotName + " at: " + endTime)
                return False
        # Save the new time stamp location for the desired hotspotName
        if self.saveHotspotChanges(settingsChanges=True) == False:
            writeToLog("INFO", "FAILED to save the time stamp changes for the " + hotspotName + " hotspot")
            return False
        # Report unchanged boundaries explicitly in the success log
        if startTime == '':
            startTime = 'unchanged'
        if endTime == '':
            endTime = 'unchanged'
        writeToLog("INFO", "The hotspot: " + hotspotName + " time stamp location has been successfully set to: start time: " + startTime + " end time: " + endTime )
return True | NadyaDi/kms-automation | web/lib/kea.py | kea.py | py | 317,258 | python | en | code | 0 | github-code | 13 |
39443461963 | # -*- coding: utf-8 -*-
""" Vented box enclosure """
import numpy as np
from . import air
class VentedBox(object):
    """Model a vented box loudspeaker enclosure."""

    def __init__(self, Vab, fb, Ql):
        # Route the arguments through the property setters so the derived
        # quantities (Cab, wb, Tb) are computed in exactly one place.
        self.Vab = Vab
        self.fb = fb
        #: Enclosure leakage losses
        self.Ql = Ql

    @property
    def Vab(self):
        """Box volume in m³.

        Assigning this property also updates the acoustic compliance
        :attr:`Cab`.
        """
        return self._Vab

    @Vab.setter
    def Vab(self, Vab):
        self._Vab = Vab
        #: Acoustic compliance of box :math:`C_{ab}`
        self.Cab = Vab / (air.RHO * air.C**2)

    @property
    def fb(self):
        """Box tuning frequency in Hz.

        Assigning this property also updates the angular frequency
        :attr:`wb` and the time constant :attr:`Tb`.
        """
        return self._fb

    @fb.setter
    def fb(self, fb):
        self._fb = fb
        #: Angular frequency :math:`\omega_b = 2 \pi f_b`
        self.wb = 2 * np.pi * fb
        #: Time constant of the box :math:`T_b = \frac{1}{\omega_b}`; not to be
        #: confused with a period :math:`t = \frac{1}{f}`
        self.Tb = 1 / self.wb
| Psirus/altai | altai/lib/vented_box.py | vented_box.py | py | 1,632 | python | en | code | 0 | github-code | 13 |
41847458573 | from schemas.response import Response, ErrorResponse, \
JsonResponse
from schemas.event import EventScheme
from lib.db import ydbclient
import ydb
from crud import create
async def main(event: EventScheme) -> Response:
    """Create a record from the incoming event and build the HTTP-style response.

    Connects to YDB, inserts a record named after event.body['name'] (defaulting
    to 'Test') and answers 201 with the new id, or a generic error response when
    any database call raises ydb.Error.
    """
    print(event)  # debug trace of the raw incoming event
    try:
        await ydbclient.connect()
        # 'record_id' instead of 'id' so the builtin id() is not shadowed
        record_id = await create(event.body.get('name', 'Test'))
        return JsonResponse({
            'id': record_id
        }, 201)
    except ydb.Error:
        return ErrorResponse('Database error')
async def handler(event, context) -> dict:
    """Cloud function entry point: validate the raw event, delegate to main and
    serialize the resulting response to a plain dict."""
    validated_event = EventScheme.model_validate(event)
    response = await main(validated_event)
    return response.to_dict()
| Gamer201760/cloud-func-base | main.py | main.py | py | 629 | python | en | code | 0 | github-code | 13 |
40206200810 | #! /usr/bin/python3
#Single Exponential Smoothing
'''
Similar to weighted average, only with the diference that we consider all of data points, while assigning exponentially smaller
weights as we go back in time, eventually approaching the big old zero, the weights are dictated by math and decay uniformly.
The smaller the starting weight, the faster it approaches to zero.
'''
#Formula: y^x=α⋅yx+(1−α)⋅y^x−1
# α is considered the smoothing factor or smoothing coefficient
#Perhaps α would be better referred to as memory decay rate: the higher the α, the faster the method “forgets”.
import matplotlib.pyplot as plt
def exponential_smoothing(series, alpha):
    """Single exponential smoothing of *series* with smoothing factor *alpha*.

    Implements the recurrence stated in the header comment,
    y^_x = alpha*y_x + (1-alpha)*y^_{x-1}, seeded with y^_0 = y_0.

    Fixes two bugs in the original:
      * at i == 0 it read series[i-1] == series[-1], wrapping around to
        the LAST data point as the "previous" value;
      * the recurrence used the raw previous observation series[i-1]
        instead of the previous *smoothed* value, which made this a
        two-point weighted average rather than exponential smoothing.

    Returns [] for an empty series.
    """
    if not series:
        return []
    result = [series[0]]  # seed: the first smoothed value is the first datum
    for value in series[1:]:
        result.append(alpha * value + (1 - alpha) * result[-1])
    return result
def main():
    """Demo: smooth a short series and plot raw vs. smoothed values."""
    series = [3, 10, 12, 13, 12, 10, 12]
    alpha = 0.9  # smoothing factor; has to be < 1
    smoothed = exponential_smoothing(series, alpha)
    print(f"The single exponential smoothing of the following series: {series} is: {smoothed}")
    plt.plot(series, marker='o', color='r')
    plt.plot(smoothed, marker='o', color='b')
    plt.ylabel('Values')
    plt.xlabel('Data points')
    plt.show()
main()
#Why is it called “smoothing”?
#To the best of my understanding this simply refers to the effect these methods have on a graph
#if you were to plot the values: jagged lines become smoother.
#Moving average also has the same effect, so it deserves the right to be called smoothing just as well. | PitCoder/NetworkMonitor | Service_Monitoring/Holt-Winters/single_exponential_smoothing.py | single_exponential_smoothing.py | py | 1,515 | python | en | code | 2 | github-code | 13 |
32315220205 | import time
import pyvisa
import logging
import numpy as np
class Array3664A:
    """PyVISA driver for an Array 3664A programmable DC power supply.

    Exposes the connect/read interface the surrounding acquisition code
    expects: a (time, voltage, current) triple per ReadValue() call.
    """
    def __init__(self, time_offset, resource_name):
        # time_offset: epoch time subtracted from every ReadValue timestamp.
        # resource_name: VISA resource string; the sentinel 'client' skips
        # opening a connection (self.instr is then never set).
        self.time_offset = time_offset
        self.rm = pyvisa.ResourceManager()
        if resource_name != 'client':
            try:
                self.instr = self.rm.open_resource(resource_name)
            except pyvisa.errors.VisaIOError:
                # Connection failed: flag it and leave instr falsy.
                self.verification_string = "False"
                self.instr = False
                return
            # Serial link parameters for the instrument.
            self.instr.parity = pyvisa.constants.Parity.none
            self.instr.data_bits = 8
            self.instr.baud_rate = 9600
            # NOTE(review): pyvisa resources normally use
            # read_termination/write_termination; confirm `term_char`
            # is honoured by this backend.
            self.instr.term_char = '\r\n'
            # make the verification string
            self.verification_string = self.QueryIdentification()
        # HDF attributes generated when constructor is run
        self.new_attributes = []
        # shape and type of the array of returned data
        self.dtype = ('f', 'float', 'float')
        self.shape = (3, )
    def __enter__(self):
        return self
    def __exit__(self, *exc):
        # Close the VISA session if one was opened.
        if self.instr:
            self.instr.close()
    def ReadValue(self):
        # One sample: [elapsed seconds, voltage (V), current (A)].
        ret = [time.time()-self.time_offset, self.GetVoltage(), self.GetCurrent() ]
        return ret
    def GetWarnings(self):
        # This device reports no warnings.
        return None
    def QueryIdentification(self):
        """Identifies the instrument model and software level.
        Returns:
        <manufacturer>, <model number>, <serial number>, <firmware date>
        (np.nan on communication failure)
        """
        try:
            return self.instr.query("*IDN?")
        except pyvisa.errors.VisaIOError:
            return np.nan
    #################################################################
    ##########              IEEE-488/SERIAL COMMANDS       ##########
    #################################################################
    def Output(self, state):
        # Enable (truthy) or disable (falsy) the supply output.
        self.instr.write(f'OUTPUT:STATE {int(state)}')
    def Apply(self, voltage, current):
        # Set voltage and current limits in one SCPI command.
        self.instr.write(f'APPL {voltage},{current}')
    def GetCurrent(self):
        # Measured output current in amperes.
        return float(self.instr.query('MEAS:SCAL:CURR?'))
    def GetVoltage(self):
        # Measured output voltage in volts.
        return float(self.instr.query('MEAS:SCAL:VOLT?'))
    def SetVoltage(self, voltage):
        # Program the immediate voltage setpoint.
        self.instr.write(f'SOUR:VOLT:LEV:IMM:AMPL {voltage}')
if __name__ == '__main__':
    # Manual smoke test: apply 5 V / 2 A, enable the output, read one
    # sample, then switch the output off and close the session.
    resource_name = input('specify resource name : ')
    psu = Array3664A(time.time(), resource_name)
    psu.Apply(5,2)
    psu.Output(1)
    time.sleep(2)
    print(psu.ReadValue())
    psu.Output(0)
    # NOTE(review): calling __exit__() directly works here, but a
    # `with` block would guarantee cleanup on errors too.
    psu.__exit__()
| js216/CeNTREX | drivers/Array3664A.py | Array3664A.py | py | 2,537 | python | en | code | 1 | github-code | 13 |
5108829198 | #-*-coding:utf-8-*-
import asyncio
import danmaku
import redis
import pyautogui
# Redis list used as the command queue between this reader and the game.
list_name = 'bilibili'
# Keyboard keys viewers are allowed to trigger via danmaku messages.
key_list = ('w', 's', 'a', 'd', 'j', 'k', 'u', 'i', 'z', 'x', 'c',
            'v', 'b', 'n', 'm', 'f', 'o','p','g','h', 'l',
            'q', 'e', 'r', 'y', '+', '-')
# Movement keys (mapped from the Chinese words for forward/back/left/right).
direction = ('w', 's', 'a', 'd')
def init_redis():
    """Create a Redis client on localhost:6379 that decodes responses to str."""
    return redis.Redis(host='localhost', port=6379, decode_responses=True)
def direction_number(list_str):
    """Expand a "<key><digit>" message into the key repeated 3*digit times.

    Anything that does not match the shape (known key followed by '1'-'9')
    is returned unchanged.
    """
    if (len(list_str) >= 2
            and '1' <= list_str[1] <= '9'
            and list_str[0] in key_list):
        return [list_str[0]] * (3 * int(list_str[1]))
    return list_str
async def printer(q, redis):
    """Consume danmaku messages from *q* and push key commands to Redis.

    Recognised message shapes (after mapping the Chinese direction words
    to 'wq'/'sq'/'aq'/'dq'):
      * 'l' + direction + digit  -> pushed verbatim (long-press move)
      * direction + digit        -> pushed as 's' + direction + digit
      * 'save' / 'enter'         -> pushed verbatim
      * 'LL'/'RR' (+ digit)      -> pushed with a trailing '0' default
      * anything else            -> split into characters, expanded by
                                    direction_number(), whitelisted keys
                                    pushed one by one
    """
    while True:
        m = await q.get()
        if m['msg_type'] == 'danmaku':
            print(m["content"])
            # Map Chinese direction words onto key sequences.
            m["content"] = m["content"].replace("前", 'wq')
            m["content"] = m["content"].replace("后", 'sq')
            m["content"] = m["content"].replace('左', 'aq')
            m["content"] = m["content"].replace("右", 'dq')
            print(f'{m["name"]}:{m["content"]}')
            list_str = list(m["content"])
            # 'l' + direction + digit: long-press style move command.
            if len(list_str) == 3 and list_str[0] == 'l' and\
                    list_str[1] in direction and \
                    '1' <= list_str[2] and list_str[2] <= '9':
                new_str = list_str[0] + list_str[1] + list_str[2]
                redis.rpush(list_name, new_str)
            # direction + digit: short move, prefixed with 's'.
            elif len(list_str) == 2 and \
                    list_str[0] in direction and \
                    '1' <= list_str[1] and list_str[1] <= '9':
                new_str = 's' + list_str[0] + list_str[1]
                redis.rpush(list_name, new_str)
            elif m["content"] == 'save':
                redis.rpush(list_name, 'save')
            elif m["content"] == 'enter':
                redis.rpush(list_name, 'enter')
            # 'LL' / 'LL<digit>' commands; bare 'LL' defaults to 'LL0'.
            # NOTE(review): m["content"][1] raises IndexError for a
            # one-character message starting with 'L' — confirm inputs.
            elif m["content"][0] == 'L' and \
                    m["content"][1] == 'L':
                if len(m["content"]) == 2:
                    new_str = m["content"] + '0'
                    redis.rpush(list_name, new_str)
                elif len(m["content"]) == 3 and \
                        '0' <= m["content"][2] <= '9':
                    redis.rpush(list_name, m["content"])
            # 'RR' / 'RR<digit>' mirror of the 'LL' case.
            elif m["content"][0] == 'R' and \
                    m["content"][1] == 'R':
                if len(m["content"]) == 2:
                    new_str = m["content"] + '0'
                    redis.rpush(list_name, new_str)
                elif len(m["content"]) == 3 and \
                        '0' <= m["content"][2] <= '9':
                    redis.rpush(list_name, m["content"])
            else:
                # Fallback: split into characters, expand repeats, and
                # push every whitelisted key individually.
                print("弹幕拆分:", list_str)
                list_str = direction_number(list_str)
                print(list_str)
                for char in list_str:
                    if char.lower() in key_list:
                        print('推送队列:', char.lower())
                        redis.rpush(list_name, char.lower())
async def main(url):
    """Wire the danmaku client of *url* to the printer task and run forever."""
    redis_conn = init_redis()
    message_queue = asyncio.Queue()
    client = danmaku.DanmakuClient(url, message_queue)
    asyncio.create_task(printer(message_queue, redis_conn))
    await client.start()
# Entry point: stream danmaku from this Bilibili live room.
a = 'http://live.bilibili.com/24616287'
asyncio.run(main(a))
| ShaoChenHeng/danmu_pokemon | danmu/main_sword.py | main_sword.py | py | 3,471 | python | en | code | 4 | github-code | 13 |
24375497002 | import numpy as np
import pandas as pd
# Demo of pandas element assignment and column insertion.
dates = pd.date_range('20130101',periods=6)
df = pd.DataFrame(np.arange(24).reshape((6,4)),index=dates,columns=['A','B','C','D'])
df.iloc[2,2] = 111  # set a single cell by integer position
df.loc['20130101','B'] = 222  # set a single cell by label
# df[df.A>4] = 0 # would zero every column of the rows where A > 4
# df.A[df.A>4] = 0 # would zero only column A in the rows where A > 4
df.B[df.A>4] = 0 # zero only column B in the rows where A > 4
df['F'] = np.nan # add a new column F filled entirely with NaN
df['E'] = pd.Series([1,2,3,4,5,6],index=dates) # a new column merges into the frame only when its index matches the existing one
print (df)
15153951132 | #!/usr/bin/env python
"""
translate.py [-] <filename>
Translates a DNA sequence to a protein sequence
"""
import sys
from optparse import OptionParser
from mungo.fasta import FastaFile, pretty
from mungo import sequence
# NOTE: this script is Python 2 (it uses the `print >> file` statement).
usage = "%prog [options] <fasta file>"
parser = OptionParser(usage=usage)
parser.add_option("-o", "--output", dest="oFilename",
    help="Output filename", default=None)
parser.add_option("-w", "--width", dest="width", type="int",
    help="Sequence width", default=60)
options, args = parser.parse_args(sys.argv)
if len(args)!=2: sys.exit(__doc__)
# '-' means read the FASTA input from stdin.
if args[1]!='-':
    faFile = FastaFile(args[1])
else:
    faFile = FastaFile(sys.stdin)
# Default output is stdout unless -o was given.
if options.oFilename:
    oFile = open(options.oFilename, 'w')
else:
    oFile = sys.stdout
# Translate each DNA record and emit it as wrapped FASTA protein output.
for header,seq in faFile:
    protein = sequence.translate(seq)
    print >> oFile, '>%s' % header
    print >> oFile, pretty(protein, width=options.width)
| PapenfussLab/Mungo | bin/translate.py | translate.py | py | 914 | python | en | code | 1 | github-code | 13 |
73507449297 | from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import cv2
import numpy as np
def tsne(data, labels):
    """Project *data* to 2-D with t-SNE and scatter-plot it per cluster label."""
    reducer = TSNE(n_components=2)
    embedding = reducer.fit_transform(data)
    plt.figure(figsize=(8, 6))
    for label in np.unique(labels):
        idx = np.where(labels == label)[0]
        plt.scatter(embedding[idx, 0], embedding[idx, 1],
                    label=f'Cluster {label}', alpha=0.6)
    plt.title('Agglomerative Clustering with t-SNE Visualization')
    #plt.legend()
    plt.show()
def show_cluster_examples(video_path, cluster_labels, cluster, num_examples):
    """Display *num_examples* random frames from one cluster of a video.

    cluster_labels maps a cluster id to a list of frame indices; only the
    first 100 indices of the chosen cluster are sampled (without
    replacement).
    """
    cap = cv2.VideoCapture(video_path)
    try:
        frames_to_show = np.random.choice(cluster_labels[cluster][:100],
                                          num_examples, replace=False)
        print(frames_to_show)
        fig, axs = plt.subplots(1, num_examples, figsize=(20, 15))
        #fig.suptitle(f"Cluster {cluster}")
        for i, frame_idx in enumerate(frames_to_show):
            cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
            res, frame = cap.read()
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # BGR to RGB
            axs[i].imshow(frame)
            axs[i].axis('off')
    finally:
        # Bug fix: the capture handle was never released before.
        cap.release()
    #plt.show()
| tiagojosemiranda/Giro | visualizations.py | visualizations.py | py | 1,183 | python | en | code | 0 | github-code | 13 |
def get_tree():
    """Read a tree from stdin: node count, then one "data left right" line per node."""
    count = int(input())
    nodes = {str(i): input().split() for i in range(count)}
    return count, nodes
def get_root(nums, tree):
    """Return the root node's [data, left, right] record.

    The root is the first node whose index is never referenced as a
    child by any other node.  Child indices are collected into a set so
    each membership test is O(1) (the original scanned a list, making
    the whole lookup O(n**2)).
    """
    children = set()
    for record in tree.values():
        children.update(record[1:])
    for i in range(nums):
        if str(i) not in children:
            return tree[str(i)]
# Read both trees from stdin.
nums1, tree1 = get_tree()
nums2, tree2 = get_tree()
#print(tree1)
#print(tree2)
# Edge case: two empty trees are trivially isomorphic; one empty and one
# non-empty tree are not.
if not tree1 or not tree2:
    if not tree1 and not tree2:
        print('Yes')
        exit(0)
    else:
        print('No')
        exit(0)
root1, root2 = get_root(nums1, tree1), get_root(nums2, tree2)
#print(root1, root2)
from collections import Counter
def judge(n1, n2, tree1, tree2):
    """Recursively check that two (sub)trees are isomorphic.

    Nodes are [data, left_index, right_index] records; '-' marks a
    missing child and the sentinel [None] stands in for it.  Two subtrees
    match when their root data agree, they have the same count of missing
    children, and the child subtrees match either in order or swapped.
    Prints 'No' and exits the process on the first mismatch; returning
    normally means the subtrees match.
    """
    # Root data must agree.
    if n1[0] != n2[0]:
        #print(n1[0], n2[0])
        print('No')
        exit(0)
    # Same number of '-' (absent child) markers in the records.
    if Counter(n1)['-'] != Counter(n2)['-']:
        #print(Counter(n1)['-'], Counter(n2)['-'])
        print('No')
        exit(0)
    # A falsy data field is the [None] sentinel: nothing deeper to check.
    if not n1[0]:
        return 0
    else:
        l1, r1 = deeper(n1, tree1)
    if not n2[0]:
        return 0
    else:
        l2, r2 = deeper(n2, tree2)
    # Children may correspond in the same order or mirrored.
    if l1[0] == l2[0] and r1[0] == r2[0]:
        judge(l1, l2, tree1, tree2)
        judge(r1, r2, tree1, tree2)
    elif l1[0] == r2[0] and r1[0] == l2[0]:
        judge(l1, r2, tree1, tree2)
        judge(r1, l2, tree1, tree2)
    else:
        print('No')
        exit(0)
def deeper(node, tree):
    """Return the (left, right) child records of *node*; '-' maps to [None]."""
    left = tree[node[1]] if node[1] != '-' else [None]
    right = tree[node[2]] if node[2] != '-' else [None]
    return left, right
# judge() exits with 'No' on the first mismatch, so reaching the final
# print means the two trees are isomorphic.
judge(root1, root2, tree1, tree2)
print('Yes')
| piglaker/PTA_ZJU_mooc | src06.py | src06.py | py | 1,630 | python | en | code | 0 | github-code | 13 |
72308568019 | import numpy as np
import cv2
class Stitcher:
    """Two-image panorama stitcher based on SIFT features and a homography."""
    # Stitching entry point.
    def stitch(self, images, ratio=0.75, reprojThresh=4.0, showMatches=False):
        # ratio: Lowe's ratio for the kNN match test (0.75 is typical);
        # reprojThresh: RANSAC reprojection threshold for the homography.
        # Unpack the input pair; imageA gets warped onto imageB's plane.
        (imageB, imageA) = images
        # Detect SIFT keypoints and compute descriptors for both images.
        (kpsA, featuresA) = self.detectAndDescribe(imageA)
        (kpsB, featuresB) = self.detectAndDescribe(imageB)
        # Match the feature sets; M is (matches, H, status) or None.
        M = self.matchKeypoints(kpsA, kpsB, featuresA, featuresB, ratio, reprojThresh)
        # No homography could be estimated (too few good matches).
        if M is None:
            return None
        # H is the 3x3 perspective-transform matrix.
        (matches, H, status) = M
        # Warp image A into a canvas wide enough to hold both images.
        result = cv2.warpPerspective(imageA, H, (imageA.shape[1] + imageB.shape[1], imageA.shape[0]))
        self.cv_show('result', result)
        # Paste image B into the left part of the canvas.
        result[0:imageB.shape[0], 0:imageB.shape[1]] = imageB
        self.cv_show('result', result)
        # Optionally also return a side-by-side match visualisation;
        # this does not affect the stitching result itself.
        if showMatches:
            vis = self.drawMatches(imageA, imageB, kpsA, kpsB, matches, status)
            return (result, vis)
        return result
    def cv_show(self, name, img):  # basic blocking image-display helper
        cv2.imshow(name, img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    def detectAndDescribe(self, image):
        """Return (keypoint coordinates as float32 Nx2, SIFT descriptors)."""
        # Convert to grayscale.
        # NOTE(review): `gray` is computed but unused — detectAndCompute
        # below runs on the original image; confirm which was intended.
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # Create the SIFT detector/descriptor.
        descriptor = cv2.xfeatures2d.SIFT_create()
        # Detect keypoints and compute their descriptors.
        (kps, features) = descriptor.detectAndCompute(image, None)
        # Keep only the (x, y) coordinates as a NumPy array.
        kps = np.float32([kp.pt for kp in kps])
        return (kps, features)
    def matchKeypoints(self, kpsA, kpsB, featuresA, featuresB, ratio, reprojThresh):
        """Match descriptors and estimate the homography from A to B.

        Returns (matches, H, status), or None when 4 or fewer good
        matches survive the ratio test.
        """
        # Brute-force descriptor matcher.
        matcher = cv2.BFMatcher()
        # k-nearest-neighbour matching with k = 2 for the ratio test.
        rawMatches = matcher.knnMatch(featuresA, featuresB, 2)
        matches = []
        for m in rawMatches:
            # Lowe's ratio test: keep a pair only when the best match is
            # clearly better than the second best.
            if len(m) == 2 and m[0].distance < m[1].distance * ratio:
                # Store the indices into featuresB and featuresA.
                matches.append((m[0].trainIdx, m[0].queryIdx))
        # More than 4 correspondences are needed for a homography.
        if len(matches) > 4:
            # Gather the matched point coordinates.
            ptsA = np.float32([kpsA[i] for (_, i) in matches])
            ptsB = np.float32([kpsB[i] for (i, _) in matches])
            # RANSAC homography estimation; status flags the inliers.
            (H, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC, reprojThresh)
            return (matches, H, status)
        # Not enough matches.
        return None
    def drawMatches(self, imageA, imageB, kpsA, kpsB, matches, status):
        """Draw the inlier matches on a side-by-side visualisation image."""
        # Canvas with A on the left and B on the right.
        (hA, wA) = imageA.shape[:2]
        (hB, wB) = imageB.shape[:2]
        vis = np.zeros((max(hA, hB), wA + wB, 3), dtype="uint8")
        vis[0:hA, 0:wA] = imageA
        vis[0:hB, wA:] = imageB
        # Draw a line for every pair flagged as an inlier by RANSAC.
        for ((trainIdx, queryIdx), s) in zip(matches, status):
            if s == 1:
                ptA = (int(kpsA[queryIdx][0]), int(kpsA[queryIdx][1]))
                ptB = (int(kpsB[trainIdx][0]) + wA, int(kpsB[trainIdx][1]))
                cv2.line(vis, ptA, ptB, (0, 255, 0), 1)
        return vis
# Load the two input images (B is the base; A is warped onto it).
imageB = cv2.imread("imagel/pinjie/5.jpg")
imageA = cv2.imread("imagel/pinjie/6.jpg")
# Stitch the pair into a panorama, also returning the match visualisation.
stitcher = Stitcher()
(result,vis) = stitcher.stitch([imageA,imageB],showMatches=True)
# Show the inputs, the keypoint matches, and the final panorama.
cv2.imshow("ImageA",imageA)
cv2.imshow("ImageB",imageB)
cv2.imshow("Keypiont Matches",vis)
cv2.imshow("Result",result)
cv2.waitKey(0)
cv2.destroyAllWindows()
| huangxinyu1/opencv- | opencv学习/测试.py | 测试.py | py | 5,456 | python | zh | code | 0 | github-code | 13 |
N = int(input())
danzi = [] # the N x N map grid
total_danzi = 0 # total number of housing complexes
house_cnt = 0 # number of houses in the current complex
house_list = [] # house counts, one entry per complex
for _ in range(N): # read the map, one row of digits per line
    val = list(map(int,input()))
    danzi.append(val)
def dfs(x, y):
    """Flood-fill one housing complex from (x, y).

    Marks visited cells by zeroing them in the global grid, counts the
    houses into the global house_cnt, and returns True when (x, y) was a
    house, False otherwise.
    """
    global house_cnt
    if not (0 <= x < N and 0 <= y < N):
        return False
    if danzi[x][y] != 1:
        return False
    danzi[x][y] = 0  # mark visited
    house_cnt += 1
    for nx, ny in ((x + 1, y), (x - 1, y), (x, y - 1), (x, y + 1)):
        dfs(nx, ny)
    return True
# Scan every cell; each successful dfs() marks one whole complex.
for i in range(N):
    for j in range(N):
        if danzi[i][j] == 1:
            house_cnt = 0
            if dfs(i,j) == True:
                total_danzi += 1
                house_list.append(house_cnt)
print(total_danzi)
house_list = sorted(house_list) # sort house counts in ascending order
for i in house_list:
    print(i)
from math import *

# Upper bound of each progressive tax bracket, indexed by filing status:
# 0 - single, 1 - married filing jointly,
# 2 - married filing separately, 3 - head of household.
# The five bounds pair with marginal rates 10/15/25/28/33/35 %.
BRACKETS = {
    0: (8350, 33950, 82250, 171550, 372950),
    1: (16700, 67900, 137050, 208850, 327950),
    2: (8350, 33950, 68525, 104425, 186475),
    3: (11950, 45500, 117450, 190200, 372950),
}
print("(0 -single filers,1-married filing jointly,\n 2-married filing separately,3 -head ofhousehold)")
status = int(input("Enter the filing status: "))
tax = 0
if status in BRACKETS:
    # Income is read as float for every status (the original read an int
    # for status 0 only, and printed a spurious "Tax is 0" for status 1
    # before any tax was computed — both fixed here).
    taxableIncome = float(input("Enter the taxable income: "))
    a, b, c, d, e = BRACKETS[status]
else:
    print("Wrong status!")
if status in BRACKETS:
    # Peel off each bracket's share from the top down until only the
    # lowest bracket remains.
    while taxableIncome > a:
        if taxableIncome > e:
            tax = tax + (taxableIncome - e) * 0.35
            taxableIncome = e
        elif taxableIncome > d:
            tax = tax + (taxableIncome - d) * 0.33
            taxableIncome = d
        elif taxableIncome > c:
            tax = tax + (taxableIncome - c) * 0.28
            taxableIncome = c
        elif taxableIncome > b:
            tax = tax + (taxableIncome - b) * 0.25
            taxableIncome = b
        else:
            tax = tax + (taxableIncome - a) * 0.15
            taxableIncome = a
    # The remainder is taxed at the lowest (10 %) rate.
    tax = tax + taxableIncome * 0.1
    print("Tax is " + str("%.2f" % tax))
| Przemek-Gosik/Zadania_Python | zadania python/Zad1.py | Zad1.py | py | 1,517 | python | en | code | 0 | github-code | 13 |
73539334738 | import numpy as np
from ml.stats import Stats
from ml.data import split_xy
def classify(theta, x):
    """Perceptron decision: +1 when theta . x >= 0, otherwise -1."""
    return -1 if np.dot(theta, x) < 0 else 1
def train(data, iterations=1000):
    """Train perceptron weights by cycling over all samples *iterations* times."""
    features, targets = split_xy(data)
    sample_count, dim = features.shape
    theta = np.zeros(dim)
    for _ in range(iterations):
        for i in range(sample_count):
            if classify(theta, features[i]) != targets[i]:
                theta += targets[i] * features[i]
    return theta
def test(data, theta):
    """Evaluate *theta* on labelled data, tallying TP/FP/FN/TN into a Stats."""
    x, y = split_xy(data)
    stats = Stats()
    for i in range(len(y)):
        predicted = classify(theta, x[i])
        if predicted == 1 and y[i] == 1:
            stats.tp += 1
        elif predicted == 1 and y[i] == -1:
            stats.fp += 1
        elif predicted == -1 and y[i] == 1:
            stats.fn += 1
        else:
            stats.tn += 1
    return stats
33076705502 | from turtle import Screen
from paddle import Paddle
from ball import Ball
from scoreboard import ScoreBoard
import time
# Window setup: 800x600 black playfield with manual frame refresh
# (tracer(0) + screen.update() in the game loop).
screen = Screen()
screen.setup(height=600, width=800)  # fix: redundant int() casts removed
screen.bgcolor("black")
screen.title("Pong")
screen.tracer(0)

# Paddles at the left/right edges, ball in the centre, score readout.
r_paddle = Paddle((350, 0))
l_paddle = Paddle((-350, 0))
ball = Ball()
scoreboard = ScoreBoard((0, -280))

# Key bindings: arrow keys drive the right paddle, w/s the left one.
screen.listen()
screen.onkeypress(r_paddle.up, "Up")  # Up arrow
screen.onkeypress(r_paddle.down, "Down")  # Down arrow
screen.onkeypress(l_paddle.up, "w")
screen.onkeypress(l_paddle.down, "s")

game_is_on = True
while game_is_on:
    screen.update()
    time.sleep(ball.move_speed)
    ball.move()
    # Detect collision with top and bottom wall.
    if ball.ycor() > 280 or ball.ycor() < -280:
        ball.bounce()
    # Detect collision with either paddle.
    if ball.distance(r_paddle) < 50 and ball.xcor() > 330 or ball.distance(l_paddle) < 50 and ball.xcor() < -330:
        ball.hit()
    # A miss past either edge scores for the opponent and resets the ball.
    if ball.xcor() > 380 or ball.xcor() < -380:
        if ball.xcor() > 0:
            scoreboard.increase_l()
        else:
            scoreboard.increase_r()
        ball.reset_pos(scoreboard.turn)

screen.exitonclick()
8574143885 | from ImageData import train_test_split,load_image
import matplotlib.pyplot as plt
# Load 100 images and build a 70/30 train/test split of 128-px images.
images, labels, image_names, category = load_image(100)
data = train_test_split(image_size=128, test_size=0.3)
# Bug fix: this counts the *train* split but was labelled "test set".
print("images in train set", len(data.train.images))
print('displaying a loaded image ')
plt.imshow(images[123])
plt.show()
| shibinmak/CNN-TF-FLOYDHUB | floydhub execution/checks.py | checks.py | py | 320 | python | en | code | 0 | github-code | 13 |
21993106572 | import pymongo
def get_collection_bicycles():
    """Return the "bicicletas" collection from the MongoDB cluster.

    Connects using the MONGO_URI environment variable with a 5 s server
    selection timeout.  On failure it prints a (Spanish) diagnostic and
    implicitly returns None.
    """
    import os
    # Maximum time (ms) to wait for a server response.
    mongo_timeout = 5000
    # Connection string for the cluster, taken from the environment.
    mongo_uri = os.environ['MONGO_URI']
    # Database and collection this project uses.
    mongo_db = "proyecto_bicicletas"
    mongo_collection = "bicicletas"
    try:
        client = pymongo.MongoClient(mongo_uri, serverSelectionTimeoutMS=mongo_timeout)
        database = client[mongo_db]
        bicycles_collection = database[mongo_collection]
        return bicycles_collection
    # Bug fix: the original spelt this "ServerSelectionTImeoutError",
    # which raised AttributeError the moment a timeout occurred.
    except pymongo.errors.ServerSelectionTimeoutError:
        print('Tiempo de espera agotado')
    except pymongo.errors.ConnectionFailure:
        print('Fallo al conectarse')
    except pymongo.errors.InvalidURI:
        print('Hay un error en la URI')
| isaacvt01/isaacvt01.github.io | src/db/connection/get_collection_bicycles.py | get_collection_bicycles.py | py | 1,125 | python | es | code | 1 | github-code | 13 |
17834694846 | # -*- coding: utf-8 -*-
"""
@author: zhaox
"""
import matplotlib
matplotlib.use('Qt5Agg')
from matplotlib import pyplot as plt
import matplotlib.animation as animation
import FE_model
import numpy as np
import FE_analysis
# Build the FE model and run a modal/FRF analysis over 20 points.
mesh = FE_model.mesh()
properties = FE_model.properties(mesh)
BC = FE_model.boundary_condition(mesh)
FE = FE_model.FE_model(mesh, properties, BC)
analysis1 = FE_analysis.modal_analysis(FE)
H = analysis1.FRF_run(list_points=range(20))
# %%
# Animate |H| frame-by-frame over its last axis.
# NOTE(review): assumes H is a 3-D array (points x points x frequency
# steps) — confirm against FRF_run's return shape.
H_abs = np.abs(H)
ims = []
fig, ax = plt.subplots()
ax.set_yticks(np.arange(0,H_abs.shape[0]))
ax.set_xticks(np.arange(0,H_abs.shape[0]))
# Tick labels are 1-based point numbers.
ax.set_yticklabels(np.arange(1,H_abs.shape[0]+1))
ax.set_xticklabels(np.arange(1,H_abs.shape[0]+1))
for i in range(H_abs.shape[2]):
    im = ax.imshow(H_abs[:,:,i], animated=True)
    ims.append([im])
ani = animation.ArtistAnimation(fig, ims, interval=10, blit=True)
plt.show()
# ani.save('anima_FRF.mp4')
73908821457 | #Import augmentation libraries
import albumentations as A
import cv2
import numpy as np
from pathlib import Path
#Import libraries for data visualization
import matplotlib.pyplot as plt
import json
#Define a function to read bounding boxes
def get_bbox(data):
    """Extract COCO-style bounding boxes from annotation dicts.

    Each returned entry is [x, y, w, h, 'damage'].  Bug fix: the
    original appended the class label onto the annotation's own bbox
    list, mutating the caller's data on every call; a fresh list is now
    built per box.
    """
    return [list(box['bbox']) + ['damage'] for box in data]
#Define a function read json file
def read_json(json_file: str) -> dict:
    """Read a json file.

    Args:
        json_file: path to the json file

    Returns:
        dict parsed from the file contents
    """
    with open(json_file, 'r') as handle:
        return json.load(handle)
#Create a function to load an image
def load_image(img_list: list) -> list:
    """Read a list of images from file paths (JPEG or PNG).

    Args:
        img_list: paths to the image files

    Returns:
        list of RGB images; unreadable paths are skipped with a warning
    """
    images = []
    for img_path in img_list:
        img = cv2.imread(str(img_path))
        if img is None:
            # Bug fix: the original referenced an undefined name `path`
            # here, raising NameError instead of reporting the file.
            print('Failed to read image: {}'.format(img_path))
            continue
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        images.append(img)
    return images
# Load the annotation file and pull out its COCO boxes as
# [x, y, w, h, label] lists.
data = read_json('103_0ac05322-414f-4cfb-9daa-528c4bfad3c3.json')
# print(data['annotations'])
bboxes = get_bbox(data['annotations'])
# Augmentation pipeline for COCO-format boxes; boxes left with less than
# 20% visibility after the crop are dropped.
transform = A.Compose([
    # Random 1024x1024 crop, always applied.
    A.RandomCrop(height=1024, width=1024, p=1.0),
    # Horizontal flip with 50% probability.
    A.HorizontalFlip(p=0.5),
    # Rotation of up to +/-5 degrees with 50% probability.
    A.Rotate(limit=5, p=0.5),
],bbox_params=A.BboxParams(format='coco',min_visibility=0.2))
#Define a function to apply the augmentation pipeline
def get_augmented(images: list, bboxes: list, transform: A.Compose,
                  num_augmentations: int = 30) -> tuple:
    """Apply the augmentation pipeline to the images and bounding boxes.

    Args:
        images: list of images
        bboxes: list of bounding-box lists, one per image
        transform: augmentation pipeline
        num_augmentations: augmented variants generated per input image
            (default 30, which was previously hard-coded)

    Returns:
        tuple of (augmented images, augmented bounding boxes)
    """
    augmented_images = []
    augmented_bboxes = []
    for img, bbox in zip(images, bboxes):
        # Each call samples fresh random parameters from the pipeline.
        for _ in range(num_augmentations):
            augmented = transform(image=img, bboxes=bbox)
            augmented_images.append(augmented['image'])
            augmented_bboxes.append(augmented['bboxes'])
    return augmented_images, augmented_bboxes
# Load the single example image and run the augmentation pipeline on it.
img = load_image(['103_0ac05322-414f-4cfb-9daa-528c4bfad3c3.jpg'])[0]
# (Optional) draw the raw boxes on the original image:
# for bbox in bboxes:
#     x, y, w, h,_ = bbox
#     cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
augmented_images, augmented_bboxes = get_augmented([img], [bboxes], transform)
# Earlier per-image save loop, kept for reference:
# # augmented_image = augmented_images[0]
# # augmented_bboxe = augmented_bboxes[0]
# #Loop through the augmented images
# for idx,(augmented_image, augmented_bbox) in enumerate(zip(augmented_images, augmented_bboxes)):
#     #Draw the bounding boxes with id
#     for bbox in augmented_bbox:
#         x, y, w, h,_ = bbox
#         #Convert the bounding box to integer
#         x, y, w, h = int(x), int(y), int(w), int(h)
#         #Define the color of the bounding box
#         color = (255, 0, 0)
#         cv2.rectangle(augmented_image, (x, y), (x+w, y+h),color, 2)
#         cv2.putText(augmented_image, str(idx), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
#     #save the image using idx
#     cv2.imwrite('augmented_image_{}.jpg'.format(idx), augmented_image)
#Create a visualization function for a batch of images
def plot_grid_bbox(images:list,bboxes:list, n_cols=2, n_rows=2,save_path=None):
    """Draw an n_rows x n_cols grid of images with their bounding boxes.

    Saves the figure to *save_path* when given, otherwise shows it.

    NOTE(review): `axes` is indexed as a 2-D array, so this assumes both
    n_rows >= 2 and n_cols >= 2, and that len(images) >= n_rows * n_cols
    — confirm with callers.
    """
    fig, axes = plt.subplots(n_rows, n_cols, figsize=(n_cols*4, n_rows*4))
    for i in range(n_rows):
        for j in range(n_cols):
            # Image for this grid cell (row-major indexing).
            img = images[i*n_cols+j].astype(np.uint8)
            # Draw each bounding box plus the cell index on the image.
            for bbox in bboxes[i*n_cols+j]:
                x, y, w, h,_ = bbox
                x, y, w, h = int(x), int(y), int(w), int(h)
                color = (255, 0, 0)
                cv2.rectangle(img, (x, y), (x+w, y+h),color, 2)
                cv2.putText(img, str(i*n_cols+j), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
            axes[i,j].imshow(img)
            axes[i,j].axis('off')
    # Remove all inter-subplot spacing.
    plt.subplots_adjust(wspace=0, hspace=0)
    plt.tight_layout()
    # plt.show(block=False)
    # Save when a path is given, otherwise show interactively.
    if save_path is not None:
        plt.savefig(save_path)
    else:
        plt.show()
# Render a 4x4 grid of the augmented images and save it to disk.
plot_grid_bbox(augmented_images,augmented_bboxes, n_cols=4, n_rows=4,save_path='augmented_images.jpg')
| dataschoolai/augmentation_objectdetection | coco_augmentation.py | coco_augmentation.py | py | 5,566 | python | en | code | 0 | github-code | 13 |
74060072658 | import json
import os
from setuptools.command.install import install
class InstallEntry(install):
    """setuptools install hook that seeds ACedIt's per-user cache.

    Creates ~/.cache/ACedIt/<site>/ for every supported site and writes
    the initial constants.json before running the normal install step.
    """
    def run(self):
        default_site = 'codeforces'
        cache_dir = os.path.join(os.path.expanduser('~'), '.cache', 'ACedIt')
        # Imported lazily: acedit is only importable once its sources are
        # available during installation.
        from acedit.main import supported_sites
        for site in supported_sites:
            # create cache directory structure
            if not os.path.isdir(os.path.join(cache_dir, site)):
                os.makedirs(os.path.join(cache_dir, site))
        # Initial configuration consumed by the CLI at runtime.
        data = {'default_site': default_site.strip(
        ), 'default_contest': None, 'cachedir': cache_dir}
        with open(os.path.join(cache_dir, 'constants.json'), 'w') as f:
            f.write(json.dumps(data, indent=2))
        install.run(self)
| coderick14/ACedIt | acedit/install_entry.py | install_entry.py | py | 755 | python | en | code | 77 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.