index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
14,200 | 6c4e42631a909243557e54a636b3759dbea29936 | def stock_availability(inventory, command, *args):
if command == 'delivery':
return inventory + [*args]
if not args:
return inventory[1:]
if isinstance(args[0], int):
count = int(args[0])
return inventory[count:]
sold_items = set(args)
return [item for item in inventory if item not in sold_items]
print(stock_availability(["choco", "vanilla", "banana"], "delivery", "caramel", "berry"))
print(stock_availability(["chocolate", "vanilla", "banana"], "delivery", "cookie", "banana"))
print(stock_availability(["chocolate", "vanilla", "banana"], "sell"))
print(stock_availability(["chocolate", "vanilla", "banana"], "sell", 3))
print(stock_availability(["chocolate", "chocolate", "banana"], "sell", "chocolate"))
print(stock_availability(["cookie", "chocolate", "banana"], "sell", "chocolate"))
print(stock_availability(["chocolate", "vanilla", "banana"], "sell", "cookie")) |
14,201 | 7a3a0ca584c3890e6c2aa7947849bada7bdd60f1 | import subprocess
def run(command):
out = subprocess.check_output(command, shell=True)
if out:
return out.decode('utf-8').strip()
def git_hash(short=False):
if short:
return run('git rev-parse --short HEAD')
return run('git rev-parse HEAD')
def git_origin():
return run('git remote get-url origin')
def git_current_branch():
return run('git rev-parse --abbrev-ref HEAD')
def git_repo_name():
return run('basename `git rev-parse --show-toplevel`')
def github_repo():
origin = git_origin()
if 'git@github.com:' in origin:
origin = origin.replace('git@github.com:', '').replace('.git', '')
return origin.split('/')[-2:]
return None, None
def github_repo_name():
return github_repo()[1]
def github_repo_account():
return github_repo()[0]
def git_commit(files='.', message='automated commit', branch=None):
if branch:
try:
run(f'git checkout {branch}')
except subprocess.SubprocessError:
run(f'git checkout -b {branch}')
if isinstance(files, str):
run(f'git add {files}')
else:
run(['git', 'add', *files])
run(f'git commit -m {message}')
def git_diff():
return run('git diff')
def shutdown_computer():
return run('sudo shutdown -h now')
def nvidia_smi():
return run('nvidia-smi')
def pip_freeze():
return run('pip freeze')
def conda_list(explicit=False):
return run(f'conda list {"--explicit" if explicit else ""}')
|
14,202 | b3ff4449f72325c32155a5b402404aa7947f2e6f | import math
import random
from random import randint
import time
def partition(arr, low, high):
i = (low - 1)
pivot = arr[high]
for j in range(low, high):
if arr[j] <= pivot:
i = i + 1
arr[i], arr[j] = arr[j], arr[i]
arr[i + 1], arr[high] = arr[high], arr[i + 1]
return i + 1
def quickSort(arr, low, high):
if len(arr) == 0:
pass
elif low < high:
pi = partition(arr, low, high)
quickSort(arr, low, pi - 1)
quickSort(arr, pi + 1, high)
def heapify(arr, n, i):
largest = i
l = 2 * i + 1
r = 2 * i + 2
if l < n and arr[i] < arr[l]:
largest = l
if r < n and arr[largest] < arr[r]:
largest = r
if largest != i:
arr[i], arr[largest] = arr[largest], arr[i]
heapify(arr, n, largest)
def heapSort(arr):
for i in range(len(arr), -1, -1):
heapify(arr, len(arr), i)
for i in range(len(arr) - 1, 0, -1):
arr[i], arr[0] = arr[0], arr[i]
heapify(arr, i, 0)
def introsort(arr, maxdepth, low, high):
if maxdepth == 0:
heapSort(arr)
if low < high:
pi = partition(arr, low, high)
introsort(arr,maxdepth - 1, low, pi - 1)
introsort(arr,maxdepth - 1, pi + 1, high)
if __name__ == '__main__':
arr = []
arr2 = []
listLength = 1000
for i in range(listLength):
arr.append(randint(0, 500))
arr2.append(randint(0, 500))
timeoutstart = time.time()
quickSort(arr, 0, listLength-1)
timeoutend = time.time()
end = timeoutend - timeoutstart
print('quicksort: ' + str(end))
timeoutstart = time.time()
#heapSort(arr)
timeoutend = time.time()
end = timeoutend - timeoutstart
#print('heapsort: ' + str(end))
maxdepth = int(math.floor(math.log(len(arr2)))) * 2
timeoutstart = time.time()
introsort(arr2, maxdepth, 0, 99)
timeoutend = time.time()
end = timeoutend - timeoutstart
print('introsort: ' + str(end)) |
14,203 | fecbc98e550b93a73e43872079aa30783e6a02c7 | import unittest
import requests
class TestBooks(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.book_id = None
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
self.base_url = 'http://pulse-rest-testing.herokuapp.com'
self.book_data = {"author": "Charles Dickens",
"title": "Oliver Twist",}
def tearDown(self):
pass
def test_1_book_created(self):
book_data = {"author": "Charles Dickens",
"title": "Oliver Twist"}
book = requests.post(self.base_url + "/books", data=self.book_data)
book_as_obj = book.json()
book_id = book_as_obj['id']
self.assertEqual(book.status_code, 201)
self.assertIn(book_as_obj, requests.get(self.base_url + "/books").json())
TestBooks.book_id = book_id
def test_2_book_read(self):
book = requests.get(self.base_url + "/books" + f"/{TestBooks.book_id}")
self.assertEqual(book.status_code, 200)
self.book_data['id'] = TestBooks.book_id
self.assertDictEqual(self.book_data, book.json())
def test_3_book_update(self):
book_data = {"author": "Jane Austen",
"title": "Pride and Prejudice"}
book = requests.put(self.base_url + "/books" + f"/{TestBooks.book_id}", data=book_data)
book_as_obj = book.json()
self.assertEqual(book.status_code, 200)
self.assertIn(book_as_obj, requests.get(self.base_url + "/books").json())
def test_4_book_delete(self):
book = requests.delete(self.base_url + "/books" + f"/{TestBooks.book_id}")
self.assertEqual(book.status_code, 204)
self.assertEqual(requests.get(self.base_url + "/book" + f"/{TestBooks.book_id}").status_code, 404)
# def test_delete(self):
# books = requests.get(self.base_url + "/books")
# for i in books.json():
# requests.delete(self.base_url + f"/books/{i['id']}")
if __name__ == "__main__":
unittest.main(verbosity=2)
|
14,204 | cad118626090366b0551de25f91bf7ceb7a23dcd | """Setup for doc-helper
See: https://github.com/ynshen/DocHelper
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='doc-helper',
version='1.1.1',
description='Compose docstrings with repeated arguments',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/ynshen/DocHelper/',
author='Yuning Shen',
author_email='ynshen23@gmail.com',
keywords='documentation docstring utility',
package_dir={'': 'src'},
packages=find_packages(where='src'),
python_requires='>=3.5',
install_requires=['pandas'],
classifiers=[
'License :: OSI Approved :: BSD License'
],
project_urls={
'Bug Reports': 'https://github.com/ynshen/DocHelper/issues/',
'Source': 'https://github.com/ynshen/DocHelper/',
},
)
|
14,205 | a58448761aa49b54805445c1628e881cf76dd383 | def persistence(n):
n = str(n)
counter = 0
while len(n) > 1:
sum = 1
for num in n:
sum *= int(num)
n = str(sum)
counter += 1
return counter
print(persistence(int(input("Num:")))) |
14,206 | 6143dc00e5b74ca5ff84a2ee3041d72e7457ae24 | from pyswip.prolog import Prolog
class Doctor(object):
"""A Python adapter to the prolog medical system."""
def __init__(self, prolog_file):
self.prolog_file = prolog_file
def __repr__(self):
return 'Doctor("%s")' % self.prolog_file
def diagnose(self, symptoms, age_group=None):
"""Returns a list of possible diagnosis, if any, given a list of symptoms.
For example, if german measles has the symptoms 'runny nose', 'fever',
'headache' and 'rash', the system will return a list containing a dict with
the diagnosis.
symptoms = ['runnynose', 'fever', 'headache', 'rash']
doctor = Doctor('medical')
results = doctor.diagnose(symptoms)
results
>>> [{'Diagnosis': 'germanmeasles'}]
The diagnosis can be accessed with the key 'Diagnosis'.
NOTE: The capital D is important.
diagnosis = results[0]
diagnosis['Diagnosis']
>>> 'germanmeasles'
"""
prolog = Prolog()
if age_group is not None:
prolog.assertz('age_group(patient,%s)' % age_group)
for symptom in symptoms:
prolog.assertz('symptom(patient,%s)' % symptom)
prolog.consult(self.prolog_file)
return list(prolog.query('hypothesis(patient,Diagnosis)'))
def diagnose_one(self, symptoms, age_group=None):
"""Convenience wrapper around diagnose that returns only one result.
If no results are found, this returns None."""
try:
return self.diagnose(symptoms, age_group=age_group)[0]
except IndexError:
return None
if __name__ == '__main__':
"""If we're running this directly, run the 'unit tests'.
"""
adult_symptoms = ['headache', 'sleepiness', 'poorcoordination',
'impairedvision', 'nobladdercontrol', 'memoryloss',
'difficultywalking']
d = Doctor('medical.pl')
diagnosis = d.diagnose_one(adult_symptoms, age_group='adult')
assert diagnosis['Diagnosis'] == 'adulthydrocephalus'
|
14,207 | 3fe8dd11ed249a6d9885c5b704b6025bcf81788d | lst = [[0,1,2,3],[3,4,5,5],[6,7,8,8],[9,0,1,9]]
print(f"minimum value element in the array:{min(map(lambda x: min(x),lst))}")
print(f"maximum value element in the array:{max(map(lambda x: max(x),lst))}")
col_min = list(min(map(lambda x: x[i],lst)) for i in range(4))
print(f"elements with minimum values column-wise: {col_min}")
col_max = list(max(map(lambda x: x[i],lst)) for i in range(4))
print(f"elements with maximum values column-wise: {col_max}")
row_min = list(map(lambda x:min(x),lst))
row_max = list(map(lambda x:max(x),lst))
print(f"elements with minimum values row-wise: {row_min}")
print(f"elements with maximum values row-wise: {row_max}") |
14,208 | 8db94bb9345665334fe029750cff590abe520ee8 | #coding: UTF-8
# Author - 袁骏涛和家人共同完成
import time # 让程序停止
import os, subprocess
from colorama import init,Fore,Back,Style # 导入颜色模块
init(autoreset=True) # 初始化Colorama
class Light:
# 构造函数
def __init__(self):
# 定义light列表
self.light = []
# 自动初始化
self.prepare_light()
def prepare_light(self):
# 准备50行40列2000个灯
for row in range(50):
temp = [] # 每行40个
for col in range(40):
# 每一列的灯默认不亮
temp.append(False)
# 把每行的40个插入到light集合中
self.light.append(temp)
class TrafficLight:
# 构造函数
def __init__(self,green_time,yellow_time,red_time):
self.green_time = green_time # 绿灯的时间
self.yellow_time = yellow_time # 黄灯的时间
self.red_time = red_time # 红灯的时间
self.number01 = Light() # 显示第一个数字的电子屏
self.number02 = Light() # 显示第二个数字的电子屏
def bulid_LED_number(self,char:str):
"""
根据提供的数字来构建电子屏上的数字显示
数字显示设计为 - 横五竖六;并且每块电子屏幕的上、下、左各边空格行列数为5, 12, 5
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:param char:提供的数字
:return:返回构建的电子屏
"""
temp_LED = Light() # 新建一个电子屏屏幕,并定义显示的数字矩阵
if char == "0":
for row in range(50):
for col in range(40):
if 4 < row < 10 and col > 11 : # 第0行到第4行共5行中的元素都点亮
temp_LED.light[row][col] = True
if 45 > row > 39 and col > 11: # 第45行到第49行共5行中的元素点亮
temp_LED.light[row][col] = True
if 11 < col < 18 and 45 > row > 4: # 第0列到第5列共6列中的元素点亮
temp_LED.light[row][col] = True
if col > 33 and 45 > row > 4: # 第34列到第39列共6列中的元素点亮
temp_LED.light[row][col] = True
# 将上、下、左边各向内缩进两行
elif char == "1":
for row in range(50):
for col in range(40):
if 4 < row < 45 and col > 33: # 第34列到第39列共6列中的元素点亮
temp_LED.light[row][col] = True
# 将上、下边各向内缩进两行
elif char == "2":
for row in range(50):
for col in range(40):
if 4 < row < 10 and col > 11: # 第0行到第4行共5行中的元素都点亮
temp_LED.light[row][col] = True
if 4 < row <22 and col > 33:
temp_LED.light[row][col] = True
if 21 < row < 27 and col > 11: # row == 22 or row == 23 or row == 24 or row == 25 or row == 26:
temp_LED.light[row][col] = True
if 45 > row > 26 and 11 < col < 18:
temp_LED.light[row][col] = True
if 45 > row > 39 and col > 11:
temp_LED.light[row][col] = True
elif char == "3":
for row in range(50):
for col in range(40):
if (4 < row < 10 and col > 11) or (45 > row > 39 and col > 11):
temp_LED.light[row][col] = True
if 22 <= row <= 26 and col > 11: #row == 22 or row == 23 or row == 24 or row == 25 or row == 26: 注意此处写法等同于line69#写法
temp_LED.light[row][col] = True
if col > 33 and 4 < row < 45 :
temp_LED.light[row][col] = True
elif char == "4":
for row in range(50):
for col in range(40):
if (row == 22 or row == 23 or row == 24 or row == 25 or row == 26) and col > 11: #### 注意这个写法。。。
temp_LED.light[row][col] = True
if 11 < col < 18 and 4 < row < 22:
temp_LED.light[row][col] = True
if col > 33 and 45 > row > 4 :
temp_LED.light[row][col] = True
elif char == "5":
for row in range(50):
for col in range(40):
if (4 < row < 10 and col > 11) or (45 > row > 39 and col > 11):
temp_LED.light[row][col] = True
if (row == 22 or row == 23 or row == 24 or row == 25 or row == 26) and col > 11:
temp_LED.light[row][col] = True
if 11 < col < 18 and 4 < row < 22:
temp_LED.light[row][col] = True
if col > 33 and 45 > row > 26:
temp_LED.light[row][col] = True
elif char == "6":
for row in range(50):
for col in range(40):
if (4 < row < 10 and col > 11) or (45 > row > 39 and col > 11):
temp_LED.light[row][col] = True
if (row == 23 or row == 24 or row == 25 or row == 26 or row == 27) and col > 11:
temp_LED.light[row][col] = True
if 11 < col < 18 and 4 < row < 45 :
temp_LED.light[row][col] = True
if col > 33 and 45 > row > 27 :
temp_LED.light[row][col] = True
elif char == "7":
for row in range(50):
for col in range(40):
if 4 < row < 10 and col > 11 :
temp_LED.light[row][col] = True
if col > 33 and 4 < row < 45:
temp_LED.light[row][col] = True
elif char == "8":
for row in range(50):
for col in range(40):
if (4 < row < 10 and col > 11) or (45 > row > 39 and col > 11):
temp_LED.light[row][col] = True
if (row == 22 or row == 23 or row == 24 or row == 25 or row == 26) and col > 11:
temp_LED.light[row][col] = True
if (11 < col < 18 or col > 33) and 4 < row < 45: # row 重叠部分重复涂色,并没有增加色差!!!
temp_LED.light[row][col] = True
elif char == "9":
for row in range(50):
for col in range(40):
if (4 < row < 10 and col > 11) or (45 > row > 39 and col > 11):
temp_LED.light[row][col] = True
if 6 < row < 22 and 11 < col < 18:
temp_LED.light[row][col] = True
if (row == 22 or row == 23 or row == 24 or row == 25 or row == 26) and col > 11:
temp_LED.light[row][col] = True
if col > 33 and 4 < row < 45:
temp_LED.light[row][col] = True
# 返回这个LED
return temp_LED
def print_LED(self,color:str):
for row in range(50):
# 打印第一个数字
for col01 in range(40):
if self.number01.light[row][col01] == True:
if color == "green":
# print(Fore.GREEN + "○",end="")
print(Fore.GREEN + Back.GREEN + "袁",end="") # print函数默认换行,是end='\n'在起作用, 定义end=“”可以使打印的字符并列显示
elif color == "yellow":
# print(Fore.YELLOW + "○", end="")
print(Fore.YELLOW + Back.YELLOW + "骏", end="")
elif color == "red":
# print(Fore.RED + "○", end="")
print(Fore.RED + Back.RED + "涛", end="")
else:
print(Fore.BLACK + "空",end="")
print("\t",end="") # 两个数字之间的空格 \t 代表的是制表符,表示空四个字符,也称缩进,就是按一下Tab键
# 打印第二个数字
for col02 in range(40):
if self.number02.light[row][col02] == True:
if color == "green":
# print(Fore.GREEN + "○", end="")
print(Fore.GREEN + Back.GREEN + "袁" , end="")
elif color == "yellow":
# print(Fore.YELLOW + "○", end="")
print(Fore.YELLOW + Back.YELLOW + "骏", end="")
elif color == "red":
# print(Fore.RED + "○", end="")
print(Fore.RED + Back.RED + "涛", end="")
else:
print(Fore.BLACK + "空", end="")
# 换行
print()
def start_display(self,number:int,color:str):
"""
把传递过来的数字用指定的颜色打印
:param number: 指定的数字
:param color: 指定的颜色
:return: 无返回值
"""
# 把数字格式化
number_str = "%02d" % number # python格式化输出, 表示输出宽度为2的字符串,如果number的宽度不够,则左边补0
# 构建LED上显示的两个数字
# 第一块电子屏
self.number01 = self.bulid_LED_number(number_str[0])
# 第二块电子屏
self.number02 = self.bulid_LED_number(number_str[1])
# 在电子屏上展示
self.print_LED(color)
def start(self):
"""
开始红绿灯的倒计时
"""
while True:
# 默认一直循环下去
for number in range(self.green_time,-1,-1): # green_time-开始时间;-1 - 结束时间为 0 而不是-1 ,-1 - 每次按步长为1递减
os.system("cls") # 清屏(windows中换为cls)
print()
self.start_display(number,"green") # 调用函数开始用特定的颜色打印
# print(Fore.GREEN + "%02d" % number) # 这里的2表示占用两个宽度,如果不够宽度,前面补零
time.sleep(1)
# 黄灯
for number in range(self.yellow_time,-1,-1):
os.system("cls")
print()
self.start_display(number, "yellow")
# print(Fore.YELLOW +"%02d" % number)
time.sleep(1)
# 红灯
for number in range(self.red_time,-1,-1):
os.system("cls")
print()
self.start_display(number, "red")
# print(Fore.RED + "%02d" % number)
time.sleep(1)
def input_time(color:str):
while True:
time = ""
# 根据颜色提醒输入
if color.lower() == "green":
time = input(Fore.GREEN + " Input a pass time for the Green Light: ")
if color.lower() == "yellow":
time = input(Fore.YELLOW + " Input a warming time for the Yellow Light: ")
if color.lower() == "red":
time = input(Fore.RED + " Enter a waiting time for the Red Light: ")
# 校验输入的是否符合要求:数字、正数、1-99
# 如果不符合要求怎么处理:1. 出现异常,系统退出,报错;2.提醒重新输入
if not time.isdigit(): # 验证time是否为数字类型,python类型检测方法
print(u"验证输入的数字!因为是两块电子屏,输入的数字应该在1~99之间")
continue # 结束当前循环
else:
time_number = int(time)
if time_number < 1 or time_number > 99:
print(u"输入的值不符合要求!输入的数字应该在1~99之间")
continue
else:
# 符合要求
return time_number
if __name__ == '__main__':
# 输入红绿黄灯的时间
green_time = TrafficLight.input_time("green")
yellow_time = TrafficLight.input_time("yellow")
red_time = TrafficLight.input_time("red")
# 实例化
lello = TrafficLight(green_time, yellow_time, red_time)
# 开始倒计时
lello.start()
|
14,209 | 5aafb6d2b8b082edba0c509b16c7a12b91861a42 | """
A basic pygame template
"""
import pygame
import random
import time
# Define some colors
BLACK = ( 0, 0, 0)
WHITE = ( 255, 255, 255)
GREEN = ( 0, 255, 0)
RED = ( 255, 0, 0)
pygame.init()
# Set the width and height of the screen [width, height]
size = (800, 600)
screen = pygame.display.set_mode(size)
background_image = pygame.image.load('space.jpg').convert()
player_image = pygame.image.load('player.png').convert()
player_image.set_colorkey(BLACK)
laser_image = pygame.image.load('laser.png').convert()
enemy_image = pygame.image.load('Virus.png').convert()
enemy_image.set_colorkey(WHITE)
enemy2_image = pygame.image.load('virus2.jpg').convert()
enemy2_image.set_colorkey(WHITE)
enemy3_image = pygame.image.load('virus3 (2).png').convert()
enemy3_image.set_colorkey(WHITE)
enemy4_image = pygame.image.load('virus4.png').convert()
enemy4_image.set_colorkey(WHITE)
background_position = [0, 0]
pygame.display.set_caption("Space Virus")
#Loop until the user clicks the close button.
done = False
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
level = 1
#enemy 1-6 properties
enemy1_x = random.randint(10, 700)
enemy1_y = random.randint(20, 250)
enemy2_x = random.randint(10, 700)
enemy2_y = random.randint(20, 250)
enemy3_x = random.randint(10, 700)
enemy3_y = random.randint(20, 250)
enemy4_x = random.randint(10, 700)
enemy4_y = random.randint(20, 250)
enemy5_x = random.randint(10, 700)
enemy5_y = random.randint(20, 250)
enemy6_x = random.randint(10, 700)
enemy6_y = random.randint(20, 250)
enemy1_x_move = 4
enemy2_x_move = 4
enemy3_x_move = 4
enemy4_x_move = 4
enemy5_x_move = 4
enemy6_x_move = 4
enemy_y_move = 1
alive1 = True
alive2 = True
alive3 = True
alive4 = True
alive5 = True
alive6 = True
#player properties
player_x = 400
player_y = 400
player_y_move = 0
player_x_move = 0
player_alive = True
#laser properties
laser_x = 5
laser_y = 10
laser_velocity = 0
fire = False
#Make the enemies move the same way
def reset_speed(speed):
if speed < 0:
return speed * -1
else:
return speed
#If one of the ships corners hits an enemy player dies
def player_hit(enemy_x, enemy_y, player_x, player_y):
if player_x < enemy_x + 80 and player_x > enemy_x and player_y < enemy_y + 61 and player_y > enemy_y or player_x + 99 < enemy_x + 80 and player_x + 99 > enemy_x and player_y < enemy_y + 61 and player_y > enemy_y or player_x < enemy_x + 80 and player_x > enemy_x and player_y + 75 < enemy_y + 61 and player_y + 75 > enemy_y or player_x + 99 < enemy_x + 80 and player_x + 99 > enemy_x and player_y + 75 < enemy_y + 61 and player_y + 75 > enemy_y:
return False
return True
#Set count for enemies defeated
count = 0
#Blit level 1 and info of 1st virus
screen.blit(background_image, (0, 0))
font = pygame.font.SysFont('Calibri', 50, True, False)
text = font.render("Level 1", True, GREEN)
screen.blit(text, [325, 275])
pygame.display.update()
time.sleep(2)
screen.blit(background_image, (0, 0))
font = pygame.font.SysFont('Calibri', 25, True, False)
text = font.render("Adware is the least dangerous.", True, GREEN)
screen.blit(text, [235, 255])
font = pygame.font.SysFont('Calibri', 25, True, False)
text = font.render("It collects information from your browsing and sells it to ads.", True, GREEN)
screen.blit(text, [100, 275])
pygame.display.update()
time.sleep(4)
# -------- Main Program Loop -----------
while not done:
# --- Main event loop
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # If user clicked close
done = True # Flag that we are done so we exit this loop
screen.blit(background_image, (0, 0))
# --- Game logic should go here
#Make key inputs to move
if event.type == pygame.KEYDOWN and player_alive:
if event.key == pygame.K_LEFT:
player_x_move = -5
if event.key == pygame.K_RIGHT:
player_x_move = 5
if event.key == pygame.K_UP:
player_y_move = -5
if event.key == pygame.K_DOWN:
player_y_move = 5
if event.key == pygame.K_LSHIFT:
if not fire:
laser_x = player_x + 47
laser_y = player_y -15
fire = True
if event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT or event.key == pygame.K_UP or event.key == pygame.K_DOWN:
player_x_move = 0
player_y_move = 0
#Set player boundaries
player_x += player_x_move
if player_x <= 0:
player_x = 0
elif player_x >= 700:
player_x = 700
player_y += player_y_move
if player_y <= 0:
player_y = 0
elif player_y >= 525:
player_y = 525
#Make bullet move
if fire:
screen.blit(laser_image, (laser_x, laser_y))
laser_velocity = -10
laser_y -= 10
if laser_y < 0:
laser_velocity = 0
fire = False
#Blit level 2 + info and spawn enemies with increased speed
if count == 6 and player_alive and level == 1:
font = pygame.font.SysFont('Calibri', 50, True, False)
text = font.render("Level 2", True, GREEN)
screen.blit(text, [325, 275])
pygame.display.update()
time.sleep(2)
screen.blit(background_image, (0, 0))
font = pygame.font.SysFont('Calibri', 25, True, False)
text = font.render("Spyware is a type of malware installed without knowledge.", True, GREEN)
screen.blit(text, [95, 255])
font = pygame.font.SysFont('Calibri', 25, True, False)
text = font.render("It invades the device, steals sensitive information and internet usage data.", True, GREEN)
screen.blit(text, [20, 275])
player_x = 348
player_y = 400
enemy1_x = random.randint(10, 700)
enemy1_y = random.randint(20, 250)
enemy2_x = random.randint(10, 700)
enemy2_y = random.randint(20, 250)
enemy3_x = random.randint(10, 700)
enemy3_y = random.randint(20, 250)
enemy4_x = random.randint(10, 700)
enemy4_y = random.randint(20, 250)
enemy5_x = random.randint(10, 700)
enemy5_y = random.randint(20, 250)
enemy6_x = random.randint(10, 700)
enemy6_y = random.randint(20, 250)
enemy1_x_move = reset_speed(enemy1_x_move)
enemy2_x_move = reset_speed(enemy2_x_move)
enemy3_x_move = reset_speed(enemy3_x_move)
enemy4_x_move = reset_speed(enemy4_x_move)
enemy5_x_move = reset_speed(enemy5_x_move)
enemy6_x_move = reset_speed(enemy6_x_move)
enemy1_x_move += 1
enemy2_x_move += 1
enemy3_x_move += 1
enemy4_x_move += 1
enemy5_x_move += 1
enemy6_x_move += 1
enemy_y_move += 0.2
alive1 = True
alive2 = True
alive3 = True
alive4 = True
alive5 = True
alive6 = True
level = 2
pygame.display.update()
time.sleep(5)
#Blit level 3 + info and spawn enemies with increased speed
if count == 12 and player_alive and level == 2:
font = pygame.font.SysFont('Calibri', 50, True, False)
text = font.render("Level 3", True, GREEN)
screen.blit(text, [325, 275])
pygame.display.update()
time.sleep(2)
screen.blit(background_image, (0, 0))
font = pygame.font.SysFont('Calibri', 25, True, False)
text = font.render("Rootkit is designed to be hard to detect and remove.", True, GREEN)
screen.blit(text, [120, 255])
font = pygame.font.SysFont('Calibri', 25, True, False)
text = font.render("It accesses the owner’s information without the owner knowing.", True, GREEN)
screen.blit(text, [65, 275])
player_x = 348
player_y = 400
enemy1_x = random.randint(10, 700)
enemy1_y = random.randint(20, 250)
enemy2_x = random.randint(10, 700)
enemy2_y = random.randint(20, 250)
enemy3_x = random.randint(10, 700)
enemy3_y = random.randint(20, 250)
enemy4_x = random.randint(10, 700)
enemy4_y = random.randint(20, 250)
enemy5_x = random.randint(10, 700)
enemy5_y = random.randint(20, 250)
enemy6_x = random.randint(10, 700)
enemy6_y = random.randint(20, 250)
enemy1_x_move = reset_speed(enemy1_x_move)
enemy2_x_move = reset_speed(enemy2_x_move)
enemy3_x_move = reset_speed(enemy3_x_move)
enemy4_x_move = reset_speed(enemy4_x_move)
enemy5_x_move = reset_speed(enemy5_x_move)
enemy6_x_move = reset_speed(enemy6_x_move)
enemy1_x_move += 1
enemy2_x_move += 1
enemy3_x_move += 1
enemy4_x_move += 1
enemy5_x_move += 1
enemy6_x_move += 1
enemy_y_move += 0.2
alive1 = True
alive2 = True
alive3 = True
alive4 = True
alive5 = True
alive6 = True
level = 3
pygame.display.update()
time.sleep(5)
#Blit level 4 + info and spawn enemies with increased speed and moving up
if count == 18 and player_alive and level == 3:
font = pygame.font.SysFont('Calibri', 50, True, False)
text = font.render("Level 4", True, GREEN)
screen.blit(text, [325, 275])
pygame.display.update()
time.sleep(2)
screen.blit(background_image, (0, 0))
font = pygame.font.SysFont('Calibri', 30, True, False)
text = font.render("Trojan is the most dangerous Malware.", True, GREEN)
screen.blit(text, [175, 250])
font = pygame.font.SysFont('Calibri', 30, True, False)
text = font.render("It discovers your financial information,", True, GREEN)
screen.blit(text, [175, 275])
font = pygame.font.SysFont('Calibri', 30, True, False)
text = font.render("and takes over your computer’s system.", True, GREEN)
screen.blit(text, [175, 300])
player_x = 348
player_y = 400
enemy1_x = random.randint(10, 700)
enemy1_y = random.randint(20, 250)
enemy2_x = random.randint(10, 700)
enemy2_y = random.randint(20, 250)
enemy3_x = random.randint(10, 700)
enemy3_y = random.randint(20, 250)
enemy4_x = random.randint(10, 700)
enemy4_y = random.randint(20, 250)
enemy5_x = random.randint(10, 700)
enemy5_y = random.randint(20, 250)
enemy6_x = random.randint(10, 700)
enemy6_y = random.randint(20, 250)
enemy1_x_move = reset_speed(enemy1_x_move)
enemy2_x_move = reset_speed(enemy2_x_move)
enemy3_x_move = reset_speed(enemy3_x_move)
enemy4_x_move = reset_speed(enemy4_x_move)
enemy5_x_move = reset_speed(enemy5_x_move)
enemy6_x_move = reset_speed(enemy6_x_move)
enemy1_x_move += 1
enemy2_x_move += 1
enemy3_x_move += 1
enemy4_x_move += 1
enemy5_x_move += 1
enemy6_x_move += 1
enemy_y_move += -2.8
alive1 = True
alive2 = True
alive3 = True
alive4 = True
alive5 = True
alive6 = True
level = 4
pygame.display.update()
time.sleep(6)
#Blit victory screen
if count == 24 and player_alive and level == 4:
font = pygame.font.SysFont('Calibri', 50, True, False)
text = font.render("You killed the virus", True, GREEN)
screen.blit(text, [205, 275])
#Once player dies enemies die and defeat screen
if not player_alive:
alive1 = False
alive2 = False
alive3 = False
alive4 = False
alive5 = False
alive6 = False
font = pygame.font.SysFont('Calibri', 50, True, False)
text = font.render("You couldn't kill the virus", True, RED)
screen.blit(text, [150, 275])
#Call to def player_hit while player alive
if player_alive and alive1:
player_alive = player_hit(enemy1_x, enemy1_y, player_x, player_y)
if player_alive and alive2:
player_alive = player_hit(enemy2_x, enemy2_y, player_x, player_y)
if player_alive and alive3:
player_alive = player_hit(enemy3_x, enemy3_y, player_x, player_y)
if player_alive and alive4:
player_alive = player_hit(enemy4_x, enemy4_y, player_x, player_y)
if player_alive and alive5:
player_alive = player_hit(enemy5_x, enemy5_y, player_x, player_y)
if player_alive and alive6:
player_alive = player_hit(enemy6_x, enemy6_y, player_x, player_y)
#Make laser collision with enemies
if laser_x < enemy1_x + 80 and laser_x > enemy1_x and laser_y < enemy1_y + 61 and laser_y > enemy1_y and alive1 and fire:
alive1 = False
fire = False
count += 1
if laser_x < enemy2_x + 80 and laser_x > enemy2_x and laser_y < enemy2_y + 61 and laser_y > enemy2_y and alive2 and fire:
alive2 = False
fire = False
count += 1
if laser_x < enemy3_x + 80 and laser_x > enemy3_x and laser_y < enemy3_y + 61 and laser_y > enemy3_y and alive3 and fire:
alive3 = False
fire = False
count += 1
if laser_x < enemy4_x + 80 and laser_x > enemy4_x and laser_y < enemy4_y + 61 and laser_y > enemy4_y and alive4 and fire:
alive4 = False
fire = False
count += 1
if laser_x < enemy5_x + 80 and laser_x > enemy5_x and laser_y < enemy5_y + 61 and laser_y > enemy5_y and alive5 and fire:
alive5 = False
fire = False
count += 1
if laser_x < enemy6_x + 80 and laser_x > enemy6_x and laser_y < enemy6_y + 61 and laser_y > enemy6_y and alive6 and fire:
alive6 = False
fire = False
count += 1
#Enemy movement + different enemies
if alive1:
if enemy1_x + 80 >= 800 or enemy1_x <= 0:
enemy1_x_move *= -1
enemy1_x += enemy1_x_move
enemy1_y += enemy_y_move
if enemy1_y >= 600:
enemy1_y = 0
if enemy1_y < 0:
enemy1_y = 599
if count < 6:
screen.blit(enemy_image, (enemy1_x, enemy1_y))
if count >= 6 and count < 12:
screen.blit(enemy2_image, (enemy1_x, enemy1_y))
if count >= 12 and count < 18:
screen.blit(enemy3_image, (enemy1_x, enemy1_y))
if count >= 18 and count < 24:
screen.blit(enemy4_image, (enemy1_x, enemy1_y))
if alive2:
if enemy2_x + 80 >= 800 or enemy2_x <= 0:
enemy2_x_move *= -1
enemy2_x += enemy2_x_move
enemy2_y += enemy_y_move
if enemy2_y >= 600:
enemy2_y = 0
if enemy2_y < 0:
enemy2_y = 599
if count < 6:
screen.blit(enemy_image, (enemy2_x, enemy2_y))
if count >= 6 and count < 12:
screen.blit(enemy2_image, (enemy2_x, enemy2_y))
if count >= 12 and count < 18:
screen.blit(enemy3_image, (enemy2_x, enemy2_y))
if count >= 18 and count < 24:
screen.blit(enemy4_image, (enemy2_x, enemy2_y))
if alive3:
if enemy3_x + 80 >= 800 or enemy3_x <= 0:
enemy3_x_move *= -1
enemy3_x += enemy3_x_move
enemy3_y += enemy_y_move
if enemy3_y >= 600:
enemy3_y = 0
if enemy3_y < 0:
enemy3_y = 599
if count < 6:
screen.blit(enemy_image, (enemy3_x, enemy3_y))
if count >= 6 and count < 12:
screen.blit(enemy2_image, (enemy3_x, enemy3_y))
if count >= 12 and count < 18:
screen.blit(enemy3_image, (enemy3_x, enemy3_y))
if count >= 18 and count < 24:
screen.blit(enemy4_image, (enemy3_x, enemy3_y))
if alive4:
if enemy4_x + 80 >= 800 or enemy4_x <= 0:
enemy4_x_move *= -1
enemy4_x += enemy4_x_move
enemy4_y += enemy_y_move
if enemy4_y >= 600:
enemy4_y = 0
if enemy4_y < 0:
enemy4_y = 599
if count < 6:
screen.blit(enemy_image, (enemy4_x, enemy4_y))
if count >= 6 and count < 12:
screen.blit(enemy2_image, (enemy4_x, enemy4_y))
if count >= 12 and count < 18:
screen.blit(enemy3_image, (enemy4_x, enemy4_y))
if count >= 18 and count < 24:
screen.blit(enemy4_image, (enemy4_x, enemy4_y))
if alive5:
if enemy5_x + 80 >= 800 or enemy5_x <= 0:
enemy5_x_move *= -1
enemy5_x += enemy5_x_move
enemy5_y += enemy_y_move
if enemy5_y >= 600:
enemy5_y = 0
if enemy5_y < 0:
enemy5_y = 599
if count < 6:
screen.blit(enemy_image, (enemy5_x, enemy5_y))
if count >= 6 and count < 12:
screen.blit(enemy2_image, (enemy5_x, enemy5_y))
if count >= 12 and count < 18:
screen.blit(enemy3_image, (enemy5_x, enemy5_y))
if count >= 18 and count < 24:
screen.blit(enemy4_image, (enemy5_x, enemy5_y))
if alive6:
if enemy6_x + 80 >= 800 or enemy6_x <= 0:
enemy6_x_move *= -1
enemy6_x += enemy6_x_move
enemy6_y += enemy_y_move
if enemy6_y >= 600:
enemy6_y = 0
if enemy6_y < 0:
enemy6_y = 599
if count < 6:
screen.blit(enemy_image, (enemy6_x, enemy6_y))
if count >= 6 and count < 12:
screen.blit(enemy2_image, (enemy6_x, enemy6_y))
if count >= 12 and count < 18:
screen.blit(enemy3_image, (enemy6_x, enemy6_y))
if count >= 18 and count < 24:
screen.blit(enemy4_image, (enemy6_x, enemy6_y))
#Show player while alive
if player_alive:
screen.blit(player_image, (player_x, player_y))
# --- Go ahead and update the screen with what we've drawn.
pygame.display.update()
# --- Limit to 60 frames per second
clock.tick(60)
# Close the window and quit.
# If you forget this line, the program will 'hang'
# on exit if running from IDLE.
pygame.quit()
|
14,210 | 9a1cf3fc8adb6d43e9c1442bda82c73fe1767438 | k=[]
# Read the element count, collect that many integers into k (defined above),
# then sort ascending and report the last (largest) element.
n = int(input("Enter number of elements:"))
for _ in range(n):
    k.append(int(input("Enter element:")))
k.sort()
print("Largest element is:", k[n - 1])
|
14,211 | 79f0d5da6ba70f240bd0764467b61360224c8421 | import rospy
import time
import numpy as np
from geometry_msgs.msg import Twist
from std_msgs.msg import Float64, Float32
from openai_ros import robot_gazebo_env
from nav_msgs.msg import Odometry
class CATVehicleEnv(robot_gazebo_env.RobotGazeboEnv):
    """Gazebo environment for the CATVehicle robot.

    CATVehicle doesn't use controller_manager, therefore we won't reset the
    controllers in the standard fashion. For the moment we won't reset them.

    To check any topic we need to have the simulation running; we need two
    things:
    1) Unpause the simulation: without that the stream of data doesn't flow.
       This is for simulations that are paused for whatever reason.
    2) If the simulation was already running for some reason, we need to
       reset the controllers. This has to do with the fact that some plugins
       with tf don't understand the reset of the simulation and need to be
       reset to work properly.
    """

    def __init__(self):
        """Initializes a new CATVehicle environment: waits for every sensor
        topic to publish and for the cmd_vel publisher to connect, then
        pauses the simulation again."""
        rospy.logdebug("Start CATVehicle_ENV INIT...")
        self.controllers_list = []
        self.publishers_array = []
        self.robot_name_space = ""
        self.reset_controls = False

        # We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
        super(CATVehicleEnv, self).__init__(controllers_list=self.controllers_list,
                                            robot_name_space=self.robot_name_space,
                                            reset_controls=False,
                                            start_init_physics_parameters=False,
                                            reset_world_or_sim="WORLD")

        # Sensors only stream while the simulation is unpaused.
        self.gazebo.unpauseSim()
        self._check_all_sensors_ready()

        self._cmd_vel_pub = rospy.Publisher('/catvehicle/cmd_vel', Twist, queue_size=10)
        rospy.Subscriber("/catvehicle/distanceEstimatorSteeringBased/dist", Float64, self._distsb_callback)
        rospy.Subscriber("/catvehicle/distanceEstimatorSteeringBased/angle", Float64, self._anglesb_callback)
        rospy.Subscriber("/catvehicle/distanceEstimator/dist", Float32, self._dist_callback)
        rospy.Subscriber("/catvehicle/distanceEstimator/angle", Float32, self._angle_callback)
        rospy.Subscriber("/catvehicle/odom", Odometry, self._odom_callback)
        self._check_publishers_connection()

        self.gazebo.pauseSim()
        # FIX: message previously said "TurtleBot2Env" (copy-paste leftover).
        rospy.logdebug("Finished CATVehicle_ENV INIT...")

    # Methods needed by the RobotGazeboEnv
    # ----------------------------
    def _check_all_systems_ready(self):
        """Checks that all the sensors, publishers and other simulation
        systems are operational."""
        self._check_all_sensors_ready()
        self._check_cmd_vel_pub()
        return True

    def _check_all_sensors_ready(self):
        """Blocks until the first message arrives on every sensor topic."""
        self._check_dist_ready()
        self._check_angle_ready()
        self._check_odom_ready()
        self._check_distsb_ready()
        self._check_anglesb_ready()
        return True

    # Check our distance sensor is working
    def _check_dist_ready(self):
        """Waits for /catvehicle/distanceEstimator/dist; returns the first message."""
        self.dist = None
        rospy.logdebug("Waiting for /catvehicle/distanceEstimator/dist to be READY...")
        while self.dist is None and not rospy.is_shutdown():
            try:
                self.dist = rospy.wait_for_message("/catvehicle/distanceEstimator/dist", Float32, timeout=5.0)
                rospy.logdebug("Current /catvehicle/distanceEstimator/dist READY=>")
            except rospy.ROSException:
                # Timeout: keep retrying until the topic appears or ROS shuts down.
                rospy.logerr("Current /catvehicle/distanceEstimator/dist not ready yet, retrying for getting dist")
        return self.dist

    # Checks our angle sensor is working
    def _check_angle_ready(self):
        """Waits for /catvehicle/distanceEstimator/angle; returns the first message."""
        self.angle = None
        rospy.logdebug("Waiting for /catvehicle/distanceEstimator/angle to be READY...")
        while self.angle is None and not rospy.is_shutdown():
            try:
                self.angle = rospy.wait_for_message("/catvehicle/distanceEstimator/angle", Float32, timeout=5.0)
                rospy.logdebug("Current /catvehicle/distanceEstimator/angle READY=>")
            except rospy.ROSException:
                rospy.logerr("Current /catvehicle/distanceEstimator/angle not ready yet, retrying for getting angle")
        return self.angle

    def _check_cmd_vel_pub(self):
        """Blocks until at least one subscriber is connected to cmd_vel."""
        rate = rospy.Rate(10)  # 10hz
        while self._cmd_vel_pub.get_num_connections() == 0 and not rospy.is_shutdown():
            rospy.logdebug("No susbribers to _cmd_vel_pub yet so we wait and try again")
            try:
                rate.sleep()
            except rospy.ROSInterruptException:
                # This is to avoid error when world is reset, time went backwards.
                pass
        rospy.logdebug("_cmd_vel_pub Publisher Connected")

    # Check that all publishers are working
    def _check_publishers_connection(self):
        """
        Checks that all the publishers are working
        :return:
        """
        self._check_cmd_vel_pub()
        rospy.logdebug("All Publishers READY")

    def _check_odom_ready(self):
        """Waits for /catvehicle/odom; returns the first Odometry message.

        BUG FIX: this method previously looped on, and stored into,
        ``self.dist`` — clobbering the distance reading and never setting
        ``self.odom``. It now uses ``self.odom`` throughout.
        """
        self.odom = None
        rospy.logdebug("Waiting for /catvehicle/odom to be READY...")
        while self.odom is None and not rospy.is_shutdown():
            try:
                self.odom = rospy.wait_for_message("/catvehicle/odom", Odometry, timeout=5.0)
                rospy.logdebug("Current /catvehicle/odom READY=>")
            except rospy.ROSException:
                rospy.logerr("Current /catvehicle/odom not ready yet, retrying for getting odom")
        return self.odom

    def _check_distsb_ready(self):
        """Waits for /catvehicle/distanceEstimatorSteeringBased/dist; returns the first message."""
        self.distsb = None
        rospy.logdebug("Waiting for /catvehicle/distanceEstimatorSteeringBased/dist to be READY...")
        while self.distsb is None and not rospy.is_shutdown():
            try:
                self.distsb = rospy.wait_for_message("/catvehicle/distanceEstimatorSteeringBased/dist", Float64, timeout=5.0)
                rospy.logdebug("Current /catvehicle/distanceEstimatorSteeringBased/dist READY=>")
            except rospy.ROSException:
                rospy.logerr("Current /catvehicle/distanceEstimatorSteeringBased/dist not ready yet, retrying for getting dist")
        return self.distsb

    def _check_anglesb_ready(self):
        """Waits for /catvehicle/distanceEstimatorSteeringBased/angle; returns the first message."""
        self.anglesb = None
        rospy.logdebug("Waiting for /catvehicle/distanceEstimatorSteeringBased/angle to be READY...")
        while self.anglesb is None and not rospy.is_shutdown():
            try:
                self.anglesb = rospy.wait_for_message("/catvehicle/distanceEstimatorSteeringBased/angle", Float64, timeout=5.0)
                rospy.logdebug("Current /catvehicle/distanceEstimatorSteeringBased/angle READY=>")
            except rospy.ROSException:
                rospy.logerr("Current /catvehicle/distanceEstimatorSteeringBased/angle not ready yet, retrying for getting angle")
        return self.anglesb

    # Methods that the TrainingEnvironment will need to define here as virtual
    # because they will be used in RobotGazeboEnv GrandParentClass and defined in the
    # TrainingEnvironment.
    # ----------------------------
    def _set_init_pose(self):
        """Sets the Robot in its init pose
        """
        raise NotImplementedError()

    def _init_env_variables(self):
        """Inits variables needed to be initialised each time we reset at the start
        of an episode.
        """
        raise NotImplementedError()

    def _compute_reward(self, observations, done):
        """Calculates the reward to give based on the observations given.
        """
        raise NotImplementedError()

    def _set_action(self, action):
        """Applies the given action to the simulation.
        """
        raise NotImplementedError()

    def _get_obs(self):
        """Returns the current observation; defined by the TrainingEnvironment."""
        raise NotImplementedError()

    def _is_done(self, observations):
        """Checks if episode done based on observations given.
        """
        raise NotImplementedError()

    # Methods that the TrainingEnvironment will need.
    # ----------------------------
    def move_car(self, linear_speed, angular_speed, epsilon=0.05, update_rate=10, min_laser_distance=-1):
        """
        It will move the car based on the linear and angular speeds given.
        (no) It will wait untill those twists are achived reading from the odometry topic.
        :param linear_speed: Speed in the X axis of the robot base frame
        :param angular_speed: Speed of the angular turning of the robot base frame
        :param epsilon: Acceptable difference between the speed asked and the odometry readings
        :param update_rate: Rate at which we check the odometry.
        :return:
        """
        cmd_vel_value = Twist()  # Describes linear motion and angular motion of robot
        cmd_vel_value.linear.x = linear_speed
        cmd_vel_value.angular.z = angular_speed
        rospy.logwarn("CATVehicle Base Twist Cmd>>" + str(cmd_vel_value))
        self._check_publishers_connection()
        self._cmd_vel_pub.publish(cmd_vel_value)
        time.sleep(0.01)  # This is the timespan per timestep?
        # time.sleep(0.02)
        """ # Implement this later?
        self.wait_until_twist_achieved(cmd_vel_value,
                                        epsilon,
                                        update_rate,
                                        min_laser_distance)
        """

    def has_crashed(self, min_distance):
        """
        It states based on the laser scan if the robot has crashed or not.
        Crashed means that the minimum laser reading is lower than the
        min_laser_distance value given.
        If min_laser_distance == -1, it returns always false, because its the way
        to deactivate this check.
        """
        robot_has_crashed = False
        dist = self.distsb.data
        if (dist <= min_distance):
            rospy.logwarn("CATVehicle HAS CRASHED >>> item = " + str(dist)+" < "+str(min_distance))
            robot_has_crashed = True
        return robot_has_crashed

    def get_dist(self):
        """Latest /catvehicle/distanceEstimator/dist message."""
        return self.dist

    def get_angle(self):
        """Latest /catvehicle/distanceEstimator/angle message."""
        return self.angle

    def _dist_callback(self, data):
        self.dist = data

    def _angle_callback(self, data):
        self.angle = data

    def _distsb_callback(self, data):
        self.distsb = data

    def _anglesb_callback(self, data):
        self.anglesb = data

    def _odom_callback(self, data):
        self.odom = data
|
14,212 | 749a5396a5e7b29e619461e1e871fc58b0dfa9fb | class BackupError(Exception):
pass
class RestoreError(Exception):
    """Raised when a restore operation fails."""
|
14,213 | 324648f2ad6d5bc2bf670bbef69210fc0b882ac3 | #encoding=utf-8
import MySQLdb
from config_handler import ConfigParse
class DB(object):
    """Thin MySQL access layer for the interface-test tables.

    Connection settings come from config_handler.ConfigParse.
    """

    def __init__(self):
        self.db_conf = ConfigParse().get_db_conf()
        self.conn = MySQLdb.connect(
            host=self.db_conf["host"],
            port=self.db_conf["port"],
            user=self.db_conf["user"],
            passwd=self.db_conf["password"],
            db=self.db_conf["db"],
            charset="utf8"
        )
        self.cur = self.conn.cursor()

    def close_connect(self):
        # Commit any pending work, then close the cursor and connection.
        self.conn.commit()
        self.cur.close()
        self.conn.close()

    def get_api_list(self):
        """Return all enabled API rows (status=1) as a list of tuples."""
        sqlStr = "select * from interface_api where status=1"
        self.cur.execute(sqlStr)
        # fetchall() returns a tuple of rows; convert to a list.
        apiList = list(self.cur.fetchall())
        return apiList

    def get_api_case(self, api_id):
        """Return all test-case rows for the given api_id.

        FIX: uses a parameterized query instead of %-interpolating api_id
        into the SQL string (avoids SQL injection / quoting bugs).
        """
        sqlStr = "select * from interface_test_case where api_id=%s"
        self.cur.execute(sqlStr, (api_id,))
        api_case_list = list(self.cur.fetchall())
        return api_case_list

    def get_rely_data(self, api_id, case_id):
        """Return the stored dependency data for (api_id, case_id) as a dict."""
        sqlStr = "select data_store from interface_data_store where api_id=%s and case_id=%s"
        self.cur.execute(sqlStr, (api_id, case_id))
        # WARNING: eval() executes arbitrary expressions read from the
        # database. If this column can ever be written by untrusted parties,
        # switch to ast.literal_eval.
        rely_data = eval((self.cur.fetchall())[0][0])
        return rely_data
if __name__ == '__main__':
    # Ad-hoc smoke test (Python 2 print statements); requires a reachable
    # MySQL instance configured via config_handler.
    db = DB()
    print db.get_api_list()
    print db.get_rely_data(1,1)
|
14,214 | da23a4e6ae38b1ead935ae8cde8b44ad49e989e1 | # Upper Confidence Bound
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Ads_CTR_Optimisation.csv')
import math
# UCB hyper-parameters: d candidate ads, N rounds of selection.
d = 10
N = 10000
ads_selected = []

# Implementing UCB
# Step 1: per-ad selection counts and cumulative rewards.
numbers_of_selections = [0] * d
sums_of_rewards = [0] * d
total_reward = 0
# Step 2: each round, play the ad with the highest upper confidence bound.
for n in range(N):
    ad = 0
    max_upper_bound = 0
    for i in range(d):
        if numbers_of_selections[i] > 0:
            # Empirical mean reward of ad i so far.
            average_reward = sums_of_rewards[i] / numbers_of_selections[i]
            # Confidence radius; shrinks as ad i is selected more often.
            delta_i = math.sqrt(3 / 2 * math.log(n + 1) / numbers_of_selections[i])
            upper_bound = average_reward + delta_i
        else:
            # Unseen ads always win, so each ad is tried once in the first d rounds.
            upper_bound = float('inf')
        if upper_bound > max_upper_bound:
            max_upper_bound = upper_bound
            ad = i
    # Step 3: record the choice and credit the observed reward.
    ads_selected.append(ad)
    numbers_of_selections[ad] += 1
    reward = dataset.values[n, ad]
    sums_of_rewards[ad] += reward
    total_reward = sum(sums_of_rewards)
# Visualising the results
# Histogram of how often each ad index was chosen over the N rounds.
plt.hist(ads_selected)
plt.title('Histogram of ads selections')
plt.xlabel('Ads')
plt.ylabel('Number of times each ad was selected')
plt.show() |
14,215 | 26423ccc0a58942a59fe04d6b1f7fd4b3346e0c8 | #!/usr/bin/python
import re
# functions ported from rmgarbage https://github.com/zw/rmgarbage
# Rule L: if string is longer than 40 characters, it is garbage
def too_long(string):
    """Rule L: strings longer than 40 characters are garbage."""
    return len(string) > 40
# Rule A: if a string's ratio of alphanumeric characters to total characters
# is less than 50%, the string is garbage
def bad_alnum_ratio(string):
    """Rule A: garbage if under 50% of the characters are alphanumeric.

    Very short strings get relaxed thresholds: 1-2 chars may be entirely
    non-alphanumeric; 3-5 chars need only one or two alphanumerics.
    """
    non_alnum = re.compile(r'[\W_]+')  # strips everything except [A-Za-z0-9]
    relaxed = {1: 0,      # single chars can be non-alphanumeric
               2: 0,      # so can doublets
               3: 0.32,   # at least one of three should be alnum
               4: 0.24,   # at least one of four should be alnum
               5: 0.39}   # at least two of five should be alnum
    if not string:
        return True  # avoid division by zero; empty counts as garbage
    threshold = relaxed.get(len(string), 0.5)
    alnum_count = len(non_alnum.sub('', string))
    return float(alnum_count) / len(string) < threshold
# Rule R: if a string has 4 identical characters in a row, it is garbage
def has_4_consecutive(string):
    """Rule R: garbage if any character repeats 4 or more times in a row."""
    quad = re.compile(r'(.)\1{3}')  # any char followed by three copies of itself
    return quad.search(string) is not None
# Rule V: if a string has nothing but alphabetic characters, look at the
# number of consonants and vowels. If the number of one is less than 10% of
# the number of the other, then the string is garbage.
# This includes a length threshold.
def bad_cons_vowel_ratio(string):
    """Rule V: for alphabetic content, garbage when vowels and consonants
    are wildly unbalanced.

    Garbage if the vowel/consonant ratio is below 0.1 or above 10, or if a
    string has no vowels and more consonants than 'rhythms' (7), or no
    consonants and more vowels than 'IEEE' (4).
    """
    # FIX: was len(filter(str.isalpha, string)), which raises TypeError on
    # Python 3 (filter returns an iterator). ''.join keeps the identical
    # Python 2 behaviour while also working on Python 3.
    alpha_string = ''.join(char for char in string if char.isalpha())
    vowel_count = sum(1 for char in alpha_string if char in 'aeiouAEIOU')
    consonant_count = len(alpha_string) - vowel_count
    if (consonant_count > 0 and vowel_count > 0):
        ratio = float(vowel_count)/consonant_count
        if (ratio < 0.1 or ratio > 10):
            return True
    elif (vowel_count == 0 and consonant_count > len('rhythms')):
        return True
    elif (consonant_count == 0 and vowel_count > len('IEEE')):
        return True
    return False
# Rule P: "Strip off the first and last characters of a string. If there
# are two distinct punctuation characters in the result, then the string is
# garbage"
#
# Customisation: stripping off the last TWO characters as false positives
# included those ending with ').' and similar.
def has_two_distinct_puncts_inside(string):
    """Rule P (customised): strip the first char and last two chars; garbage
    if what remains contains two *different* punctuation characters."""
    inner_puncts = [char for char in string[1:-2] if not char.isalnum()]
    return len(set(inner_puncts)) > 1
# Rule C: "If a string begins and ends with a lowercase letter, then if
# the string contains an uppercase letter anywhere in between, then it is
# removed as garbage."
#
# Customisation: false positive on "needed.The". Exclude fullstop-capital.
# Extra customisation: Exclude hyphen-capital, apostrophe-capital and
# forwardslash-capital
def has_uppercase_within_lowercase(string):
    """Rule C (customised): a string starting and ending lowercase is garbage
    if an uppercase letter appears in between, unless that letter directly
    follows '.', '-' or an apostrophe (e.g. "needed.The", "o'Clock")."""
    if not (string and string[0].islower() and string[-1].islower()):
        return False
    middle = string[1:-1]
    for idx, ch in enumerate(middle):
        if not ch.isupper():
            continue
        if idx > 0 and middle[idx - 1] in ".-'":
            continue  # excused: follows fullstop/hyphen/apostrophe
        return True
    return False
def is_garbage(string):
    """Apply every rmgarbage rule in order; return the letter code of the
    first rule that fires ('L','A','R','V','P','C'), or False if clean."""
    rules = (
        (too_long, 'L'),
        (bad_alnum_ratio, 'A'),
        (has_4_consecutive, 'R'),
        (bad_cons_vowel_ratio, 'V'),
        (has_two_distinct_puncts_inside, 'P'),
        (has_uppercase_within_lowercase, 'C'),
    )
    for rule, code in rules:
        if rule(string):
            return code
    return False
if __name__ == '__main__':
    # test rmgarbage: scan every .txt file under a corpus root and print the
    # rule code + token for each string flagged as garbage.
    # NOTE: Python 2 only (print statements, raw_input); requires nltk.
    import sys
    from nltk.corpus import PlaintextCorpusReader
    if len(sys.argv) > 1:
        root = sys.argv[1]
    else:
        root = raw_input("Directory root of corpus to be tested: ")
    corpus = PlaintextCorpusReader(root, '.*\.txt')
    for iD in corpus.fileids():
        print "Fileid: ", iD
        for string in corpus.raw(iD).split():
            returnValue = is_garbage(string)
            if returnValue:
                print returnValue, string
|
14,216 | 23a7b405d57a823299a5895afe527ea763b4fac1 | ###############################################################################################################
###################################################SREENIDHIN C C##############################################
###############################################################################################################
"""
3.4
Question:
Write a program which can filter even numbers in a list by using filter function. The list is: [1,2,3,4,5,6,7,8,9,10].
"""
li = [1,2,3,4,5,6,7,8,9,10]
print filter(lambda x: x%2==0, li)
|
14,217 | c6183f4770320865c91b4641382a01bc7a6362c1 | import os
import sys
from machine import *
class Cluster:
def __init__(self, machineConfig, machinesPerType, minCpu, minMem, jobSizeThreshold , smallJobThreshold, largeJobThreshold ):
self.numMachines = 0
self.machines = []
self.machinesByType = {}
self.cpuUsage = 0
self.memUsage = 0
self.totCpu = 0
self.totMem = 0
self.jobSizeThreshold = jobSizeThreshold
self.machineConfig = machineConfig
self.machinesPerType = machinesPerType
self.freeMiceMachines = []
print "Creating machines "
for i in range(len(machinesPerType)):
self.numMachines += machinesPerType[i]
mem = machineConfig[i][0]
cpu = machineConfig[i][1]
self.machinesByType[i] = []
for j in range( machinesPerType[i] ):
m = Machine(cpu, mem, minCpu, minMem, self , jobSizeThreshold, smallJobThreshold, largeJobThreshold )
self.machines.append(m)
self.machinesByType[i].append(m)
self.totCpu += cpu
self.totMem += mem
self.freeMiceMachines.append(m)
print "Created : ", len(self.machines) , "machines"
print "Tot mem : " , self.totMem, "Tot cpu : ", self.totCpu
def getJobSizeThreshold(self):
return self.jobSizeThreshold
def getCpuUtil(self):
return float(cpuUsage) / float(totCpu)
def getMemUtil(self):
return float(memUsage) / float(totMem)
def getCpuUsage(self):
return cpuUsage
def getMemUsage(self):
return memUsage
|
14,218 | dd1f4261852ef6057ccdc85daf53835561b56e6b | import asyncio
import logging
import logging.config
import sys
from .config import ReceptorConfig
from .diagnostics import log_buffer
from .logstash_formatter.logstash import LogstashFormatter
logger = logging.getLogger(__name__)
def main(args=None):
    """Receptor entry point: validate config, configure logging, and run.

    Exits with status 1 on configuration errors or unexpected runtime
    failures; a cancelled asyncio task is treated as a normal shutdown.
    """
    try:
        config = ReceptorConfig(args)
    except Exception as e:
        # FIX: "occured" -> "occurred"; use lazy %-args so the message is
        # only formatted when the record is emitted.
        logger.error("An error occurred while validating the configuration options:\n%s", e)
        sys.exit(1)
    logging.config.dictConfig(
        {
            "version": 1,
            "disable_existing_loggers": False,
            "formatters": {
                "simple": {
                    "format": "{levelname} {asctime} {node_id} {module} {message}",
                    "style": "{",
                },
                "structured": {"()": LogstashFormatter},
            },
            "handlers": {
                "console": {
                    "class": "logging.StreamHandler",
                    # Formatter choice follows the configured logging format.
                    "formatter": "structured"
                    if config.default_logging_format == "structured"
                    else "simple",
                }
            },
            "loggers": {
                "receptor": {
                    "handlers": ["console"],
                    "level": "DEBUG" if config.default_debug else "WARN",
                }
            },
        }
    )

    def _f(record):
        # Stamp every record with this node's id (used by the formatters) and
        # keep recent errors in the diagnostics ring buffer.
        record.node_id = config.default_node_id
        if record.levelno == logging.ERROR:
            log_buffer.appendleft(record)
        return True

    for h in logging.getLogger("receptor").handlers:
        h.addFilter(_f)
    try:
        config.go()
    except asyncio.CancelledError:
        pass  # cancelled event loop task == clean shutdown
    except Exception:
        logger.exception("main: an error occurred while running receptor")
        sys.exit(1)
if __name__ == "__main__":
    # We were run with python -m (or directly); delegate to main().
    main()
|
14,219 | 904cb530fbb1afdf349391b78d63420401a2b6d7 | from flask import Flask, render_template # 모듈을 임포트 시켜서 파이썬 파일을 실행할 수 있다. ex) import hello
import datetime
import random
app = Flask(__name__)
@app.route("/")  # decorator: registers this view at the site root (endpoint)
def hello():  # view function for "/": renders the index template
    # return "Hello Ssafy!"
    return render_template('index.html')  # render_template('<template filename>')
@app.route('/ssafy')
def ssafy():
    """Plain-text greeting for /ssafy."""
    return 'Hello SSAFY'
@app.route('/dday')  # route paths must always start with '/'; otherwise Flask won't match
def dday():
    """Show how many days remain until (or since) 2019-05-21."""
    today = datetime.datetime.now()
    b_day = datetime.datetime(2019, 5, 21)
    td = b_day - today  # timedelta; negative .days once the date has passed
    return f'{td.days} 일 남았습니다.'
@app.route('/html')
def html():
    """Return a raw HTML fragment directly (no template)."""
    return '<h1>This is HTML h1 tag! </h1>'
@app.route('/html_lines')
def html_lines():
    """Return a multi-line HTML fragment from a triple-quoted string."""
    return '''
    <h1>여러 줄을 보내봅시다.</h1>
    <ul>
        <li>1번</li>
        <li>2번</li>
    </ul>
    '''
# Variable Routing
@app.route('/greeting/<name>')  # e.g. /greeting/IU
def greeting(name):  # name is captured from the URL path (e.g. "IU")
    """Render a personalised greeting for the captured <name>."""
    return render_template('greeting.html', html_name=name)
@app.route('/cube/<int:num>')  # int converter: only integer paths match, bound as `num`
def cube(num):
    """Render num cubed via the cube template."""
    result = num ** 3
    return render_template('cube.html', num=num, result=result)
# Exercise
@app.route('/lunch/<int:people>')
def lunch(people):
    """Pick `people` distinct random items from the menu and show them.

    NOTE(review): random.sample raises ValueError when people exceeds the
    menu size (3), which surfaces as a 500 -- confirm that is acceptable.
    """
    menu = ['부대찌개', '김치찌개', '된장찌개']
    # random.sample draws `people` distinct items without replacement
    order = random.sample(menu, people)
    return str(order)
@app.route('/movie')
def movie():
    """Render a hard-coded movie list via the movie template."""
    movies = ['스파이더맨', '엔드게임', '기생충', '알라딘']
    return render_template('movie.html', movies=movies)
# HTML templates live in the `templates` folder
if __name__ == '__main__':
    # Run the development server with auto-reload and the debugger enabled.
    app.run(debug=True)
14,220 | 0bf35e7b9d877b47699d4658f80082593130098e | __author__ = 'cmccully'
from matplotlib import pyplot
from glob import glob
from astropy.io import fits
import numpy as np
from astropy.io import ascii
def setup_plot():
    """Apply the publication rc style used by the figures in this module."""
    style = {
        'axes.titlesize': 'x-large',
        'axes.labelsize': 'x-large',
        'axes.labelweight': 'normal',
        'lines.linewidth': 1.8,
        'axes.linewidth': 1.8,
        'xtick.labelsize': 'x-large',
        'xtick.major.pad': 8,
        'xtick.major.size': 8,
        'xtick.major.width': 1.8,
        'xtick.minor.pad': 8,
        'xtick.minor.size': 4,
        'xtick.minor.width': 1.0,
        'ytick.labelsize': 'x-large',
        'ytick.major.pad': 8,
        'ytick.major.size': 8,
        'ytick.major.width': 1.8,
        'ytick.minor.pad': 8,
        'ytick.minor.size': 4,
        'ytick.minor.width': 1.0,
    }
    # One bulk update is equivalent to assigning each key individually.
    pyplot.rcParams.update(style)
def parse():
    """Collect MJD, airmass and site code from every FITS file in the
    current directory and write them to airmass.dat."""
    airmasses = []
    mjds = []
    sites = []
    for filename in glob('*.fits'):
        airmasses.append(fits.getval(filename, 'AIRMASS'))
        mjds.append(fits.getval(filename, 'MJD-OBS'))
        sites.append(filename[:3])  # site code is the 3-letter filename prefix
    ascii.write([np.array(mjds), np.array(airmasses), np.array(sites)],
                'airmass.dat', names=['mjd', 'airmass', 'site'])
def plot():
    """Plot airmass vs MJD per observing site from airmass.dat and save
    he0435_airmass.pdf."""
    # Read in the airmass terms and the MJDs
    data = ascii.read('airmass.dat')
    mjds, airmasses, sites = data['mjd'], data['airmass'], data['site']
    setup_plot()
    colors = {'lsc':'blue', 'cpt':'red', 'coj': 'green'}
    for site in ['lsc', 'cpt', 'coj']:
        where_site = sites == site
        pyplot.plot(mjds[where_site] - 57000, airmasses[where_site],
                    'o', color=colors[site])
    pyplot.xlim(7.7, 10.3)
    # y limits are reversed on purpose: lower airmass (better) plots higher.
    pyplot.ylim(2.35, 0.95)
    pyplot.xlabel('MJD - 57000')
    pyplot.ylabel('Airmass')
    # Double-headed arrow marking the bad-weather gap in the coverage.
    a = pyplot.annotate("", xy=(8.75, 1.2), xycoords='data',xytext=(8.30, 1.2), textcoords='data',
                        arrowprops={'arrowstyle':"<->"})
    a.arrow_patch.set_linewidth(2)
    pyplot.text(8.525, 1.17,'Bad Weather', ha='center', fontsize='medium')
    pyplot.legend(labels=['Chile', 'South Africa', 'Australia'], loc=3)
    pyplot.savefig('he0435_airmass.pdf', bbox_inches='tight', pad_inches=0.05)
    pyplot.show()
    # Restore matplotlib defaults so setup_plot() doesn't leak into later plots.
    pyplot.rcdefaults()
|
14,221 | 984d29009c36797e9db376dbdc94754c38b3f613 | """
Création du TEIheader pour le catalogue d'exposition
D'après un programme récupérant ces mêmes types d'informations réalisées par Claire Jahan.
Author:
Juliette Janès, 2021
Esteban Sánchez Oeconomo, 2022
"""
from lxml import etree as ET
from ..variables import contenu_TEI
def creation_header():
    """
    Build the teiHeader element tree for an exhibition catalogue. Textual
    values are pulled from the templates in the `variables` package
    (contenu_TEI); empty elements are left for later completion.
    :return: tei_header
    :rtype: lxml.etree._ElementTree
    """
    # fileDesc: title statement with metadata/data editors (names left empty).
    tei_header = ET.Element("teiHeader")
    fileDesc = ET.SubElement(tei_header, "fileDesc")
    titleStmt = ET.SubElement(fileDesc, "titleStmt")
    title = ET.SubElement(titleStmt, "title")
    editor_metadata = ET.SubElement(titleStmt, "editor", role="metadata")
    persName_editor_metadata = ET.SubElement(editor_metadata, "persName")
    editor_data = ET.SubElement(titleStmt, "editor", role="data")
    persName_editor_data = ET.SubElement(editor_data, "persName")
    # publicationStmt: project name, director, organisation and address.
    publicationStmt = ET.SubElement(fileDesc, "publicationStmt")
    publisher = ET.SubElement(publicationStmt, "publisher")
    name_publisher = ET.SubElement(publisher, "name")
    commentaire = ET.Comment(' === Nom du projet, directeur/directrice, organisme et adresse === ')
    publisher.insert(0, commentaire)
    name_publisher.text = contenu_TEI.name
    persName_publisher = ET.SubElement(publisher, "persName", type="director")
    persName_publisher.text = contenu_TEI.persName
    orgName = ET.SubElement(publisher, "orgName")
    orgName.text = contenu_TEI.orgName
    address = ET.SubElement(publisher, "address")
    addrLine = ET.SubElement(address, "addrLine")
    addrLine.text = contenu_TEI.addrLine
    postCode = ET.SubElement(address, "postCode")
    postCode.text = contenu_TEI.postCode
    settlement = ET.SubElement(address, "settlement")
    settlement.text = contenu_TEI.settlement
    date = ET.SubElement(publicationStmt, "date", when=contenu_TEI.date)
    date.text = contenu_TEI.date
    availability = ET.SubElement(publicationStmt, "availability")
    licence_text = ET.SubElement(availability, "licence", target=contenu_TEI.licence_target)
    licence_text.text = contenu_TEI.licence
    # sourceDesc: bibliographic description of the printed catalogue.
    sourceDesc = ET.SubElement(fileDesc, "sourceDesc")
    bibl = ET.SubElement(sourceDesc, "bibl", type="exhibition_catalog")
    title_source = ET.SubElement(bibl, "title")
    author_source = ET.SubElement(bibl, "author")
    publisher_source = ET.SubElement(bibl, "publisher")
    pubPlace_source = ET.SubElement(bibl, "pubPlace")
    date_source = ET.SubElement(bibl, "date")
    relatedItem = ET.SubElement(bibl, "relatedItem")
    msDesc = ET.SubElement(relatedItem, "msDesc")
    commentaire = ET.Comment(" === organisme conservant l'objet numérisé === ")
    msDesc.insert(0, commentaire)
    msIdentifier = ET.SubElement(msDesc, "msIdentifier")
    repository = ET.SubElement(msIdentifier, "repository")
    repository.text = contenu_TEI.repository
    additional = ET.SubElement(msDesc, "additional")
    surrogates = ET.SubElement(additional, "surrogates")
    ref = ET.SubElement(surrogates, "ref")
    name_dig = ET.SubElement(surrogates, "name", role="digitisation")
    extent = ET.SubElement(bibl, "extent")
    # listEvent: the exhibition the catalogue refers to (mandatory attributes).
    listEvent = ET.SubElement(sourceDesc, "listEvent")
    commentaire = ET.Comment(" === informations sur l'événement référé par le catalogue. Attributs obligatoires === ")
    listEvent.insert(0, commentaire)
    event = ET.SubElement(listEvent, "event", type="exhibition", subtype=contenu_TEI.event_subtype)
    # "from" is a Python keyword, so it cannot be passed as a kwarg above.
    event.attrib["from"] = contenu_TEI.event_from
    event.attrib["to"] = contenu_TEI.event_to
    head_event = ET.SubElement(event, "head", type=contenu_TEI.event_head_type)
    p_event = ET.SubElement(event, "p")
    profileDesc = ET.SubElement(tei_header, "profileDesc")
    encodingDesc = ET.SubElement(tei_header, "encodingDesc")
    # encodingDesc.attrib["{http://www.w3.org/XML/1998/namespace}ns:tei"]="http://www.tei-c.org/ns/1.0"
    # encodingDesc.attrib["{http://www.w3.org/XML/1998/namespace}ns:s"]="http://purl.oclc.org/dsdl/schematron"
    samplingDesc = ET.SubElement(encodingDesc, "samplingDecl")
    p_samplingDesc = ET.SubElement(samplingDesc, "p")
    p_samplingDesc.text = """This electronic version of the catalog only reproduces the entries that
    correspond to exhibited works. All text preceding or succeeding the list
    of documents is not reproduced below."""
    appInfo = ET.SubElement(encodingDesc, "appInfo")
    commentaire = ET.Comment(" === logiciel utilisé pour la transcription. Attributs obligatoires === ")
    appInfo.insert(0, commentaire)
    application = ET.SubElement(appInfo, "application", version=contenu_TEI.application_version, ident=contenu_TEI.application_ident)
    label = ET.SubElement(application, "label")
    label.text = contenu_TEI.application_ident
    ptr = ET.SubElement(application, "ptr", target=contenu_TEI.application_pointer)
    revisionDesc = ET.SubElement(tei_header, "revisionDesc")
    return tei_header
|
14,222 | 9762edbc626d875c750cc8c1cbd196f88f3b1590 | import torch
import jieba
import tensorflow as tf
from bert4keras.tokenizers import Tokenizer
from bert4keras.models import build_transformer_model
from model.modeling_roformer import RoFormerModel
# Parity check: run the same sentence through the converted PyTorch RoFormer
# and the original bert4keras/TF checkpoint, then print the output difference.
jieba.initialize()
config_path = 'E:/BaiduNetdiskDownload/chinese_roformer_L-12_H-768_A-12/bert_config.json'
checkpoint_path = 'E:/BaiduNetdiskDownload/chinese_roformer_L-12_H-768_A-12/bert_model.ckpt'
dict_path = 'E:/BaiduNetdiskDownload/chinese_roformer_L-12_H-768_A-12/vocab.txt'
converted_ckpt_path = "pretrained_models/chinese_roformer_base"
# Word-level pre-tokenisation via jieba (HMM disabled), as the checkpoint expects.
tokenizer = Tokenizer(dict_path,
                      do_lower_case=True,
                      pre_tokenize=lambda s: jieba.cut(s, HMM=False))
text = "这里基本保留了唐宋遗留下来的坊巷格局和大量明清古建筑,其中各级文保单位29处,被誉为“里坊制度的活化石”“明清建筑博物馆”!"
inputs = tokenizer.encode(text)  # (token_ids, segment_ids)
pt_model = RoFormerModel.from_pretrained(converted_ckpt_path)
pt_inputs = {
    "input_ids": torch.tensor(inputs[0]).long()[None],
    "token_type_ids": torch.tensor(inputs[1]).long()[None]
}
with torch.no_grad():
    o1 = pt_model(**pt_inputs)[0]  # PyTorch hidden states
model = build_transformer_model(config_path=config_path,
                                checkpoint_path=checkpoint_path,
                                model='roformer')
tf_inputs = [
    tf.convert_to_tensor(inputs[0])[None],
    tf.convert_to_tensor(inputs[1])[None]
]
o2 = torch.tensor(model(tf_inputs).numpy())  # TF hidden states as torch tensor
print("mean diff :", (o1 - o2).abs().mean())
print("max diff :", (o1 - o2).abs().max())
14,223 | 19d4011674c461e6150ead02efcb358f489d2bf6 | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 1 15:05:04 2019
@author: Lizerhigh
"""
from pandas import DataFrame
from pandas import read_json
def sk(val):
    """Sort key: the frequency stored as the second item of a pair."""
    return val[1]
def unpack(s):
    """Recursively flatten a parenthesised byte string into a
    {inner-symbol: suffix-bytes} mapping.

    A string with no '(' maps to itself with an empty suffix. Otherwise
    every top-level '(...)' group is recursed into, and the bytes between
    that group's ')' and the next group's '(' (or end of string) are
    appended to each inner symbol's accumulated suffix.
    """
    if b'(' not in s:
        return {s: b''}
    # Locate every top-level (...) span as (open_idx, close_idx).
    spans = []
    depth = 0
    start = 0
    for idx in range(len(s)):
        if s[idx:idx + 1] == b'(':
            if depth == 0:
                start = idx
            depth += 1
        elif s[idx:idx + 1] == b')':
            depth -= 1
            if depth == 0:
                spans.append((start, idx))
    result = {}
    last = len(spans) - 1
    for pos, (open_i, close_i) in enumerate(spans):
        # Bytes trailing this group, up to the next group (or end of string).
        tail = s[close_i + 1:] if pos == last else s[close_i + 1:spans[pos + 1][0]]
        for key, suffix in unpack(s[open_i + 1:close_i]).items():
            result[key] = suffix + tail
    return result
def f_codes(d, base):
    """Build a prefix-code table from a {symbol: frequency} dict by
    repeatedly merging the `base` least-frequent entries (Huffman-style,
    base 2 in this program). The merge tree is encoded as a parenthesised
    byte string; once one entry remains, unpack() flattens it and each
    accumulated code is reversed into its final bit order.

    NOTE(review): keys are assumed to be bytes (they are concatenated with
    b'(' / b')') -- confirm against compress()/decompress() callers.
    """
    if len(d) == 1:
        #print(list(d.keys()))
        # Base case: flatten the tree and reverse each code (codes were
        # accumulated leaf-to-root).
        return {k: v[::-1] for k, v in unpack(list(d.keys())[0]).items()}
    # Re-order by ascending frequency so the first `base` entries are the rarest.
    d = {k:v for k, v in sorted(list(d.items()), key=sk)}
    mins = b''
    mins_v = 0
    d_keys = list(d.keys())
    d_vals = list(d.values())
    #print(d_keys)
    for i in range(base):
        # Wrap each merged child in parentheses and append its digit
        # (base-i-1) as the code symbol for this tree level.
        mins += b'('+d_keys[i]+b')'+enc(str(base-i-1))+b''
        mins_v += d[d_keys[i]]
    #print(mins)
    # Rebuild the dict without the merged entries...
    d_tmp = {}
    for i in range(base, len(d)):
        d_tmp[d_keys[i]] = d_vals[i]
    d = d_tmp
    # ...then insert the merged node with the combined frequency and recurse.
    d[mins] = mins_v
    #print(d)
    return f_codes(d, base)
def dec(s):
    """Decode UTF-8 bytes to str."""
    return s.decode('utf-8')
def enc(s):
    """Encode str to UTF-8 bytes."""
    return s.encode('utf-8')
def encf(s):
    """Convert a bit string (e.g. '00001010') to the corresponding bytes.

    FIX: the previous implementation built a bytes literal as text and ran
    eval() on it. Besides being an eval on constructed input, a "\\xNN"
    escape only consumes two hex digits, so any payload longer than one
    byte was silently mis-decoded. bytes.fromhex handles all lengths.
    """
    hex_digits = hex(int(s, 2))[2:]
    # Left-pad with a zero so the hex string covers whole bytes.
    hex_digits = '0' * (len(hex_digits) % 2) + hex_digits
    return bytes.fromhex(hex_digits)
def p(arg):
    """Sort key: length of the code (first element of a (code, symbol) pair)."""
    return len(arg[0])
def add_z(s):
    """Left-pad a bit string with b'0' up to a whole number of 8-bit groups."""
    pad = (8 - len(s) % 8) % 8
    return b'0' * pad + s
def compress(file, blen, debug = False):
    """Compress `file` into `file`.arc using a prefix code over fixed-size
    byte blocks of length `blen`.

    Archive layout (one field per line): blen, padding-block count,
    leading-zero-bit count, ':'-joined bracket positions, table size,
    then one (block + frequency) line per table entry, then the payload.
    Parentheses are stripped from the input first (their positions are
    recorded) because '(' / ')' delimit the code tree in f_codes().
    """
    f = open(file, 'rb')
    mes = b''.join(f.readlines())
    # Record every '(' (byte 40) and ')' (byte 41) position so decompress()
    # can re-insert them; the flag b''/b'1' distinguishes open from close.
    brackets = []
    for i in range(len(mes)):
        if mes[i] == 40:
            brackets.append(enc(str(i)))
            brackets.append(b'')
        if mes[i] == 41:
            brackets.append(enc(str(i)))
            brackets.append(b'1')
    mes = mes.replace(b'(', b'').replace(b')', b'')
    f.close()
    # Pad with b'0' so the message splits evenly into blen-sized blocks.
    add_b = (blen-len(mes)%blen)%blen
    #print(add_b)
    mes = mes + b'0'*add_b
    mes = [mes[i*blen:(i+1)*blen] for i in range(len(mes)//blen)]
    if debug:
        print(f'Splitted message: {mes} - len = {len(mes)}')
    # Block frequency table.
    d = {}
    for x in mes:
        if x in d:
            d[x] += 1
        else:
            d[x] = 1
    if debug:
        print(f'Freq of block: {d}')
    # Keep the raw frequencies (d1) for the header; d becomes the code table.
    d1 = {k: v for k, v in d.items()}
    d = f_codes(d, 2)
    if debug:
        print(f'Replace table: {d}')
    # Replace every block with its bit-code, then pad to a whole byte count.
    mes = b''.join([d[x] for x in mes])
    add_zero = (8-len(mes)%8)%8
    #print(add_zero)
    mes = b'0'*add_zero+mes
    if debug:
        print(f'Encoded message (bin): "{mes}" - len = {len(mes)}')
    # Pack each group of 8 bit-characters into one real byte.
    mes = b''.join(list(map(encf, [mes[i*8:i*8+8] for i in range(len(mes)//8)])))
    if debug:
        print(f'Encoded message (1byte): "{mes}" - len = {len(mes)}')
    # Header line, per-block frequency lines (newlines escaped), then payload.
    lines = [enc(str(blen)) + b'\n' + enc(str(add_b)) + b'\n' + enc(str(add_zero)) + b'\n' + b':'.join(brackets) + b'\n' + enc(str(len(d))) + b'\n']
    for i in d1:
        lines.append(i.replace(b'\n', b'\\n').replace(b'\r', b'\\r')+enc(str(d1[i]))+b'\n')
    lines.append(mes)
    f = open(f'{file}.arc', 'wb')
    f.writelines(lines)
    f.close()
def decompress(file):
    """Reverse compress(): rebuild the original payload from '<file>.arc'.

    Reads the header, reconstructs the identical code table via f_codes,
    decodes the bit stream greedily (shortest codes tried first), strips
    the padding and re-inserts the recorded '(' / ')' characters. Writes
    the result to 'dec_<file without .arc>'.
    """
    f = open(file, 'rb')
    lines = f.readlines()
    f.close()
    blen = int(lines[0][:-1])       # block length used by compress()
    add_b = int(lines[1][:-1])      # payload padding bytes to strip at the end
    add_zero = int(lines[2][:-1])   # bit padding to drop from the front
    brackets = lines[3][:-1].split(b':')
    #print(brackets)
    len_d = int(lines[4][:-1])
    # Rebuild the frequency table exactly as compress() saved it.
    d = {}
    for i in range(5, len_d+5):
        current = lines[i].replace(b'\\n', b'\n').replace(b'\\r', b'\r')
        d[current[:blen]] = int(current[blen:-1])
    d = f_codes(d, 2)
    # Invert to code -> block, iterating shortest codes first (p = code length).
    d = {v: k for k, v in sorted(d.items(), key=p)}
    #print(d)
    mes = b''.join(lines[len_d+5:])
    # Unpack bytes back into a binary digit string, dropping the front padding.
    mes = (b''.join([add_z(enc(bin(x)[2:])) for x in mes]))[add_zero:]
    ret = b''
    # NOTE(review): if no code matched the remaining prefix this loop would
    # spin forever; archives produced by compress() always match.
    while mes:
        for i in d:
            if mes.startswith(i):
                ret += d[i]
                mes = mes[len(i):]
    ret = ret[:len(ret)-add_b]
    # Re-insert brackets at their recorded offsets (empty flag = '(', else ')').
    for i in range(0, len(brackets), 2):
        index = int(dec(brackets[i]))
        ret = ret[:index] + (b')' if brackets[i+1] else b'(') + ret[index:]
    f = open('dec_'+file.replace('.arc', ''), 'wb')
    f.writelines([ret])
    f.close()
14,224 | 26e35d4e6d3af654c9d5623e48be9e364cd18e13 | #!/usr/bin/env python3
import re
import os
import sys
import getopt
import shutil
# Parse -i/-o command-line options.
inputfile = ''
outputfile = ''
try:
    opts, args = getopt.getopt(sys.argv[1:],"hi:o:",["ifile=","ofile="])
except getopt.GetoptError:
    print (sys.argv[0] + " -i <inputfile> -o <outputfile>")
    sys.exit(2)
for opt, arg in opts:
    if opt == '-h':
        print (sys.argv[0] + " -i <inputfile> -o <outputfile>")
        print ("Where <inputfile> is a file containing the output of both 'ps -ef' and 'lshal' from an Android target")
        sys.exit()
    elif opt in ("-i", "--ifile"):
        inputfile = arg
    elif opt in ("-o", "--ofile"):
        outputfile = arg
if inputfile == "":
    print ("Missing input file")
    print (sys.argv[0] + " -i <inputfile> -o <outputfile>")
    sys.exit(2)
foundPidTable = False
pidToName = {}
# BUG FIX: hwservicemanagerPid was only ever assigned when 'hwservicemanager'
# appeared in the ps output; inputs without it crashed with a NameError in
# the output loop below. Initialise it explicitly and guard its use.
hwservicemanagerPid = None
# The input file containing the output from a 'lshal' and a 'ps -ef' will be
# read twice, to keep the parsing of the two sections independent.
#
# Step 1. Read and parse the process ids and corresponding process names from 'ps -ef'
#
with open(inputfile) as file_object:
    for line in file_object:
        line = line.rstrip()
        matchPidHead = re.match(r' *UID *PID .*CMD', line)
        if matchPidHead:
            foundPidTable = True
        else:
            matchPidLine = re.match(r'[^ ]+ +(\d+) +.* \d\d:\d\d:\d\d (.+) *', line)
            if matchPidLine and foundPidTable:
                pidName = matchPidLine.group(2).split(" ")[0]
                if pidName == 'hwservicemanager':
                    hwservicemanagerPid = matchPidLine.group(1)
                pidToName[matchPidLine.group(1)] = pidName
            else:
                # A non-matching line ends the ps table.
                foundPidTable = False
#print(pidToName)
#
# Step 2. Read interfaces and server + client process ids from 'lshal'
#
HIDLTable = []
foundHIDLTable = 0
with open(inputfile) as file_object:
    for line in file_object:
        line = line.rstrip()
        # Two header lines are matched in sequence before table rows are accepted.
        matchHIDLHead = re.match(r'\| All( | HIDL )binderized services \(registered with hwservicemanager\)', line)
        matchHIDLHead2 = re.match(r'VINTF +R Interface +Thread Use Server Clients', line)
        if matchHIDLHead:
            foundHIDLTable = 1
        elif foundHIDLTable == 1 and matchHIDLHead2:
            foundHIDLTable = 2
        else:
            matchHIDLLine = re.match(r'.{7,13} (.+)::(\S+/\S+) +./.. {7}(\d+|N/A) *(.*)', line)
            if matchHIDLLine and foundHIDLTable == 2:
                # Skip passthrough services (no server pid).
                if matchHIDLLine.group(3) != 'N/A':
                    HIDLTable.append((matchHIDLLine.group(1), matchHIDLLine.group(2), matchHIDLLine.group(3), matchHIDLLine.group(4)))
            else:
                foundHIDLTable = 0
#print(HIDLTable)
#
# Generating the output (to stdout, or to outputfile when one was given).
#
if outputfile != "":
    sys.stdout = open(outputfile, "w")
print("digraph hidl {")
print(" graph [rankdir = \"LR\"];")
print(" node [shape=box];")
for HIDLInterface in HIDLTable:
    _interface = HIDLInterface[0]
    _server = pidToName.get(str(HIDLInterface[2]))
    clients = HIDLInterface[3:][0].split(" ")
    # hwservicemanager is a client of everything; drop it to reduce noise.
    if hwservicemanagerPid is not None and clients.count(hwservicemanagerPid) > 0:
        clients.remove(hwservicemanagerPid)
    if len(clients) == 0:
        # No real client: draw a self-edge so the service still appears.
        print(" \"" + _server + "\" -> \"" + _server + "\" [label=\"" + _interface + "\\n" + HIDLInterface[1] + "\"];")
    else:
        for client in clients:
            _client = pidToName.get(str(client))
            if _client == None:
                continue
            print(" \"" + _client + "\" -> \"" + _server + "\" [label=\"" + _interface + "\"];")
print("}")
sys.stdout = sys.__stdout__
if outputfile != "" and shutil.which("dot") == None:
    print("Now run Graphviz dot:")
    print("dot -Tpng " + outputfile + " -o <mygraphfile>.png")
if outputfile != "" and shutil.which("dot") != None:
    os.system("dot -Tpng " + outputfile + " -o " + outputfile + ".png")
    print("Created " + outputfile + ".png")
    os.system("rm " + outputfile)
|
14,225 | e6fad67bf775277830f3e0d77e448272af106d93 | from tornado.web import RequestHandler
class BaseHandler(RequestHandler):
    """Shared base handler that authenticates users via a signed cookie."""

    def get_current_user(self):
        # Tornado calls this to populate self.current_user; None = anonymous.
        username = self.get_secure_cookie("username")
        return username
14,226 | 9c5724a28e4af1264f732b5c3ae5943f5fd05e15 | """Plot the electric field generated by three point charges in a plane."""
import numpy as np
import pylab as plt
import vectorplot as vp
# Rendering parameters.
dpi = 100
size = 700     # output image is size x size pixels
video = False  # True: render an animated frame sequence instead of one still
# Charges : (q, x, y)
charges = [(10, +0.5, +0.5), (-10, -0.5, -0.5), (4, +0.2, -0.2)]
# Coordinate grids over [-1, 1]; broadcasting row x column gives a 2-D field.
xs = np.linspace(-1, 1, size).astype(np.float32)[None, :]
ys = np.linspace(-1, 1, size).astype(np.float32)[:, None]
u = np.zeros((size, size), dtype=np.float32)  # x-component of the field
v = np.zeros((size, size), dtype=np.float32)  # y-component of the field
for (q, x, y) in charges:
    # (dx^2 + dy^2)**1.5 is |r|^3; dividing the displacement by it yields
    # inverse-square point-charge field components.
    # NOTE(review): the sign is flipped relative to E = q*r/|r|^3 — confirm
    # the intended convention (it only mirrors the picture).
    rsq = ((xs-x)**2+(ys-y)**2)**1.5
    v += -q * (ys-y)/rsq
    u += -q * (xs-x)/rsq
# Random noise texture that the line-integral convolution smears along field lines.
texture = np.random.rand(size, size).astype(np.float32)
plt.bone()  # grayscale colormap
frame = 0
if video:
    # Animated variant: sweep the kernel phase so the streaks appear to flow.
    kernellen = 62
    steps = 125
    turns = 3
    for t in np.linspace(0, turns, steps, endpoint=False):
        kernel = vp.kernels.hanning_ripples(N=kernellen, shift=t)
        kernel = kernel.astype(np.float32)
        image = vp.line_integral_convolution(u, v, texture, kernel)
        plt.clf()
        plt.axis('off')
        plt.figimage(image)
        plt.gcf().set_size_inches((size/float(dpi), size/float(dpi)))
        plt.savefig("efield-%04d.png" % frame, dpi=dpi)
        frame += 1
else:
    # Single still image with a smooth half-sine kernel.
    kernellen = 31
    kernel = np.sin(np.arange(kernellen)*np.pi/kernellen)
    kernel = kernel.astype(np.float32)
    print(u.dtype, v.dtype, texture.dtype, kernel.dtype)
    print(u.shape, v.shape, texture.shape, kernel.shape)
    image = vp.line_integral_convolution(u, v, texture, kernel)
    plt.clf()
    plt.axis('off')
    plt.figimage(image)
    plt.gcf().set_size_inches((size/float(dpi), size/float(dpi)))
    plt.savefig("efield-image.png", dpi=dpi)
|
14,227 | 0b922dc6048c8b99d778578f385585b2e7a46dab | #! /usr/bin/env python
"""Script to see if some standard files have changed
A 'change' is any change to size, mtime or md5sum
"""
from __future__ import print_function
import os
import csv
import sys
import argparse
from datetime import datetime
from bdb import BdbQuit
__version__ = '1.0.0'
def run_args(args, methods):
    """Run any methods eponymous with args"""
    if not args:
        return False
    # Flags that were actually set on the namespace...
    set_flags = {name for name, value in args.__dict__.items() if value}
    # ...and the functions registered under those names.
    to_call = {methods[name] for name in set_flags if name in methods}
    for fn in to_call:
        fn(args)
def version(args):
    """Print *args* followed by the module version, then exit the program."""
    message = '%s %s' % (args, __version__)
    print(message)
    raise SystemExit
def parse_args(methods):
    """Parse out command line arguments.

    run_args() is invoked with the parsed namespace so that flags such as
    --version dispatch to their eponymous functions in *methods*.
    """
    parser = argparse.ArgumentParser(description=__doc__.splitlines()[0])
    parser.add_argument('-v', '--version', action='store_true',
                        help='Show version')
    args = parser.parse_args()
    run_args(args, methods)
    return args
def plastic_date():
    """Return a placeholder that looks like a date but can never parse as one."""
    fake = 'Zun, 99 Zun 9999 99:61:61'
    return fake
class Signature(object):
    """Identifying details for a file: mtime, size and md5sum.

    Fixed for Python 3: __nonzero__ and __cmp__ are Python-2-only hooks, so
    truth testing silently returned True and equality fell back to object
    identity (every comparison reported 'changed'). The class now defines
    __bool__ / __eq__, keeping the old names for Python 2 callers, and
    __cmp__ no longer uses the removed cmp() builtin.
    """

    def __init__(self, t, z, s):
        self._time = t
        self._size = z
        self._sum = s

    def __str__(self):
        return ', '.join([self._time_str(), str(self._size)])

    def __bool__(self):
        # Falsy only for the "no data recorded" sentinel values.
        return bool(self._time) or self._size > -1 or bool(self._sum)

    # Python 2 truth-test hook.
    __nonzero__ = __bool__

    def __eq__(self, other):
        return self.strings() == other.strings()

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(tuple(self.strings()))

    def __cmp__(self, other):
        # Python 2 ordering hook, expressed without the removed cmp() builtin.
        mine, theirs = self.strings(), other.strings()
        return (mine > theirs) - (mine < theirs)

    def _time_str(self):
        """A formatted string for the time (or a plastic_date() placeholder)."""
        try:
            if not self._time:
                raise ValueError
            format_ = '%a, %d %b %Y %H:%M:%S'
            return datetime.fromtimestamp(float(self._time)).strftime(format_)
        except ValueError:
            return plastic_date()

    def strings(self):
        """A list of time, size and sum as strings."""
        return [str(self._time), str(self._size), str(self._sum)]
class EmptySignature(Signature):
    """Sentinel Signature for a file with no recorded details (falsy)."""

    def __init__(self):
        # Zero time, negative size and empty checksum mark "nothing recorded".
        Signature.__init__(self, 0, -1, '')
def pad_keys(items, keys):
    """Ensure every key in *keys* is present in *items*.

    Missing keys are filled with an EmptySignature placeholder; the mapping
    is mutated in place and returned for convenience.
    """
    missing = [key for key in keys if key not in items]
    for key in missing:
        items[key] = EmptySignature()
    return items
def path_to_data():
    """Where we store old values.

    Bug fix: expanduser lives in os.path, not os — the original raised
    AttributeError on every call.
    """
    jab = os.path.expanduser('~/jab')
    return os.path.join(jab, 'local/login_sums.csv')
def write_files(items, path):
    """Write non-empty signature *items* to the CSV file at *path*.

    Bug fixes for Python 3: *path* is a plain string (it has no .open()
    method), csv needs a text-mode stream opened with newline='' rather
    than 'wb', and dict.iteritems() no longer exists.
    """
    with open(path, 'w', newline='') as stream:
        writer = csv.writer(stream)
        for name, signature in items.items():
            if not signature:
                continue  # skip empty/sentinel signatures
            writer.writerow([name] + signature.strings())
def default_values():
    """Gives a dict of default values for the default basenames"""
    basenames = default_basenames()
    return pad_keys({}, basenames)
def write_default_values():
    """Persist the default (empty) values to the data file and return them."""
    defaults = default_values()
    write_files(defaults, path_to_data())
    return defaults
def read_old_values(basenames):
    """Read old date, size and md5sum for those basenames.

    Bug fix: path_to_data() returns a plain string (os.path.join), which
    has no .open() method — use the open() builtin instead.
    """
    result = {}
    with open(path_to_data()) as stream:
        reader = csv.reader(stream)
        for row in reader:
            # Row layout is [basename, time, size, md5sum].
            result[row[0]] = Signature(*row[1:])
    return pad_keys(result, basenames)
def old_values(basenames):
    """Read old date, size and md5sum for those basenames.

    If old values are not present, write defaults.

    Bug fix: the previous version used path.py idioms (.isfile(),
    .parent.makedirs_p()) on a plain string; rewritten with os / os.path.
    """
    p = path_to_data()
    if os.path.isfile(p):
        return read_old_values(basenames)
    # First run: make sure the parent directory exists, then seed defaults.
    os.makedirs(os.path.dirname(p), exist_ok=True)
    return write_default_values()
def text_digest(s):
    """Return the hex md5 digest of *s* (str or bytes).

    Bug fix: hashlib requires bytes on Python 3; str input is encoded as
    UTF-8 instead of raising TypeError.
    """
    import hashlib
    if isinstance(s, str):
        s = s.encode('utf-8')
    return hashlib.md5(s).hexdigest()
def new_values(basenames):
    """Get date, size and md5sum for those basenames in $HOME.

    Bug fix: the previous version used path.py attributes (.size, .mtime,
    .text()) on a plain string; rewritten with os.path and open().
    """
    result = {}
    home = os.environ['HOME']  # hoisted: invariant across basenames
    for basename in basenames:
        p = os.path.join(home, basename)
        if not os.path.isfile(p):
            continue
        size = '%d' % os.path.getsize(p)
        mtime = '%0.8f' % os.path.getmtime(p)
        with open(p) as stream:
            content = stream.read()
        result[basename] = Signature(mtime, size, text_digest(content))
    return pad_keys(result, basenames)
def default_basenames():
    """The dotfiles in $HOME whose changes this script tracks."""
    return ['.bashrc', '.vimrc', '.gitconfig', '.netrc', '.profile']
def script(args):
    """Compare date, size and md5sum for known files in $HOME.

    Prints a line per changed or newly registered file and returns True
    when nothing changed. When anything changed, the new values are
    persisted so the next run compares against them.

    NOTE(review): the equality and truth tests below rely on Signature
    defining __eq__/__bool__ under Python 3 — verify.
    """
    basenames = default_basenames()
    old = old_values(basenames)
    new = new_values(basenames)
    result = True
    for basename in basenames:
        if old[basename] == new[basename]:
            continue
        if old[basename]:
            # Previously seen file whose signature differs.
            print('%s changed:' % basename)
            print('\tOld: %s' % old[basename])
            print('\tNew: %s' % new[basename])
        else:
            # First time this file has a recorded signature.
            print('%s registered:' % basename)
            print('\tNew: %s' % new[basename])
        result = False
    if not result:
        print('')
        write_files(new, path_to_data())
    return result
def main():
    """Run the script"""
    try:
        args = parse_args(globals())  # globals() supplies the flag -> function map
        return os.EX_OK if script(args) else not os.EX_OK
    except BdbQuit:
        pass  # debugger quit: exit quietly
    except SystemExit as e:
        return e.code
    return os.EX_OK
if __name__ == '__main__':
sys.exit(main())
|
14,228 | 4178dce5e2040898102fbde56f0b9ffe61903bb3 | from context import of
from context import ls
import numpy as np
def test_Hessian(F,x_0):
return F.Hessian(x_0)
def test_Gradient(F,x_0):
return F.Gradient(x_0)
def test_NewtonStep(F,x_0):
# Quick and dirty Newton step test.
return -1*np.linalg.solve(F.Hessian(x_0),F.Gradient(x_0))
def test_NewtonMethod(F,x_0,iterations):
    """Run the LineSearch optimizer on F from x_0 for the iteration budget."""
    optimizer = ls.LineSearch(F, x_0, iterations)
    optimizer.Optimize(x_0)
# Objective: f(x, y) = 2x^2 + 2y^2, a convex bowl with its minimum at the origin.
f = lambda x : 2*(x[0]**2) + 2*x[1]**2
F = of.ObjectiveFunction(f,2,1)
x = [1,1]
#print(test_Hessian(F,x))
#print(test_Gradient(F,x))
#print(test_NewtonStep(F,x))
# Deliberately far-away starting point to exercise convergence.
guess = np.array([300, -12])
test_NewtonMethod(F,guess,10)
14,229 | 7a3a6570b80a16eb31f1533d5c580af000efd45c | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 28 13:49:53 2018
@author: dannilew
"""
import sqlite3
connection = sqlite3.connect("week4.db")
#adding data model
cursor = connection.cursor()
# NOTE(review): re-running this script fails because the tables already
# exist; consider CREATE TABLE IF NOT EXISTS.
sql_command = """
CREATE TABLE people (
id integer primary key,
name varchar,
position varchar,
phone varchar,
office varchar
);"""
cursor.execute(sql_command)
sql_command = """
CREATE TABLE experiment (
id integer primary key,
name varchar,
researcher integer,
description text,
foreign key(researcher) references people(id)
);"""
cursor.execute(sql_command)
#adding data for people table
people_data = [ ('Alice', 'Research Director', '555-123-0001', '4b'),
                ('Bob', 'Research Assistant', '555-123-0002', '17'),
                ('Charles', 'Research Assistant', '555-123-0001', '24'),
                ('David', 'Research Assistant', '555-123-0001', '8'),
                ('Edward', 'Toadie', 'None', 'Basement')]
# NOTE(review): building SQL via str.format is injection-prone with
# untrusted input; prefer parameterized queries: cursor.execute(sql, params).
for p in people_data:
    format_str = """INSERT INTO people (id, name, position, phone, office)
VALUES (NULL, "{name}", "{position}", "{phone}", "{office}");"""
    sql_command = format_str.format(name=p[0], position=p[1], phone=p[2], office = p[3])
    cursor.execute(sql_command)
#adding data for experiment table
# NOTE(review): researcher ids 0 and 2 are hard-coded, but SQLite assigns
# the autoincremented people ids starting at 1 — confirm these point at
# the intended researchers.
experiment_data = [('EBV Vaccine trial', 0, 'A vaccine trial'),
                   ('Flu Antibody Study', 2, 'Study of the morphology of flu antibodies')]
for e in experiment_data:
    format_str = """INSERT INTO experiment ( id, name, researcher, description)
VALUES (NULL, "{name}", "{researcher}", "{description}");"""
    sql_command = format_str.format(name=e[0], researcher=e[1], description=e[2])
    cursor.execute(sql_command)
#commit the changes
connection.commit()
connection.close() #close the connection
14,230 | 91c417ef30ee0739a139e017ee1b783dd7706f6c | from libraryClass import *
class Parser:
    """Parses a HashCode book-scanning input file into a HashCode object."""

    def __init__(self, filename='data/a_example.txt'):
        self.filename = filename
        self.hashcode = None  # populated by parse()

    def parse(self, filename):
        """Read *filename* and build the HashCode problem instance.

        NOTE(review): numeric fields (counts, signup times, scores) are
        kept as strings here; confirm downstream code converts them to int.
        """
        with open(filename) as f:
            #data = f.readlines() # each element in data is line from input file
            data = f.read().splitlines()
            # read in header info
            header = [i.rstrip() for i in data[0].split(' ')] # read first line into books, libraries, days
            timeLimit = header[2] # set days
            numOfLibraries = header[1] # set num libraries
            bookdata = [i.rstrip() for i in data[1].split(' ')] # book scores
            ## build up library list (two lines per library: header + book ids)
            libraries = []
            for i in range(2, len(data), 2):
                library = [i.rstrip() for i in data[i].split(' ')]
                numOfBooks = library[0]
                timeToSignup = library[1]
                maxBooksPerDay = library[2]
                bookline = [i.rstrip() for i in data[i+1].split(' ')]
                librarybooks = []
                ## build book objects for given library
                for id in range(len(bookline)):
                    book = Book(bookline[id], bookdata[int(bookline[id])])
                    librarybooks.append(book)
                # NOTE(review): i advances by 2, so libId takes values
                # 0, 2, 4, … (a line offset, not a sequential library
                # index) — confirm this is intended.
                libId = (i-2)
                # libraryclass init : def __init__(self, numOfBooks, libId, timeToSignup, maxBooksPerDay):
                lib = LibraryClass(numOfBooks, libId, timeToSignup, maxBooksPerDay)
                lib.books = librarybooks
                libraries.append(lib)
            # def __init__(self, numOfLibraries):
            hashcode = HashCode(numOfLibraries)
            hashcode.timeLimit = int(timeLimit)
            hashcode.libraries = libraries
            self.hashcode = hashcode
            return self.hashcode
"""
if __name__ == "__main__":
parser = Parser()
hashcode = parser.parse(parser.filename)
print(hashcode.libraries[0].numOfBooks)
print(hashcode.libraries[0].timeToSignup)
print(hashcode.libraries[0].maxBooksPerDay)
for i in hashcode.libraries[1].books:
print(str(i.bookId)+' '+str(i.score))
#print(hashcode.check())
""" |
14,231 | 1ec802d2b5d1921e06b8e5ee6663d9048f28a9e5 | # Generated by Django 2.2.5 on 2019-11-12 03:36
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make FarmOwner.email unique (schema change only)."""

    dependencies = [
        ('homepage', '0006_auto_20191111_1825'),
    ]

    operations = [
        migrations.AlterField(
            model_name='farmowner',
            name='email',
            field=models.CharField(max_length=200, unique=True),
        ),
    ]
|
14,232 | 85e680041900da4e23019c9854a41faaedd76317 | from django.db import models
from django.contrib.auth.models import User
class Category(models.Model):
    """Product category with an optional human-friendly display name."""

    class Meta:
        verbose_name_plural = 'Categories'

    name = models.CharField(max_length=254)
    # Optional display variant of the name; may be NULL/blank.
    friendly_name = models.CharField(max_length=254,
                                     null=True, blank=True)

    def __str__(self):
        return self.name

    def get_friendly_name(self):
        """Return the display name (may be None when not set)."""
        return self.friendly_name
class Product(models.Model):
    """A shop item: a book (hardback/kindle/paperback), a service or a video."""

    HARDBACK = 'Hardback'
    KINDLE = 'Kindle'
    PAPERBACK = 'Paperback'
    NONE = 'None'
    # Empty choice allows non-book products to leave book_type blank.
    BOOK_TYPE_CHOICES = [
        (HARDBACK, 'Hardback'),
        (KINDLE, 'Kindle'),
        (PAPERBACK, 'Paperback'),
        ('', '')
    ]
    book_type = models.CharField(max_length=254, choices=BOOK_TYPE_CHOICES,
                                 blank=True, default='')
    # Keep products when their category is deleted.
    category = models.ForeignKey('Category', null=True, blank=True,
                                 on_delete=models.SET_NULL)
    sku = models.CharField(max_length=254, null=True, blank=True)
    name = models.CharField(max_length=254)
    author = models.CharField(max_length=254)
    description = models.TextField()
    is_a_service = models.BooleanField(default=False)
    is_a_video = models.BooleanField(default=False)
    price = models.DecimalField(max_digits=6, decimal_places=2)
    image_url = models.URLField(max_length=1024, null=True, blank=True)
    image = models.ImageField(null=True, blank=True)

    def __str__(self):
        return self.name

    def get_rating(self):
        """
        Return the average customer star rating, rounded to one decimal
        place, or 0 when there are no reviews.

        NOTE(review): the star sum is stored on self.total as a side
        effect; a local variable would be cleaner — confirm nothing
        else reads the attribute.
        """
        self.total = sum(int(review['stars']) for review in self.reviews.values())
        if self.total > 0:
            return round(self.total / self.reviews.count(), 1)
        else:
            return self.total
class ProductReview(models.Model):
    """A customer review with a star rating, optional image and timestamps."""

    content = models.TextField()
    # Deleting the product or the user removes their reviews as well.
    product = models.ForeignKey(Product, on_delete=models.CASCADE,
                                related_name='reviews',
                                related_query_name='reviews')
    user = models.ForeignKey(User, on_delete=models.CASCADE,
                             related_name='reviews',
                             related_query_name='reviews')
    stars = models.IntegerField(null=False, blank=False, default=0)
    created = models.DateField(auto_now_add=True)  # set once on insert
    updated = models.DateField(auto_now=True)      # refreshed on every save
    image = models.ImageField(null=True, blank=True)

    def __str__(self):
        return self.content
class DatesAvailable(models.Model):
    """An availability slot for a product (stored as free-form text)."""

    product = models.ForeignKey(Product, on_delete=models.CASCADE,
                                related_name='available',
                                related_query_name='available')
    # NOTE(review): stored as a CharField rather than a DateField — confirm
    # callers never need date arithmetic or ordering on this value.
    date_available = models.CharField(max_length=254,
                                      null=True, blank=True)

    def __str__(self):
        return self.date_available
|
14,233 | 8c8111f9f31f3426d544eb8386274f15caec02c4 | import sys, time, os
import socket
class BufferedMessage():
    """
    Buffered, NUL-delimited messaging over an accepted socket connection.

    Fixed for Python 3: socket.recv() returns bytes, so the receive buffer
    is now bytes (the old str buffer raised TypeError on the first recv),
    and send likewise requires bytes. sendall() replaces send() so a whole
    message is always delivered.
    """

    def __init__(self, conn, addr):
        """
        Wrap the accepted connection *conn* from address *addr*.
        """
        self.conn = conn
        self.addr = addr
        self.buff = b""       # unconsumed bytes left over from previous recv() calls
        self.recvsize = 1024  # chunk size per recv()

    def __enter__(self):
        """
        Enter overload.
        """
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """
        Close the connection when leaving a with-block.
        """
        print("__exit__ : closing connection")
        self.conn.close()

    def __del__(self):
        """
        Best-effort close on garbage collection.
        """
        print("__del__ : closing connection")
        self.conn.close()

    def close(self):
        """
        Close connection.
        """
        print("close : closing connection")
        self.conn.close()

    def recv_protocol_message(self):
        """
        Receive one NUL-terminated protocol message and return it as str.

        Returns an empty string if the peer closes the connection before
        a complete message arrives.
        """
        msg = b""
        while True:
            index = self.buff.find(b'\0')
            if index != -1:
                # Take everything before the delimiter, keep the rest buffered.
                msg = self.buff[:index]
                self.buff = self.buff[index + 1:]
                break
            data = self.conn.recv(self.recvsize)
            if not data:
                break  # connection closed mid-message
            self.buff += data
        return msg.decode('utf-8')

    def send_protocol_message(self, msg):
        """
        Send *msg* (str) terminated by the protocol's NUL delimiter.
        """
        self.conn.sendall(msg.encode('utf-8') + b"\0")
class BufferedSocket():
    """
    Setup a Unix Domain socket with buffered messaging.
    """
    def __init__(self, sname):
        """
        Bind and listen on the Unix domain socket path *sname*.
        """
        self.s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.sname = sname
        # Remove a stale socket file from a previous run; bind() fails if
        # the path already exists.
        try:
            os.remove(self.sname)
        except OSError:
            pass
        self.s.bind(sname)
        self.s.listen(1)  # backlog of one pending connection
    def accept(self):
        """
        Block until a client connects, then wrap the connection for
        NUL-delimited buffered messaging.
        """
        conn, addr = self.s.accept()
        return BufferedMessage(conn, addr)
|
14,234 | af7c33d1c06d3a9774b2da487acd41265a91611e | #!python3
"""Dictionary and Tuple Exercise... """
__author__ = "Gavin Jones"
"""
# 1 Write a program that allows you to store the ages of your family members.
# Program should ask to enter the person name and age
# Once you are done you should be able to input the name of the person and retrieve the age.
# Now print Name of all your Family Members along with their ages.
# Blank Dict to start
family = {}
# Run the Program get Family Member Input 4 Times
for member in range(4):
name = input("Please enter your name: ").capitalize()
age = int(input("Please enter your age: "))
family[name] = age
# Find out who's age they want:
personName = input("Please enter the Members Age your after: ").capitalize()
# Run the Function to get the Family Members Age
def get_member_age(personName):
if personName == "Gavin":
return "Gavin's Age is", family["Gavin"]
elif personName == "Hayley":
return "Hayley's Age is", family["Hayley"]
elif personName == "Marcia":
return "Marcia's Age is", family["Marcia"]
elif personName == "David":
return "David's Age is", family["David"]
else:
return "This person is NOT a member of the Family!"
# Run the Program:
familyFunction = get_member_age(personName)
print(familyFunction)
# Prints the Family Members and their Ages!
for k, v in family.items():
print("Name:", k, "Age:", v)
"""
"""
# 2 Write a function called add_and_multiply that takes two numbers as input and returns the sum and the product
# as 2 separate numbers
def add_and_multiply(num1, num2):
sum_task = num1 + num2
multiplication_task = num1 * num2
tupleResult = (sum_task, multiplication_task)
return tupleResult
# Run the Program Get the Users Input
num1 = int(input("Please enter a number: "))
num2 = int(input("Please enter a number: "))
# Save the function as a Variable
result = add_and_multiply(num1, num2)
# Run the function
print(result)
"""
|
14,235 | bcf1ecca0e89eeb9342a67a260aba1fd0962b5f2 | # # 映画レビューのテキスト分類
# # (https://www.tensorflow.org/tutorials/keras/text_classification)
import altair as alt
import pandas as pd
from tensorflow import keras
# ## Download the IMDB dataset
imdb = keras.datasets.imdb
# num_words=10000 keeps only the 10,000 most frequent words.
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)
# ## Explore the data
print(f"Training entries: {len(train_data)}, labels: {len(train_labels)}")
print(train_data[0])
# -
# !Reviews vary in length.
# (notebook-style cell: bare expression shown as output when run interactively)
len(train_data[0]), len(train_data[1])
# ### Convert the integers back into words
# !A dictionary mapping words to integer indices
word_index = imdb.get_word_index()
# !The first few indices are reserved
word_index = {k: (v + 3) for k, v in word_index.items()}
word_index["<PAD>"] = 0
word_index["<START>"] = 1
word_index["<UNK>"] = 2  # unknown
word_index["<UNUSED>"] = 3
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
def decode_review(text):
    """Translate a sequence of word indices back into a readable review."""
    return " ".join([reverse_word_index.get(i, "?") for i in text])
decode_review(train_data[0])
# ## Prepare the data
# ! Standardize the review lengths (pad/truncate to 256 tokens)
train_data = keras.preprocessing.sequence.pad_sequences(
    train_data, value=word_index["<PAD>"], padding="post", maxlen=256
)
test_data = keras.preprocessing.sequence.pad_sequences(
    test_data, value=word_index["<PAD>"], padding="post", maxlen=256
)
len(train_data[0]), len(train_data[1])
# ## Build the model
# !The input size is the vocabulary used by the movie reviews (10,000 words)
vocab_size = 10000
model = keras.Sequential()
model.add(keras.layers.Embedding(vocab_size, 16))
model.add(keras.layers.GlobalAveragePooling1D())
model.add(keras.layers.Dense(16, activation="relu"))
model.add(keras.layers.Dense(1, activation="sigmoid"))
model.summary()
# ### Loss function and optimizer
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
# ## Create a validation set
x_val = train_data[:10000]
partial_x_train = train_data[10000:]
y_val = train_labels[:10000]
partial_y_train = train_labels[10000:]
# ## Train the model
history = model.fit(
    partial_x_train,
    partial_y_train,
    epochs=40,
    batch_size=512,
    validation_data=(x_val, y_val),
    verbose=1,
)
# ## Evaluate the model
results = model.evaluate(test_data, test_labels, verbose=2)
print(results)
# ## Plot accuracy and loss over the training epochs
df = pd.DataFrame(history.history)
df.index.name = "epoch"
df.reset_index(inplace=True)
df = pd.melt(df, id_vars=["epoch"], value_vars=["accuracy", "val_accuracy"])
chart = alt.Chart(df).mark_line(point=True).properties(width=200, height=150)
chart.encode(
    x="epoch", y=alt.Y("value", scale=alt.Scale(domain=[0.5, 1])), color="variable"
)
|
14,236 | 3356f29e81e3ec947ac9fc55aa6378dc812c7208 | import os
import sys
import random
from torch.utils.data import Dataset
from language import Language
from utils import tokens_to_seq, contains_digit, shuffle_correlated_lists, chunks
from operator import itemgetter
import datetime
currentDT = datetime.datetime.now()
class OneFoldSequencePairDataset(Dataset):
    """Seq2seq dataset over (input, output) sentence pairs for one CV fold."""

    def __init__(self,
                 unprocessed_data,
                 maxlen,
                 vocab_limit,
                 use_extended_vocab):
        # unprocessed_data: list of (input_text, output_text) raw string pairs.
        self.maxlen = maxlen
        self.parser = None
        self.use_extended_vocab = use_extended_vocab
        self.data = []  # Will hold all data
        # Process the data by removing new lines and splitting the words
        for i in range(len(unprocessed_data)):
            inputs = unprocessed_data[i][0]
            outputs = unprocessed_data[i][1]
            inputsL = inputs.replace('\n', '').split(' ')
            outputsL = outputs.replace('\n', '').split(' ')
            self.data.append([inputsL, outputsL])
        # Vocabulary is built from this fold's data only.
        self.lang = Language(vocab_limit, self.data)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        """Return one encoded pair.

        :arg
            idx: int
        :returns
            input_seq, output_seq: encoded sequences from tokens_to_seq,
            plus the space-joined input and output token strings.
        """
        data_pair = self.data[idx]
        # Add in the start and end of sentence and chop at the max length
        input_token_list = (['<SOS>'] + data_pair[0] + ['<EOS>'])[:self.maxlen]
        output_token_list = (['<SOS>'] + data_pair[1] + ['<EOS>'])[:self.maxlen]
        # Turn the words to tokens; the output side may copy from the input
        # tokens when the extended vocabulary is in use.
        input_seq = tokens_to_seq(input_token_list, self.lang.tok_to_idx, self.maxlen, self.use_extended_vocab)
        output_seq = tokens_to_seq(output_token_list, self.lang.tok_to_idx, self.maxlen, self.use_extended_vocab, input_tokens=input_token_list)
        return input_seq, output_seq, ' '.join(input_token_list), ' '.join(output_token_list)
def generateKFoldDatasets(data_name,
                          seed,
                          maxlen=200,
                          lang=None,
                          vocab_limit=None,
                          use_extended_vocab=True,
                          k=5):
    """Build k (train_dataset, test_dataset) pairs for k-fold cross-validation.

    Reads './data/<data_name>_src.txt' / '_tar.txt', shuffles the aligned
    pairs with *seed*, splits them into k folds, and for each fold i uses
    fold i as test data and the remaining k-1 folds as training data.

    BUG FIX: the training accumulation previously did `train_data += folds[i]`
    inside the fold loop, which packed k-1 copies of the held-out TEST fold
    into the training set (leaking test data and discarding the rest); it
    now correctly adds folds[j].
    """
    with open('./data/' + data_name + '_src.txt', "r") as sf:
        src_lines = sf.readlines()
    with open('./data/' + data_name + '_tar.txt', "r") as tf:
        tgt_lines = tf.readlines()
    if not len(src_lines) == len(tgt_lines):
        sys.exit("ERROR: Data files have inconsistent lengths. Make sure your labels are aligned correctly.")
    # Shuffle the dataset before partitioning (src/tgt stay aligned).
    src_lines, tgt_lines, order = shuffle_correlated_lists(src_lines, tgt_lines, seed=seed)
    data = [(src_lines[i], tgt_lines[i]) for i in range(len(src_lines))]
    # Uncomment to get logs on the order of the data points
    #f = open("./logs/log_ordering" + currentDT.strftime("%Y%m%d%H%M%S") + ".txt", "w")
    #for i in order:
    #    f.write("{}\n".format(i))
    #f.close()
    # Divide the data into k chunks.
    chunked = chunks(data, k)
    folds = []
    for _ in range(k):
        folds.append(next(chunked))
    # Build the k training and testing datasets.
    datasets = []
    for i in range(k):
        train_data = []
        test_data = []
        for j in range(len(folds)):
            if i == j:  # fold i is held out for testing
                test_data += folds[j]
            else:       # every other fold goes into training
                train_data += folds[j]
        training_dataset = OneFoldSequencePairDataset(train_data, maxlen, vocab_limit, use_extended_vocab)
        test_dataset = OneFoldSequencePairDataset(test_data, maxlen, vocab_limit, use_extended_vocab)
        datasets.append((training_dataset, test_dataset))
    return datasets
|
14,237 | 2e978f80ce15fc6b14941c4672056b41ad9cbe40 | # AUTOGENERATED! DO NOT EDIT! File to edit: 00_core.ipynb (unless otherwise specified).
__all__ = ['SCOPES', 'KEY_FILE_LOCATION', 'ga_to_df']
# Cell
from apiclient.discovery import build
from oauth2client.service_account import ServiceAccountCredentials
import os
from dotenv import load_dotenv
import pandas as pd
# Load environment variables from a local .env file (if present).
load_dotenv()
SCOPES = ['https://www.googleapis.com/auth/analytics.readonly']
KEY_FILE_LOCATION = os.getenv('GOOGLE_APPLICATION_CREDENTIALS')
# Build service-account credentials when a key file is configured; ga_to_df()
# falls back to building the client without explicit credentials otherwise.
if KEY_FILE_LOCATION:
    credentials = ServiceAccountCredentials.from_json_keyfile_name(KEY_FILE_LOCATION, SCOPES)
else:
    credentials = None
# Cell
def ga_to_df(start_date: str, end_date: str, metrics: list, dimensions: list, filters: list = None, view_id = None) -> pd.DataFrame:
    """Query the Google Analytics Reporting API v4 and return a DataFrame.

    *filters*, when given, is a list of (clauseOperator, (dimensionName,
    operator, expressions)) tuples matching the structure built below.
    *view_id* defaults to the GOOGLE_ANALYTICS_VIEW_ID environment variable.

    NOTE(review): if the response contains no rows, `colnames` is never
    bound and the final DataFrame construction raises NameError; metric
    values are always coerced to float.
    """
    if not view_id:
        view_id = os.getenv('GOOGLE_ANALYTICS_VIEW_ID')
    qry = {
        'dateRanges': [{'startDate': start_date, 'endDate': end_date}],
        'metrics': [{'expression': m} for m in metrics],
        'dimensions': [{'name': m} for m in dimensions],
    }
    if filters:
        qry['dimensionFilterClauses'] = [
            {'operator': f[0], 'filters': [{'dimensionName': f[1][0], 'operator': f[1][1], 'expressions': f[1][2]}]}
            for f in filters
        ]
    qry['viewId'] = view_id
    # Use explicit service-account credentials when configured at import time.
    if credentials:
        analytics = build('analyticsreporting', 'v4', credentials=credentials)
    else:
        analytics = build('analyticsreporting', 'v4')
    request_list = [qry]
    response = analytics.reports().batchGet(body={'reportRequests': request_list}).execute()
    data = []
    for report in response.get('reports'):
        # NOTE(review): the metrics/dimensions parameters are deliberately
        # re-bound here with the names echoed back in the report header.
        dimensions = report.get('columnHeader').get('dimensions')
        metrics = [x['name'] for x in report.get('columnHeader').get('metricHeader').get('metricHeaderEntries')]
        for row in report.get('data').get('rows'):
            dimensions_data = row.get('dimensions', [])
            metrics_data = [x['values'] for x in row.get('metrics', [])][0]
            metrics_data = [float(x) for x in metrics_data]
            colnames = dimensions + metrics
            data_row = dimensions_data + metrics_data
            data.append(data_row)
    df = pd.DataFrame(data, columns=colnames)
    return df
|
14,238 | 5b24417c1beee1622c368c393b6e64902090326b | #!/usr/bin/env python
"""
Renames image files to label + file creation date + count
Creation date is read if EXIF data is available
Otherwise get_exif_date returns nullstring
Example 1: img_140015.jpg -> label_2021_01_02_1_.jpg
Example 2: scann_15.jpg -> label.jpg
"""
import os
from tkinter import Tk, Label, Entry, Button, filedialog, scrolledtext, INSERT
from pathlib import Path
import PIL.Image
def get_exif_date(user_img: str) -> str:
    """Return the EXIF creation date of *user_img* as 'YYYY_MM_DD'.

    Falls back to an empty string when the image carries no usable EXIF
    data. Bug fix: images that have EXIF data but no DateTime tag (306)
    previously raised an uncaught KeyError; both failure modes are now
    handled.
    """
    path = Path(LBL3.cget("text"))
    image = PIL.Image.open(path/user_img)
    exif_data = image._getexif()  # returns None when the file has no EXIF
    try:
        ldate = exif_data[306][:10]  # tag 306 = DateTime, 'YYYY:MM:DD ...'
        return ldate.replace(':', '_')
    except (TypeError, KeyError) as err:
        print(err)
        return ""
def get_directory() -> str:
    """Open a folder picker, store the choice in the LBL3 label and return it."""
    user_dir = filedialog.askdirectory()
    LBL3.configure(text=user_dir)
    return user_dir
def main() -> None:
    """Rename every file in the chosen folder to '<label>_<date>_<count><ext>'.

    Bug fix: the label text was read from LABEL_TXT1, which is never
    defined anywhere in this script (the entry widget is named
    ENT_LABEL_ENTRY), so clicking 'Rename Files!' raised NameError.
    """
    path = Path(LBL3.cget("text"))
    count = 0
    label = ENT_LABEL_ENTRY.get()
    files = [f for f in os.listdir(LBL3.cget("text")) \
             if os.path.isfile(os.path.join(LBL3.cget("text"), f))]
    for img in files:
        date = get_exif_date(img)
        count += 1
        ext = Path(img).suffix
        new_title = f"{label}_{date}_{count}{ext}"
        os.rename(path/img, path/new_title)
        # update GUI log
        SCROLL_TXT.insert(INSERT, f"\n{img}\n renamed to: \n{new_title} \n")
    return
#############################
bg = "black"
fg = "white"
WINDOW = Tk()
WINDOW.title("EXIF 2 TITLE")
WINDOW.configure(background=bg)
WINDOW.geometry('400x400')
LBL_HEAD_TEXT = Label(WINDOW,
text="Label",
font=("Verdana", 12),
background=bg,
foreground=fg)
LBL_HEAD_TEXT.grid(column=0, row=1)
ENT_LABEL_ENTRY = Entry(WINDOW,
width=10,
font=("Verdana", 11))
ENT_LABEL_ENTRY.grid(column=0, row=2)
BTN_BROWSE= Button(WINDOW,
text="Browse Folder",
command=get_directory,
background=bg,
foreground=fg,
font=("Verdana", 11))
BTN_BROWSE.grid(column=0, row=5)
LBL_SELECTED_FOLDER = Label(WINDOW,
text="Selected folder:",
font=("Verdana", 12),
background=bg,
foreground=fg)
LBL_SELECTED_FOLDER.grid(column=0, row=6)
LBL3 = Label(WINDOW,
text="...",
font=("Verdana", 12),
background=bg,
foreground=fg)
LBL3.grid(column=0, row=7)
BTN_rename = Button(WINDOW,
text="Rename Files!",
command=main,
background=bg,
foreground=fg,
font=("Verdana", 11))
BTN_rename.grid(column=0, row=8)
SCROLL_TXT = scrolledtext.ScrolledText(WINDOW,
width=35,
height=8,
font=("Verdana", 11))
SCROLL_TXT.configure(background=bg, foreground=fg)
SCROLL_TXT.grid(column=0, row=9)
WINDOW.mainloop()
|
14,239 | 5ef45c059e63d1b5f98aaa05a06267a9f373b287 | #coding=utf-8
'''
4. Exercise: implement a "business-card manager".
Basic features to provide:
    add a card
    delete a card
    edit a card
    query cards
    quit
The menu repeats after each action until the user chooses to quit.
'''
# Card store: maps an integer id to a {"name", "phonenumber", "wechat"} dict.
cards = {}
i = 0
while i == 0:
    print("=" * 10 + "欢迎使用名片管理器" + "=" * 10)
    print("1.添加名片")
    print("2.删除名片")
    print("3.修改名片")
    print("4.查询名片")
    print("5.退出系统")
    j = input("请选择使用功能系统的编号")
    j = int(j)
    # quit the system
    if j == 5:
        break
    # add a new card
    if j == 1:
        addContinue = "1"
        while addContinue == "1":
            print("=" * 20)
            info = {"name": {}, "phonenumber": {}, "wechat": {}}
            if 1 not in cards.keys():
                idKey = 1  # card id
                info["name"] = input("添加第%d个名片的姓名"%idKey)
                info["phonenumber"] = input("添加第%d个名片的电话号"%idKey)
                info["wechat"] = input("添加第%d个名片的微信号"%idKey)
                cards[idKey] = info  # store the card
                print("新增名片编号%d:姓名:%s||电话号:%s||微信号:%s"%(idKey,info["name"],info["phonenumber"],info["wechat"]))
                addContinue = input("是否继续添加名片[1.填加/0.返回主菜单]")
                if addContinue == "0":
                    break
            else:
                # NOTE(review): idKey persists across menu visits and is not
                # re-derived from cards, so ids can collide after deletions.
                idKey += 1
                info["name"] = input("添加第%d个名片的姓名" % idKey)
                info["phonenumber"] = input("添加第%d个名片的电话号" % idKey)
                info["wechat"] = input("添加第%d个名片的微信号" % idKey)
                cards[idKey] = info  # store the card
                print("新增名片编号%d:姓名:%s||电话号:%s||微信号:%s" %(idKey, info["name"], info["phonenumber"], info["wechat"]))
                addContinue = input("是否继续添加名片[1.填加/0.返回主菜单]")
                if addContinue == "0":
                    break
        continue
    # delete a card -- author's note: only the basic flow (existence check and
    # picking an entry) is implemented; a known quirk is that the card with
    # id 1 cannot be deleted without breaking the numbering.
    if j == 2:
        delContinue = "1"
        while delContinue == "1":
            print("=" * 20)
            if 1 not in cards.keys():
                print("没有名片可以删除!请返回添加名片")
                break
            else:
                print("名片库中有以下名片:")
                for id,item in cards.items():
                    print("名片编号%d:姓名:%s||电话号:%s||微信号:%s" %(id, item["name"], item["phonenumber"], item["wechat"]))
                delId = input("请输入要删除的编号:")
                delId = int(delId)
                del(cards[delId])  # remove the chosen card
                # NOTE(review): decrementing idKey does not renumber the
                # remaining cards; a NameError is also possible if no add
                # happened in this session to define idKey.
                idKey -= 1
                delContinue = input("是否继续删除?[1.继续删除/0.返回主菜单]")
                if delContinue == "0":
                    break
        continue
    # edit an existing card (all three fields are re-entered)
    if j == 3:
        updateContinue = "1"
        while updateContinue == "1":
            print("=" * 20)
            info = {"name": {}, "phonenumber": {}, "wechat": {}}
            if 1 not in cards.keys():
                print("没有名片可以修改!请返回添加名片")
                break
            else:
                print("名片库中有以下名片:")
                for id,item in cards.items():
                    print("名片编号%d:姓名:%s||电话号:%s||微信号:%s" %(id, item["name"], item["phonenumber"], item["wechat"]))
                updateId = input("请输入要修改的编号:")
                updateId = int(updateId)
                info["name"] = input("修改编号%d名片的姓名" % updateId)
                info["phonenumber"] = input("修改编号%d名片的电话号" % updateId)
                info["wechat"] = input("修改编号%d名片的微信号" % updateId)
                cards[updateId] = info
                updateContinue = input("是否继续修改?[1.继续修改/0.返回主菜单]")
                if updateContinue == "0":
                    break
        continue
    # query (list) all cards
    if j == 4:
        if 1 not in cards.keys():
            # NOTE(review): message text says "modify" although this is the
            # query branch -- runtime string left unchanged.
            print("没有名片可以修改!请返回添加名片")
        else:
            print("名片库中有以下名片:")
            for id, item in cards.items():
                print("名片编号%d:姓名:%s||电话号:%s||微信号:%s" % (id, item["name"], item["phonenumber"], item["wechat"]))
        continue
print("系统已关闭!")
|
14,240 | e4a23ff1d6b8177c6d8abf2b8b9ce510efb8abd7 | import sys
from typing import TYPE_CHECKING

# Auto-generated Plotly validator re-export shim.
# Eagerly import the validator classes on Python < 3.7 (no module-level
# __getattr__ support) and while type checking; otherwise expose them
# lazily via relative_import.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._yref import YrefValidator
    from ._yanchor import YanchorValidator
    from ._y import YValidator
    from ._xref import XrefValidator
    from ._xanchor import XanchorValidator
    from ._x import XValidator
    from ._visible import VisibleValidator
    from ._templateitemname import TemplateitemnameValidator
    from ._source import SourceValidator
    from ._sizing import SizingValidator
    from ._sizey import SizeyValidator
    from ._sizex import SizexValidator
    from ._opacity import OpacityValidator
    from ._name import NameValidator
    from ._layer import LayerValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._yref.YrefValidator",
            "._yanchor.YanchorValidator",
            "._y.YValidator",
            "._xref.XrefValidator",
            "._xanchor.XanchorValidator",
            "._x.XValidator",
            "._visible.VisibleValidator",
            "._templateitemname.TemplateitemnameValidator",
            "._source.SourceValidator",
            "._sizing.SizingValidator",
            "._sizey.SizeyValidator",
            "._sizex.SizexValidator",
            "._opacity.OpacityValidator",
            "._name.NameValidator",
            "._layer.LayerValidator",
        ],
    )
|
14,241 | a44f3b2249c59be3af2bbe6df49038a7ce823188 | #!/usr/bin/env python3
import rospy
from std_msgs.msg import Float32
n = 0
def cb(message):
global n
n = message.data*2
if n > 100:
n = message.data*3
if n > 300:
n = message.data/5
# Node wiring: listen on 'count_up', republish the scaled value on 'twice'
# at 10 Hz.  The callback updates the module-global n asynchronously.
rospy.init_node('twice')
sub = rospy.Subscriber('count_up', Float32, cb)
pub = rospy.Publisher('twice', Float32, queue_size=1)
rate = rospy.Rate(10)
while not rospy.is_shutdown():
    pub.publish(n)
    rate.sleep()
|
14,242 | bddf7a70eda5f1771bbbd93775141c5e73d23409 | from flask import Blueprint
from scsr.api import ScsrAPI
class ScsrView:
    """Registers the ScsrAPI game endpoints on a Flask blueprint."""

    scsr_app = None

    def __init__(self):
        self.scsr_app = Blueprint('scsr_app', __name__)
        self.games_view = ScsrAPI.as_view('games')
        # (rule, defaults, methods) for the games collection and item routes.
        routes = [
            ('/games/', {'game_id': None}, ['GET']),
            ('/games/', None, ['POST']),
            ('/games/<int:game_id>', None, ['GET', 'PUT', 'DELETE']),
        ]
        for rule, defaults, methods in routes:
            self.scsr_app.add_url_rule(
                rule,
                defaults=defaults,
                view_func=self.games_view,
                methods=methods,
            )

    def registerBlueprints(self, APP):
        """Attach this view's blueprint to the given Flask application."""
        APP.register_blueprint(self.scsr_app)
14,243 | b72f8c6544bf9f2b27b76dc7bcb1282b5d567472 | from element_operate.UnionLotto.UnionLotto_choosenumber import UnionLottoChooseNumber_lelun
from element_operate.UnionLotto.confirm_lottery import ConfirmLottery_lelun
from element_operate.baofoo_payment import BaofooPayment_lelun
from element_operate.choose_bank import ChooseBank_lelun
from element_operate.confirm_pay import ConfirmPay_lelun
from element_operate.homepage import HomePage_lelun
from element_operate.less_pay_sucess import LessPaySucess_lelun
from element_operate.login import Login_lelun
from element_operate.my_ticket import MyTicket_lelun
from element_operate.payment_mode import PaymetMode_lelun
from test_case.Base.mytest import MyTest
from element_operate.UnionLotto.submit_order_sucess import SubmitOrderSuccess_lelun
from time import sleep
from element_operate.UnionLotto.order_details import OrderDetails_lelun
from selenium.webdriver.common.action_chains import ActionChains
class LessPayment_case(MyTest):
    """=Single-flow scenarios: login, recharge and shortfall-payment UI tests."""

    def test_less_payment_shortfall_case(self):
        '''Verify the shortfall-payment flow.'''
        ### enter the Union Lotto number-selection page ###
        hp = HomePage_lelun(self.driver)
        hp.open()
        l = Login_lelun(self.driver)
        ###### if the floating overlay appears, close it, then continue
        hp.Moveable_float_close()
        hp.UnionLotto_link()  # click the Union Lotto link
        ulcn = UnionLottoChooseNumber_lelun(self.driver)
        ulcn.u_red_label6()  # pick any 6 red balls
        ulcn.u_bule_label1()  # pick any 1 blue ball
        ulcn.Confirm_button()  # click the confirm-selection button
        cl = ConfirmLottery_lelun(self.driver)
        cl.submit_order_to_station_owner_button()  # click "submit order to station owner"
        l.login_nomoney_lelun()  # log in (zero-balance account)
        cl.submit_order_to_station_owner_button()  # click "submit order to station owner"
        cl.confirm_and_pay_button()  # click "confirm and pay"
        pm = PaymetMode_lelun(self.driver)
        mur = pm.Top_up()  ## fetch the recharge-page text
        self.assertEqual('充值', mur)

    def test_recharge_case(self):
        """Verify the recharge flow."""
        hp = HomePage_lelun(self.driver)
        hb = MyTicket_lelun(self.driver)
        hc = PaymetMode_lelun(self.driver)
        hd = BaofooPayment_lelun(self.driver)
        he = ChooseBank_lelun(self.driver)
        hf = ConfirmPay_lelun(self.driver)
        hl = Login_lelun(self.driver)
        hp.open()
        ###### if the floating overlay appears, close it, then continue
        hp.Moveable_float_close()
        hp.My_lottery_ticket()  ## click "my lottery tickets"
        hl.login_lelun()  ## log in
        hb.Recharge()  # click recharge
        hb.Recharge_Money()  ## click a preset recharge amount
        hb.Next()  ## click next
        mur = hc.Top_up()  ## fetch the recharge-page text
        self.assertEqual('充值', mur)

    def test_recharge_input_case(self):
        """Verify the recharge flow with a manually typed amount."""
        hp = HomePage_lelun(self.driver)
        hb = MyTicket_lelun(self.driver)
        hc = PaymetMode_lelun(self.driver)
        hd = BaofooPayment_lelun(self.driver)
        he = ChooseBank_lelun(self.driver)
        hf = ConfirmPay_lelun(self.driver)
        hl = Login_lelun(self.driver)
        hp.open()
        ###### if the floating overlay appears, close it, then continue
        hp.Moveable_float_close()
        hp.My_lottery_ticket()  ## click "my lottery tickets"
        hl.login_lelun()  ## log in
        hb.Recharge()  # click recharge
        hb.Recharge_input(888)  ### enter 888 as the amount
        hb.Next()  ## click next
        mur = hc.Top_up()  ## fetch the recharge-page text
        self.assertEqual('充值', mur)

    def test_Login_case(self):
        """Verify the login flow."""
        hp = HomePage_lelun(self.driver)
        hb = MyTicket_lelun(self.driver)
        hl = Login_lelun(self.driver)
        hp.open()
        ###### if the floating overlay appears, close it, then continue
        hp.Moveable_float_close()
        hp.My_lottery_ticket()  ## click "my lottery tickets"
        hl.login_lelun()  ## log in
        mur = hb.Account()
        self.assertEqual("17602882784", mur)

    def test_after_shortfall_case(self):
        '''Verify shortfall payment with a multiple-selection ticket.'''
        ### enter the Union Lotto number-selection page ###
        ha = HomePage_lelun(self.driver)
        hl = Login_lelun(self.driver)
        hb = UnionLottoChooseNumber_lelun(self.driver)
        hc = ConfirmLottery_lelun(self.driver)
        hd = PaymetMode_lelun(self.driver)
        he = BaofooPayment_lelun(self.driver)
        hf = ChooseBank_lelun(self.driver)
        hf1 = ConfirmPay_lelun(self.driver)
        hf2 = LessPaySucess_lelun(self.driver)
        ha.open()
        ###### if the floating overlay appears, close it, then continue
        ha.Moveable_float_close()
        ha.UnionLotto_link()  # click the Union Lotto link
        hb.u_red_label6()  # pick any 6 red balls
        hb.u_bule_label1_too()  # pick any 2 blue balls
        hb.Confirm_button()  # click the confirm-selection button
        hc.submit_order_to_station_owner_button()  # click "submit order to station owner"
        hl.login_nomoney_lelun()  # log in (zero-balance account)
        hc.submit_order_to_station_owner_button()  # click "submit order to station owner"
        hc.confirm_and_pay_button()  # click "confirm and pay"
        mur = hd.Top_up()  ## fetch the recharge-page text
        self.assertEqual('充值', mur)

    """def test_one_note_pause_one_shortfall_case(self):
        '''手选一注,确认页机选1注,差额支付流程'''
        ###点击进入双色球选号页面###
        ha = HomePage_lelun(self.driver)
        hl = Login_lelun(self.driver)
        hb = UnionLottoChooseNumber_lelun(self.driver)
        hc= ConfirmLottery_lelun(self.driver)
        hd = PaymetMode_lelun(self.driver)
        he = BaofooPayment_lelun(self.driver)
        hf = ChooseBank_lelun(self.driver)
        hf1 = ConfirmPay_lelun(self.driver)
        hf2= LessPaySucess_lelun(self.driver)
        ha.open()
        ######判断是否出现浮层弹框,如果出现浮层点击X,然后执行下一步操作
        ha.Moveable_float_close()
        ha.UnionLotto_link()#点击双色球链接
        hb.u_red_label6() # 任意选择6个红球
        hb.u_bule_label1() # 任意选择1个蓝球
        hb.Confirm_button() # 点击确认选号按钮
        hc.machine_choose_one_button()#点击机选一注
        hc.submit_order_to_station_owner_button() # 点击“提交订单给站主”按钮
        hl.login_nomoney_lelun()#点击登录
        hc.submit_order_to_station_owner_button() # 点击“提交订单给站主”按钮
        hc.confirm_and_pay_button() # 点击“确认并支付”按钮
        mur = hd.Top_up() ##获取充值成功文本
        self.assertEqual('充值', mur)"""

    def test_one_note_five_double_ten_period_shortfall_case(self):
        '''One manually picked single ticket, multiplier 5, 10 draws, shortfall payment.'''
        ### enter the Union Lotto number-selection page ###
        ha = HomePage_lelun(self.driver)
        hl = Login_lelun(self.driver)
        hb = UnionLottoChooseNumber_lelun(self.driver)
        hc = ConfirmLottery_lelun(self.driver)
        hd = PaymetMode_lelun(self.driver)
        he = BaofooPayment_lelun(self.driver)
        hf = ChooseBank_lelun(self.driver)
        hf1 = ConfirmPay_lelun(self.driver)
        hf2 = LessPaySucess_lelun(self.driver)
        ha.open()
        ###### if the floating overlay appears, close it, then continue
        ha.Moveable_float_close()
        ha.UnionLotto_link()  # click the Union Lotto link
        hb.u_red_label6()  # pick any 6 red balls
        hb.u_bule_label1()  # pick any 1 blue ball
        hb.Confirm_button()  # click the confirm-selection button
        hc.chase_ticket_input("10")  # chase 10 draws
        hc.throw_times_input("5")  # set the bet multiplier to 5
        hc.submit_order_to_station_owner_button()  # click "submit order to station owner"
        hl.login_nomoney_lelun()  # log in (zero-balance account)
        hc.submit_order_to_station_owner_button()  # click "submit order to station owner"
        hc.confirm_and_pay_button()  # click "confirm and pay"
        mur = hd.Top_up()  ## fetch the recharge-page text
        self.assertEqual('充值', mur)

    def test_after_Continue_buy(self):
        """Open a chase-order's details and re-buy the same scheme."""
        ha = HomePage_lelun(self.driver)
        hl = Login_lelun(self.driver)
        hb = UnionLottoChooseNumber_lelun(self.driver)
        hc = ConfirmLottery_lelun(self.driver)
        hd = MyTicket_lelun(self.driver)
        he = OrderDetails_lelun(self.driver)
        hf = SubmitOrderSuccess_lelun(self.driver)
        ha.open()
        ###### if the floating overlay appears, close it, then continue
        ha.Moveable_float_close()
        ha.My_lottery_ticket()  # click "my lottery tickets"
        hl.login_lelun()  # log in
        hd.After_nu()  ## click chase orders
        hd.After_nu_record()  ### open an order from the chase records
        he.Scheme()  # click "continue buying this scheme"
        mur = hc.confirm_num_page_text()
        self.assertEqual('提交订单给站主', mur)
        hc.submit_order_to_station_owner_button()  # click "submit order to station owner"
        hc.confirm_and_pay_button()  # click "confirm and pay"
        text1 = hf.submit_order_success()  # fetch the order-submitted text
        self.assertEqual('订单提交成功', text1)

    def test_unionlotto_dantuo_continue_case(self):
        """Dantuo (banker/drag) mode: pick one ticket, then one more from the confirm page."""
        ha = HomePage_lelun(self.driver)
        hl = Login_lelun(self.driver)
        hb = UnionLottoChooseNumber_lelun(self.driver)
        hc = ConfirmLottery_lelun(self.driver)
        hd = PaymetMode_lelun(self.driver)
        he = BaofooPayment_lelun(self.driver)
        hf = ChooseBank_lelun(self.driver)
        hf1 = ConfirmPay_lelun(self.driver)
        hf2 = LessPaySucess_lelun(self.driver)
        ha.open()
        ###### if the floating overlay appears, close it, then continue
        ha.Moveable_float_close()
        ha.UnionLotto_link()  # click the Union Lotto link
        hb.DanTuo_mode()  # switch to dantuo mode
        hb.u_red_five_two()  # pick 5 banker + 2 drag red balls
        hb.u_bule_one()  # pick one blue ball
        hb.Confirm_button()  # confirm the selection
        hc.continue_choose_num_button()  ## continue picking numbers
        hb.u_red_five_two()  # pick 5 banker + 2 drag red balls
        hb.u_bule_one()  # pick one blue ball
        hb.Confirm_button()  # confirm the selection
        hc.submit_order_to_station_owner_button()  # click "submit order to station owner"
        hl.login_nomoney_lelun()  # log in (zero-balance account)
        hc.submit_order_to_station_owner_button()  # click "submit order to station owner"
        hc.confirm_and_pay_button()  # click "confirm and pay"
        mur = hd.Top_up()  ## fetch the recharge-page text
        self.assertEqual('充值', mur)

    def test_Lottery_information_buy_case(self):
        """From the draw-results page, jump to Union Lotto betting and buy one ticket."""
        ha = HomePage_lelun(self.driver)
        hl = Login_lelun(self.driver)
        hb = UnionLottoChooseNumber_lelun(self.driver)
        hc = ConfirmLottery_lelun(self.driver)
        hd = PaymetMode_lelun(self.driver)
        he = BaofooPayment_lelun(self.driver)
        hf = ChooseBank_lelun(self.driver)
        hf1 = SubmitOrderSuccess_lelun(self.driver)
        hf2 = LessPaySucess_lelun(self.driver)
        ha.open()
        ###### if the floating overlay appears, close it, then continue
        ha.Moveable_float_close()
        ha.Lottery_information()  ## click draw information
        hb.Lottery_information_unionlotto()  ## click Union Lotto in the draw list
        hc.Unionlotto_History_buy()  ### open the most recent Union Lotto draw
        hc.Buy_unionlotto()  ## click "bet on Union Lotto"
        hb.u_red_label6()  # pick any 6 red balls
        hb.u_bule_label1()  # pick any 1 blue ball
        hb.Confirm_button()  # click the confirm-selection button
        hc.submit_order_to_station_owner_button()  # submit to the station owner
        hl.login_lelun()  # enter account and password
        hc.submit_order_to_station_owner_button()  # submit to the station owner
        hc.confirm_and_pay_button()  # click "confirm and pay"
        text1 = hf1.submit_order_success()  # fetch the order-submitted text
        self.assertEqual('订单提交成功', text1)

    '''def test_Lottery_information_buy_pause_one_case(self):
        """进入开奖公告,打开双色球开奖详情,点击投注双色球,进入双色球投注页面,选1注号码,在投注确认页,机选1注"""
        ha = HomePage_lelun(self.driver)
        hl = Login_lelun(self.driver)
        hb = UnionLottoChooseNumber_lelun(self.driver)
        hc = ConfirmLottery_lelun(self.driver)
        hd = PaymetMode_lelun(self.driver)
        he = BaofooPayment_lelun(self.driver)
        hf = ChooseBank_lelun(self.driver)
        hf1 = SubmitOrderSuccess_lelun(self.driver)
        hf2 = LessPaySucess_lelun(self.driver)
        ha.open()
        ######判断是否出现浮层弹框,如果出现浮层点击X,然后执行下一步操作
        ha.Moveable_float_close()
        ha.Lottery_information()##点击开奖信息
        hb.Lottery_information_unionlotto()##点击开奖信息中的双色球
        hc.Unionlotto_History_buy()###点击双色球最近的开奖信息
        hc.Buy_unionlotto()##点击投注双色球
        hb.u_red_label6() # 任意选择6个红球
        hb.u_bule_label1() # 任意选择1个蓝球
        hb.Confirm_button() # 点击确认选号按钮
        hc.machine_choose_one_button()##点击机选1注
        lottery_number_text = hc.lottery_number_text() # 获取投注注数文本
        self.assertIn("2", lottery_number_text) # 检查投注注数为1注
        hc.submit_order_to_station_owner_button() # 点击提交给站主
        hl.login_lelun() # 输入账号,密码
        hc.submit_order_to_station_owner_button() # 点击提交给站主
        hc.confirm_and_pay_button() # 点击确认支付
        text1 = hf1.submit_order_success() # 获取提交订单成功文本
        self.assertEqual('订单提交成功', text1)'''

    def test_Lottery_information_buy_pause_five_case(self):
        """From the draw-results page bet one ticket, then quick-pick 5 more and set multiplier/draw count."""
        ha = HomePage_lelun(self.driver)
        hl = Login_lelun(self.driver)
        hb = UnionLottoChooseNumber_lelun(self.driver)
        hc = ConfirmLottery_lelun(self.driver)
        hd = PaymetMode_lelun(self.driver)
        he = BaofooPayment_lelun(self.driver)
        hf = ChooseBank_lelun(self.driver)
        hf1 = SubmitOrderSuccess_lelun(self.driver)
        hf2 = LessPaySucess_lelun(self.driver)
        ha.open()
        ###### if the floating overlay appears, close it, then continue
        ha.Moveable_float_close()
        ha.Lottery_information()  ## click draw information
        hb.Lottery_information_unionlotto()  ## click Union Lotto in the draw list
        hc.Unionlotto_History_buy()  ### open the most recent Union Lotto draw
        hc.Buy_unionlotto()  ## click "bet on Union Lotto"
        hb.u_red_label6()  # pick any 6 red balls
        hb.u_bule_label1()  # pick any 1 blue ball
        hb.Confirm_button()  # click the confirm-selection button
        hc.machine_choose_five_button()  ## quick-pick 5 tickets
        hc.chase_ticket_input("4")  # chase 4 draws
        hc.throw_times_input("3")  # set the bet multiplier to 3
        lottery_number_text = hc.lottery_number_text()  # fetch the ticket-count text
        self.assertIn("6", lottery_number_text)  # expect 6 tickets in total
        chase_time_text = hc.chase_time_text()  # fetch the chased-draws text
        self.assertIn("4", chase_time_text)  # expect 4 chased draws
        throw_times = hc.throw_time_text()  # fetch the multiplier text
        self.assertIn("3", throw_times)  # expect multiplier 3
        hc.submit_order_to_station_owner_button()  # submit to the station owner
        hl.login_lelun()  # enter account and password
        hc.submit_order_to_station_owner_button()  # submit to the station owner
        hc.confirm_and_pay_button()  # click "confirm and pay"
        text1 = hf1.submit_order_success()  # fetch the order-submitted text
        self.assertEqual('订单提交成功', text1)

    '''def test_aaa(self):
        """幸运选号"""
        ha = HomePage_lelun(self.driver)
        hl = Login_lelun(self.driver)
        hb = UnionLottoChooseNumber_lelun(self.driver)
        hc = ConfirmLottery_lelun(self.driver)
        hd = PaymetMode_lelun(self.driver)
        he = BaofooPayment_lelun(self.driver)
        hf = ChooseBank_lelun(self.driver)
        hf1 = SubmitOrderSuccess_lelun(self.driver)
        hf2 = LessPaySucess_lelun(self.driver)
        action = ActionChains(self.driver)
        ha.open()
        ######判断是否出现浮层弹框,如果出现浮层点击X,然后执行下一步操作
        ha.Moveable_float_close()
        ha.Activity_zone()###点击幸运选号
        hb.Lucky_number()##点击幸运选号
        hc.Choose_color()#点击选择彩种
        hc.Lucky_unionlotto(action)
        hc.Complete()##点击完成
        hc.Choose_note()#点击选择注数
        sleep(5)
        hc.Complete()##点击完成'''
|
14,244 | fa63590e543a5c53c28090a8f8d6cc9d165bee7d | import glob
import logging
import os
import shutil
import subprocess
import time
import pytest
import yaml
from cekit.descriptor import Image
from cekit.errors import CekitError
from cekit.generator.docker import DockerGenerator
from cekit.tools import Map
from tests.utils import merge_dicts
try:
from unittest.mock import call
except ImportError:
from mock import call
from cekit.builders.docker_builder import DockerBuilder
from cekit.config import Config
config = Config()
@pytest.fixture(autouse=True)
def reset_config():
    """Give every test a clean, empty 'common' configuration section."""
    config.cfg.update(common={})
def merge_container_yaml(dist_git_dir, src, dest):
    """Overlay the generated container.yaml (src) onto dest and write it back.

    If any *.repo file exists under <dist_git_dir>/repos, the build is
    restricted to the x86_64 platform.
    """
    # FIXME - this is temporary needs to be refactored to proper merging
    with open(src, "r") as fh:
        generated = yaml.safe_load(fh)
    if os.path.exists(dest):
        with open(dest, "r") as fh:
            target = yaml.safe_load(fh)
    else:
        target = {}
    target.update(generated)
    repo_pattern = os.path.join(dist_git_dir, "repos", "*.repo")
    if glob.glob(repo_pattern):
        target.setdefault("platforms", {})["only"] = ["x86_64"]
    with open(dest, "w") as fh:
        yaml.dump(target, fh, default_flow_style=False)
def test_osbs_builder_defaults(mocker):
    """Without the redhat flag, the OSBS builder uses the Fedora toolchain."""
    mocker.patch.object(subprocess, "run")
    builder = create_builder_object(mocker, "osbs", {})
    assert builder._fedpkg == "fedpkg"
    assert builder._koji == "koji"
    assert builder._koji_url == "https://koji.fedoraproject.org/koji"
def test_osbs_builder_redhat(mocker):
    """With the redhat flag, the OSBS builder switches to rhpkg/brew."""
    config.cfg["common"] = {"redhat": True}
    mocker.patch.object(subprocess, "run")
    builder = create_builder_object(mocker, "osbs", {})
    assert builder._fedpkg == "rhpkg"
    assert builder._koji == "brew"
    assert builder._koji_url == "https://brewweb.engineering.redhat.com/brew"
def test_osbs_builder_use_rhpkg_stage(mocker):
    """redhat + stage selects the staging rhpkg/brew endpoints."""
    config.cfg["common"] = {"redhat": True}
    mocker.patch.object(subprocess, "run")
    builder = create_builder_object(mocker, "osbs", {"stage": True})
    assert builder._fedpkg == "rhpkg-stage"
    assert builder._koji == "brew-stage"
    assert builder._koji_url == "https://brewweb.stage.engineering.redhat.com/brew"
def test_osbs_builder_custom_commit_msg(mocker):
    """A custom commit message is propagated into the builder params."""
    mocker.patch.object(subprocess, "run")
    builder = create_builder_object(
        mocker, "osbs", {"stage": True, "commit_message": "foo"}
    )
    assert builder.params.commit_message == "foo"
def test_osbs_builder_nowait(mocker):
    """The nowait flag is propagated into the builder params."""
    mocker.patch.object(subprocess, "run")
    builder = create_builder_object(mocker, "osbs", {"nowait": True})
    assert builder.params.nowait is True
def test_osbs_builder_user(mocker):
    """The user parameter is propagated into the builder params."""
    mocker.patch.object(subprocess, "run")
    builder = create_builder_object(mocker, "osbs", {"user": "UserFoo"})
    assert builder.params.user == "UserFoo"
def test_merge_container_yaml_no_limit_arch(mocker, tmpdir):
    """With no *.repo files present, no platform restriction is added.

    Bug fix: the asserted key was misspelled "paltforms", which made the
    assertion pass vacuously regardless of merge_container_yaml's behavior.
    """
    mocker.patch.object(glob, "glob", return_value=False)
    mocker.patch.object(subprocess, "run")
    builder = create_builder_object(mocker, "osbs", {})
    builder.dist_git_dir = str(tmpdir.mkdir("target"))
    container_yaml_f = "container.yaml"
    source = "souce_cont.yaml"
    with open(source, "w") as file_:
        yaml.dump({"tags": ["foo"]}, file_)
    merge_container_yaml(builder.dist_git_dir, source, container_yaml_f)
    with open(container_yaml_f, "r") as file_:
        container_yaml = yaml.safe_load(file_)
    os.remove(container_yaml_f)
    os.remove(source)
    assert "platforms" not in container_yaml
def test_merge_container_yaml_limit_arch(mocker, tmpdir):
    """When *.repo files exist, the build is limited to x86_64 only."""
    mocker.patch.object(glob, "glob", return_value=True)
    mocker.patch.object(subprocess, "run")
    builder = create_builder_object(mocker, "osbs", {})
    builder.dist_git_dir = str(tmpdir.mkdir("target"))
    container_yaml_f = "container.yaml"
    source = "souce_cont.yaml"
    with open(source, "w") as file_:
        yaml.dump({"tags": ["foo"]}, file_)
    merge_container_yaml(builder.dist_git_dir, source, container_yaml_f)
    with open(container_yaml_f, "r") as file_:
        container_yaml = yaml.safe_load(file_)
    os.remove(container_yaml_f)
    os.remove(source)
    assert "x86_64" in container_yaml["platforms"]["only"]
    assert len(container_yaml["platforms"]["only"]) == 1
class GitMock(object):
    """Minimal stand-in for the cekit Git helper: every operation is a no-op."""

    def add(self, artifacts):
        return None

    def stage_modified(self):
        return None

    @staticmethod
    def repo_info(path):
        return None

    def prepare(self, stage, user=None):
        return None

    def clean(self, artifacts):
        return None
def create_builder_object(
    mocker, builder, params, common_params=None
):
    """Instantiate the requested builder engine with merged parameters.

    :param mocker: pytest-mock fixture used to stub cekit.tools.decision
    :param builder: engine name: "docker", "osbs", "podman" or "buildah"
    :param params: engine-specific parameters, merged over common_params
    :param common_params: base parameters; defaults to {"target": "something"}
    :raises Exception: for an unknown builder engine name

    Fix: the previous signature used a mutable dict as the default for
    common_params (the old in-code TODO); a None sentinel gives each call
    its own fresh default while staying backward compatible.
    """
    if common_params is None:
        common_params = {"target": "something"}
    if "docker" == builder:
        from cekit.builders.docker_builder import DockerBuilder as BuilderImpl
    elif "osbs" == builder:
        from cekit.builders.osbs import OSBSBuilder as BuilderImpl
    elif "podman" == builder:
        from cekit.builders.podman import PodmanBuilder as BuilderImpl
    elif "buildah" == builder:
        from cekit.builders.buildah import BuildahBuilder as BuilderImpl
    else:
        raise Exception("Builder engine %s is not supported" % builder)
    mocker.patch("cekit.tools.decision")
    builder = BuilderImpl(Map(merge_dicts(common_params, params)))
    builder.dist_git_dir = "/tmp"
    builder.git = GitMock()
    builder.artifacts = []
    return builder
def test_osbs_builder_run_brew_stage(mocker):
    """run() against the staging instance invokes the brew-stage CLI and waits on the task."""
    config.cfg["common"] = {"redhat": True}
    params = {"stage": True}
    # The three side effects mirror the three subprocess calls made by run():
    # origin URL lookup, HEAD hash lookup, and the buildContainer submission.
    run = mocker.patch.object(
        subprocess,
        "run",
        autospec=True,
        side_effect=[
            subprocess.CompletedProcess(
                "", 0, "ssh://user:password@something.redhat.com/containers/openjdk"
            ),
            subprocess.CompletedProcess(
                "", 0, "c5a0731b558c8a247dd7f85b5f54462cd5b68b23"
            ),
            subprocess.CompletedProcess("", 0, "12345"),
        ],
    )
    builder = create_builder_object(mocker, "osbs", params)
    builder.generator = Map({"image": Map({})})
    mocker.patch.object(builder, "_wait_for_osbs_task")
    builder.git.branch = "some-branch"
    builder.run()
    run.assert_has_calls(
        [
            call(
                ["git", "config", "--get", "remote.origin.url"],
                stderr=-1,
                stdout=-1,
                check=True,
                universal_newlines=True,
            ),
            call(
                ["git", "rev-parse", "HEAD"],
                stderr=-1,
                stdout=-1,
                check=True,
                universal_newlines=True,
            ),
            call(
                [
                    "brew-stage",
                    "call",
                    "--python",
                    "buildContainer",
                    "--kwargs",
                    "{'src': 'git://something.redhat.com/containers/openjdk#c5a0731b558c8a247dd7f85b5f54462cd5b68b23', 'target': 'some-branch-containers-candidate', 'opts': {'scratch': True, 'git_branch': 'some-branch', 'yum_repourls': []}}",
                ],
                stderr=-1,
                stdout=-1,
                check=True,
                universal_newlines=True,
            ),
        ]
    )
    builder._wait_for_osbs_task.assert_called_once_with("12345")
def test_osbs_builder_run_brew(mocker):
    """run() in redhat mode submits the build through the brew CLI and waits on the task."""
    config.cfg["common"] = {"redhat": True}
    # Side effects in call order: origin URL, HEAD hash, task submission.
    run = mocker.patch.object(
        subprocess,
        "run",
        autospec=True,
        side_effect=[
            subprocess.CompletedProcess(
                "", 0, "ssh://user:password@something.redhat.com/containers/openjdk"
            ),
            subprocess.CompletedProcess(
                "", 0, "c5a0731b558c8a247dd7f85b5f54462cd5b68b23"
            ),
            subprocess.CompletedProcess("", 0, "12345"),
        ],
    )
    builder = create_builder_object(mocker, "osbs", {})
    builder.generator = Map({"image": Map({})})
    mocker.patch.object(builder, "_wait_for_osbs_task")
    builder.git.branch = "some-branch"
    builder.run()
    run.assert_has_calls(
        [
            call(
                ["git", "config", "--get", "remote.origin.url"],
                stderr=-1,
                stdout=-1,
                check=True,
                universal_newlines=True,
            ),
            call(
                ["git", "rev-parse", "HEAD"],
                stderr=-1,
                stdout=-1,
                check=True,
                universal_newlines=True,
            ),
            call(
                [
                    "brew",
                    "call",
                    "--python",
                    "buildContainer",
                    "--kwargs",
                    "{'src': 'git://something.redhat.com/containers/openjdk#c5a0731b558c8a247dd7f85b5f54462cd5b68b23', 'target': 'some-branch-containers-candidate', 'opts': {'scratch': True, 'git_branch': 'some-branch', 'yum_repourls': []}}",
                ],
                stderr=-1,
                stdout=-1,
                check=True,
                universal_newlines=True,
            ),
        ]
    )
    builder._wait_for_osbs_task.assert_called_once_with("12345")
def test_osbs_builder_run_koji(mocker):
    """run() outside redhat mode submits the build through the koji CLI."""
    # Side effects in call order: origin URL, HEAD hash, task submission.
    run = mocker.patch.object(
        subprocess,
        "run",
        autospec=True,
        side_effect=[
            subprocess.CompletedProcess(
                "", 0, "ssh://user:password@something.redhat.com/containers/openjdk"
            ),
            subprocess.CompletedProcess(
                "", 0, "c5a0731b558c8a247dd7f85b5f54462cd5b68b23"
            ),
            subprocess.CompletedProcess("", 0, "12345"),
        ],
    )
    builder = create_builder_object(
        mocker, "osbs", {}, {"redhat": False, "target": "something"}
    )
    builder.generator = Map({"image": Map({})})
    mocker.patch.object(builder, "_wait_for_osbs_task")
    builder.git.branch = "some-branch"
    builder.run()
    run.assert_has_calls(
        [
            call(
                ["git", "config", "--get", "remote.origin.url"],
                stderr=-1,
                stdout=-1,
                check=True,
                universal_newlines=True,
            ),
            call(
                ["git", "rev-parse", "HEAD"],
                stderr=-1,
                stdout=-1,
                check=True,
                universal_newlines=True,
            ),
            call(
                [
                    "koji",
                    "call",
                    "--python",
                    "buildContainer",
                    "--kwargs",
                    "{'src': 'git://something.redhat.com/containers/openjdk#c5a0731b558c8a247dd7f85b5f54462cd5b68b23', 'target': 'some-branch-containers-candidate', 'opts': {'scratch': True, 'git_branch': 'some-branch', 'yum_repourls': []}}",
                ],
                stderr=-1,
                stdout=-1,
                check=True,
                universal_newlines=True,
            ),
        ]
    )
    builder._wait_for_osbs_task.assert_called_once_with("12345")
def test_osbs_builder_run_brew_nowait(mocker):
    """With nowait set, run() must not block on the OSBS task."""
    params = {"nowait": True}
    mocker.patch.object(
        subprocess,
        "run",
        autospec=True,
        side_effect=[
            subprocess.CompletedProcess(
                "", 0, "ssh://user:password@something.redhat.com/containers/openjdk"
            ),
            subprocess.CompletedProcess(
                "", 0, "c5a0731b558c8a247dd7f85b5f54462cd5b68b23"
            ),
            subprocess.CompletedProcess("", 0, "12345"),
        ],
    )
    builder = create_builder_object(mocker, "osbs", params)
    builder.generator = Map({"image": Map({})})
    mocker.patch.object(builder, "_wait_for_osbs_task")
    builder.git.branch = "some-branch"
    builder.run()
    builder._wait_for_osbs_task.assert_not_called()
def test_osbs_builder_run_brew_user(mocker):
    """A user parameter is passed to brew as --user on the submission call."""
    config.cfg["common"] = {"redhat": True}
    params = {"user": "Foo"}
    run = mocker.patch.object(
        subprocess,
        "run",
        autospec=True,
        side_effect=[
            subprocess.CompletedProcess(
                "", 0, "ssh://user:password@something.redhat.com/containers/openjdk"
            ),
            subprocess.CompletedProcess(
                "", 0, "c5a0731b558c8a247dd7f85b5f54462cd5b68b23"
            ),
            subprocess.CompletedProcess("", 0, "12345"),
        ],
    )
    builder = create_builder_object(mocker, "osbs", params)
    builder.generator = Map({"image": Map({})})
    mocker.patch.object(builder, "_wait_for_osbs_task")
    builder.git.branch = "some-branch"
    builder.run()
    run.assert_called_with(
        [
            "brew",
            "--user",
            "Foo",
            "call",
            "--python",
            "buildContainer",
            "--kwargs",
            "{'src': 'git://something.redhat.com/containers/openjdk#c5a0731b558c8a247dd7f85b5f54462cd5b68b23', 'target': 'some-branch-containers-candidate', 'opts': {'scratch': True, 'git_branch': 'some-branch', 'yum_repourls': []}}",
        ],
        stderr=-1,
        stdout=-1,
        check=True,
        universal_newlines=True,
    )
def test_osbs_builder_run_brew_target_defined_in_descriptor(mocker):
    """A koji_target from the image descriptor overrides the derived target."""
    config.cfg["common"] = {"redhat": True}
    run = mocker.patch.object(
        subprocess,
        "run",
        autospec=True,
        side_effect=[
            subprocess.CompletedProcess(
                "", 0, "ssh://user:password@something.redhat.com/containers/openjdk"
            ),
            subprocess.CompletedProcess(
                "", 0, "c5a0731b558c8a247dd7f85b5f54462cd5b68b23"
            ),
            subprocess.CompletedProcess("", 0, "12345"),
        ],
    )
    builder = create_builder_object(mocker, "osbs", {})
    builder.generator = Map(
        {"image": Map({"osbs": Map({"koji_target": "some-target"})})}
    )
    mocker.patch.object(builder, "_wait_for_osbs_task")
    builder.git.branch = "some-branch"
    builder.run()
    run.assert_called_with(
        [
            "brew",
            "call",
            "--python",
            "buildContainer",
            "--kwargs",
            "{'src': 'git://something.redhat.com/containers/openjdk#c5a0731b558c8a247dd7f85b5f54462cd5b68b23', 'target': 'some-target', 'opts': {'scratch': True, 'git_branch': 'some-branch', 'yum_repourls': []}}",
        ],
        stderr=-1,
        stdout=-1,
        check=True,
        universal_newlines=True,
    )
def test_osbs_wait_for_osbs_task_finished_successfully(mocker):
    """A task already in state 2 (CLOSED) returns True without sleeping."""
    config.cfg["common"] = {"redhat": True}
    builder = create_builder_object(mocker, "osbs", {})
    sleep = mocker.patch.object(time, "sleep")
    run = mocker.patch.object(
        subprocess,
        "run",
        side_effect=[
            subprocess.CompletedProcess(
                "",
                0,
                """{
                    "state": 2,
                    "create_time": "2019-02-15 13:14:58.278557",
                    "create_ts": 1550236498.27856,
                    "owner": 2485,
                    "host_id": 283,
                    "method": "buildContainer",
                    "completion_ts": 1550237431.0166,
                    "arch": "noarch",
                    "id": 20222655
                }""",
            )
        ],
    )
    assert builder._wait_for_osbs_task("12345") is True
    run.assert_called_with(
        ["brew", "call", "--json-output", "getTaskInfo", "12345"],
        stderr=-1,
        stdout=-1,
        check=True,
        universal_newlines=True,
    )
    sleep.assert_not_called()
def test_osbs_wait_for_osbs_task_in_progress(mocker):
    """An OPEN task (state 1) is polled again after a 20s sleep until CLOSED."""
    config.cfg["common"] = {"redhat": True}
    builder = create_builder_object(mocker, "osbs", {})
    sleep = mocker.patch.object(time, "sleep")
    # First poll returns state 1 (still running), second returns state 2.
    run = mocker.patch.object(
        subprocess,
        "run",
        side_effect=[
            subprocess.CompletedProcess(
                "",
                0,
                """{
                    "state": 1,
                    "create_time": "2019-02-15 13:14:58.278557",
                    "create_ts": 1550236498.27856,
                    "owner": 2485,
                    "host_id": 283,
                    "method": "buildContainer",
                    "completion_ts": 1550237431.0166,
                    "arch": "noarch",
                    "id": 20222655
                }""",
            ),
            subprocess.CompletedProcess(
                "",
                0,
                """{
                    "state": 2,
                    "create_time": "2019-02-15 13:14:58.278557",
                    "create_ts": 1550236498.27856,
                    "owner": 2485,
                    "host_id": 283,
                    "method": "buildContainer",
                    "completion_ts": 1550237431.0166,
                    "arch": "noarch",
                    "id": 20222655
                }""",
            ),
        ],
    )
    assert builder._wait_for_osbs_task("12345") is True
    run.assert_has_calls(
        [
            call(
                ["brew", "call", "--json-output", "getTaskInfo", "12345"],
                stderr=-1,
                stdout=-1,
                check=True,
                universal_newlines=True,
            ),
            call(
                ["brew", "call", "--json-output", "getTaskInfo", "12345"],
                stderr=-1,
                stdout=-1,
                check=True,
                universal_newlines=True,
            ),
        ]
    )
    sleep.assert_called_once_with(20)
def test_osbs_wait_for_osbs_task_failed(mocker):
    """A FAILED task (state 5) raises CekitError pointing at the task logs."""
    config.cfg["common"] = {"redhat": True}
    builder = create_builder_object(mocker, "osbs", {})
    sleep = mocker.patch.object(time, "sleep")
    run = mocker.patch.object(
        subprocess,
        "run",
        side_effect=[
            subprocess.CompletedProcess(
                "",
                0,
                """{
                    "state": 5,
                    "create_time": "2019-02-15 13:14:58.278557",
                    "create_ts": 1550236498.27856,
                    "owner": 2485,
                    "host_id": 283,
                    "method": "buildContainer",
                    "completion_ts": 1550237431.0166,
                    "arch": "noarch",
                    "id": 20222655
                }""",
            )
        ],
    )
    with pytest.raises(
        CekitError,
        match="Task 12345 did not finish successfully, please check the task logs!",
    ):
        builder._wait_for_osbs_task("12345")
    run.assert_called_with(
        ["brew", "call", "--json-output", "getTaskInfo", "12345"],
        stderr=-1,
        stdout=-1,
        check=True,
        universal_newlines=True,
    )
    sleep.assert_not_called()
@pytest.mark.parametrize(
    "artifact,src,target",
    [
        # Destination file name resolution order: target > name > path.
        (
            {"path": "some-path.jar", "md5": "aaabbb"},
            "image/some-path.jar",
            "osbs/repo/some-path.jar",
        ),
        (
            {"name": "some-name", "path": "some-path.jar", "md5": "aaabbb"},
            "image/some-name",
            "osbs/repo/some-name",
        ),
        (
            {"target": "some-target.jar", "path": "some-path.jar", "md5": "aaabbb"},
            "image/some-target.jar",
            "osbs/repo/some-target.jar",
        ),
        (
            {"name": "some-name", "md5": "aaabbb"},
            "image/some-name",
            "osbs/repo/some-name",
        ),
        (
            {"name": "some-name", "target": "some-target.jar", "md5": "aaabbb"},
            "image/some-target.jar",
            "osbs/repo/some-target.jar",
        ),
    ],
)
def test_osbs_copy_artifacts_to_dist_git(mocker, tmpdir, artifact, src, target):
    """Artifacts declared in the descriptor are copied into the dist-git repo."""
    os.makedirs(os.path.join(str(tmpdir), "image"))
    mocker.patch("cekit.builders.osbs.OSBSBuilder._sync_with_dist_git")
    mocker.patch("cekit.tools.DependencyHandler.handle")
    mocker.patch("cekit.descriptor.resource.Resource.copy")
    copy_mock = mocker.patch("cekit.builders.osbs.shutil.copy2")
    dist_git_class = mocker.patch("cekit.builders.osbs.Git")
    dist_git_class.return_value = GitMock()
    config.cfg["common"] = {"redhat": True, "work_dir": str(tmpdir)}
    config.cfg["doc"] = {"addhelp": False}
    image_descriptor = {
        "schema_version": 1,
        "from": "centos:7",
        "name": "test/image",
        "version": "1.0",
        "labels": [{"name": "foo", "value": "bar"}, {"name": "labela", "value": "a"}],
        "osbs": {"repository": {"name": "repo", "branch": "branch"}},
        "artifacts": [artifact],
    }
    builder = create_builder_object(
        mocker,
        "osbs",
        {"assume_yes": False},
        {"descriptor": yaml.dump(image_descriptor), "target": str(tmpdir)},
    )
    # Brew lookups fail so the artifact is treated as a plain local resource.
    mocker.patch(
        "cekit.tools.get_brew_url",
        side_effect=subprocess.CalledProcessError(1, "command"),
    )
    builder.prepare()
    builder.before_generate()
    builder.generate()
    builder.before_build()
    dist_git_class.assert_called_once_with(
        os.path.join(str(tmpdir), "osbs", "repo"),
        str(tmpdir),
        "repo",
        "branch",
        "osbs_extra",
        False,
    )
    copy_mock.assert_has_calls(
        [
            mocker.call(
                os.path.join(str(tmpdir), "image", "Dockerfile"),
                os.path.join(str(tmpdir), "osbs/repo/Dockerfile"),
            )
        ]
    )
def test_docker_builder_defaults():
    """Tags supplied in the parameter map are stored on the builder unchanged."""
    merged = merge_dicts({"target": "something"}, {"tags": ["foo", "bar"]})
    builder = DockerBuilder(Map(merged))
    assert builder.params.tags == ["foo", "bar"]
def test_osbs_dist_git_sync_called(mocker, tmpdir):
    """execute() drives the full OSBS flow: prepare dist-git, copy sources, build."""
    mocker.patch("cekit.tools.DependencyHandler.handle")
    image_descriptor = {
        "schema_version": 1,
        "from": "centos:7",
        "name": "test/image",
        "version": "1.0",
        "labels": [{"name": "foo", "value": "bar"}, {"name": "labela", "value": "a"}],
        "osbs": {"repository": {"name": "repo", "branch": "branch"}},
    }
    builder = create_builder_object(
        mocker,
        "osbs",
        {},
        {"descriptor": yaml.dump(image_descriptor), "target": str(tmpdir)},
    )
    prepare_dist_git = mocker.patch.object(builder, "_prepare_dist_git")
    copy_to_dist_git = mocker.patch.object(builder, "_copy_to_dist_git")
    run = mocker.patch.object(builder, "run")
    builder.execute()
    # Each phase of the flow must be invoked exactly once, with no arguments.
    prepare_dist_git.assert_called_once_with()
    copy_to_dist_git.assert_called_once_with()
    run.assert_called_once_with()
def test_osbs_dist_git_sync_NOT_called_when_dry_run_set(mocker, tmpdir):
    """With dry_run set, no dist-git preparation, copy, sync or build may happen."""
    mocker.patch("cekit.tools.DependencyHandler.handle")
    image_descriptor = {
        "schema_version": 1,
        "from": "centos:7",
        "name": "test/image",
        "version": "1.0",
        "labels": [{"name": "foo", "value": "bar"}, {"name": "labela", "value": "a"}],
        "osbs": {"repository": {"name": "repo", "branch": "branch"}},
    }
    builder = create_builder_object(
        mocker,
        "osbs",
        {"dry_run": True},
        {"descriptor": yaml.dump(image_descriptor), "target": str(tmpdir)},
    )
    prepare_dist_git = mocker.patch.object(builder, "_prepare_dist_git")
    copy_to_dist_git = mocker.patch.object(builder, "_copy_to_dist_git")
    sync_with_dist_git = mocker.patch.object(builder, "_sync_with_dist_git")
    run = mocker.patch.object(builder, "run")
    builder.execute()
    # Dry run: every side-effecting step is skipped.
    prepare_dist_git.assert_not_called()
    copy_to_dist_git.assert_not_called()
    sync_with_dist_git.assert_not_called()
    run.assert_not_called()
def test_docker_build_default_tags(mocker):
    """When no tags are passed, the generator's tags are applied to the squashed image."""
    builder = DockerBuilder(Map({"target": "something"}))
    docker_client_class = mocker.patch("cekit.builders.docker_builder.APIClientClass")
    docker_client = docker_client_class.return_value
    mock_generator = mocker.patch.object(builder, "generator")
    mock_generator.get_tags.return_value = ["image/test:1.0", "image/test:latest"]
    mocker.patch.object(builder, "_build_with_docker")
    mocker.patch.object(builder, "_squash", return_value="112321312imageID")
    builder._build_with_docker.return_value = "1654234sdf56"
    builder.run()
    builder._build_with_docker.assert_called_once_with(docker_client)
    # Each full tag is split into repository + tag when calling docker's tag API,
    # and it is the *squashed* image ID that gets tagged.
    tag_calls = [
        mocker.call("112321312imageID", "image/test", tag="1.0"),
        mocker.call("112321312imageID", "image/test", tag="latest"),
    ]
    docker_client.tag.assert_has_calls(tag_calls)
def test_docker_squashing_enabled(mocker):
    """Squashing is on by default: the built image ID is handed to _squash."""
    builder = DockerBuilder(
        Map(merge_dicts({"target": "something"}, {"tags": ["foo", "bar"]}))
    )
    # None is fine here, default values for params are tested in different place
    assert builder.params.no_squash is None
    assert builder.params.tags == ["foo", "bar"]
    docker_client_class = mocker.patch("cekit.builders.docker_builder.APIClientClass")
    docker_client = docker_client_class.return_value
    mocker.patch.object(builder, "_build_with_docker")
    mocker.patch.object(builder, "_squash")
    builder._build_with_docker.return_value = "1654234sdf56"
    builder.run()
    builder._build_with_docker.assert_called_once_with(docker_client)
    # The freshly built image ID flows into the squash step.
    builder._squash.assert_called_once_with(docker_client, "1654234sdf56")
def test_docker_squashing_disabled(mocker):
    """With no_squash set, the squash step must never run."""
    builder = DockerBuilder(
        Map(
            merge_dicts(
                {"target": "something"}, {"no_squash": True, "tags": ["foo", "bar"]}
            )
        )
    )
    assert builder.params.no_squash is True
    docker_client_class = mocker.patch("cekit.builders.docker_builder.APIClientClass")
    docker_client = docker_client_class.return_value
    mocker.patch.object(builder, "_build_with_docker")
    mocker.patch.object(builder, "_squash")
    builder._build_with_docker.return_value = "1654234sdf56"
    builder.run()
    builder._build_with_docker.assert_called_once_with(docker_client)
    builder._squash.assert_not_called()
def test_docker_squashing_parameters(mocker):
    """The Squash helper is constructed with the client, base layer and built image ID."""
    builder = DockerBuilder(
        Map(merge_dicts({"target": "something"}, {"tags": ["foo", "bar"]}))
    )
    # None is fine here, default values for params are tested in different place
    assert builder.params.no_squash is None
    docker_client_class = mocker.patch("cekit.builders.docker_builder.APIClientClass")
    squash_class = mocker.patch("cekit.builders.docker_builder.Squash")
    squash = squash_class.return_value
    docker_client = docker_client_class.return_value
    mocker.patch.object(builder, "_build_with_docker", return_value="1654234sdf56")
    # Minimal generator stand-in: only image['from'] is read for the squash base layer.
    builder.generator = Map({"image": {"from": "FROM"}})
    builder.run()
    squash_class.assert_called_once_with(
        cleanup=True,
        docker=docker_client,
        from_layer="FROM",
        image="1654234sdf56",
        log=logging.getLogger("cekit"),
    )
    squash.run.assert_called_once_with()
    builder._build_with_docker.assert_called_once_with(docker_client)
def test_buildah_builder_run(mocker):
    """A default buildah build squashes the image and applies every requested tag."""
    run = mocker.patch.object(subprocess, "run")
    builder = create_builder_object(mocker, "buildah", {"tags": ["foo", "bar"]})
    builder.run()
    # Build the expected command incrementally rather than as one literal.
    expected_command = ["buildah", "build-using-dockerfile", "--squash"]
    for tag in ("foo", "bar"):
        expected_command += ["-t", tag]
    expected_command.append("something/image")
    run.assert_called_once_with(
        expected_command,
        stderr=None,
        stdout=None,
        check=True,
        universal_newlines=True,
    )
def test_buildah_builder_run_platform(mocker):
    """The platform parameter is forwarded verbatim as buildah's --platform flag."""
    params = {"tags": ["foo", "bar"], "platform": "linux/amd64,linux/arm64"}
    run = mocker.patch.object(subprocess, "run")
    builder = create_builder_object(mocker, "buildah", params)
    builder.run()
    run.assert_called_once_with(
        [
            "buildah",
            "build-using-dockerfile",
            "--squash",
            "--platform",
            "linux/amd64,linux/arm64",
            "-t",
            "foo",
            "-t",
            "bar",
            "something/image",
        ],
        stderr=None,
        stdout=None,
        check=True,
        universal_newlines=True,
    )
def test_buildah_builder_run_pull(mocker):
    """The pull parameter maps to buildah's --pull-always flag."""
    params = {"tags": ["foo", "bar"], "pull": True}
    run = mocker.patch.object(subprocess, "run")
    builder = create_builder_object(mocker, "buildah", params)
    builder.run()
    run.assert_called_once_with(
        [
            "buildah",
            "build-using-dockerfile",
            "--squash",
            "--pull-always",
            "-t",
            "foo",
            "-t",
            "bar",
            "something/image",
        ],
        stderr=None,
        stdout=None,
        check=True,
        universal_newlines=True,
    )
def test_podman_builder_run(mocker):
    """A default podman build squashes the image and applies every requested tag."""
    run = mocker.patch.object(subprocess, "run")
    builder = create_builder_object(mocker, "podman", {"tags": ["foo", "bar"]})
    builder.run()
    # Assemble the expected command step by step; podman is resolved via PATH.
    expected_command = [shutil.which("podman"), "build", "--squash"]
    for tag in ("foo", "bar"):
        expected_command += ["-t", tag]
    expected_command.append("something/image")
    run.assert_called_once_with(
        expected_command,
        stderr=None,
        stdout=None,
        check=True,
        universal_newlines=True,
    )
def test_podman_builder_run_pull(mocker):
    """The pull parameter maps to podman's --pull-always flag (before --squash)."""
    params = {"tags": ["foo", "bar"], "pull": True}
    run = mocker.patch.object(subprocess, "run")
    builder = create_builder_object(mocker, "podman", params)
    builder.run()
    run.assert_called_once_with(
        [
            shutil.which("podman"),
            "build",
            "--pull-always",
            "--squash",
            "-t",
            "foo",
            "-t",
            "bar",
            "something/image",
        ],
        stderr=None,
        stdout=None,
        check=True,
        universal_newlines=True,
    )
def test_podman_builder_run_platform(mocker):
    """pull + platform parameters both appear in the podman command, in order."""
    params = {
        "tags": ["foo", "bar"],
        "pull": True,
        "platform": "linux/amd64,linux/arm64",
    }
    run = mocker.patch.object(subprocess, "run")
    builder = create_builder_object(mocker, "podman", params)
    builder.run()
    run.assert_called_once_with(
        [
            shutil.which("podman"),
            "build",
            "--pull-always",
            "--squash",
            "--platform",
            "linux/amd64,linux/arm64",
            "-t",
            "foo",
            "-t",
            "bar",
            "something/image",
        ],
        stderr=None,
        stdout=None,
        check=True,
        universal_newlines=True,
    )
def test_podman_builder_run_with_generator(mocker):
    """With no explicit tags, podman tags come from the image: name:version and name:latest."""
    params = Map({"tags": []})
    run = mocker.patch.object(subprocess, "run")
    builder = create_builder_object(mocker, "podman", params)
    builder.generator = DockerGenerator("", "", "", {})
    # Minimal image descriptor; only name/version matter for tag derivation here.
    builder.generator.image = Image(
        yaml.safe_load(
            """
        name: foo
        version: 1.9
        labels:
          - name: test
            value: val1
          - name: label2
            value: val2
        envs:
          - name: env1
            value: env1val
        """
        ),
        "foo",
    )
    builder.run()
    run.assert_called_once_with(
        [
            shutil.which("podman"),
            "build",
            "--squash",
            "-t",
            "foo:1.9",
            "-t",
            "foo:latest",
            "something/image",
        ],
        stderr=None,
        stdout=None,
        check=True,
        universal_newlines=True,
    )
def test_buildah_builder_run_with_generator(mocker):
    """With no explicit tags, buildah tags come from the image: name:version and name:latest."""
    params = Map({"tags": []})
    run = mocker.patch.object(subprocess, "run")
    builder = create_builder_object(mocker, "buildah", params)
    builder.generator = DockerGenerator("", "", "", {})
    # Minimal image descriptor; only name/version matter for tag derivation here.
    builder.generator.image = Image(
        yaml.safe_load(
            """
        name: foo
        version: 1.9
        labels:
          - name: test
            value: val1
          - name: label2
            value: val2
        envs:
          - name: env1
            value: env1val
        """
        ),
        "foo",
    )
    builder.run()
    run.assert_called_once_with(
        [
            "buildah",
            "build-using-dockerfile",
            "--squash",
            "-t",
            "foo:1.9",
            "-t",
            "foo:latest",
            "something/image",
        ],
        stderr=None,
        stdout=None,
        check=True,
        universal_newlines=True,
    )
def test_buildah_builder_with_squashing_disabled(mocker):
    """With no_squash the buildah command contains no --squash flag."""
    params = {"tags": ["foo", "bar"], "no_squash": True}
    run = mocker.patch.object(subprocess, "run")
    builder = create_builder_object(mocker, "buildah", params)
    builder.run()
    run.assert_called_once_with(
        [
            "buildah",
            "build-using-dockerfile",
            "-t",
            "foo",
            "-t",
            "bar",
            "something/image",
        ],
        stderr=None,
        stdout=None,
        check=True,
        universal_newlines=True,
    )
def test_podman_builder_with_squashing_disabled(mocker):
    """With no_squash the podman command contains no --squash flag."""
    run = mocker.patch.object(subprocess, "run")
    builder = create_builder_object(
        mocker, "podman", {"tags": ["foo", "bar"], "no_squash": True}
    )
    builder.run()
    expected_command = [
        shutil.which("podman"), "build", "-t", "foo", "-t", "bar", "something/image"
    ]
    run.assert_called_once_with(
        expected_command,
        stderr=None,
        stdout=None,
        check=True,
        universal_newlines=True,
    )
def test_docker_squashing_disabled_dependencies(mocker, tmpdir, caplog):
    """The docker-squash library is only looked up when squashing is enabled."""
    caplog.set_level(logging.DEBUG, logger="cekit")
    # Log line emitted by the dependency handler when docker-squash is resolved.
    result = (
        "Required CEKit library 'docker-squash' was found as a 'docker_squash' module"
    )
    image_descriptor = {
        "schema_version": 1,
        "from": "centos:7",
        "name": "test/image",
        "version": "1.0",
        "labels": [{"name": "foo", "value": "bar"}, {"name": "labela", "value": "a"}],
    }
    builder = create_builder_object(
        mocker,
        "docker",
        Map({"no_squash": True, "tags": ["foo", "bar"]}),
        Map({"descriptor": yaml.dump(image_descriptor), "target": str(tmpdir)}),
    )
    assert builder.params.no_squash is True
    builder.prepare()
    builder.before_build()
    # Squashing disabled: the library must not be resolved.
    assert result not in caplog.text
    builder = create_builder_object(
        mocker,
        "docker",
        Map({"tags": ["foo", "bar"]}),
        Map({"descriptor": yaml.dump(image_descriptor), "target": str(tmpdir)}),
    )
    assert builder.params.no_squash is None
    builder.prepare()
    builder.before_build()
    # Squashing enabled (default): the library must be resolved.
    assert result in caplog.text
|
14,245 | cbb18c178cd56f7eef3c8f1655e6166ea61b76bf | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Core RAMP-UA model.
Created on Wed Apr 29 19:59:25 2020
@author: nick
"""
import sys
import os
os.environ['R_HOME'] = 'C:/Users/gy17m2a/AppData/Local/Programs/R/R-4.2.0' #path to your R installation
os.environ['R_USER'] = 'C:/ProgramData/Anaconda3/envs/analyse_results/Lib/site-packages/rpy2' #path depends on where you installed Python. Mine is the Anaconda distribution
sys.path.append("microsim") # This is only needed when testing. I'm so confused about the imports
sys.path.append("C:/users/gy17m2a/OneDrive - University of Leeds/Project/RAMP-UA-new/")
print(os.getcwd())
print(sys.path)
import multiprocessing
import pandas as pd
pd.set_option('display.expand_frame_repr', False) # Don't wrap lines when displaying DataFrames
# pd.set_option('display.width', 0) # Automatically find the best width
import os
import click # command-line interface
import pickle # to save data
from yaml import load, SafeLoader # pyyaml library for reading the parameters.yml file
from shutil import copyfile
from microsim.quant_api import QuantRampAPI
from microsim.population_initialisation import PopulationInitialisation
from microsim.microsim_model import Microsim
from microsim.opencl.ramp.run import run_opencl
from microsim.opencl.ramp.snapshot_convertor import SnapshotConvertor
from microsim.opencl.ramp.snapshot import Snapshot
from microsim.opencl.ramp.params import Params, IndividualHazardMultipliers, LocationHazardMultipliers
from microsim.initialisation_cache import InitialisationCache
from microsim.utilities import data_setup, unpack_data
# ********
# PROGRAM ENTRY POINT
# Uses 'click' library so that it can be run from the command line
# ********
@click.command()
@click.option('-p', '--parameters_file', default="./model_parameters/default.yml", type=click.Path(exists=True),
              help="Parameters file to use to configure the model. Default: ./model_parameters/default.yml")
@click.option('-npf', '--no-parameters-file', is_flag=True,
              help="Don't read a parameters file, use command line arguments instead")
@click.option('-init', '--initialise', is_flag=True,
              help="Just initialise the model and create caches and snapshots. Dont' run it.")
@click.option('-i', '--iterations', default=10, help='Number of model iterations. 0 means just run the initialisation')
@click.option('-s', '--scenario', default="default", help="Name this scenario; output results will be put into a "
                                                          "directory with this name.")
@click.option('--data-dir', default="devon_data", help='Root directory to load data from')
@click.option('--output/--no-output', default=True,
              help='Whether to generate output data (default yes).')
@click.option('--output-every-iteration/--no-output-every-iteration', default=False,
              help='Whether to generate output data at every iteration rather than just at the end (default no).')
@click.option('--debug/--no-debug', default=False, help="Whether to run some more expensive checks (default no debug)")
@click.option('-r', '--repetitions', default=1, help="How many times to run the model (default 1)")
@click.option('-l', '--lockdown-file', default="google_mobility_lockdown_daily.csv",
              help="Optionally read lockdown mobility data from a file (default use google mobility). To have no "
                   "lockdown pass an empty string, i.e. --lockdown-file='' ")
@click.option('-c', '--use-cache/--no-use-cache', default=True,
              help="Whether to cache the population data initialisation")
@click.option('-ocl', '--opencl/--no-opencl', default=False, help="Run OpenCL model (runs in headless mode by default")
@click.option('-gui', '--opencl-gui/--no-opencl-gui', default=False,
              help="Run the OpenCL model with GUI visualisation for OpenCL model")
@click.option('-gpu', '--opencl-gpu/--no-opencl-gpu', default=False,
              help="Run OpenCL model on the GPU (if false then run using CPU")
def main(parameters_file, no_parameters_file, initialise, iterations, scenario, data_dir, output, output_every_iteration,
         debug, repetitions, lockdown_file, use_cache, opencl, opencl_gui, opencl_gpu):
    """
    Main function which runs the population initialisation, then chooses which model to run, either the Python/R
    model or the OpenCL model.

    All arguments are supplied by click from the command line; unless
    --no-parameters-file is given they are overridden by the values in the
    parameters YAML file.
    """
    # If we are running with opencl_gui then set opencl to True, so you only need to pass one flag
    if opencl_gui:
        opencl = True
    # First see if we're reading a parameters file or using command-line arguments.
    if no_parameters_file:
        print("Not reading a parameters file")
        # FIX: these must be bound even without a parameters file, otherwise the
        # OpenCL branch below (run_opencl_model(..., calibration_params,
        # disease_params)) raises NameError when --no-parameters-file is used.
        calibration_params = None
        disease_params = None
    else:
        print(f"Reading parameters file: {parameters_file}. "
              f"Any other model-related command-line arguments are being ignored")
        with open(parameters_file, 'r') as f:
            parameters = load(f, Loader=SafeLoader)
            sim_params = parameters["microsim"]  # Parameters for the dynamic microsim (python)
            calibration_params = parameters["microsim_calibration"]
            disease_params = parameters["disease"]  # Parameters for the disease model (r)
            # TODO Implement a more elegant way to set the parameters and pass them to the model. E.g.:
            # self.params, self.params_changed = Model._init_kwargs(params, kwargs)
            # [setattr(self, key, value) for key, value in self.params.items()]
            # Utility parameters
            scenario = sim_params["scenario"]
            iterations = sim_params["iterations"]
            data_dir = sim_params["data-dir"]
            output = sim_params["output"]
            output_every_iteration = sim_params["output-every-iteration"]
            debug = sim_params["debug"]
            repetitions = sim_params["repetitions"]
            lockdown_file = sim_params["lockdown-file"]
    # Check the parameters are sensible
    if iterations < 1:
        raise ValueError("Iterations must be > 1. If you want to just initialise the model and then exit, use"
                         "the --initialise flag")
    if repetitions < 1:
        raise ValueError("Repetitions must be greater than 0")
    if (not output) and output_every_iteration:
        raise ValueError("Can't choose to not output any data (output=False) but also write the data at every "
                         "iteration (output_every_iteration=True)")
    print(f"Running model with the following parameters:\n"
          f"\tParameters file: {parameters_file}\n"
          f"\tScenario directory: {scenario}\n"
          f"\tInitialise (and then exit?): {initialise}\n"
          f"\tNumber of iterations: {iterations}\n"
          f"\tData dir: {data_dir}\n"
          f"\tOutputting results?: {output}\n"
          f"\tOutputting results at every iteration?: {output_every_iteration}\n"
          f"\tDebug mode?: {debug}\n"
          f"\tNumber of repetitions: {repetitions}\n"
          f"\tLockdown file: {lockdown_file}\n",
          f"\tUse cache?: {use_cache}\n",
          f"\tUse OpenCL version?: {opencl}\n",
          f"\tUse OpenCL GUI?: {opencl_gui}\n",
          f"\tUse OpenCL GPU for processing?: {opencl_gpu}\n",
          f"\tCalibration parameters: {'N/A (not reading parameters file)' if no_parameters_file else str(calibration_params)}\n",
          f"\tDisease parameters: {'N/A (not reading parameters file)' if no_parameters_file else str(disease_params)}\n")
    # To fix file path issues, use absolute/full path at all times
    # Pick either: get working directory (if user starts this script in place, or set working directory
    # Option A: copy current working directory:
    base_dir = os.getcwd()  # get current directory
    data_dir = os.path.join(base_dir, data_dir)
    r_script_dir = os.path.join(base_dir, "R", "py_int")
    ### section for fetching data
    if not os.path.isdir(data_dir):
        print(f"No data directory detected.")
        if os.path.isfile(data_dir + ".tar.gz"):
            print(f"An archive file matching the name of the data directory has been detected!")
            print(f"Unpacking this archive file now.")
            unpack_data(data_dir + ".tar.gz")
        else:
            print(f"{data_dir} does not exist. Downloading devon_data.")
            data_setup()
    # Temporarily only want to use Devon MSOAs
    # devon_msoas = pd.read_csv(os.path.join(data_dir, "devon_msoas.csv"), header=None,
    #                           names=["x", "y", "Num", "Code", "Desc"])
    # Prepare the QUANT api (for estimating school and retail destinations)
    # we only need 1 QuantRampAPI object even if we do multiple iterations
    # the quant_object object will be called by each microsim object
    quant_path = os.path.join(data_dir, "QUANT_RAMP")
    if not os.path.isdir(quant_path):
        raise Exception("QUANT directory does not exist, please check input")
    quant_object = QuantRampAPI(quant_path)
    # args for population initialisation
    population_args = {"data_dir": data_dir, "debug": debug,
                       "quant_object": quant_object}
    # args for Python/R Microsim. Use same arguments whether running 1 repetition or many
    msim_args = {"data_dir": data_dir, "r_script_dir": r_script_dir, "scen_dir": scenario, "output": output,
                 "output_every_iteration": output_every_iteration}
    if not no_parameters_file:  # When using a parameters file, include the calibration parameters
        msim_args.update(**calibration_params)  # python calibration parameters are unpacked now
        # Also read the R calibration parameters (this is a separate section in the .yml file)
        if disease_params is not None:
            # (If the 'disease_params' section is included but has no calibration variables then we want to ignore it -
            # it will be turned into an empty dictionary by the Microsim constructor)
            msim_args["disease_params"] = disease_params  # R parameters kept as a dictionary and unpacked later
    # Temporarily use dummy data for testing
    # data_dir = os.path.join(base_dir, "dummy_data")
    # m = Microsim(data_dir=data_dir, testing=True, output=output)
    # cache to hold previously calculate population data
    cache = InitialisationCache(cache_dir=os.path.join(data_dir, "caches"))
    # generate new population dataframes if we aren't using the cache, or if the cache is empty
    if not use_cache or cache.is_empty():
        print(f'Reading population data because {"caching is disabled" if not use_cache else "the cache is empty"}')
        population = PopulationInitialisation(**population_args)
        individuals = population.individuals
        activity_locations = population.activity_locations
        # store in cache so we can load later
        cache.store_in_cache(individuals, activity_locations)
    else:  # load from cache
        print("Loading data from previous cache")
        individuals, activity_locations = cache.read_from_cache()
    # Calculate the time-activity multiplier (this is for implementing lockdown)
    time_activity_multiplier = None
    if lockdown_file != "":
        print(f"Implementing a lockdown with time activities from {lockdown_file}")
        time_activity_multiplier: pd.DataFrame = \
            PopulationInitialisation.read_time_activity_multiplier(os.path.join(data_dir, lockdown_file))
    # Select which model implementation to run
    if opencl:
        run_opencl_model(individuals, activity_locations, time_activity_multiplier, iterations, data_dir, base_dir,
                         opencl_gui, opencl_gpu, use_cache, initialise, calibration_params, disease_params)
    else:
        # If -init flag set the don't run the model. Note for the opencl model this check needs to happen
        # after the snapshots have been created in run_opencl_model
        if initialise:
            print("Have finished initialising model. -init flag is set so not running it. Exitting")
            return
        run_python_model(individuals, activity_locations, time_activity_multiplier, msim_args, iterations,
                         repetitions, parameters_file)
def run_opencl_model(individuals_df, activity_locations, time_activity_multiplier, iterations, data_dir, base_dir,
                     use_gui, use_gpu, use_cache, initialise, calibration_params, disease_params):
    """Create (or load from cache) an OpenCL population snapshot and run the OpenCL model.

    :param individuals_df: population data from PopulationInitialisation
    :param activity_locations: activity-location data from PopulationInitialisation
    :param time_activity_multiplier: optional lockdown multiplier data (may be None)
    :param iterations: number of model iterations to run
    :param data_dir: absolute path to the data directory
    :param base_dir: project base directory (snapshot cache lives under it)
    :param use_gui: run the OpenCL model with its GUI instead of headless
    :param use_gpu: run the OpenCL kernels on the GPU rather than the CPU
    :param use_cache: reuse a previously generated snapshot file if present
    :param initialise: if True, stop after the snapshot has been created
    :param calibration_params: calibration section of the parameters file (or None)
    :param disease_params: disease section of the parameters file (or None)
    """
    snapshot_cache_filepath = base_dir + "/microsim/opencl/snapshots/cache.npz"
    # Choose whether to load snapshot file from cache, or create a snapshot from population data
    if not use_cache or not os.path.exists(snapshot_cache_filepath):
        print("\nGenerating Snapshot for OpenCL model")
        snapshot_converter = SnapshotConvertor(individuals_df, activity_locations, time_activity_multiplier, data_dir)
        snapshot = snapshot_converter.generate_snapshot()
        snapshot.save(snapshot_cache_filepath)  # store snapshot in cache so we can load later
    else:  # load cached snapshot
        snapshot = Snapshot.load_full_snapshot(path=snapshot_cache_filepath)
    # set the random seed of the model
    snapshot.seed_prngs(42)
    # set params
    if calibration_params is not None and disease_params is not None:
        snapshot.update_params(create_params(calibration_params, disease_params))
        if disease_params["improve_health"]:
            print("Switching to healthier population")
            snapshot.switch_to_healthier_population()
    if initialise:
        print("Have finished initialising model. -init flag is set so not running it. Exiting")
        return
    run_mode = "GUI" if use_gui else "headless"
    print(f"\nRunning OpenCL model in {run_mode} mode")
    # NOTE(review): disease_params["seed_days"] is read unconditionally here, so
    # this call fails if disease_params is None or lacks "seed_days" — confirm
    # that all callers supply a complete disease_params dictionary.
    run_opencl(snapshot, iterations, data_dir, use_gui, use_gpu, num_seed_days=disease_params["seed_days"], quiet=False)
def run_python_model(individuals_df, activity_locations_df, time_activity_multiplier, msim_args, iterations,
                     repetitions, parameters_file):
    """Run the Python/R implementation of the model.

    Builds a single Microsim from the pre-initialised population data, copies the
    parameters file alongside the results for provenance, then either runs the
    model once or fans copies of it out over a multiprocessing pool.

    :param individuals_df: population data from PopulationInitialisation
    :param activity_locations_df: activity-location data from PopulationInitialisation
    :param time_activity_multiplier: optional lockdown multiplier data (may be None)
    :param msim_args: dict of keyword arguments for the Microsim constructor
    :param iterations: number of iterations per repetition
    :param repetitions: how many repetitions of the model to run
    :param parameters_file: path of the parameters file (archived in the scenario dir)
    """
    print("\nRunning Python / R model")
    # Create a single microsim object; repetitions copy it rather than re-reading the data.
    m = Microsim(individuals_df, activity_locations_df, time_activity_multiplier, **msim_args)
    copyfile(parameters_file, os.path.join(m.SCEN_DIR, "parameters.yml"))
    # Run the Python / R model
    if repetitions == 1:
        m.run(iterations, 0)
    elif repetitions > 1:  # Run it multiple times on lots of cores
        # FIX: the old `try: ... finally: pool.close()` could raise NameError if
        # Pool() itself failed before `pool` was bound, and is redundant anyway:
        # the pool's context manager terminates the workers on exit.
        with multiprocessing.Pool(processes=int(os.cpu_count())) as pool:
            # Copy the model instance so we don't have to re-read the data each time
            # (Use a generator so we don't need to store all the models in memory at once).
            models = (Microsim._make_a_copy(m) for _ in range(repetitions))
            # Snapshot the base model to disk; close the file deterministically.
            with open(os.path.join("Models_m.pickle"), "wb") as pickle_out:
                pickle.dump(m, pickle_out)
            # One iteration count and one repetition number per model (all lazy).
            iters = (iterations for _ in range(repetitions))
            repnr = range(repetitions)
            # Run the models by passing each model and the number of iterations
            pool.starmap(_run_multicore, zip(models, iters, repnr))
def _run_multicore(m, iter, rep):
return m.run(iter, rep)
def create_params(calibration_params, disease_params):
    """Translate the calibration/disease parameter dictionaries into a Params object.

    NB: the OpenCL model incorporates current_risk_beta by pre-multiplying the
    location hazard multipliers with it.
    """
    beta = disease_params["current_risk_beta"]
    location = calibration_params["hazard_location_multipliers"]
    individual = calibration_params["hazard_individual_multipliers"]

    location_hazard_multipliers = LocationHazardMultipliers(
        retail=location["Retail"] * beta,
        primary_school=location["PrimarySchool"] * beta,
        secondary_school=location["SecondarySchool"] * beta,
        home=location["Home"] * beta,
        work=location["Work"] * beta,
    )
    individual_hazard_multipliers = IndividualHazardMultipliers(
        presymptomatic=individual["presymptomatic"],
        asymptomatic=individual["asymptomatic"],
        symptomatic=individual["symptomatic"],
    )
    # Ordered list: overweight, then the three obesity classes.
    obesity_multipliers = [
        disease_params[key]
        for key in ("overweight", "obesity_30", "obesity_35", "obesity_40")
    ]

    return Params(
        location_hazard_multipliers=location_hazard_multipliers,
        individual_hazard_multipliers=individual_hazard_multipliers,
        obesity_multipliers=obesity_multipliers,
        cvd_multiplier=disease_params["cvd"],
        diabetes_multiplier=disease_params["diabetes"],
        bloodpressure_multiplier=disease_params["bloodpressure"],
    )
if __name__ == "__main__":
main()
print("End of program")
|
# All combinations found so far; count_change appends each complete way here.
ways = []
# Default target amount to make change for.
amount = 4


def count_change(change, way, target=None):
    """Recursively enumerate the ways to make change for ``target``.

    Each complete combination is printed and appended to the global ``ways``
    list. Recursing on ``change[i:]`` ensures coins are used in non-increasing
    position order, so each multiset of coins is produced exactly once.

    :param change: list of available coin denominations
    :param way: the combination built up so far (pass [] at the top level)
    :param target: amount to reach; defaults to the module-level ``amount``
                   (kept for backward compatibility with two-argument calls)
    """
    if target is None:
        target = amount
    for i, coin in enumerate(change):
        candidate = way + [coin]
        total = sum(candidate)
        if total < target:
            # Still short of the target: keep adding coins from position i onward.
            count_change(change[i:], candidate, target)
        elif total == target:
            print(candidate)
            ways.append(candidate)
        # total > target: dead end, drop this candidate.


if __name__ == "__main__":
    count_change([2, 1], [])
    print(amount, "can be changed in ", str(len(ways)) + " ways")
|
14,247 | b8947e3517934910d47a1868c217ec08e72991b7 | # Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import os
import pytest
import yaml
from clx.workflow.workflow import Workflow
from mockito import spy, verify, when
from cudf import DataFrame
class TestWorkflowImpl(Workflow):
    """Minimal Workflow subclass for the tests: tags every row as 'enriched'."""

    def __init__(self, name, source=None, destination=None, custom_workflow_param=None):
        # Stash the extra parameter before the base class initialises itself.
        self.custom_workflow_param = custom_workflow_param
        super().__init__(name, source, destination)

    def workflow(self, dataframe):
        dataframe["enriched"] = "enriched"
        return dataframe
# Directory containing this test module; all fixture paths are relative to it.
dirname = os.path.split(os.path.abspath(__file__))[0]
# Input CSV fixtures.
input_path = dirname + "/input/person.csv"
input_path_empty = dirname + "/input/empty.csv"
# Per-test output files (separate paths so tests don't clobber each other).
output_path_param_test = dirname + "/output/output_parameters.csv"
output_path_benchmark_test = dirname + "/output/output_benchmark.csv"
output_path_config_test = dirname + "/output/output_config.csv"
output_path_empty = dirname + "/output/empty.csv"
@pytest.fixture
def set_workflow_config():
    """Return the (workflow_config, source, destination) dictionaries used by the tests."""
    source = dict(
        type="fs",
        input_format="csv",
        input_path="/path/to/input",
        names=["firstname", "lastname", "gender"],
        delimiter=",",
        usecols=["firstname", "lastname", "gender"],
        dtype=["str", "str", "str"],
        header=0,
    )
    destination = dict(
        type="fs",
        output_format="csv",
        output_path="/path/to/output",
        index=False,
    )
    workflow_config = dict(
        source=source,
        destination=destination,
        custom_workflow_param="param_value",
    )
    return workflow_config, source, destination
@pytest.fixture
def mock_env_home(monkeypatch):
    """Point $HOME at this test file's directory so workflow configs resolve locally."""
    test_dir = os.path.dirname(os.path.abspath(__file__))
    monkeypatch.setenv("HOME", test_dir)
@pytest.mark.parametrize("input_path", [input_path])
@pytest.mark.parametrize("output_path", [output_path_param_test])
def test_workflow_parameters(
    mock_env_home, set_workflow_config, input_path, output_path
):
    """Tests the initialization and running of a workflow with passed in parameters"""
    # Create source and destination configurations
    source = set_workflow_config[1]
    destination = set_workflow_config[2]
    source["input_path"] = input_path
    destination["output_path"] = output_path
    # Create new workflow with source and destination configurations
    test_workflow = TestWorkflowImpl(
        source=source,
        destination=destination,
        name="test-workflow",
        custom_workflow_param="test_param",
    )
    # Run workflow and check output data
    if os.path.exists(output_path):
        os.remove(output_path)
    test_workflow.run_workflow()
    with open(output_path) as f:
        reader = csv.reader(f)
        data = []
        for row in reader:
            data.append(row)
    # Header gains the 'enriched' column and every row is tagged by workflow().
    assert data[0] == ["firstname", "lastname", "gender", "enriched"]
    assert data[1] == ["Emma", "Olivia", "F", "enriched"]
    assert data[2] == ["Ava", "Isabella", "F", "enriched"]
    assert data[3] == ["Sophia", "Charlotte", "F", "enriched"]
    # The keyword argument passed to the constructor must be preserved.
    assert test_workflow.custom_workflow_param == "test_param"
@pytest.mark.parametrize("input_path", [input_path])
@pytest.mark.parametrize("output_path", [output_path_config_test])
def test_workflow_config(mock_env_home, set_workflow_config, input_path, output_path):
    """Tests the initialization and running of a workflow with a configuration yaml file"""
    # Write workflow.yaml file
    workflow_name = "test-workflow-config"
    workflow_config = set_workflow_config[0]
    workflow_config["destination"]["output_path"] = output_path
    workflow_config["destination"]["index"] = False
    workflow_config["source"]["input_path"] = input_path
    workflow_config["custom_workflow_param"] = "param_value"
    write_config_file(workflow_config, workflow_name)
    if os.path.exists(output_path):
        os.remove(output_path)
    # Run workflow; only the name is passed, everything else comes from the config file.
    test_workflow = TestWorkflowImpl(workflow_name)
    test_workflow.run_workflow()
    with open(output_path) as f:
        reader = csv.reader(f)
        data = []
        for row in reader:
            data.append(row)
    assert data[0] == ["firstname", "lastname", "gender", "enriched"]
    assert data[1] == ["Emma", "Olivia", "F", "enriched"]
    assert data[2] == ["Ava", "Isabella", "F", "enriched"]
    assert data[3] == ["Sophia", "Charlotte", "F", "enriched"]
    # Check that custom workflow parameter was set from config file
    assert test_workflow.custom_workflow_param == "param_value"
def test_workflow_config_error(mock_env_home, set_workflow_config):
    """Tests the error handling on incomplete workflow.yaml configuration file"""
    workflow_name = "test-workflow-error"
    # A config containing only a source, then one containing only a
    # destination — both must be rejected when the workflow is constructed.
    for key, fixture_idx in (("source", 1), ("destination", 2)):
        write_config_file({key: set_workflow_config[fixture_idx]}, workflow_name)
        with pytest.raises(Exception):
            TestWorkflowImpl(workflow_name)
@pytest.mark.parametrize("input_path", [input_path_empty])
@pytest.mark.parametrize("output_path", [output_path_empty])
def test_workflow_no_data(mock_env_home, set_workflow_config, input_path, output_path):
    """ Test confirms that workflow is not run and output not written if no data is returned from the workflow io_reader
    """
    # Wire the fixture-provided source/destination to the empty input file.
    source = set_workflow_config[1]
    destination = set_workflow_config[2]
    source["input_path"] = input_path
    destination["output_path"] = output_path
    workflow = spy(
        TestWorkflowImpl(
            source=source,
            destination=destination,
            name="test-workflow-no-data",
            custom_workflow_param="test_param",
        )
    )
    workflow.run_workflow()
    # The workflow body must never execute when the reader yields no data...
    verify(workflow, times=0).workflow(...)
    # ...and consequently no output file may be produced.
    assert not os.path.exists(output_path)
@pytest.mark.parametrize("input_path", [input_path])
@pytest.mark.parametrize("output_path", [output_path_empty])
def test_workflow_no_enriched_data(mock_env_home, set_workflow_config, input_path, output_path):
    """ Test confirms that if workflow produces no enriched data that no output file is created
    """
    # Create source and destination configurations
    source = set_workflow_config[1]
    destination = set_workflow_config[2]
    source["input_path"] = input_path
    destination["output_path"] = output_path
    # Create new workflow with source and destination configurations
    test_workflow = spy(TestWorkflowImpl(
        source=source, destination=destination, name="test-workflow-no-data", custom_workflow_param="test_param"
    ))
    io_writer = spy(test_workflow._io_writer)
    # Return empty dataframe when workflow runs
    when(test_workflow).workflow(...).thenReturn(DataFrame())
    # BUG FIX: the workflow was never executed, so the verifications below
    # passed vacuously. Run it so the empty-result path is actually exercised.
    test_workflow.run_workflow()
    # Verify io_writer does not write data
    verify(io_writer, times=0).write_data(...)
    # Verify that no output file created.
    assert not os.path.exists(output_path)
@pytest.mark.parametrize("input_path", [input_path])
@pytest.mark.parametrize("output_path", [output_path_benchmark_test])
def test_benchmark_decorator(
    mock_env_home, set_workflow_config, input_path, output_path
):
    """Checks that calling a Workflow.benchmark-wrapped function does not
    invoke run_workflow on the spied workflow instance."""
    # Dummy function
    def func(self):
        return DataFrame()
    benchmarked_func = Workflow.benchmark(func)
    source = set_workflow_config[1]
    destination = set_workflow_config[2]
    source["input_path"] = input_path
    destination["output_path"] = output_path
    # Create new workflow with source and destination configurations
    tb = spy(
        TestWorkflowImpl(source=source, destination=destination, name="test-workflow")
    )
    # NOTE(review): the bound method tb.run_workflow is passed as func's
    # `self` argument here — presumably only the wrapper's pass-through
    # behavior is under test; confirm against Workflow.benchmark.
    benchmarked_func(tb.run_workflow)
    # Verify that run_workflow was not called, instead expect that benchmark wrapper function will be called
    verify(tb, times=0).run_workflow(...)
def write_config_file(workflow_config, workflow_name):
    """Helper function to write workflow.yaml configuration file.

    Serializes *workflow_config* to <dirname>/.config/clx/<workflow_name>/workflow.yaml,
    creating the directory if needed.
    """
    workflow_dir = "{0}/.config/clx/{1}".format(dirname, workflow_name)
    # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
    os.makedirs(workflow_dir, exist_ok=True)
    with open(workflow_dir + "/workflow.yaml", "w") as f:
        yaml.dump(workflow_config, f)
|
14,248 | 62f329b40d6017552c3655a9ac49709b9c533904 | import requests
import time
import json
from django.conf import settings
import errno
from socket import error as socket_error
import logging
log = logging.getLogger(__name__)
def change_eva(teacher, form, eva):
    """Authenticate against the API and update the user's `eva` value.

    Returns 1 when token authentication fails; otherwise issues the PUT and
    falls through (implicitly returns None) — the PUT response is not
    inspected here. `teacher` is unused in this chunk.

    NOTE(review): verify=False disables TLS certificate validation on both
    requests; acceptable only against a self-signed dev server.
    """
    # ------------------ Token acquisition ------------------
    username = form.cleaned_data['username']
    password = form.cleaned_data['password']
    result = requests.post(
        "https://{0}:{1}/api-token-auth/".format(settings.SERVER_IP, settings.SERVER_PORT),
        data={"username": username, "password": password},
        headers={'Accept': 'application/json'}, verify=False)
    if result.status_code != 200:
        return 1
    result_json = json.loads(result.content)
    token = result_json.get('token')
    # Token-authenticated update of the user's eva field.
    result_test = requests.put(
        "https://{0}:{1}/api/user/".format(settings.SERVER_IP, settings.SERVER_PORT),
        data={"eva": eva},
        headers={'Accept': 'application/json',
                 'Authorization': 'Token {}'.format(token)}, verify=False)
14,249 | 6b47dfb436733105f06561eb03809526de760de3 | class Solution:
def setZeroes(self, matrix: List[List[int]]) -> None:
"""
Do not return anything, modify matrix in-place instead.
"""
m, n = len(matrix), len(matrix[0])
firstRow, firstCol = False, False
for i in range(m):
for j in range(n):
if i == 0 and matrix[i][j] == 0:
firstRow = True
if j == 0 and matrix[i][j] == 0:
firstCol = True
if i != 0 and j != 0 and matrix[i][j] == 0:
matrix[i][0] = 0
matrix[0][j] = 0
for i in range(1, m):
for j in range(1, n):
if matrix[i][0] == 0 or matrix[0][j] == 0:
matrix[i][j] = 0
if firstRow:
for i in range(n):
matrix[0][i] = 0
if firstCol:
for i in range(m):
matrix[i][0] = 0
|
14,250 | b89137c61c9b500ab5407e81d9d45b00df39ffc4 | my_list = [2323,4344,2325,324413,21234,24531,2123,42234,544,456,345,42,5445,23,5656,423]
#Your code here:
# Arithmetic mean of my_list: built-in sum() replaces the manual
# accumulate-then-divide loop (same result, clearer intent).
aux = sum(my_list) / len(my_list)
print(aux)
14,251 | c2d18c772301e2ede1ff7d1af35fdcc7ec121dd9 | #!/usr/bin/env python
# coding: utf-8
# # Data Visualization: Trump's Winning Rate
# This report provides detailed facts about Trump's Winning Rate and its correlation between economics indicators, both at national level and at state level
# In[5]:
import pandas as pd
import datetime
import time
import matplotlib.pyplot as plt
import matplotlib.dates as dt
import matplotlib.ticker as ticker
import matplotlib.dates as mdates
from matplotlib.dates import DateFormatter
from matplotlib import cm
import numpy as np
import math
# ## 1. Trump vs. Biden Winning Rate by Time
# In[6]:
pnt = pd.read_csv('presidential_national_toplines_2020.csv')
# modeldate is MM/DD/YYYY: split('/') yields [month, day, year].
pnt['date'] = pnt['modeldate'].apply(lambda x: datetime.datetime(int(x.split('/')[2]),int(x.split('/')[0]),int(x.split('/')[1])))
date = pnt['date']
# ecwin_inc / ecwin_chal: presumably the incumbent's (Trump) and
# challenger's (Biden) win probabilities — confirm against the codebook.
y1 = pnt['ecwin_inc']
y2 = pnt['ecwin_chal']
pnt_win=pd.DataFrame({'date': date, 'Trump': y1, 'Biden': y2})
# In[7]:
import seaborn as sns
fig, ax = plt.subplots(figsize=(14, 4))
df = pnt_win
y = [df['Trump'].tolist(),df['Biden']-df['Trump']]
pal = sns.color_palette("Set1")
ax.stackplot(df['date'], y ,labels=['Trump','Biden'], colors=pal, alpha=0.4 )
ax.legend(loc='upper left')
date_form = DateFormatter("%m-%d")
ax.xaxis.set_major_formatter(date_form)
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=1))
plt.show()
# In[8]:
fig, ax = plt.subplots(figsize=(14, 4))
df = pnt_win
# Add x-axis and y-axis
ax.plot(df['date'],df['Trump'], color='red', label = 'Trump Winning Rate')
ax.plot(df['date'],df['Biden'],color='darkblue', label = 'Biden Winning Rate', linestyle='dashed')
# Set title and labels for axes
ax.set(xlabel="date",
ylabel="Winning rate of the election 2020",
title="Trump vs. Biden")
# Define the date format
date_form = DateFormatter("%m-%d")
ax.xaxis.set_major_formatter(date_form)
# Ensure a major tick for each week using (interval=1)
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=1))
plt.legend()
plt.show()
# * Trump's winning rate has a declining trend and the winning rate gap between Biden and Trump is enlarging since the beginning of September
# ## 2. Trump's Winning Rate Changes over Time
# In[9]:
# Day-over-day change in each candidate's national winning rate.
pnt_diff = pnt_win.set_index('date').sort_values(by='date').diff().dropna().reset_index()
# In[10]:
fig, ax = plt.subplots(figsize=(14, 4))
df = pnt_diff
# Add x-axis and y-axis
ax.scatter(df['date'],df['Trump'],c= df['Trump'].apply(lambda x: math.floor(-10000*x)))
ax.plot(df['date'], df['Trump'],color='grey')
# BUG FIX: the zero baseline was hard-coded as [0]*132 and would crash if the
# number of model dates ever changed; size it from the data instead.
ax.plot(df['date'],[0]*len(df),color='lightblue')
# Set title and labels for axes
ax.set(xlabel="date", ylabel="Daily changes in winning rate",title="Variation in winning rate: Trump")
# Define the date format
date_form = DateFormatter("%m-%d")
ax.xaxis.set_major_formatter(date_form)
# Ensure a major tick for each week using (interval=1)
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=1))
plt.show()
# * Trump's winning rate fluctuate a lot from June to August: There are large jumps and drops.
# * However, since the beginning of September, the fluctuation is smaller and the winning rate drops for most of the times.
# ## 3. Trump's Winning Rate at State Level
# In[11]:
us_state_abbrev = {'Alabama': 'AL', 'Alaska': 'AK','American Samoa': 'AS','Arizona': 'AZ', 'Arkansas': 'AR',
'California': 'CA','Colorado': 'CO','Connecticut': 'CT', 'Delaware': 'DE','District of Columbia': 'DC','Florida': 'FL',
'Georgia': 'GA','Guam': 'GU','Hawaii': 'HI','Idaho': 'ID','Illinois': 'IL','Indiana': 'IN','Iowa': 'IA','Kansas': 'KS','Kentucky': 'KY','Louisiana': 'LA',
'Maine': 'ME','Maryland': 'MD','Massachusetts': 'MA','Michigan': 'MI','Minnesota': 'MN','Mississippi': 'MS','Missouri': 'MO',
'Montana': 'MT','Nebraska': 'NE','Nevada': 'NV','New Hampshire': 'NH','New Jersey': 'NJ','New Mexico': 'NM','New York': 'NY',
'North Carolina': 'NC','North Dakota': 'ND','Northern Mariana Islands':'MP','Ohio': 'OH','Oklahoma': 'OK','Oregon': 'OR','Pennsylvania': 'PA',
'Puerto Rico': 'PR','Rhode Island': 'RI','South Carolina': 'SC','South Dakota': 'SD', 'Tennessee': 'TN','Texas': 'TX','Utah': 'UT','Vermont': 'VT','Virgin Islands': 'VI','Virginia': 'VA',
'Washington': 'WA','West Virginia': 'WV','Wisconsin': 'WI', 'Wyoming': 'WY'
}
# In[12]:
pst = pd.read_csv('presidential_state_toplines_2020.csv')
# Same MM/DD/YYYY parsing as the national file.
pst['date'] = pst['modeldate'].apply(lambda x:datetime.datetime(int(x.split('/')[2]),int(x.split('/')[0]),int(x.split('/')[1])))
# State names containing '-' (presumably congressional-district rows such as
# "ME-1" — confirm) are tagged with the sentinel 'no' and dropped below.
pst['code'] = pst['state'].apply(lambda x: 'no' if '-' in x else us_state_abbrev[x])
pst = pst[pst['code']!='no']
# ### 3.1 Winning Rate by State-Month
# In[14]:
import plotly.graph_objects as go
pst_cat = pst[['date','winstate_inc','code']]
# Sample every 30th distinct model date — roughly one map per month.
datevar = pst_cat['date'].drop_duplicates()[0:-1:30]
for date in datevar:
    pst_sub = pst_cat[pst_cat['date'] == date]
    fig = go.Figure(data=go.Choropleth(
        locations=pst_sub['code'], # Spatial coordinates
        z = pst_sub['winstate_inc'].astype(float), # Data to be color-coded
        locationmode = 'USA-states', # set of locations match entries in `locations`
        colorscale = 'Reds',
        colorbar_title = "Winning Rate",))
    fig.update_layout(
        title_text = 'Trump winning rate by state:'+ str(date.date()),
        geo_scope='usa', # limite map scope to USA
    )
    fig.show()
# * From the above graphs, we can see the Trump's winning rates by state and month
# * The winning rates are particularly low in Northeast and West coast while higher in the middle states
# ### 3.2 States with the Largest Uncertainties
# In[15]:
pst_cat = pst[['date','winstate_inc','code']]
datevar = pst_cat['date'].drop_duplicates()[0:-1:30]
for date in datevar:
    pst_sub = pst_cat[pst_cat['date'] == date]
    fig = go.Figure(data=go.Choropleth(
        locations=pst_sub['code'], # Spatial coordinates
        # -|p - 0.5| peaks at 0 when the race is a coin flip, so larger
        # (less negative) values mean more uncertainty.
        z = -abs(pst_sub['winstate_inc']-0.5), # Data to be color-coded
        locationmode = 'USA-states', # set of locations match entries in `locations`
        colorscale = 'Reds',
        colorbar_title = "Uncertainty Level",))
    fig.update_layout(
        title_text = 'Uncertainties by state:'+ str(date.date()),
        geo_scope='usa', # limite map scope to USA
    )
    fig.show()
# * This graphs show the uncertainty levels of Trump winning of each states by month
# * OH, GA, and IA are the three states that Trump's winning rate is close to 50%
# ### 3.3 Correlation between Trump's State level Winning Rate with National level Winning Rate
# In[16]:
pst_cat = pst_cat.set_index(['code','date'])
pst_cat = pst_cat.sort_values(by = ['code','date'])
# NOTE(review): diff() runs straight down the sorted frame, so the first day
# of each state diffs against the previous state's last day; a
# groupby('code').diff() may have been intended — confirm.
pst_diff = pst_cat.diff().dropna()
pst_diff = pst_diff.reset_index()
# In[17]:
pst_pnt_diff_merge = pst_diff.merge(pnt_diff, on='date')
pst_pnt_merge = pst_cat.reset_index().merge(pnt_win, on='date')
# In[18]:
# NOTE(review): groupby(...)['a','b'] (tuple selection) is deprecated in
# newer pandas; prefer a list: groupby('code')[['winstate_inc', 'Trump']].
pst_pnt_corr = pst_pnt_merge.groupby('code')['winstate_inc','Trump'].corr().reset_index()
# In[19]:
# Keep only the cross rows: corr(state winning rate, national rate).
pst_pnt_corr = pst_pnt_corr[pst_pnt_corr['level_1']=='Trump'][['code','winstate_inc']]
# In[20]:
# Here locations and z both come from pst_pnt_corr, so rows stay aligned.
fig = go.Figure(data=go.Choropleth(
    locations=pst_pnt_corr['code'], # Spatial coordinates
    z = pst_pnt_corr['winstate_inc'].astype(float), # Data to be color-coded
    locationmode = 'USA-states', # set of locations match entries in `locations`
    colorscale = 'blues',
    colorbar_title = "Correlation with State Winning Rate",))
fig.update_layout(
    title_text = "Correlation between Trump's State level Winning Rate with National level Winning Rate",
    geo_scope='usa', # limite map scope to USA
)
fig.show()
# * The correlation is the highest in PA, UT, CO: If Trump wins these three states, he is likely to win the national election.
# * The correlation is the lowest in KY: Trump is more likely to win the national election if he lost in KY.
# ## 4. Comovement between Trump's Winning Rate and Economic Indicators
# In[21]:
ei = pd.read_csv('economic_index.csv')
ei['date'] = ei['modeldate'].apply(lambda x: datetime.datetime(int(x.split('/')[2]),int(x.split('/')[0]),int(x.split('/')[1])) )
# In[22]:
idx1 = ei[ei['indicator']=='S&P 500'].set_index('date').rename(columns = {'current_zscore':'S&P 500'})['S&P 500']
idx2 = ei[ei['indicator']=='Personal consumption expenditures'].set_index('date').rename(columns = {'current_zscore':'Personal consumption expenditures'})['Personal consumption expenditures']
idx3 = ei[ei['indicator']=='Industrial production'].set_index('date').rename(columns = {'current_zscore':'Industrial production'})['Industrial production']
idx4 = ei[ei['indicator']=='Nonfarm payrolls'].set_index('date').rename(columns = {'current_zscore':'Nonfarm payrolls'})['Nonfarm payrolls']
idx5 = ei[ei['indicator']=='Consumer price index'].set_index('date').rename(columns = {'current_zscore':'Consumer price index'})['Consumer price index']
idx6 = ei[ei['indicator']=='Real disposable personal income'].set_index('date').rename(columns = {'current_zscore':'Real disposable personal income'})['Real disposable personal income']
idx = ei[ei['indicator']=='Average of all six indicators'].set_index('date').rename(columns = {'current_zscore':'Average of all six indicators'})['Average of all six indicators']
idx_merge = pd.merge(idx1,idx2,on='date').merge(idx3, on='date').merge(idx4, on='date').merge(idx5, on='date').merge(idx6, on='date').merge(idx, on='date')
idx_merge = idx_merge.reset_index()
# ### 4.1 National Level Comovement
# In[23]:
fig, ax = plt.subplots(figsize=(14, 4))
df = pnt_win
# Left axis (ax): Trump's national winning rate.
ax.scatter(df['date'],
           df['Trump'],
           c= -df['Trump'])
ax.plot(df['date'],
        df['Trump'],
        color='blue')
ax2=ax.twinx()
df = idx_merge
# Right axis (ax2): S&P 500 z-score.
ax2.plot(df['date'],
         df['S&P 500'],
         color='green')
ax2.scatter(df['date'],
            df['S&P 500'],
            c= -df['S&P 500'])
# BUG FIX: the y-axis labels were swapped — ax carries the winning rate
# (blue) and ax2 carries the S&P 500 series (green).
ax.set_ylabel("Trump winning rate", color="blue")
ax2.set_ylabel("S&P 500", color="green")
ax.set(title = "Trump winning rate vs. S&P 500")
# Define the date format
date_form = DateFormatter("%m-%d")
ax.xaxis.set_major_formatter(date_form)
# Ensure a major tick for each week using (interval=1)
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=1))
plt.show()
# * There are some comovements between Trump's winning rate and S&P 500 before the end of September
# * The correlation between Trump's winning rate and S&P 500 becomes negative since around 9/29/2020
# In[24]:
fig, ax = plt.subplots(figsize=(14, 4))
df = pnt_win
# Left axis (ax): Trump's national winning rate.
ax.scatter(df['date'],
           df['Trump'],
           c= -df['Trump'])
ax.plot(df['date'],
        df['Trump'],
        color='blue')
ax2=ax.twinx()
df = idx_merge
# Right axis (ax2): average of the six economic indicators.
ax2.plot(df['date'],
         df['Average of all six indicators'],
         color='green')
ax2.scatter(df['date'],
            df['Average of all six indicators'],
            c= -df['Average of all six indicators'])
# BUG FIX: the y-axis labels were swapped — ax carries the winning rate
# (blue) and ax2 carries the indicator average (green).
ax.set_ylabel("Trump winning rate", color="blue")
ax2.set_ylabel("Average of all six indicators", color="green")
ax.set(title = "Trump winning rate vs. Average of all six indicators")
# Define the date format
date_form = DateFormatter("%m-%d")
ax.xaxis.set_major_formatter(date_form)
# Ensure a major tick for each week using (interval=1)
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=1))
plt.show()
# * There are some comovements between Trump's winning rate and average of indicators before the mid August
#
# ### 4.2 Comovement between Trump's State Level Winning Rate and Economic Indicators
# In[25]:
pst_merge = pst_cat.reset_index().merge(pnt_win, on = 'date').merge(idx1, on ='date')
# Per-state correlation between the state winning rate and the S&P 500 z-score.
pst_corr = pst_merge.groupby('code')['winstate_inc','S&P 500'].corr().reset_index()
# Keep only the cross-correlation rows.
pst_corr = pst_corr[pst_corr['level_1']=='S&P 500'][['code','winstate_inc']]
# In[26]:
fig = go.Figure(data=go.Choropleth(
    # BUG FIX: `locations` previously reused pst_sub['code'] — a leftover
    # from the monthly-map loop; the codes must come from the same frame as
    # `z` (pst_corr) so state codes and values stay row-aligned.
    locations=pst_corr['code'], # Spatial coordinates
    z = pst_corr['winstate_inc'].astype(float), # Data to be color-coded
    locationmode = 'USA-states', # set of locations match entries in `locations`
    colorscale = 'blues',
    colorbar_title = "Correlation with S&P 500",))
fig.update_layout(
    title_text = 'Trump winning rate correlation with stock market',
    geo_scope='usa', # limite map scope to USA
)
fig.show()
# * The states whose winning rates are highly positively correlated with S&P 500 are: OH, WI, OR...
# * The states whose winning rates are highly negatively correlated with S&P 500 are: NM, OK, KS...
# * States such as MD are not likely to be affected by stock market
# In[27]:
pst_merge = pst_cat.reset_index().merge(pnt_win, on = 'date').merge(idx, on ='date')
# Per-state correlation with the average of the six economic indicators.
pst_corr = pst_merge.groupby('code')['winstate_inc','Average of all six indicators'].corr().reset_index()
pst_corr = pst_corr[pst_corr['level_1']=='Average of all six indicators'][['code','winstate_inc']]
fig = go.Figure(data=go.Choropleth(
    # BUG FIX: use pst_corr (the frame providing `z`) for `locations`;
    # pst_sub is stale data from an earlier loop and may be ordered differently.
    locations=pst_corr['code'], # Spatial coordinates
    z = pst_corr['winstate_inc'].astype(float), # Data to be color-coded
    locationmode = 'USA-states', # set of locations match entries in `locations`
    colorscale = 'blues',
    colorbar_title = "Correlation with Average of all six indicators",))
fig.update_layout(
    title_text = 'Trump winning rate correlation with economics indicators',
    geo_scope='usa', # limite map scope to USA
)
fig.show()
# * The states whose winning rates are highly positively correlated with economics indicators are: OR, OH, CT...
# * The states whose winning rates are highly negatively correlated with economics indicators: NM, OK, KS...
# * States such as NE, VT, WY are not likely to be affected by economics indicators
|
14,252 | 97146cebb2e77a2be542b75ccacc479f04ce8b14 | import sys
import pathlib
try:
    # Make the in-repo pyabf sources importable ahead of any installed copy.
    PATH_HERE = pathlib.Path(__file__).parent
    PATH_ABFS = PATH_HERE.joinpath("../../data/abfs/").resolve()
    PATH_SRC = PATH_HERE.joinpath("../../src/").resolve()
    print(PATH_SRC)
    sys.path.insert(0, str(PATH_SRC))
    import pyabf
except Exception as exc:
    # Chain the original error so the real cause (bad path, import failure,
    # ...) is not discarded as it was with the bare `raise EnvironmentError()`.
    raise EnvironmentError("failed to set up and import the local pyabf package") from exc
if __name__ == "__main__":
    # Dump the header of every ABF file in the data folder, in two formats.
    for abf_path in pathlib.Path(PATH_ABFS).glob("*.abf"):
        print(abf_path)
        abf = pyabf.ABF(abf_path)
        # Output files land in the current working directory, not next to the ABF.
        pathlib.Path(f"test-{abf.abfID}.txt").write_text(abf.headerText)
        pathlib.Path(f"test-{abf.abfID}.html").write_text(abf.headerHTML)
|
14,253 | e56f24a4b50d49b40428262d76126f5690454848 | from flask import Flask
import app |
14,254 | 918e0c66a75126d922e0af4ad1abf1461e42219c | from Gui import *
# Create the main window and set its title.
g = Gui()
g.title('MyFirst GUI')
#g.widget(Button, text="trying", command=None)
# Enter the event loop (blocks until the window is closed).
g.mainloop()
|
14,255 | aeb898ec847b967652e183f13b1cbd36ca08220f | import time
from rest_framework import generics
from rest_framework import permissions
from post.models import Post
from post.serializer import PostSerializer
from utils.pagination import PostPagination
__all__ = (
'PostList',
'PostDetail',
)
class PostList(generics.ListCreateAPIView):
    """List posts (paginated) or create a new one.

    Reads are public; writes require authentication.
    """
    queryset = Post.objects.all()
    serializer_class = PostSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
    pagination_class = PostPagination
    def perform_create(self, serializer):
        # Bind the new post to the requesting user as its author.
        serializer.save(author=self.request.user)
    def list(self, request, *args, **kwargs):
        # NOTE(review): artificial 2-second delay — presumably to simulate
        # latency during development; confirm it is not shipped to production.
        time.sleep(2)
        return super().list(request, *args, **kwargs)
class PostDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single post.

    NOTE(review): unlike PostList, no permission_classes are declared here,
    so any client may update/delete — confirm this is intentional.
    """
    queryset = Post.objects.all()
    serializer_class = PostSerializer
|
14,256 | fa1de198efedb6f134ffbb4c4f838a297108a180 | from flask import Flask, render_template
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField
app = Flask(__name__)
app.config['SECRET_KEY'] = 'thisissceret!'
class LoginForm(FlaskForm):
    """Minimal login form: a username and a password field."""
    username = StringField('username')
    password = PasswordField('password')
@app.route('/form', methods=['GET', 'POST'])
def form():
    """Render the login form; on a valid POST echo the submitted values."""
    form = LoginForm()
    if form.validate_on_submit():
        # BUG FIX: the <h1> tag was never closed in the response markup.
        # NOTE(review): echoing the password back to the client is unsafe;
        # kept only because this is clearly a demo app.
        return '<h1>The Username is {}. The Password is {}.</h1>'.format(form.username.data, form.password.data)
    return render_template('index.html',form=form)
if __name__ == '__main__':
app.run(debug=True) |
14,257 | a605fefc1fb9c4cd20799280a71278cf8b2d7702 | #!/usr/bin/env python
# encoding: utf-8
"""
visitortracker.py
Created by Jeremiah Shirk on 2012-09-05.
Copyright (c) 2012 Monitis. All rights reserved.
"""
from monitis.api import get, post, MonitisError, validate_kwargs
def visitor_tracking_tests(**kwargs):
    """Return the current user's visitor trackers.

    Accepts no API parameters; validate_kwargs rejects anything extra.
    """
    req_args = validate_kwargs([], [], **kwargs)
    return get(action='visitorTrackingTests', **req_args)
def visitor_tracking_info(**kwargs):
    """Return information about the specified Visitor Tracker.

    Requires ``siteId``. Results are a list, but the order may differ from
    the docs [siteId, url, name, monitorId], though it isn't entirely clear.
    """
    req_args = validate_kwargs(['siteId'], [], **kwargs)
    return get(action='visitorTrackingInfo', **req_args)
def visitor_tracking_results(**kwargs):
    """Return results of the specified Visitor Tracker for one day.

    Requires ``siteId``, ``year``, ``month`` and ``day``; an optional
    ``timezoneoffset`` is also accepted.
    """
    req_args = validate_kwargs(
        ['siteId', 'year', 'month', 'day'], ['timezoneoffset'], **kwargs)
    return get(action='visitorTrackingResults', **req_args)
|
14,258 | 1db2f74ebcea3a0863e9b269e05a95db7ff95d0f | # Code by Jaime Eduardo Sttivend Velez
# Date:03/24/2019
def queens_attack(n, k, r_q, c_q, obstacles):
    """Compute the queen's attack reach on an n x n board.

    For a queen at row r_q, column c_q, returns the `directions` dict: one
    entry per compass direction holding its unit vector, the number of free
    squares before an obstacle/board edge ('distance') and, when an obstacle
    bounded it, that obstacle ('pair'); plus a "steps" entry listing every
    reachable square. `k` (the declared obstacle count) is unused here.
    (Debug print statements removed from the original.)
    """
    queen = (r_q, c_q)
    size = n
    # Preparing a dictionary with all the distances to the obstacles or
    # board limits along with the unitary vector representation of all directions.
    directions = {
        # Vertical moves (row changes).
        'u': {'unitary_vector': (1, 0), 'distance': -1},  # Up.
        'd': {'unitary_vector': (-1, 0), 'distance': -1},  # Down.
        # Horizontal moves (column changes).
        'l': {'unitary_vector': (0, -1), 'distance': -1},  # Left.
        'r': {'unitary_vector': (0, 1), 'distance': -1},  # Right.
        # Diagonals.
        'ur': {'unitary_vector': (1, 1), 'distance': -1},  # Up right.
        'ul': {'unitary_vector': (1, -1), 'distance': -1},  # Up left.
        'dr': {'unitary_vector': (-1, 1), 'distance': -1},  # Down right.
        'dl': {'unitary_vector': (-1, -1), 'distance': -1},  # Down left.
    }
    # For all the obstacles check if they are within the queen's reach
    for pair in obstacles:
        diagonal = abs(pair[0] - queen[0]) - abs(pair[1] - queen[1]) == 0
        vertical = pair[0] == queen[0]
        horizontal = pair[1] == queen[1]
        if diagonal or vertical or horizontal:
            for direction in directions.values():
                substraction = vector_substraction(pair, queen)
                unit = unit_vector(substraction)
                # Check in which direction the obstacle is.
                if direction['unitary_vector'] == unit:
                    # Save the distance to the obstacle if none was saved yet (-1)...
                    if direction['distance'] == -1:
                        direction['distance'] = direct_distance(queen, pair)
                        direction['pair'] = pair
                        break
                    else:
                        # ...or if this obstacle is closer than the saved one.
                        if direction['distance'] > direct_distance(queen, pair):
                            direction['distance'] = direct_distance(queen, pair)
                            direction['pair'] = pair
                            break
    distance_sumatory = 0
    for direction in directions.items():
        # For directions without recorded distance to an obstacle, the distance to the edge of the board is saved.
        if direction[1]['distance'] == -1:
            if direction[0] == "u":
                direction[1]['distance'] = direct_distance(queen, (size + 1, queen[1]))
            elif direction[0] == "d":
                direction[1]['distance'] = direct_distance(queen, (0, queen[1]))
            elif direction[0] == "r":
                direction[1]['distance'] = direct_distance(queen, (queen[0], size + 1))
            elif direction[0] == "l":
                direction[1]['distance'] = direct_distance(queen, (queen[0], 0))
            else:
                # Diagonals are bounded by whichever edge is reached first.
                distance_up = direct_distance(queen, (size + 1, queen[1]))
                distance_down = direct_distance(queen, (0, queen[1]))
                distance_right = direct_distance(queen, (queen[0], size + 1))
                distance_left = direct_distance(queen, (queen[0], 0))
                if direction[0] == "ur":
                    direction[1]['distance'] = min(distance_up, distance_right)
                elif direction[0] == "ul":
                    direction[1]['distance'] = min(distance_up, distance_left)
                elif direction[0] == "dr":
                    direction[1]['distance'] = min(distance_down, distance_right)
                elif direction[0] == "dl":
                    direction[1]['distance'] = min(distance_down, distance_left)
        # Sum of all the distances.
        # NOTE(review): distance_sumatory (the total attackable-square count)
        # is computed but never returned — confirm whether callers need it.
        distance_sumatory += direction[1]['distance']
    steps = []
    for direction in directions.values():
        steps += get_points_in_distance(direction["unitary_vector"], direction["distance"], queen)
    directions["steps"] = steps
    return directions
def get_board_from_text(text):
    """Parse and validate a queen's-attack input text.

    Returns a dict with keys 'size', 'obstacle_number', 'queen' and
    'obstacles' on success, or a human-readable error string on the first
    validation failure.

    NOTE(review): mixing str and dict return types forces every caller to
    type-check the result; consider raising ValueError instead.
    """
    text = text.rstrip()
    lines = text.split("\n")
    line_board = lines[0]
    # Input format validation.
    if len(lines) < 2:
        return "Input not valid, Insufficient number of lines."
    if len(line_board.split(" ")) != 2:
        return "Wrong number of parameters for board status."
    line_queen = lines[1]
    # Queen position format validation.
    if len(line_queen.split(" ")) != 2:
        return "Wrong number of parameters for queen's position."
    size = int(line_board.split(" ")[0])
    obstacle_number = int(line_board.split(" ")[1])
    queen = (int(line_queen.split(" ")[0]), int(line_queen.split(" ")[1]))
    # Input value validation.
    if size < 1 or size > 10**5:
        return "Board's size is not valid."
    if obstacle_number < 0 or obstacle_number > 10**5:
        return "Number of obstacles is not valid."
    for dimension in queen:
        if dimension < 0 or dimension > size:
            return "Queen's location is no valid."
    # Remaining lines are one obstacle coordinate pair each.
    obstacle_lines = lines[2:]
    obstacles_array = []
    for obstacle_line in obstacle_lines:
        obstacle = (int(obstacle_line.split(" ")[0]), int(obstacle_line.split(" ")[1]))
        # Obstacles validation.
        if obstacle == queen:
            return "Obstacles cannot be located on the same spaces as the queen"
        for dimension in obstacle:
            if dimension < 0 or dimension > size:
                return "Obstacle's location "+str(obstacle)+" is no valid."
        obstacles_array.append(obstacle)
    # Obstacles quantity validation.
    if obstacle_number != len(obstacles_array):
        return "Different number of obstacles :"+len(obstacles_array).__str__() + \
               " than specified:"+obstacle_number.__str__()+"."
    # Output dictionary.
    board = {'size': size,
             'obstacle_number': obstacle_number,
             'queen': queen,
             'obstacles': obstacles_array}
    return board
def direct_distance(a, b):
    """ Counts the number of positions between vector(board position) a and b. """
    # Same-row pairs are measured along the column coordinate; every other
    # pair (same column or diagonal) along the row coordinate, which gives
    # the same count for diagonals.
    if a[0] == b[0]:
        return abs(a[1] - b[1]) - 1
    return abs(a[0] - b[0]) - 1
def unit_vector(vector):
    """ Returns a unitary vector representation of vector."""
    x, y = vector
    # Component-wise sign, with 0 preserved (avoids division by zero).
    sign_x = 0 if x == 0 else x / abs(x)
    sign_y = 0 if y == 0 else y / abs(y)
    return sign_x, sign_y
def vector_substraction(a, b):
    """ Subtracts 2 two dimensional vectors element by element.
    """
    ax, ay = a
    bx, by = b
    return ax - bx, ay - by
def vector_sum(a, b):
    """Adds 2 two dimensional vectors element by element.

    (The previous docstring said "Subtracts", contradicting the body.)
    """
    return a[0] + b[0], a[1] + b[1]
def vector_multiplication(a, b):
    """Multiplies a two dimensional vector a by scalar b element by element."""
    return a[0] * b, a[1] * b
def extract_text_from_file():
    """Return the full contents of the file named 'input' in the CWD."""
    # NOTE(review): relies on the platform default encoding; pass encoding=
    # explicitly if the input file may contain non-ASCII text.
    with open('input') as fp:
        return fp.read()
def get_points_in_distance(unit_vector, distance, queen):
    """Return the `distance` board positions reached by stepping from `queen`
    along `unit_vector`, excluding the queen's own square.

    (A leftover debug print was removed from the original.)
    """
    if distance == 0:
        return []
    return_vector = []
    for i in range(1, distance + 1):
        # queen + unit_vector * i, computed component-wise.
        return_vector.append((queen[0] + unit_vector[0] * i,
                              queen[1] + unit_vector[1] * i))
    return return_vector
def get_distance_to_wall(distance, direction, unitvector, queen):
    """Stub: presumably meant to compute the distance to the board edge.

    NOTE(review): unimplemented — always returns 0; appears unused within
    this file.
    """
    return 0
14,259 | 5fb1bebe46e751730bb5efaed950e2a1d6bcee4e | # Generated by Django 2.2.13 on 2020-07-01 17:36
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: redefines IndexRun.object_type as an optional choice
    field (agent/collection/object/term)."""
    dependencies = [
        ('indexer', '0002_indexrun_indexrunerror'),
    ]
    operations = [
        migrations.AlterField(
            model_name='indexrun',
            name='object_type',
            field=models.CharField(blank=True, choices=[('agent', 'Agent'), ('collection', 'Collection'), ('object', 'Object'), ('term', 'Term')], max_length=100, null=True),
        ),
    ]
|
14,260 | 74e117393b8b2a18f6c21f53e4d0bb3cd4adafec | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Problem definition:
Write three functions that compute the sum of the numbers in a given list using
a for-loop, a while-loop, and recursion.
"""
import sys
def calculate_sum_using_for_loop(numbers):
    """Sum the values in `numbers` by accumulating inside a for-loop."""
    total = 0
    for value in numbers:
        total = total + value
    return total
def calculate_sum_using_while_loop(numbers):
    """Sum the values in `numbers` by walking an index in a while-loop."""
    total = 0
    i = 0
    count = len(numbers)
    while i < count:
        total += numbers[i]
        i += 1
    return total
def calculate_sum_using_recursion(numbers):
    """Sum the values in `numbers` recursively.

    BUG FIX: the original base case required at least one element and raised
    IndexError on an empty list; returning 0 for [] makes this consistent
    with the loop-based variants.
    """
    if not numbers:
        return 0
    return numbers[0] + calculate_sum_using_recursion(numbers[1:])
def main():
    """Run all three summing strategies on a sample list and cross-check them."""
    numbers = [1, 5, 6, 31, 55, 0, -5, 6]
    result_using_for_loop = calculate_sum_using_for_loop(numbers)
    result_using_while_loop = calculate_sum_using_while_loop(numbers)
    result_using_recursion = calculate_sum_using_recursion(numbers)
    # All three implementations must agree on the same input.
    all_equal = (result_using_for_loop == result_using_while_loop == result_using_recursion)
    assert all_equal, \
        "Results form sum operations do not match!!: " \
        "For loop: {}, While loop {}, Recursion {}".format(result_using_for_loop,
                                                           result_using_while_loop,
                                                           result_using_recursion)
if __name__ == '__main__':
sys.exit(main())
|
14,261 | 24576904c878f10e016801455d0fba04f8540b0d | #Prints the whole google page
import urllib.request
# Fetch the page; urlopen returns an HTTP response object (not closed here).
page = urllib.request.urlopen('https://google.com')
# read() yields raw bytes, so the printed output is a bytes literal.
print(page.read())
14,262 | 15a92fce6410e392790cdef8ba6009128b752e42 | ### Author: Roholt
### 10-2020
from ConfigLoader import ConfigLoader
from KeyboardWatcher import KeyboardWatcher
def main():
    """Load shortcut definitions from config.yml and start the key watcher."""
    config = ConfigLoader("./config.yml").config
    watcher = KeyboardWatcher()
    # Register every configured shortcut before entering the watch loop.
    for entry in config:
        watcher.addShortcut(entry["shortcut"], entry["script"], entry["suppress"])
    watcher.run()
if __name__ == "__main__":
    main()
|
14,263 | 63ecd260dbe8b8709b7004ea67f66f1de40ba836 | # Hecho por William, si vas a ver este codigo espero que mi documentacion sea de ayuda :D
# Library imports
from flask import Flask
from flask import render_template
from flask import request
from flask import url_for
from flask import redirect
from flask import flash
from flask import session
from flask import make_response
from flask_mysqldb import MySQL
from flask_mail import Mail, Message
import pygal
import pdfkit
# Intancia de la app de flask
# Flask application instance.
app = Flask(__name__)
# NOTE: the `mail = Mail(app)` that used to live here was removed — it was
# created before any MAIL_* settings were applied and was immediately
# shadowed by the properly configured `mail = Mail(app)` defined after the
# mail configuration block below.
# Mysql conexiones
app.config['MYSQL_HOST'] = 'localhost'
app.config['MYSQL_USER'] = 'root'
app.config['MYSQL_PASSWORD'] = ''
app.config['MYSQL_DB'] = 'protesis'
mysql = MySQL(app)
# App configuration for sending email (SMTP settings)
app.config['DEBUG'] = True
app.config['TESTING'] = False
app.config['MAIL_SERVER'] = 'smtp.gmail.com'
app.config['MAIL_PORT'] = 465
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True
#app.config['MAIL_DEBUG'] = True
app.config['MAIL_USERNAME'] = ''
app.config['MAIL_PASSWORD'] = None
app.config['MAIL_DEFAULT_SENDER'] = ''
app.config['MAIL_MAX_EMAILS'] = None
#app.config['MAIL_SUPPRESS_SEND'] = False
app.config['MAIL_ASCII_ATTACHMENTS'] = False
mail = Mail(app)
# Email test route
@app.route("/correo")
def correo():
    """Email test route: sends a fixed message to a throw-away address."""
    msg = Message("Hello", recipients=["haseke4061@lerwfv.com"])
    msg.body = ("Hola qeu tal")
    # Debug output of the message object before sending.
    print(msg)
    mail.send(msg)
    return 'Correo enviado'
# Opciones es la llave para la sessiones
app.secret_key = 'llaveSecreta'
# Funcion que redireciona al login
@app.route('/')
def login():
return render_template('login.html')
# Funcion de validar el usuario en el login
@app.route('/validar', methods=['GET', 'POST'])
def validar():
    """Validate login credentials and redirect the user by role.

    The first column of the matched `user` row selects the landing page:
    1 -> patient view, 2 -> doctor view, anything else -> nurse view.
    On a failed lookup (or non-POST) the user is sent back to login.
    """
    if request.method == 'POST':
        usuario = request.form['cedula']
        contraseña = request.form['pass']
        cur = mysql.connection.cursor()
        # NOTE(review): the password is compared in plaintext against the DB
        # and later stored in the session — consider hashed storage; confirm
        # with the schema before changing.
        cur.execute(
            'SELECT * FROM user WHERE cedula = %s AND pass = %s', (usuario, contraseña))
        mysql.connection.commit()
        data = cur.fetchall()
        if len(data) > 0:
            cuenta = data[0]
            if (cuenta[0]) == 1:
                # Start the session for a patient
                session['user'] = usuario
                session['pass'] = contraseña
                return redirect(url_for('inicioUser'))
            elif (cuenta[0]) == 2:
                # Start the session for a doctor
                session['user'] = usuario
                session['pass'] = contraseña
                return redirect(url_for('inicioDoc'))
            else:
                # Start the session for a nurse
                session['user'] = usuario
                session['pass'] = contraseña
                return redirect(url_for('inicioEnfermera'))
        else:
            return redirect(url_for('login'))
# Funcion de cerrar session
@app.route('/logout')
def logout():
# Elimina la sesion
session.pop('user', None)
session.pop('pass', None)
return redirect(url_for('login'))
# Inicio de seciones del paciente ----------------------------------------------
# Funcion que redireciona a agendar cita
@app.route('/agendar')
def agendar():
if 'user' in session:
usuario = session['user']
return render_template('agendar.html', user=usuario)
else:
return render_template('login.html')
# Funcion de agregar una cita del usuario
@app.route('/agregarCita', methods=['POST'])
def agregarCita():
if request.method == 'POST':
nombre = request.form['nombre']
apellido = request.form['apellido']
cedula = request.form['cedula']
email = request.form['email']
dia = request.form['dia']
hora = request.form['hora']
cur = mysql.connection.cursor()
cur.execute('INSERT INTO citas (nombre, apellido, cedula, email, dia, hora) VALUES (%s, %s, %s, %s, %s, %s)',
(nombre, apellido, cedula, email, dia, hora))
mysql.connection.commit()
flash('Cita Agregada Satisfactoriamente')
return redirect(url_for('agendar'))
# Funcion que redireciona al inicio del usuario
@app.route('/inicioUser')
def inicioUser():
if 'user' in session:
usuario = session['user']
return render_template('inicioUser.html', user=usuario)
else:
return render_template('login.html')
# Funcion que redireciona a los servicios
@app.route('/servicios')
def servicios():
return render_template('servicios.html')
# Funcion que redireciona a la galeria
@app.route('/galeria')
def galeria():
if 'user' in session:
usuario = session['user']
return render_template('galeria.html', user=usuario)
else:
return render_template('login.html')
# Final de las seciones del paciente ---------------------------------------------------------
# Inicio de Secion de enfermeras --------------------------------------------------------------
# Funcion de la pagina del inicio del doctor
@app.route('/inicioEnfermera')
def inicioEnfermera():
if 'user' in session:
cur = mysql.connection.cursor()
cur.execute('SELECT * FROM citas')
cita = cur.fetchall()
return render_template('inicioEnfermera.html', citas=cita)
else:
return render_template('login.html')
# Funcion que redireciona a registar pacientes
@app.route('/registrarPaciente')
def registrarPaciente():
if 'user' in session:
usuario = session['user']
return render_template('registrarPaciente.html', user=usuario)
else:
return render_template('login.html')
# Funcion de registrar una cita de parte de la enfermera
@app.route('/agregarPaciente', methods=['POST'])
def agregarPaciente():
if request.method == 'POST':
# ---------------------------
# General del Paciente
# ---------------------------
# ---------------------------
# Datos de identificacion
# ---------------------------
cedula = request.form['cedula']
numeroSS = request.form['numeroSS']
nacionalidad = request.form['nacionalidad']
# ---------------------------
# Datos personal del paciente
# ---------------------------
primerNombre = request.form['primerNombre']
segundoNombre = request.form['segundoNombre']
primerApellido = request.form['primerApellido']
segundoApellido = request.form['segundoApellido']
sexo = request.form['sexo']
estadoCivil = request.form['estadoCivil']
tipodeSangre = request.form['tipodeSangre']
temperatura = request.form['temperatura']
edad = request.form['edad']
estatura = request.form['estatura']
peso = request.form['peso']
fechadeNacimiento = request.form['fechadeNacimiento']
# ---------------------------
# Contacto / Dirección del paciente
# ---------------------------
provincia = request.form['provincia']
distrito = request.form['distrito']
corregimiento = request.form['corregimiento']
direcionDetallada = request.form['direcionDetallada']
telefonodeCasa = request.form['telefonodeCasa']
telefonoCelular = request.form['telefonoCelular']
telefonodeTrabajo = request.form['telefonodeTrabajo']
email = request.form['email']
# ---------------------------
# Datos universitarios
# ---------------------------
tipodePaciente = request.form['tipodePaciente']
facultad = request.form['facultad']
centro = request.form['centro']
instancia = request.form['instancia']
# ---------------------------
# Antecedentes
# ---------------------------
# ---------------------------
# Patologicos
# ---------------------------
enfermedades = request.form['enfermedades']
medicamentos = request.form['medicamentos']
alergias = request.form['alergias']
cirugiaPrevia = request.form['cirugiaPrevia']
hospitalPrevia = request.form['hospitalPrevia']
accidentes = request.form['accidentes']
transfucion = request.form['transfucion']
# ---------------------------
# No patologicos
# ---------------------------
tabaco = request.form['tabaco']
cantidadTabaco = request.form['cantidadTabaco']
alcohol = request.form['alcohol']
cantidadAlcohol = request.form['cantidadAlcohol']
droga = request.form['droga']
cantidadDroga = request.form['cantidadDroga']
# ---------------------------
# Familiares
# ---------------------------
antecedentePadre = request.form['antecedentePadre']
antecedenteMadre = request.form['antecedenteMadre']
antecedenteHermano = request.form['antecedenteHermano']
abuelosMaternos = request.form['abuelosMaternos']
abuelosPaternos = request.form['abuelosPaternos']
tiosMaternos = request.form['tiosMaternos']
tiosPaternos = request.form['tiosPaternos']
# ---------------------------
# Gineco-obstétricos
# ---------------------------
menarcaEdad = request.form['menarcaEdad']
fechaMestruacion = request.form['fechaMestruacion']
fechaUltimoParto = request.form['fechaUltimoParto']
gestas = request.form['gestas']
partos = request.form['partos']
abortos = request.form['abortos']
cesarea = request.form['cesarea']
cur = mysql.connection.cursor()
cur.execute('INSERT INTO generalpaciente (cedula, numeroSS, nacionalidad, primerNombre, segundoNombre, primerApellido, segundoApellido, sexo, estadoCivil, tipodeSangre, temperatura, edad, estatura, peso, fechadeNacimiento, provincia, distrito, corregimiento, direcionDetallada, telefonodeCasa, telefonoCelular, telefonodeTrabajo, email, tipodePaciente, facultad, centro, instancia, enfermedades, medicamentos, alergias, cirugiaPrevia, hospitalPrevia, accidentes, transfucion, tabaco, cantidadTabaco, alcohol, cantidadAlcohol, droga, cantidadDroga, antecedentePadre, antecedenteMadre, antecedenteHermano, abuelosMaternos, abuelosPaternos, tiosMaternos, tiosPaternos, menarcaEdad, fechaMestruacion, fechaUltimoParto, gestas, partos, abortos, cesarea) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)',
(cedula, numeroSS, nacionalidad, primerNombre, segundoNombre, primerApellido, segundoApellido, sexo, estadoCivil, tipodeSangre, temperatura, edad, estatura, peso, fechadeNacimiento, provincia, distrito, corregimiento, direcionDetallada, telefonodeCasa, telefonoCelular, telefonodeTrabajo, email, tipodePaciente, facultad, centro, instancia, enfermedades, medicamentos, alergias, cirugiaPrevia, hospitalPrevia, accidentes, transfucion, tabaco, cantidadTabaco, alcohol, cantidadAlcohol, droga, cantidadDroga, antecedentePadre, antecedenteMadre, antecedenteHermano, abuelosMaternos, abuelosPaternos, tiosMaternos, tiosPaternos, menarcaEdad, fechaMestruacion, fechaUltimoParto, gestas, partos, abortos, cesarea))
mysql.connection.commit()
flash('Paciente Registrado Satisfactoriamente')
return redirect(url_for('registrarPaciente'))
# Funcion de aceptar una cita de parte de la enfermera
@app.route('/aceptarCita/<id>')
def aceptarCita(id):
    """Accept an appointment: remove it from `citas` and flash a confirmation.

    The URL `id` is untrusted input; the DELETE is parameterized instead of
    being interpolated with str.format (previous SQL-injection vector).
    """
    cur = mysql.connection.cursor()
    cur.execute('DELETE FROM citas WHERE id = %s', (id,))
    mysql.connection.commit()
    flash('Cita Aceptada Satisfactoriamente')
    return redirect(url_for('inicioEnfermera'))
# Funcion para elimnar una cita de parte de la enfermera
@app.route('/eliminarCita/<string:id>')
def eliminarCita(id):
    """Delete an appointment from `citas` and return to the nurse home page.

    The URL `id` is untrusted input; the DELETE is parameterized instead of
    being interpolated with str.format (previous SQL-injection vector).
    """
    cur = mysql.connection.cursor()
    cur.execute('DELETE FROM citas WHERE id = %s', (id,))
    mysql.connection.commit()
    flash('Cita Eliminada Satisfactoriamente')
    return redirect(url_for('inicioEnfermera'))
# Final de seciones de la enfermera -------------------------------
# Sesion de los Doctores ------------------------------------------
# Funcion del inicio del doctor
@app.route('/inicioDoc')
def inicioDoc():
if 'user' in session:
cur = mysql.connection.cursor()
cur.execute('SELECT * FROM generalpaciente')
paciente = cur.fetchall()
return render_template('inicioDoc.html', pacientes=paciente)
else:
return render_template('login.html')
# Funcion del inicio del doctor para poder ver los pacientes
@app.route('/inicioDocVer/<id>')
def inicioDocVer(id):
    """Doctor view: render the detail page for one patient record.

    Requires a logged-in session, else falls back to the login page.
    The SELECT is parameterized: the URL `id` is untrusted and was
    previously interpolated with str.format (SQL-injection vector).
    """
    if 'user' in session:
        cur = mysql.connection.cursor()
        cur.execute('SELECT * FROM generalpaciente where id = %s', (id,))
        paciente = cur.fetchall()
        return render_template('inicioDocVer.html', pacientes=paciente)
    else:
        return render_template('login.html')
# Funcion del inicio del doctor para editar los pacientes ingresar la modificacion
@app.route('/inicioDocEdit/<id>')
def inicioDocEdit(id):
    """Doctor view: render the edit form pre-filled with one patient record.

    Requires a logged-in session, else falls back to the login page.
    The SELECT is parameterized: the URL `id` is untrusted and was
    previously interpolated with str.format (SQL-injection vector).
    """
    if 'user' in session:
        cur = mysql.connection.cursor()
        cur.execute('SELECT * FROM generalpaciente where id = %s', (id,))
        paciente = cur.fetchall()
        return render_template('inicioDocEdit.html', pacientes=paciente)
    else:
        return render_template('login.html')
# Funcion del inicio del doctor para editar los pacientes realizar la modificacion esta es la funcion del uptade
@app.route('/inicioDocEditModi/<id>', methods=['POST'])
def inicioDocEditModi(id):
if 'user' in session:
if request.method == 'POST':
# ---------------------------
# General del Paciente
# ---------------------------
# ---------------------------
# Datos de identificacion
# ---------------------------
cedula = request.form['cedula']
numeroSS = request.form['numeroSS']
nacionalidad = request.form['nacionalidad']
# ---------------------------
# Datos personal del paciente
# ---------------------------
primerNombre = request.form['primerNombre']
segundoNombre = request.form['segundoNombre']
primerApellido = request.form['primerApellido']
segundoApellido = request.form['segundoApellido']
sexo = request.form['sexo']
estadoCivil = request.form['estadoCivil']
tipodeSangre = request.form['tipodeSangre']
temperatura = request.form['temperatura']
edad = request.form['edad']
estatura = request.form['estatura']
peso = request.form['peso']
fechadeNacimiento = request.form['fechadeNacimiento']
# ---------------------------
# Contacto / Dirección del paciente
# ---------------------------
provincia = request.form['provincia']
distrito = request.form['distrito']
corregimiento = request.form['corregimiento']
direcionDetallada = request.form['direcionDetallada']
telefonodeCasa = request.form['telefonodeCasa']
telefonoCelular = request.form['telefonoCelular']
telefonodeTrabajo = request.form['telefonodeTrabajo']
email = request.form['email']
# ---------------------------
# Datos universitarios
# ---------------------------
tipodePaciente = request.form['tipodePaciente']
facultad = request.form['facultad']
centro = request.form['centro']
instancia = request.form['instancia']
# ---------------------------
# Antecedentes
# ---------------------------
# ---------------------------
# Patologicos
# ---------------------------
enfermedades = request.form['enfermedades']
medicamentos = request.form['medicamentos']
alergias = request.form['alergias']
cirugiaPrevia = request.form['cirugiaPrevia']
hospitalPrevia = request.form['hospitalPrevia']
accidentes = request.form['accidentes']
transfucion = request.form['transfucion']
# ---------------------------
# No patologicos
# ---------------------------
tabaco = request.form['tabaco']
cantidadTabaco = request.form['cantidadTabaco']
alcohol = request.form['alcohol']
cantidadAlcohol = request.form['cantidadAlcohol']
droga = request.form['droga']
cantidadDroga = request.form['cantidadDroga']
# ---------------------------
# Familiares
# ---------------------------
antecedentePadre = request.form['antecedentePadre']
antecedenteMadre = request.form['antecedenteMadre']
antecedenteHermano = request.form['antecedenteHermano']
abuelosMaternos = request.form['abuelosMaternos']
abuelosPaternos = request.form['abuelosPaternos']
tiosMaternos = request.form['tiosMaternos']
tiosPaternos = request.form['tiosPaternos']
# ---------------------------
# Gineco-obstétricos
# ---------------------------
menarcaEdad = request.form['menarcaEdad']
fechaMestruacion = request.form['fechaMestruacion']
fechaUltimoParto = request.form['fechaUltimoParto']
gestas = request.form['gestas']
partos = request.form['partos']
abortos = request.form['abortos']
cesarea = request.form['cesarea']
cur = mysql.connection.cursor()
cur.execute("""
UPDATE generalpaciente
SET cedula = %s,
numeroSS = %s,
nacionalidad = %s,
primerNombre = %s,
segundoNombre = %s,
primerApellido = %s,
segundoApellido = %s,
sexo = %s,
estadoCivil = %s,
tipodeSangre = %s,
temperatura = %s,
edad = %s,
estatura = %s,
peso = %s,
fechadeNacimiento = %s,
provincia = %s,
distrito = %s,
corregimiento = %s,
direcionDetallada = %s,
telefonodeCasa = %s,
telefonoCelular = %s,
telefonodeTrabajo = %s,
email = %s,
tipodePaciente = %s,
facultad = %s,
centro = %s,
instancia = %s,
enfermedades = %s,
medicamentos = %s,
alergias = %s,
cirugiaPrevia = %s,
hospitalPrevia = %s,
accidentes = %s,
transfucion = %s,
tabaco = %s,
cantidadTabaco = %s,
alcohol = %s,
cantidadAlcohol = %s,
droga = %s,
cantidadDroga = %s,
antecedentePadre = %s,
antecedenteMadre = %s,
antecedenteHermano = %s,
abuelosMaternos = %s,
abuelosPaternos = %s,
tiosMaternos = %s,
tiosPaternos = %s,
menarcaEdad = %s,
fechaMestruacion = %s,
fechaUltimoParto = %s,
gestas = %s,
partos = %s,
abortos = %s,
cesarea = %s
WHERE id = %s
""", (cedula, numeroSS, nacionalidad, primerNombre, segundoNombre, primerApellido,
segundoApellido, sexo, estadoCivil, tipodeSangre, temperatura, edad,
estatura, peso, fechadeNacimiento, provincia, distrito, corregimiento,
direcionDetallada, telefonodeCasa, telefonoCelular, telefonodeTrabajo, email, tipodePaciente,
facultad, centro, instancia, enfermedades, medicamentos, alergias,
cirugiaPrevia, hospitalPrevia, accidentes, transfucion, tabaco, cantidadTabaco,
alcohol, cantidadAlcohol, droga, cantidadDroga, antecedentePadre, antecedenteMadre,
antecedenteHermano, abuelosMaternos, abuelosPaternos, tiosMaternos, tiosPaternos, menarcaEdad,
fechaMestruacion, fechaUltimoParto, gestas, partos, abortos, cesarea, id))
mysql.connection.commit()
flash('Paciente Editado Satisfactoriamente')
return redirect(url_for('inicioDoc'))
else:
return render_template('login.html')
# Funcion para eliminar paciente de parte de la doctora
@app.route('/eliminarPaciente/<string:id>')
def eliminarPaciente(id):
    """Delete a patient record and return to the doctor home page.

    The URL `id` is untrusted input; the DELETE is parameterized instead of
    being interpolated with str.format (previous SQL-injection vector).
    """
    cur = mysql.connection.cursor()
    cur.execute('DELETE FROM generalpaciente WHERE id = %s', (id,))
    mysql.connection.commit()
    flash('Paciente Eliminado Satisfactoriamente')
    return redirect(url_for('inicioDoc'))
# Funcion de archivo clinico
@app.route('/archivoClinico')
def archivoClinico():
if 'user' in session:
cur = mysql.connection.cursor()
cur.execute('SELECT * FROM citas')
cita = cur.fetchall()
return render_template('archivoClinico.html', citas=cita)
else:
return render_template('login.html')
# Funcion regitrar un archivo clinico
@app.route('/agregarArchivo', methods=['POST'])
def agregarArchivo():
if 'user' in session:
if request.method == 'POST':
paciente = request.form['paciente']
cedula = request.form['cedula']
numeross = request.form['numeroSS']
fecha = request.form['fecha']
afeccion = request.form['afeccion']
cie = request.form['cie']
diagnostico = request.form['diagnostico']
tratamiento = request.form['tratamiento']
procedimiento = request.form['procedimiento']
cur = mysql.connection.cursor()
cur.execute('INSERT INTO archivoclinico (paciente, cedula, numeross, fecha, afeccion, cie, diagnostico, tratamiento, procedimiento) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)',
(paciente, cedula, numeross, fecha, afeccion, cie, diagnostico, tratamiento, procedimiento))
mysql.connection.commit()
flash('Archivo Clínico Agregada Satisfactoriamente')
return redirect(url_for('archivoClinico'))
else:
return render_template('login.html')
# Funcion de mostra el menu de los archivos clinicos
@app.route('/archivoClinicoMenu')
def archivoClinicoMenu():
if 'user' in session:
cur = mysql.connection.cursor()
cur.execute('SELECT * FROM archivoclinico')
paciente = cur.fetchall()
return render_template('archivoClinicoMenu.html', pacientes=paciente)
else:
return render_template('login.html')
# Funcion de ver los archivos clinicos
@app.route('/archivoClinicoVer/<id>')
def archivoClinicoVer(id):
    """Render the read-only view of one clinical-file record.

    Requires a logged-in session, else falls back to the login page.
    The SELECT is parameterized: the URL `id` is untrusted and was
    previously interpolated with str.format (SQL-injection vector).
    """
    if 'user' in session:
        cur = mysql.connection.cursor()
        cur.execute('SELECT * FROM archivoclinico where id = %s', (id,))
        archi = cur.fetchall()
        return render_template('archivoClinicoVer.html', archivos=archi)
    else:
        return render_template('login.html')
# Funcion de ver las fererencias con el id
@app.route('/archivoClinicoEdit/<id>')
def archivoClinicoEdit(id):
    """Render the edit form pre-filled with one clinical-file record.

    Requires a logged-in session, else falls back to the login page.
    The SELECT is parameterized: the URL `id` is untrusted and was
    previously interpolated with str.format (SQL-injection vector).
    """
    if 'user' in session:
        cur = mysql.connection.cursor()
        cur.execute('SELECT * FROM archivoclinico where id = %s', (id,))
        archi = cur.fetchall()
        return render_template('archivoClinicoEdit.html', archivos=archi)
    else:
        return render_template('login.html')
# Funcion de ver las fererencias con el id
@app.route('/archivoClinicoEditModi/<id>', methods=['POST'])
def archivoClinicoEditModi(id):
if 'user' in session:
if request.method == 'POST':
paciente = request.form['paciente']
cedula = request.form['cedula']
numeross = request.form['numeroSS']
fecha = request.form['fecha']
afeccion = request.form['afeccion']
cie = request.form['cie']
diagnostico = request.form['diagnostico']
tratamiento = request.form['tratamiento']
procedimiento = request.form['procedimiento']
cur = mysql.connection.cursor()
cur.execute("""
UPDATE archivoclinico
SET paciente = %s,
cedula = %s,
numeross = %s,
fecha = %s,
afeccion = %s,
cie = %s,
diagnostico = %s,
tratamiento = %s,
procedimiento = %s
WHERE id = %s
""", (paciente, cedula, numeross, fecha, afeccion, cie, diagnostico, tratamiento, procedimiento, id))
mysql.connection.commit()
flash('Archivo Clínico Editada Satisfactoriamente')
return redirect(url_for('archivoClinicoMenu'))
else:
return render_template('login.html')
# Funcion para eliminar referencias
@app.route('/archivoClinicoEli/<string:id>')
def archivoClinicoEli(id):
    """Delete one clinical-file record and return to the list page.

    The URL `id` is untrusted input; the DELETE is parameterized instead of
    being interpolated with str.format (previous SQL-injection vector).
    """
    cur = mysql.connection.cursor()
    cur.execute('DELETE FROM archivoclinico WHERE id = %s', (id,))
    mysql.connection.commit()
    flash('Archivo Clínico Eliminada Satisfactoriamente')
    return redirect(url_for('archivoClinicoMenu'))
# Funcion de las hoja de referencias
@app.route('/referencia')
def referencia():
if 'user' in session:
cur = mysql.connection.cursor()
cur.execute('SELECT * FROM citas')
cita = cur.fetchall()
return render_template('referencia.html', citas=cita)
else:
return render_template('login.html')
# Funcion de agregar una referencia de parte del doctor al paciente
@app.route('/agregarRefe', methods=['POST'])
def agregarRefe():
if request.method == 'POST':
paciente = request.form['paciente']
doctor = request.form['doctor']
referir = request.form['referir']
fecha = request.form['fecha']
sintomas = request.form['sintomas']
tratamiento = request.form['tratamiento']
examenfisico = request.form['examenfisico']
diagnostico = request.form['diagnostico']
laboratorio = request.form['laboratorio']
cur = mysql.connection.cursor()
cur.execute('INSERT INTO referencia (paciente, doctor, refiere, fecha, sintomas, tratamiento, examenfisico, diagnostico, laboratorios) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)',
(paciente, doctor, referir, fecha, sintomas, tratamiento, examenfisico, diagnostico, laboratorio))
mysql.connection.commit()
flash('Referencia Agregada Satisfactoriamente')
return redirect(url_for('referencia'))
# Funcion de mostra el menu de las referencias
@app.route('/referenciaMenu')
def referenciaMenu():
if 'user' in session:
cur = mysql.connection.cursor()
cur.execute('SELECT * FROM referencia')
paciente = cur.fetchall()
return render_template('referenciaMenu.html', pacientes=paciente)
else:
return render_template('login.html')
# Funcion de ver las fererencias con el id
@app.route('/referenciaVer/<id>')
def referenciaVer(id):
    """Render the read-only view of one referral record.

    Requires a logged-in session, else falls back to the login page.
    The SELECT is parameterized: the URL `id` is untrusted and was
    previously interpolated with str.format (SQL-injection vector).
    """
    if 'user' in session:
        cur = mysql.connection.cursor()
        cur.execute('SELECT * FROM referencia where id = %s', (id,))
        refe = cur.fetchall()
        return render_template('referenciaVer.html', referencia=refe)
    else:
        return render_template('login.html')
# Funcion de ver las fererencias con el id
@app.route('/referenciaEdit/<id>')
def referenciaEdit(id):
    """Render the edit form pre-filled with one referral record.

    Requires a logged-in session, else falls back to the login page.
    The SELECT is parameterized: the URL `id` is untrusted and was
    previously interpolated with str.format (SQL-injection vector).
    """
    if 'user' in session:
        cur = mysql.connection.cursor()
        cur.execute('SELECT * FROM referencia where id = %s', (id,))
        refe = cur.fetchall()
        return render_template('referenciaEdit.html', referencia=refe)
    else:
        return render_template('login.html')
# Funcion de ver las fererencias con el id
@app.route('/referenciaEditModi/<id>', methods=['POST'])
def referenciaEditModi(id):
if 'user' in session:
if request.method == 'POST':
paciente = request.form['paciente']
doctor = request.form['doctor']
referir = request.form['referir']
fecha = request.form['fecha']
sintomas = request.form['sintomas']
tratamiento = request.form['tratamiento']
examenfisico = request.form['examenfisico']
diagnostico = request.form['diagnostico']
laboratorio = request.form['laboratorio']
cur = mysql.connection.cursor()
cur.execute("""
UPDATE referencia
SET paciente = %s,
doctor = %s,
refiere = %s,
fecha = %s,
sintomas = %s,
tratamiento = %s,
examenfisico = %s,
diagnostico = %s,
laboratorios = %s
WHERE id = %s
""", (paciente, doctor, referir, fecha, sintomas, tratamiento, examenfisico, diagnostico, laboratorio, id))
mysql.connection.commit()
flash('Referencia Editada Satisfactoriamente')
return redirect(url_for('referenciaMenu'))
else:
return render_template('login.html')
# Funcion para eliminar referencias
@app.route('/referenciaEli/<string:id>')
def referenciaEli(id):
    """Delete one referral record and return to the referral list page.

    The URL `id` is untrusted input; the DELETE is parameterized instead of
    being interpolated with str.format (previous SQL-injection vector).
    """
    cur = mysql.connection.cursor()
    cur.execute('DELETE FROM referencia WHERE id = %s', (id,))
    mysql.connection.commit()
    flash('Referencia Eliminada Satisfactoriamente')
    return redirect(url_for('referenciaMenu'))
# Funcion de recetar los laboratorios
@app.route('/recetarLab')
def recetarLab():
if 'user' in session:
return render_template('recetarLab.html')
else:
return render_template('login.html')
# Funcion de agregar una receta de laboratorio
@app.route('/agregarLab', methods=['POST'])
def agregarLab():
if request.method == 'POST':
paciente = request.form['paciente']
cedula = request.form['cedula']
numeroSS = request.form['numeroSS']
fecha = request.form['fecha']
parasitologia = request.form['parasitologia']
urinalisis = request.form['urinalisis']
bacteriologia = request.form['bacteriologia']
hematologia = request.form['hematologia']
serologia = request.form['serologia']
cur = mysql.connection.cursor()
cur.execute('INSERT INTO laboratorios (paciente, cedula, numeroSS, fecha, parasitologia, urinalisis, bacteriologia, hematologia, serologia) VALUES ( %s, %s, %s, %s, %s, %s, %s, %s, %s)',
( paciente, cedula, numeroSS, fecha, parasitologia, urinalisis, bacteriologia, hematologia, serologia))
mysql.connection.commit()
flash('Receta de Laboratorio Agregada Satisfactoriamente')
return redirect(url_for('recetarLab'))
# Funcion de mostra el menu de las recetas de los laboratorios
@app.route('/recetarLabMenu')
def recetarLabMenu():
if 'user' in session:
cur = mysql.connection.cursor()
cur.execute('SELECT * FROM laboratorios')
laboratorios = cur.fetchall()
return render_template('recetarLabMenu.html', laboratorios=laboratorios)
else:
return render_template('login.html')
# Funcion de ver las recetas del laboratorio conel id
@app.route('/recetarLabVer/<id>')
def recetarLabVer(id):
    """Render the read-only view of one lab-order record.

    Requires a logged-in session, else falls back to the login page.
    The SELECT is parameterized: the URL `id` is untrusted and was
    previously interpolated with str.format (SQL-injection vector).
    """
    if 'user' in session:
        cur = mysql.connection.cursor()
        cur.execute('SELECT * FROM laboratorios where id = %s', (id,))
        laboratorios = cur.fetchall()
        return render_template('recetarLabVer.html', laboratorios=laboratorios)
    else:
        return render_template('login.html')
# Funcion de editar las recetas de los laboratorios
@app.route('/recetarLabEdit/<id>')
def recetarLabEdit(id):
    """Render the edit form pre-filled with one lab-order record.

    Requires a logged-in session, else falls back to the login page.
    The SELECT is parameterized: the URL `id` is untrusted and was
    previously interpolated with str.format (SQL-injection vector).
    """
    if 'user' in session:
        cur = mysql.connection.cursor()
        cur.execute('SELECT * FROM laboratorios where id = %s', (id,))
        laboratorios = cur.fetchall()
        return render_template('recetarLabEdit.html', laboratorios=laboratorios)
    else:
        return render_template('login.html')
# Funcion de ejecutar la modificaciones de la recetas
@app.route('/recetarLabEditModi/<id>', methods=['POST'])
def recetarLabEditModi(id):
if 'user' in session:
if request.method == 'POST':
paciente = request.form['paciente']
cedula = request.form['cedula']
numeroSS = request.form['numeroSS']
fecha = request.form['fecha']
parasitologia = request.form['parasitologia']
urinalisis = request.form['urinalisis']
bacteriologia = request.form['bacteriologia']
hematologia = request.form['hematologia']
serologia = request.form['serologia']
cur = mysql.connection.cursor()
cur.execute("""
UPDATE laboratorios
SET paciente = %s,
cedula = %s,
numeroSS = %s,
fecha = %s,
parasitologia = %s,
urinalisis = %s,
bacteriologia = %s,
hematologia = %s,
serologia = %s
WHERE id = %s
""", (paciente, cedula, numeroSS, fecha, parasitologia, urinalisis, bacteriologia, hematologia, serologia, id))
mysql.connection.commit()
flash('Laboratorio Editada Satisfactoriamente')
return redirect(url_for('recetarLabMenu'))
else:
return render_template('login.html')
# Funcion para eliminar recetas de laboratorios
@app.route('/recetarLabEli/<string:id>')
def recetarLabEli(id):
    """Delete one lab-order record and return to the lab-order list page.

    The URL `id` is untrusted input; the DELETE is parameterized instead of
    being interpolated with str.format (previous SQL-injection vector).
    """
    cur = mysql.connection.cursor()
    cur.execute('DELETE FROM laboratorios WHERE id = %s', (id,))
    mysql.connection.commit()
    flash('Receta Eliminada Satisfactoriamente')
    return redirect(url_for('recetarLabMenu'))
# Funcion del documentos clinicos
@app.route('/docuClinico')
def docuClinico():
if 'user' in session:
return render_template('docuClinico.html')
else:
return render_template('login.html')
# Funcion de las estadisticas
@app.route('/estadis')
def estadis():
if 'user' in session:
# Lineal
line_chart = pygal.Line()
line_chart.title = 'No. Pacientes'
line_chart.x_labels = ['Ene', 'Feb', 'Mar', 'Abr', 'May', 'Jun', 'Jul', 'Ago', 'Sep', 'Oct', 'Nov', 'Dic']
line_chart.add('Sistemas', [7.0, 6.9, 9.5, 14.5, 18.4, 21.5, 25.2, 26.5, 23.3, 18.3, 13.9, 9.6])
line_chart.add('Civil', [3.9, 4.2, 5.7, 8.5, 11.9, 15.2, 17.0, 16.6, 14.2, 10.3, 6.6, 4.8])
line_chart.add('Mecanica', [9.0, 5.2, 7.7, 9.5, 1.9, 25.2, 18.0, 16.6, 14.2, 11.3, 7.6, 9.8])
line_chart.add('Industrial', [7.0, 6.0, 9.7, 6.5, 4.9, 10.2, 1.0, 9.6, 12.2, 9.3, 10.6, 8.8])
line_chart.add('Ciencia y Tec.', [2.0, 5.0, 3.7, 2.5, 2.9, 7.0, 3.0, 7.6, 9.2, 7.0, 8.6, 6.8])
line_chart.add('Electrica', [5.0, 3.0, 2.7, 7.3, 2.0, 7.0, 5.3, 2.1, 2.2, 3.1, 4.6, 4.0])
line_chart = line_chart.render_data_uri()
# Barra
bar = pygal.Bar()
bar.title = 'Estudiantes Por Facultad'
bar.x_labels = ['Ene', 'Feb', 'Mar', 'Abr', 'May', 'Jun', 'Jul', 'Ago', 'Sep', 'Oct', 'Nov', 'Dic']
bar.add('Sistemas', [7.0, 6.9, 9.5, 14.5, 18.4, 21.5, 25.2, 26.5, 23.3, 18.3, 13.9, 9.6])
bar.add('Civil', [3.9, 4.2, 5.7, 8.5, 11.9, 15.2, 17.0, 16.6, 14.2, 10.3, 6.6, 4.8])
bar.add('Mecanica', [9.0, 5.2, 7.7, 9.5, 1.9, 25.2, 18.0, 16.6, 14.2, 11.3, 7.6, 9.8])
bar.add('Industrial', [7.0, 6.0, 9.7, 6.5, 4.9, 10.2, 1.0, 9.6, 12.2, 9.3, 10.6, 8.8])
bar.add('Ciencia y Tec.', [2.0, 5.0, 3.7, 2.5, 2.9, 7.0, 3.0, 7.6, 9.2, 7.0, 8.6, 6.8])
bar.add('Electrica', [5.0, 3.0, 2.7, 7.3, 2.0, 7.0, 5.3, 2.1, 2.2, 3.1, 4.6, 4.0])
bar = bar.render_data_uri()
# Pie
pie_chart = pygal.Pie()
pie_chart.title = 'Afecciones Mas Comunes'
pie_chart.add('Resfriados', 45.0)
pie_chart.add('Alergias', 26.8)
pie_chart.add('Diabetes', 8.5)
pie_chart.add('Virus', 12.8)
pie_chart.add('Heridas y moretones', 6.2)
pie_chart.add('Otras', 0.8)
pie_chart = pie_chart.render_data_uri()
return render_template('estadis.html', line_chart=line_chart, bar=bar, pie_chart=pie_chart)
else:
return render_template('login.html')
# Cosa que aun no he tocado de aqui hacia abajo --------------------------
# Funcion de los certificados de buena salud
@app.route('/certificado')
def certificado():
if 'user' in session:
return render_template('certificado.html')
else:
return render_template('login.html')
# Funcion de generar el pdf para el certificado de buena salud
@app.route('/certificadoPDF/<name>/<ubi>')
def certificadoPDF(name, ubi):
config = pdfkit.configuration(wkhtmltopdf='C:/Program Files/wkhtmltopdf/bin/wkhtmltopdf.exe')
rendered = render_template('certificadoAsisPDF.html', name=name, ubi=ubi)
#pdf = pdfkit.from_string(rendered, False)
pdf = pdfkit.from_string(rendered, False, configuration=config)
response = make_response(pdf)
response.headers['Content-Type'] = 'application/pdf'
response.headers['Content-Disposition'] = 'inline; filename=salida.pdf'
flash('Certificado Generada Satisfactoriamente')
return rendered
# Funcion de menu de las constancia
@app.route('/constancia')
def constancia():
if 'user' in session:
return render_template('constancia.html')
else:
return render_template('login.html')
# Funcion de mostra la forma de generar la costancia de asistencia
@app.route('/constanciaAsis')
def constanciaAsis():
if 'user' in session:
return render_template('constanciaAsis.html')
else:
return render_template('login.html')
# Funcion de mostra la forma de generar la costancia de incapacidad
@app.route('/constanciaInca')
def constanciaInca():
if 'user' in session:
return render_template('constanciaInca.html')
else:
return render_template('login.html')
# Final de la seccion de los doctores ------------------------------------------------
if __name__ == '__main__':
app.run(debug=True)
|
14,264 | 6570b8fbdadbaaa6e2263932d148b53f10073a5c | from graph.Node import Node
class Graph:
    """Directed graph of operators keyed by operator id.

    Built by walking backwards from sink operators through their
    `previous` references; every edge is recorded child -> parent
    with weight 1 on both endpoint nodes.
    """

    def __init__(self):
        self.graph = {}    # id -> Node wrapper
        self.nodes_no = 0  # count of distinct nodes registered
        self.nodes = []    # ids in insertion order

    def create(self, sinks):
        """Populate the graph from an iterable of sink operators."""
        for sink in sinks:
            self.process_operator(sink)

    def process_operator(self, operator):
        """Recursively register `operator` and link it to each parent."""
        self.add_node(operator.kind, operator.id, operator)
        for parent in operator.previous:
            if not parent:
                continue
            self.add_node(parent.kind, parent.id, parent)
            self.add_link(operator.id, parent.id, 1)
            self.process_operator(parent)

    def add_node(self, name, id, operator):
        """Register a node once; repeated ids are silently ignored."""
        if id not in self.nodes:
            self.nodes_no += 1
            self.nodes.append(id)
            self.graph[id] = Node(name, id, operator)

    def add_link(self, id_child, id_parent, e):
        """Record a child->parent edge of weight `e` when both ends exist."""
        if id_child in self.nodes and id_parent in self.nodes:
            self.graph[id_child].add_predecessor(id_parent, e)
            self.graph[id_parent].add_successor(id_child, e)

    def print_adjlist(self):
        """Dump every node together with its predecessor/successor edges."""
        for key in self.graph:
            print("Node: ", self.graph[key].kind, " - ", key)
            for key2 in self.graph[key].predecessors:
                print("Papi: ", self.graph[key2].kind, " - ", self.graph[key].predecessors[key2], " - ", key2)
            for key2 in self.graph[key].successors:
                print("Hijo: ", self.graph[key2].kind, " - ", self.graph[key].successors[key2], " - ", key2)

    def get_node(self, id):
        """Return the Node wrapper for `id` (raises KeyError if unknown)."""
        return self.graph[id]
14,265 | e80a555c879bddef72373c0d7997a2d144d9fea8 | #!/usr/bin/python
import sys, re, codecs
'''
Created by Raj Nath, 24th Sept 2016
Purpose: Created to get the text from the conll format file
Usage: python prep_test_data.py TEXT.conll
'''
# Ported to Python 3: print is a function, files are closed via `with`,
# and per-line debug prints were removed.
if len(sys.argv) != 2:
    print('Usage: python', sys.argv[0], 'TEXT.conll')
    sys.exit(1)

fname = sys.argv[1]
with codecs.open(fname, 'r', 'utf-8') as fin, \
        codecs.open(fname + '.temp', 'w', 'utf-8') as fwords:
    text = ''
    for line in fin:
        words = line.strip().split("\t")
        if line.strip() == "":
            # Blank line = sentence boundary: flush the accumulated tokens.
            if text.strip() != "":
                fwords.write(text.strip().lower() + '\n')
            text = ''
        else:
            # First column of the CoNLL row is the token itself.
            text += words[0] + ' '
    # Bug fix: the original dropped the final sentence when the file did
    # not end with a trailing blank line.
    if text.strip() != "":
        fwords.write(text.strip().lower() + '\n')
|
14,266 | 5e8854274d51a1566673e9fcb17aa210c7380a30 | '''Write a Python program to sum of three given integers. However, if two values are equal sum will be zero'''
def sum_or_zero(a, b, c):
    """Return a + b + c, or 0 when any two of the three values are equal.

    Bug fix: the original chained comparison ``num1 != num2 != num3`` only
    checks the adjacent pairs, missing ``num1 == num3`` — inputs like
    (1, 2, 1) printed both the sum and "zero".
    """
    if a == b or b == c or a == c:
        return 0
    return a + b + c


if __name__ == '__main__':
    # Interactive entry point (guarded so importing this module is side-effect free).
    num1 = int(input("enter the number:"))
    num2 = int(input("enter the number"))
    num3 = int(input("enter the number:"))
    result = sum_or_zero(num1, num2, num3)
    # Preserve the original outputs: the sum, or the word "zero".
    print(result if result != 0 else "zero")
14,267 | ce8638af48e65d85979c0724bfe3a5604037c452 | from aski import двоцифрени
def test0():
    # Codes 64..66 decode to the printable characters '@AB'.
    result = двоцифрени([64, 65, 66])
    assert result == '@AB'

def test1():
    # Values below 33 that are >= 27 produce nothing.
    assert двоцифрени(list(range(27, 33))) == ''

def test2():
    # Negative values produce nothing.
    assert двоцифрени([-3, -2, -1]) == ''

def test3():
    # Three-or-more digit values produce nothing.
    assert двоцифрени([100, 101, 200, 3000]) == ''

def test4():
    # Full two-digit printable range 33..99.
    expected = '!#$%&()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_abc'
    assert двоцифрени(list(range(33, 100))) == expected

def test5():
    # Single-digit values 0..26 wrap into the lowercase/punctuation block.
    expected = 'defghijklmnopqrstuvwxyz{|}~'
    assert двоцифрени(list(range(0, 27))) == expected
|
14,268 | dc5eea23cb93b2aefc417ab924e6e877f079d78d | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, sys
# sys.path.append("..")
import pywt
import pywt.data
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy import signal
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
def create_DS(ds_num, v_pre, v_post, cleanse=False):
    """Load MIT-BIH style CSV dumps and cut them into per-beat segments.

    Args:
        ds_num:  "1" selects the DS1 patient list, anything else selects DS2.
        v_pre:   number of samples to keep before each annotated beat.
        v_post:  number of samples to keep after each annotated beat.
        cleanse: when True, band-pass filter and baseline-correct each segment.

    Returns:
        (dfall, dfann, segment_data, segment_labels): the raw sample and
        annotation frames per patient, a 2-D float32 array with one row per
        beat (both leads concatenated, each followed by the RR interval), and
        the matching AAMI class labels (ints 0..4).
    """
    # Full patient lists kept commented in the original; trimmed for dev runs.
    # ds1: '101','106','108','109','112','114','115','116','118','119','122','124',
    #      '201','203','205','207','208','209','215','220','223','230','107','217'
    ds1_files = ['101', '106']
    # ds2: '100','103','105','111','113','117','121','123','200','202','210','212',
    #      '213','214','219','221','222','228','231','232','233','234','102','104'
    ds2_files = ['100', '103']
    preX = v_pre
    postX = v_post
    dfall = {}
    dfann = {}
    segment_data = []
    segment_labels = []
    ds_list = ds1_files if ds_num == "1" else ds2_files
    # Load the necessary patient inputs.
    for patient_num in ds_list:
        dfall[patient_num] = pd.read_csv('data/DS' + ds_num + '/' + patient_num + '_ALL_samples.csv', sep=',', header=0, squeeze=False)
        dfann[patient_num] = pd.read_csv('data/DS' + ds_num + '/' + patient_num + '_ALL_ANN.csv', sep=',', header=0, parse_dates=[0], squeeze=False)
    # Butterworth band-pass coefficients (only applied when cleanse=True).
    lowcut = 0.01
    highcut = 15.0
    signal_freq = 360
    filter_order = 1
    nyquist_freq = 0.5 * signal_freq
    low = lowcut / nyquist_freq
    high = highcut / nyquist_freq
    b, a = signal.butter(filter_order, [low, high], btype="band")
    # Standardize the beat annotations to the 5 AAMI classes as ints 0..4
    # (N-like -> 0, SVEB -> 1, VEB -> 2, F -> 3, Q -> 4).
    vals_to_replace = {'N':0, 'L':0, 'e':0, 'j':0, 'R':0, 'A':1, 'a':1, 'J':1, 'S':1, 'V':2, 'E':2, 'F':3, 'Q':4, 'P':4, 'f':4, 'U':4}
    for patient_num in ds_list:
        dfann[patient_num]['Type'] = dfann[patient_num]['Type'].map(vals_to_replace)
        # RR interval (seconds) relative to the previous beat; the first row
        # has no predecessor and is dropped.
        dfann[patient_num]['RRI'] = (dfann[patient_num]['sample'] - dfann[patient_num]['sample'].shift(1)) / 360
        dfann[patient_num] = dfann[patient_num][1:]
    for patient_num in ds_list:
        annList = []
        rriList = []
        begNList = []
        endNList = []
        # Bug fix: the original ran this exact loop twice back to back, so
        # every beat appeared twice in segment_data/segment_labels.
        for index, row in dfann[patient_num].iterrows():
            begNList.append(row['sample'] - preX)
            endNList.append(row['sample'] + postX)
            annList.append(row['Type'])
            rriList.append(row['RRI'])
        mixNList = tuple(zip(begNList, endNList, annList, rriList))
        for row in mixNList:
            # Window of raw samples around this beat: [begin, end] inclusive.
            dfseg = dfall[patient_num][(dfall[patient_num]['sample'] >= row[0]) & (dfall[patient_num]['sample'] <= row[1])]
            dfseg1 = dfseg[dfseg.columns[1:2]]
            dfseg2 = dfseg[dfseg.columns[2:3]]
            if cleanse:
                # Bug fix: peakutils was referenced without ever being
                # imported (NameError on this path); import lazily so the
                # default path does not require the dependency.
                import peakutils
                dfseg1_fir = signal.lfilter(b, a, dfseg[dfseg.columns[1:2]])
                dfseg2_fir = signal.lfilter(b, a, dfseg[dfseg.columns[2:3]])
                # Remove the estimated baseline wander from each lead.
                dfseg1 = dfseg1_fir - peakutils.baseline(dfseg1_fir)
                dfseg2 = dfseg2_fir - peakutils.baseline(dfseg2_fir)
            # Bug fix: after cleansing dfseg1/dfseg2 are ndarrays, which have
            # no `.values`; np.asarray(...).flatten() handles both cases.
            training_inputs1 = np.asarray(dfseg1, dtype=np.float32).flatten()
            training_inputs2 = np.asarray(dfseg2, dtype=np.float32).flatten()
            rri = np.asarray([row[3]], dtype=np.float32)
            training_inputs1 = np.concatenate((training_inputs1, rri))
            training_inputs2 = np.concatenate((training_inputs2, rri))
            segment_data.append(np.concatenate((training_inputs1, training_inputs2), axis=0))
            segment_labels.append(row[2])
    segment_data = np.asarray(segment_data)
    return dfall, dfann, segment_data, segment_labels
def cnn_model_fn(features, labels, mode):
    """Estimator model_fn: 1-D CNN over 80-sample beats, 5 output classes.

    Architecture: conv(1x3, 5) -> maxpool -> conv(1x4, 10) -> maxpool ->
    dense(18) -> dropout(0.3) -> logits(5).

    Cleanup: the original created nine dead tf.placeholder tensors here (one
    of which shadowed `input_layer` and was immediately overwritten) and an
    unused confusion-matrix op. An Estimator model_fn receives its inputs via
    `features`/`labels`, so all of those have been removed.
    """
    # Reshape the flat feature vector into [batch, height=1, width=80, channels=1].
    input_layer = tf.reshape(features["x"], [-1, 1, 80, 1])
    # Conv #1: [batch, 1, 80, 1] -> [batch, 1, 78, 5]
    conv1 = tf.layers.conv2d(
        inputs=input_layer,
        filters=5,
        kernel_size=[1, 3],
        padding='valid',
        activation=tf.nn.leaky_relu)
    # Pool #1: [batch, 1, 78, 5] -> [batch, 1, 39, 5]
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[1, 2], strides=2)
    # Conv #2: [batch, 1, 39, 5] -> [batch, 1, 36, 10]
    conv2 = tf.layers.conv2d(
        inputs=pool1,
        filters=10,
        kernel_size=[1, 4],
        activation=tf.nn.leaky_relu)
    # Pool #2: [batch, 1, 36, 10] -> [batch, 1, 18, 10]
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[1, 2], strides=2)
    # Flatten into a batch of vectors for the dense layers.
    pool2_flat = tf.reshape(pool2, [-1, 1 * 18 * 10])
    dense = tf.layers.dense(inputs=pool2_flat, units=18, activation=tf.nn.leaky_relu)
    # Dropout (keep prob 0.7) is only active while training.
    dropout = tf.layers.dropout(
        inputs=dense, rate=0.3, training=mode == tf.estimator.ModeKeys.TRAIN)
    # Logits layer: one unit per AAMI class.
    logits = tf.layers.dense(inputs=dropout, units=5)
    predictions = {
        # Generate predictions (for PREDICT and EVAL mode)
        "classes": tf.argmax(input=logits, axis=1),
        # `softmax_tensor` is named so it can be referenced by logging hooks.
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    # Calculate loss (for both TRAIN and EVAL modes).
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
    # Configure the training op (TRAIN mode only).
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.003)
        train_op = optimizer.minimize(
            loss=loss,
            global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
    # EVAL mode: report accuracy.
    eval_metric_ops = {
        "accuracy": tf.metrics.accuracy(
            labels=labels, predictions=predictions["classes"])
    }
    return tf.estimator.EstimatorSpec(
        mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def cnn_model_fn2(features, labels, mode):
    """Estimator model_fn: deeper 1-D CNN over 480-sample beats, 5 classes.

    Architecture: conv(1x3, 5) -> maxpool -> conv(1x4, 10) -> maxpool ->
    conv(1x3, 20) -> maxpool -> dense(30) -> dense(20) -> dropout(0.3) ->
    logits(5).

    Cleanup: as in cnn_model_fn, the original created nine dead
    tf.placeholder tensors here (one shadowing `input_layer`); an Estimator
    model_fn receives inputs via `features`/`labels`, so they were removed.
    """
    # Reshape the flat feature vector into [batch, height=1, width=480, channels=1].
    input_layer = tf.reshape(features["x"], [-1, 1, 480, 1])
    # Conv #1: [batch, 1, 480, 1] -> [batch, 1, 478, 5]
    conv1 = tf.layers.conv2d(
        inputs=input_layer,
        filters=5,
        kernel_size=[1, 3],
        padding='valid',
        activation=tf.nn.leaky_relu)
    # Pool #1: [batch, 1, 478, 5] -> [batch, 1, 239, 5]
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[1, 2], strides=2)
    # Conv #2: [batch, 1, 239, 5] -> [batch, 1, 236, 10]
    conv2 = tf.layers.conv2d(
        inputs=pool1,
        filters=10,
        kernel_size=[1, 4],
        activation=tf.nn.leaky_relu)
    # Pool #2: [batch, 1, 236, 10] -> [batch, 1, 118, 10]
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[1, 2], strides=2)
    # Conv #3: [batch, 1, 118, 10] -> [batch, 1, 116, 20]
    conv3 = tf.layers.conv2d(
        inputs=pool2,
        filters=20,
        kernel_size=[1, 3],
        padding='valid',
        activation=tf.nn.leaky_relu)
    # Pool #3: [batch, 1, 116, 20] -> [batch, 1, 58, 20]
    pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[1, 2], strides=2)
    # Flatten into a batch of vectors for the dense layers.
    pool3_flat = tf.reshape(pool3, [-1, 1 * 58 * 20])
    dense1 = tf.layers.dense(inputs=pool3_flat, units=30, activation=tf.nn.leaky_relu)
    dense2 = tf.layers.dense(inputs=dense1, units=20, activation=tf.nn.leaky_relu)
    # Dropout (keep prob 0.7) is only active while training.
    dropout = tf.layers.dropout(
        inputs=dense2, rate=0.3, training=mode == tf.estimator.ModeKeys.TRAIN)
    # Logits layer: one unit per AAMI class.
    logits = tf.layers.dense(inputs=dropout, units=5)
    predictions = {
        # Generate predictions (for PREDICT and EVAL mode)
        "classes": tf.argmax(input=logits, axis=1),
        # `softmax_tensor` is named so it can be referenced by logging hooks.
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    # Calculate loss (for both TRAIN and EVAL modes).
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
    # Configure the training op (TRAIN mode only).
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.003)
        train_op = optimizer.minimize(
            loss=loss,
            global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
    # EVAL mode: report accuracy.
    eval_metric_ops = {
        "accuracy": tf.metrics.accuracy(
            labels=labels, predictions=predictions["classes"])
    }
    return tf.estimator.EstimatorSpec(
        mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
|
14,269 | 6b34e6869efaee5e1b9acb029df790b9cbd33512 | from __future__ import absolute_import, division, print_function
import sys
from PyQt4 import QtCore, QtGui
from widgets.SidebarSkel import Ui_Sidebar
from widgets.ImageFormSkel import Ui_ImageForm
from widgets.GPSFormSkel import Ui_GPSForm
from widgets.GZCQWidgets import QwwColorComboBox
from os.path import join, basename, exists
from shutil import rmtree
import utool as ut
from clientfuncs import CopyFiles, ExtractGPS, find_candidates, ex_deco, ensure_structure, resource_path
import zipfile
import random
import simplejson as json
import requests
LOGO_SIZE = 200
LOGO = resource_path(join('assets', 'logo_ibeis_alpha.png'))
# LOGO = resource_path(join('assets', 'logo_kwf_alpha.png'))
# LOGO = resource_path(join('assets', 'logo_kws_alpha.png'))
IMPORT_ICON = resource_path(join('assets', 'icons', 'icon_import.png'))
BROWSE_ICON = resource_path(join('assets', 'icons', 'icon_browse.png'))
CLEAR_ICON = resource_path(join('assets', 'icons', 'icon_trash.png'))
WAITING_ICON = resource_path(join('assets', 'icons', 'icon_import.png'))
SUBMIT_ICON = resource_path(join('assets', 'icons', 'icon_upload.png'))
ACCEPTED_ICON = resource_path(join('assets', 'icons', 'icon_accepted.png'))
REJECTED_ICON = resource_path(join('assets', 'icons', 'icon_rejected.png'))
RESOURCE_PATH = ut.get_app_resource_dir('gzc-client')
RESOURCE_EMPTY = join(RESOURCE_PATH, '.empty')
# Sentinel stored in imageImportDirectory when files were hand-picked rather
# than discovered by scanning an SD-card directory.
DIRECTORY_OVERRIDE_STR = '<<<override>>>'
RESOURCE_TEMPORARY_TRACK = join(RESOURCE_PATH, 'track.gpx')
# (label, hex) pairs for the car-color combo box; first entry is the prompt.
CAR_COLORS_COMBO = [('Select Color', '#F6F6F6')] + [
    ('white', '#FFFFFF'),
    ('red', '#D9534F'),
    ('orange', '#EF7A4C'),
    ('yellow', '#F0AD4E'),
    ('green', '#5CB85C'),
    ('blue', '#3071A9'),
    ('purple', '#6F5499'),
    ('black', '#333333'),
]
CAR_COLORS = [ color[0] for color in CAR_COLORS_COMBO[1:] ]
# Bug fix: under Python 3 (this file imports from __future__) `map` returns a
# one-shot iterator, so `x in CAR_NUMBERS` would consume it and all later
# membership tests would fail. list(map(...)) is a no-op change on Python 2.
CAR_NUMBERS = list(map(str, range(1, 26)))  # 51
CAR_NUMBERS_COMBO = ['Select Number'] + CAR_NUMBERS
PERSON_LETTERS = ['a', 'b', 'c', 'd', 'e', 'f']  # , 'g', ... 'zz' (full list trimmed)
PERSON_LETTERS_COMBO = ['Select Letter'] + PERSON_LETTERS
TIME_HOUR_RANGE = list(map(str, range(6, 23)))
TIME_HOUR_RANGE_COMBO = ['Hour'] + TIME_HOUR_RANGE
TIME_MINUTE_RANGE = list(map(str, range(0, 60)))
TIME_MINUTE_RANGE_COMBO = ['Minute'] + TIME_MINUTE_RANGE
TRACK_RANGE = list(map(str, range(1, 6)))
TRACK_RANGE_COMBO = ['Select Track'] + TRACK_RANGE
# Shared stylesheet fragments for the submit/clear buttons.
PALETTE_BASE = '''
font: 35px;
margin: 0 1px 0 1px;
border-style: groove;
border-width: 1px;
'''
PALETTE_CLEAR = '''
margin-left: 10px;
color: #333333;
border-color: #da534f;
'''
PALETTE_DEFAULT = '''
color: #333333;
border-color: #afafaf;
'''
PALETTE_SUBMIT = '''
color: #ffffff;
background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 #2198c0, stop: 1 #0c457e);
border-color: #0c457e;
'''
PALETTE_ACCEPT = '''
color: #ffffff;
background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 #5cb85c, stop: 1 #4cae4c);
border-color: #4cae4c;
'''
PALETTE_REJECT = '''
color: #ffffff;
background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 #d9534f, stop: 1 #d43f3a);
border-color: #d43f3a;
'''
class Sidebar(QtGui.QWidget, Ui_Sidebar):
    def __init__(self, parent):
        """Build the sidebar: set up child widgets, signal wiring and visuals.

        `parent` is the main window; it provides the image/GPS displays and
        backup destination paths used throughout this class.
        """
        QtGui.QWidget.__init__(self)
        self.parent = parent
        # Worker threads (created lazily by copyImages/copyGPS).
        self.CopyFiles = None
        self.ExtractGPS = None
        self.setupUi(self)
        # Order matters: widgets must exist before connections and styling.
        self.initWidgets()
        self.initConnect()
        self.initVisuals()
    def initWidgets(self):
        """Instantiate the image and GPS forms and add them to the form stack."""
        # Load Image Form
        self.imageForm = ImageForm(self)
        self.form.addWidget(self.imageForm)
        # Load GPS Form
        self.gpsForm = GPSForm(self)
        self.form.addWidget(self.gpsForm)
        # Load logos
        self.initLogos()
def initLogos(self):
logo = QtGui.QPixmap(LOGO).scaled(QtCore.QSize(LOGO_SIZE, LOGO_SIZE), QtCore.Qt.KeepAspectRatio, 1)
self.logo.setPixmap(logo)
    def initConnect(self):
        """Wire button clicks and the display's modification signal to slots."""
        self.submitButton.clicked.connect(self.submitClicked)
        self.clearButton.clicked.connect(self.clearClicked)
        # Re-run the status state machine whenever the image display changes.
        self.connect(self.parent.imageDisplay, QtCore.SIGNAL('images_modified'), self.updateStatus)
    def initVisuals(self):
        """Style the clear button and reset the sidebar to its initial state."""
        # Setup clear icon (icon only, no label text).
        self.clearButton.setText('')
        self.clearButton.setIcon(QtGui.QIcon(CLEAR_ICON))
        # Set clear button palette
        self.clearButton.setStyleSheet(PALETTE_BASE + PALETTE_CLEAR)
        # Clear Sidebar
        self.clear()
# Slots
def submitClicked(self, *args):
if self.currentDisplay() == 0:
if self.imageStatus < 8:
self.copyImages()
else:
self.submitImages()
elif self.currentDisplay() == 1:
if self.gpsStatus < 5:
self.copyGPS()
else:
self.submitGPS()
    def clearClicked(self):
        """Slot for the clear button: reset the sidebar and active display."""
        self.clear()
# Convenience
@ex_deco
def setSubmitButtonLook(self, text=None, icon=None, palette=None):
if text is not None:
self.submitButton.setText(text)
if icon is not None:
self.submitButton.setIcon(QtGui.QIcon(icon))
if palette is not None:
self.submitButton.setStyleSheet(PALETTE_BASE + palette)
    @ex_deco
    def currentDisplay(self):
        """Return the parent's active tab index (0 = images, 1 = GPS)."""
        return self.parent.currentDisplay
    @ex_deco
    def clearCursor(self):
        """Restore the normal cursor and re-run the sidebar status machine."""
        QtGui.QApplication.restoreOverrideCursor()
        self.updateStatus()
    @ex_deco
    def setWaitCursor(self):
        """Show the wait cursor application-wide (undone by clearCursor)."""
        QtGui.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor))
@ex_deco
def copyFilesImageCopied(self, index, length, filename):
self.sidebarStatus.setText('Copying Image %s / %s' % (index, length, ))
add_to_display = index in self.imageImportFilesRandomIndices
self.parent.imageDisplay.add_filename(filename, add_to_display=add_to_display)
@ex_deco
def copyFilesGPSCopied(self, index, length, filename):
# print('CAUGHT %s %s' % (index, length, ))
self.sidebarStatus.setText('Copying Track %s / %s' % (index, length, ))
@ex_deco
def copyFilesCompleted(self, filenames):
if self.currentDisplay() == 0:
self.imageCopied = True
elif self.currentDisplay() == 1:
self.gpsCopied = True
self.clearCursor()
    @ex_deco
    def copyFilesException(self, exception):
        """Re-raise a copy-thread exception on the GUI thread.

        This is to throw the exception and let ex_deco handle it in the
        correct (GUI) thread.
        """
        self.clearCursor()
        raise exception
    @ex_deco
    def extractGPSException(self, exception):
        """Re-raise an extract-thread exception on the GUI thread.

        This is to throw the exception and let ex_deco handle it in the
        correct (GUI) thread.
        """
        self.clearCursor()
        raise exception
@ex_deco
def extractGPSCompleted(self, gpx_content):
with open(RESOURCE_TEMPORARY_TRACK, 'w') as track:
track.write(gpx_content)
self.copyGPX(RESOURCE_TEMPORARY_TRACK)
# Functions
    def updateStatus(self):
        """Re-evaluate the submission wizard's state machine for the active tab.

        Resets the status label/button, then walks the required steps in
        order; the first unmet requirement sets a hint message and returns
        early. ``imageStatus`` / ``gpsStatus`` count the steps already
        satisfied (they drive submitClicked's import-vs-submit decision).
        Relies on CAR_NUMBERS/CAR_COLORS/etc. being reusable sequences.
        """
        self.sidebarStatus.setText('')
        self.submitButton.setEnabled(False)
        self.setSubmitButtonLook('Fill Form', IMPORT_ICON, PALETTE_DEFAULT)
        self.imageStatus = 0
        self.gpsStatus = 0
        if self.currentDisplay() == 0:
            # Show base case elements
            self.imageForm.driveLayout.show()
            self.imageForm.idLayout.hide()
            self.imageForm.syncLayout.hide()
            self.imageForm.nameInput.setEnabled(True)
            # Gather form values
            carNumber = self.imageForm.getNumber()
            carColor = self.imageForm.getColor()
            personLetter = self.imageForm.getLetter()
            imageName = self.imageForm.getImageName()
            timeHour = self.imageForm.getHour()
            timeMinute = self.imageForm.getMinute()
            # Image - Step 1
            if self.imageImportDirectory is None:
                self.sidebarStatus.setText('Specify the SD Card directory')
                return
            self.imageStatus += 1
            self.imageForm.idLayout.show()
            # Image - Step 2
            if carNumber not in CAR_NUMBERS:
                self.sidebarStatus.setText('Specify the car number')
                return
            self.imageStatus += 1
            if carColor not in CAR_COLORS:
                self.sidebarStatus.setText('Specify the car color')
                return
            self.imageStatus += 1
            if personLetter not in PERSON_LETTERS:
                self.sidebarStatus.setText('Specify the person letter')
                return
            self.imageStatus += 1
            self.imageForm.syncLayout.show()
            # Manually-selected files fix the name input; it can't be edited.
            if self.imageImportDirectory == DIRECTORY_OVERRIDE_STR:
                self.imageForm.nameInput.setEnabled(False)
            # Image - Step 3 (Copy Images)
            if imageName == '':
                self.sidebarStatus.setText('Specify the image name to search')
                return
            self.imageStatus += 1
            self.submitButton.setEnabled(True)
            self.setSubmitButtonLook('Import Images', IMPORT_ICON)
            # Image - Step 4 (Sync and Select)
            if not self.imageCopied:
                self.sidebarStatus.setText('Import the card\'s images (this may take a minute or two)')
                return
            self.imageStatus += 1
            self.submitButton.setEnabled(False)
            self.setSubmitButtonLook('Images Imported', WAITING_ICON, PALETTE_DEFAULT)
            if timeHour not in TIME_HOUR_RANGE or timeMinute not in TIME_MINUTE_RANGE:
                self.sidebarStatus.setText('Specify the sync time hour and minute')
                return
            self.imageStatus += 1
            if not self.parent.allImagesSelected():
                self.sidebarStatus.setText('Select the species for all images')
                return
            self.imageStatus += 1
            self.submitButton.setEnabled(True)
            self.setSubmitButtonLook('Submit Images', SUBMIT_ICON, PALETTE_SUBMIT)
            # Image - Step 5 (Submit)
            if self.imageSubmitted is None:
                self.sidebarStatus.setText('Submit the images for processing')
                return
            self.imageStatus += 1
            if self.imageSubmitted:
                self.setSubmitButtonLook('Images Accepted', ACCEPTED_ICON, PALETTE_ACCEPT)
            else:
                self.setSubmitButtonLook('Images Rejected', REJECTED_ICON, PALETTE_REJECT)
            self.imageStatus += 1
            self.submitButton.setEnabled(False)
            self.sidebarStatus.setText('Clear the form to start the next submission')
        elif self.currentDisplay() == 1:
            # Show base case elements
            self.gpsForm.idLayout.show()
            self.gpsForm.syncLayout.hide()
            # Gather form values
            carNumber = self.gpsForm.getNumber()
            carColor = self.gpsForm.getColor()
            timeHour = self.gpsForm.getHour()
            timeMinute = self.gpsForm.getMinute()
            trackNumber = self.gpsForm.getTrack()
            # GPS - Step 1
            if carNumber not in CAR_NUMBERS:
                self.sidebarStatus.setText('Specify the car number')
                return
            self.gpsStatus += 1
            if carColor not in CAR_COLORS:
                self.sidebarStatus.setText('Specify the car color')
                return
            self.gpsStatus += 1
            self.gpsForm.syncLayout.show()
            # GPS - Step 3 (Copy GPSs)
            self.submitButton.setEnabled(True)
            self.setSubmitButtonLook('Import Track', IMPORT_ICON)
            # GPS - Step 4 (Sync and Select)
            if not self.gpsCopied:
                self.sidebarStatus.setText('Import the dongle\'s GPS track (this may take a minute or two)')
                return
            self.gpsStatus += 1
            self.submitButton.setEnabled(False)
            self.setSubmitButtonLook('Track Imported', WAITING_ICON, PALETTE_DEFAULT)
            if timeHour not in TIME_HOUR_RANGE or timeMinute not in TIME_MINUTE_RANGE:
                self.sidebarStatus.setText('Specify the sync time hour and minute')
                return
            self.gpsStatus += 1
            if trackNumber not in TRACK_RANGE:
                self.sidebarStatus.setText('Specify the track number')
                return
            self.gpsStatus += 1
            self.submitButton.setEnabled(True)
            self.setSubmitButtonLook('Submit Track', SUBMIT_ICON, PALETTE_SUBMIT)
            # GPS - Step 5 (Submit)
            if self.gpsSubmitted is None:
                self.sidebarStatus.setText('Submit the GPS track for processing')
                return
            self.gpsStatus += 1
            if self.gpsSubmitted:
                self.setSubmitButtonLook('Track Accepted', ACCEPTED_ICON, PALETTE_ACCEPT)
            else:
                self.setSubmitButtonLook('Track Rejected', REJECTED_ICON, PALETTE_REJECT)
            self.gpsStatus += 1
            self.submitButton.setEnabled(False)
            self.sidebarStatus.setText('Clear the form to start the next submission')
    def clear(self, clearCopyCache=True, ignoreImage=False, ignoreGPS=False):
        """Reset sidebar state, stop worker threads and refresh the display.

        clearCopyCache: when False, keep the remembered import directory/files
            (used when re-running a copy without re-selecting sources).
        ignoreImage / ignoreGPS: skip resetting the corresponding form and
            display (used mid-operation on that tab).
        """
        # Always clear these attributes
        self.imageCopied = False
        self.gpsCopied = False
        self.imageSubmitted = None # None because we use booleans for error checking
        self.gpsSubmitted = None # None because we use booleans for error checking
        self.imageImportFilesRandomIndices = set()
        if clearCopyCache:
            # Clear copy cache
            self.imageImportDirectory = None
            self.imageImportFiles = []
            self.gpsImportFiles = []
        # Stop any ongoing copy or extract thread
        if self.CopyFiles is not None:
            self.CopyFiles.terminate()
            self.CopyFiles.quit()
        if self.ExtractGPS is not None:
            self.ExtractGPS.terminate()
            self.ExtractGPS.quit()
        # Update overall display: show only the active tab's form.
        if self.currentDisplay() == 0:
            if not ignoreImage:
                self.imageForm.clear()
                self.parent.clearImageDisplay()
            self.imageForm.show()
            self.gpsForm.hide()
        elif self.currentDisplay() == 1:
            if not ignoreGPS:
                self.gpsForm.clear()
                self.parent.clearGPSDisplay()
            self.imageForm.hide()
            self.gpsForm.show()
        # Clear cursor, which calls updateStatus
        self.clearCursor()
# Images
@ex_deco
def imageManualSelection(self, filepaths):
if len(filepaths) < 3:
raise IOError('Please select at least three images to import when manually selecting images (fisrt, last and at least one query image)')
self.clear()
self.imageImportDirectory = DIRECTORY_OVERRIDE_STR
self.imageImportFiles = filepaths
self.imageForm.driveLabel.setText('Manual Selection')
self.imageForm.nameInput.setText(basename(self.imageImportFiles[0]))
self.updateStatus()
    @ex_deco
    def copyImages(self):
        """Validate the form, then copy the selected images in a worker thread.

        Copies every candidate file to each backup destination path; progress
        and completion are reported back via Qt signals. Any pre-existing
        destination folder for this car/person is deleted first.
        """
        self.clear(clearCopyCache=False, ignoreImage=True)
        self.setWaitCursor()
        # Get information from form
        carNumber = self.imageForm.getNumber()
        carColor = self.imageForm.getColor()
        personLetter = self.imageForm.getLetter()
        imageName = self.imageForm.getImageName()
        # Sanity checks (round 1)
        if self.imageImportDirectory is None or self.imageImportDirectory == '':
            self.clearCursor()
            raise IOError('Please select the directory that contains the photos you wish to import from')
        if carNumber not in CAR_NUMBERS or carColor not in CAR_COLORS or personLetter not in PERSON_LETTERS:
            self.clearCursor()
            raise IOError('Please select the correct car number, color and person letter')
        if imageName == '':
            self.clearCursor()
            raise IOError('The first image name must be defined')
        # Find candidates if searching (manual selection already set the list).
        if self.imageImportDirectory != DIRECTORY_OVERRIDE_STR:
            self.imageImportFiles = find_candidates(self.imageImportDirectory, imageName)
        # Sanity checks (round 2)
        if len(self.imageImportFiles) == 0:
            self.clearCursor()
            raise IOError('Could not find any files for selected directory. Please check your first image name')
        # Pick up to 10 random indices; only those images are shown in the GUI.
        while len(self.imageImportFilesRandomIndices) < min(10, len(self.imageImportFiles)):
            index = random.randint(0, len(self.imageImportFiles) - 1)
            self.imageImportFilesRandomIndices.add(index)
        # Set the first and last images
        filenames = [basename(f) for f in self.imageImportFiles]
        self.parent.setImageDisplayFirstImage(filenames.pop(0))
        self.parent.setImageDisplayLastImage(filenames.pop())
        # Ensure path exists for all destinations
        destinationPaths = []
        for index, path in enumerate(self.parent.backupDestinationPaths):
            personPath = ensure_structure(path, 'images', carNumber, carColor, personLetter)
            if exists(personPath):
                print('Target person directory already exists... deleting')
                rmtree(personPath)
            destinationPaths.append(personPath)
        # Start copy thread; callbacks fire on the GUI thread via signals.
        self.CopyFiles = CopyFiles(self.imageImportFiles, destinationPaths)
        self.connect(self.CopyFiles, QtCore.SIGNAL('fileCopied'), self.copyFilesImageCopied)
        self.connect(self.CopyFiles, QtCore.SIGNAL('completed'), self.copyFilesCompleted)
        self.connect(self.CopyFiles, QtCore.SIGNAL('__EXCEPTION__'), self.copyFilesException)
        self.CopyFiles.start()
@ex_deco
def submitImages(self):
# Get path and domain from parent
path = self.parent.backupDestinationPaths[0]
domain = '%s/images/submit' % (self.parent.domain)
# Get data from form
carNumber = self.imageForm.getNumber()
carColor = self.imageForm.getColor()
personLetter = self.imageForm.getLetter()
timeHour = self.imageForm.getHour()
timeMinute = self.imageForm.getMinute()
# Establish source and destination folders
srcDirectory = ensure_structure(path, 'images', carNumber, carColor, personLetter)
dstDirectory = ensure_structure(path, 'zip', carNumber, carColor)
# Gather selected images from the GUI
first = self.parent.getImageDisplayFirstImage()
last = self.parent.getImageDisplayLastImage()
zebra = []
giraffe = []
for IB in self.parent.getImageDisplayImageBoxes():
(select_path, select_type) = IB.get_selection()
if select_type == 'Unassigned':
raise IOError('Please make sure all image boxes have been identified as either zebra of giraffe')
return
elif select_type == 'Zebra':
zebra.append(select_path)
elif select_type == 'Giraffe':
giraffe.append(select_path)
elif select_type == 'Ignore':
pass
# Make empty file
if not exists(RESOURCE_EMPTY):
with open(RESOURCE_EMPTY, 'w') as empty:
empty.write('')
# Create zip archive
zip_path = join(dstDirectory, personLetter + '.zip')
with zipfile.ZipFile(zip_path, 'w') as zip_archive:
zip_archive.write(join(srcDirectory, first), 'first.jpg')
zip_archive.write(join(srcDirectory, last), 'last.jpg')
if len(zebra) == 0:
zip_archive.write(RESOURCE_EMPTY, join('zebra', '.empty'))
if len(giraffe) == 0:
zip_archive.write(RESOURCE_EMPTY, join('giraffe', '.empty'))
for filename in zebra:
zip_archive.write(join(srcDirectory, filename), join('zebra', filename))
for filename in giraffe:
zip_archive.write(join(srcDirectory, filename), join('giraffe', filename))
# Format data
data = {
'car_number': carNumber,
'car_color': carColor,
'person_letter': personLetter,
'image_first_time_hour': timeHour,
'image_first_time_minute': timeMinute,
}
content = open(zip_path, 'rb')
files = {
'image_archive': content,
}
# Send POST request
r = requests.post(domain, data=data, files=files)
# Response
response = json.loads(r.text)
if response['status']['code'] != 0:
self.imageSubmitted = False
self.updateStatus()
raise IOError('Server responded with an error: %r' % (response, ))
self.imageSubmitted = True
self.updateStatus()
# GPS
    @ex_deco
    def copyGPX(self, preextractedGPXPath):
        """Validate the GPS form, preview the given GPX file, and start
        copying it to every backup destination on a background thread."""
        # Get data from form
        carNumber = self.gpsForm.getNumber()
        carColor = self.gpsForm.getColor()
        # Sanity checks
        if carNumber not in CAR_NUMBERS or carColor not in CAR_COLORS:
            self.clearCursor()
            raise IOError('Please select the correct car number and color')
        # Load GPX path and show the track in the parent widget
        self.gpsImportFiles = [preextractedGPXPath]
        with open(preextractedGPXPath) as gpxFile:
            gpxTrack = gpxFile.read()
        self.parent.displayGPXTrack(gpxTrack)
        # Ensure path exists for all destinations
        destinationPaths = []
        for index, path in enumerate(self.parent.backupDestinationPaths):
            carPath = ensure_structure(path, 'gps', carNumber, carColor)
            # A leftover directory from an earlier run would mix old and new
            # files, so wipe it before copying.
            if exists(carPath):
                print('Target car directory already exists... deleting')
                rmtree(carPath)
            destinationPaths.append(carPath)
        # Start copy thread; progress/completion/errors arrive via Qt signals.
        self.CopyFiles = CopyFiles(self.gpsImportFiles, destinationPaths)
        self.connect(self.CopyFiles, QtCore.SIGNAL('fileCopied'), self.copyFilesGPSCopied)
        self.connect(self.CopyFiles, QtCore.SIGNAL('completed'), self.copyFilesCompleted)
        self.connect(self.CopyFiles, QtCore.SIGNAL('__EXCEPTION__'), self.copyFilesException)
        self.CopyFiles.start()
    @ex_deco
    def copyGPS(self, preextractedGPXPath=None):
        """Acquire GPS data: extract from the dongle on a worker thread, or,
        if a pre-extracted GPX path is given, hand it to copyGPX directly."""
        self.clear(clearCopyCache=False, ignoreGPS=True)
        self.setWaitCursor()
        # Get data from form
        carNumber = self.gpsForm.getNumber()
        carColor = self.gpsForm.getColor()
        # Sanity checks
        if carNumber not in CAR_NUMBERS or carColor not in CAR_COLORS:
            self.clearCursor()
            raise IOError('Please select the correct car number and color')
        # Extract the data from the GPS dongle or process the preextractedGPXPath
        if preextractedGPXPath is None:
            # Dongle extraction runs asynchronously; results come back via signals.
            self.ExtractGPS = ExtractGPS()
            self.connect(self.ExtractGPS, QtCore.SIGNAL('trackExtracted'), self.copyFilesGPSCopied)
            self.connect(self.ExtractGPS, QtCore.SIGNAL('completed'), self.extractGPSCompleted)
            self.connect(self.ExtractGPS, QtCore.SIGNAL('__EXCEPTION__'), self.extractGPSException)
            self.ExtractGPS.start()
        else:
            self.copyGPX(preextractedGPXPath)
@ex_deco
def submitGPS(self):
# Get path and domain from parent
path = self.parent.backupDestinationPaths[0]
domain = '%s/gps/submit' % (self.parent.domain)
# Get data from form
carNumber = self.gpsForm.getNumber()
carColor = self.gpsForm.getColor()
timeHour = self.gpsForm.getHour()
timeMinute = self.gpsForm.getMinute()
trackNumber = self.gpsForm.getTrack()
# Establish source folder
srcDirectory = ensure_structure(path, 'gps', carNumber, carColor)
# Format data
data = {
'car_number': carNumber,
'car_color': carColor,
'gps_start_time_hour': timeHour,
'gps_start_time_minute': timeMinute,
'track_number': trackNumber,
}
content = open(join(srcDirectory, 'track.gpx'), 'rb')
files = {
'gps_data': content,
}
# Send POST request
r = requests.post(domain, data=data, files=files)
# Response
response = json.loads(r.text)
if response['status']['code'] != 0:
self.gpsSubmitted = False
self.updateStatus()
raise IOError('Server responded with an error: %r' % (response, ))
self.gpsSubmitted = True
self.updateStatus()
class ImageForm(QtGui.QWidget, Ui_ImageForm):
    """Form widget collecting image-submission metadata (car, person, time,
    source directory)."""

    def __init__(self, parent):
        QtGui.QWidget.__init__(self)
        self.parent = parent
        self.setupUi(self)
        self.initWidgets()
        self.initConnect()

    def initWidgets(self):
        # Populate the combo boxes with the fixed choice lists.
        self.numberInput.addItems(CAR_NUMBERS_COMBO)
        self.letterInput.addItems(PERSON_LETTERS_COMBO)
        self.colorInput = QwwColorComboBox()
        self.colorInputContainer.addWidget(self.colorInput)
        for (color_name, color_hex) in CAR_COLORS_COMBO:
            color = QtGui.QColor(color_hex)
            self.colorInput.addColor(color, color_name)
        self.driveBrowse.setIcon(QtGui.QIcon(BROWSE_ICON))
        self.timeHour.addItems(TIME_HOUR_RANGE_COMBO)
        self.timeMinute.addItems(TIME_MINUTE_RANGE_COMBO)

    def initConnect(self):
        # Every edit refreshes the parent's status display.
        self.driveBrowse.clicked.connect(self.browseDirectory)
        self.colorInput.currentIndexChanged[int].connect(self.parent.updateStatus)
        self.numberInput.currentIndexChanged[int].connect(self.parent.updateStatus)
        self.letterInput.currentIndexChanged[int].connect(self.parent.updateStatus)
        self.nameInput.textEdited.connect(self.parent.updateStatus)
        self.timeHour.currentIndexChanged[int].connect(self.parent.updateStatus)
        self.timeMinute.currentIndexChanged[int].connect(self.parent.updateStatus)

    # Slots
    def browseDirectory(self):
        def _truncate(path):
            # Keep only the trailing 40 characters so long paths fit the label.
            # BUG FIX: this helper previously ignored its `path` parameter and
            # read the enclosing `directory` variable instead.
            if len(path) > 40:
                return '...' + path[-40:]
            return path
        directory = str(QtGui.QFileDialog.getExistingDirectory(self, 'Select Directory'))
        if len(directory) > 0:
            self.driveLabel.setText(_truncate(directory))
            self.parent.imageImportDirectory = directory
            self.parent.updateStatus()

    # Convenience
    def getNumber(self):
        return str(self.numberInput.currentText())
    def getColor(self):
        return str(self.colorInput.currentText())
    def getLetter(self):
        return str(self.letterInput.currentText())
    def getHour(self):
        return str(self.timeHour.currentText())
    def getMinute(self):
        return str(self.timeMinute.currentText())
    def getImageName(self):
        return str(self.nameInput.text())

    # Functions
    @ex_deco
    def clear(self):
        """Reset every input back to its default state."""
        self.driveLabel.setText('Select a Directory...')
        self.colorInput.setCurrentIndex(0)
        self.numberInput.setCurrentIndex(0)
        self.letterInput.setCurrentIndex(0)
        self.nameInput.setEnabled(True)
        self.nameInput.setText('')
class GPSForm(QtGui.QWidget, Ui_GPSForm):
    """Form widget collecting GPS-submission metadata (car, time, track)."""
    def __init__(self, parent):
        QtGui.QWidget.__init__(self)
        self.parent = parent
        self.setupUi(self)
        self.initWidgets()
        self.initConnect()
    def initWidgets(self):
        # Populate the combo boxes with the fixed choice lists.
        self.timeHour.addItems(TIME_HOUR_RANGE_COMBO)
        self.timeMinute.addItems(TIME_MINUTE_RANGE_COMBO)
        self.colorInput = QwwColorComboBox()
        self.colorInputContainer.addWidget(self.colorInput)
        self.numberInput.addItems(CAR_NUMBERS_COMBO)
        for (color_name, color_hex) in CAR_COLORS_COMBO:
            color = QtGui.QColor(color_hex)
            self.colorInput.addColor(color, color_name)
        self.trackNumber.addItems(TRACK_RANGE_COMBO)
    def initConnect(self):
        # Every edit refreshes the parent's status display.
        self.colorInput.currentIndexChanged[int].connect(self.parent.updateStatus)
        self.numberInput.currentIndexChanged[int].connect(self.parent.updateStatus)
        self.timeHour.currentIndexChanged[int].connect(self.parent.updateStatus)
        self.timeMinute.currentIndexChanged[int].connect(self.parent.updateStatus)
        self.trackNumber.currentIndexChanged[int].connect(self.parent.updateStatus)
    # Convenience
    def getNumber(self):
        return str(self.numberInput.currentText())
    def getColor(self):
        return str(self.colorInput.currentText())
    def getHour(self):
        return str(self.timeHour.currentText())
    def getMinute(self):
        return str(self.timeMinute.currentText())
    def getTrack(self):
        return str(self.trackNumber.currentText())
    # Functions
    @ex_deco
    def clear(self):
        # Reset the selectable inputs back to their defaults.
        self.colorInput.setCurrentIndex(0)
        self.numberInput.setCurrentIndex(0)
if __name__ == '__main__':
    # Launch the sidebar as a standalone Qt application.
    app = QtGui.QApplication(sys.argv)
    widget = Sidebar()
    widget.show()
    sys.exit(app.exec_())
|
14,270 | 11ca3b62e8e708798a9ed82a24ccbc910da51228 | '''
Description: Main program used in train and testing my network
Author:Charles Shen
Date:8/22/2020
'''
import numpy as np
import time
import os
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from models.encoder import Encoder
from models.decoder import Decoder
from models.PCN import PCN
from data.scannet_loader import ScanNetLoader
from data.shapenet_loader import ShapeNetLoader
from utils.triplet_loss import random_sample, random_sample_original
from torch.utils.data import DataLoader
import chamfer3D.dist_chamfer_3D
import constants
from initialize import initialize, build_args
from utils.triplet_loss import random_sample
from utils.common import get_triplet_loss, get_chamfer_dist_train, get_chamfer_dist_valid, step_weight
def train(args, epoch, epochs, device, generator_partial, generator_complete, decoder, \
        optimizer_generator_complete, optimizer_generator_partial, optimizer_decoder, data_loader_shapenet_train, result_dir):
    '''
    description: train the models for one epoch.
        Builds (anchor, positive, negative) triplets, combines the triplet
        loss with chamfer reconstruction losses, steps all three optimizers,
        and appends the epoch averages to result_dir.
    variable: args, epoch, epochs, device, generator_partial, generator_complete, decoder, \
        optimizer_generator_complete, optimizer_generator_partial, optimizer_decoder, data_loader_shapenet_train, result_dir
    return: generator_partial, generator_complete, decoder
    '''
    generator_partial.train()
    generator_complete.train()
    decoder.train()
    total_dist = 0
    total_triplet = 0
    total_batch = 0
    for i, (partial_shapenet, ground_truth_fine, ground_truth_coarse) in enumerate(data_loader_shapenet_train):
        if device:
            partial_shapenet = partial_shapenet.to(device)
            ground_truth_fine = ground_truth_fine.to(device)
            ground_truth_coarse = ground_truth_coarse.to(device)
        # Encode the triplet: anchor from the partial cloud, positive/negative
        # from complete clouds.
        if args.dataset == 'complete':
            batch_size = partial_shapenet.size(0)
            num_partial = partial_shapenet.size(1)
            anchor_examples, positive_examples, negative_examples, positive_fine_gt, positive_coarse_gt, negative_fine_gt, negative_coarse_gt\
                 = random_sample(partial_shapenet, ground_truth_fine, ground_truth_coarse)
            feature_anchor = generator_partial(anchor_examples)
            feature_positive = generator_complete(positive_examples)
            feature_negative = generator_complete(negative_examples)
        else:
            negative_examples = random_sample_original(partial_shapenet, ground_truth_fine)
            feature_anchor = generator_partial(partial_shapenet)
            feature_positive = generator_complete(ground_truth_fine)
            feature_negative = generator_complete(negative_examples)
        triplet_loss, the_times_triplet, feature_anchor, feature_positive, feature_negative = \
            get_triplet_loss(feature_anchor, feature_positive, feature_negative, args, device)
    #reconstruction loss
        # The triplet term is weighted by an epoch-dependent schedule.
        weight_triplet = step_weight(args, epoch, epochs, the_times_triplet)
        if args.dataset == 'complete':
            coarse_anchor, fine_anchor = decoder(feature_anchor)
            dis_anchor = get_chamfer_dist_train(coarse_anchor, fine_anchor, positive_coarse_gt, positive_fine_gt)
            coarse_positive, fine_positive = decoder(feature_positive)
            dis_positive = get_chamfer_dist_train(coarse_positive, fine_positive, positive_coarse_gt, positive_fine_gt)
            coarse_negative, fine_negative = decoder(feature_negative)
            dis_negative = get_chamfer_dist_train(coarse_negative, fine_negative, negative_coarse_gt, negative_fine_gt)
            total_loss = triplet_loss * (weight_triplet / 10000) + \
                (dis_anchor * args.weight_anchor + dis_positive *args.weight_positive + dis_negative * args.weight_negative)
        else:
            coarse_anchor, fine_anchor = decoder(feature_anchor)
            # BUG FIX: the ground truths were previously passed as
            # (fine, coarse); every other call site here and in valid() uses
            # (coarse_pred, fine_pred, coarse_gt, fine_gt).
            dis_anchor = get_chamfer_dist_train(coarse_anchor, fine_anchor, ground_truth_coarse, ground_truth_fine)
            total_loss = triplet_loss * (weight_triplet / 10000) + dis_anchor
        if args.dataset == 'complete':
            total_dist += min(dis_anchor.item(), dis_positive.item()) * 10000
        else:
            total_dist += dis_anchor.item() * 10000
        total_triplet += triplet_loss.item()
        total_batch += 1
        # One joint optimization step for both encoders and the decoder.
        optimizer_generator_complete.zero_grad()
        optimizer_generator_partial.zero_grad()
        optimizer_decoder.zero_grad()
        total_loss.backward()
        optimizer_generator_complete.step()
        optimizer_generator_partial.step()
        optimizer_decoder.step()
        if args.dataset == 'complete':
            min_loss = min(dis_anchor.item(), dis_positive.item())
        else:
            min_loss = dis_anchor.item()
        print('Train:epoch:[{}/{}] batch {}, dis: {:.2f}, triplet: {:.6f}'.format(epoch + 1, epochs, i+1, min_loss * 10000, triplet_loss.item()))
    avg_dist = total_dist / total_batch
    avg_triplet = total_triplet / total_batch
    # Append epoch averages; the context manager guarantees the handle closes.
    with open(result_dir, "a") as file:
        file.write(str(avg_dist) + ' ' + str(avg_triplet) + '\n')
    return generator_partial, generator_complete, decoder
def valid(args, epoch, epochs, device, generator_partial, generator_complete, decoder, data_loader_shapenet_val, best_dist, \
        result_dir, model_dir_partial, model_dir_complete, model_dir_decoder):
    '''
    description: validate the models for one epoch; checkpoint all three
        models whenever the average chamfer distance improves on best_dist.
    variable:args, epoch, epochs, device, generator_partial, generator_complete, decoder, data_loader_shapenet_val, best_dist, \
        result_dir, model_dir_partial, model_dir_complete, model_dir_decoder
    return: best_dist (the updated best average distance)
    '''
    generator_partial.eval()
    generator_complete.eval()
    decoder.eval()
    total_dist = 0
    total_batch = 0
    # No gradients are needed during validation; saves memory and time.
    with torch.no_grad():
        for i, (partial_shapenet, ground_truth_fine, ground_truth_coarse) in enumerate(data_loader_shapenet_val):
            if device:
                partial_shapenet = partial_shapenet.to(device)
                ground_truth_fine = ground_truth_fine.to(device)
                ground_truth_coarse = ground_truth_coarse.to(device)
            if args.dataset == 'complete':
                # Flatten per-object partial views into one batch and repeat
                # the ground truths to match.
                batch_size = partial_shapenet.size(0)
                num_partial = partial_shapenet.size(1)
                partial_shapenet = partial_shapenet.resize(batch_size * num_partial, partial_shapenet.size(2), partial_shapenet.size(3))
                ground_truth_fine = ground_truth_fine.repeat(num_partial, 1, 1)
                ground_truth_coarse = ground_truth_coarse.repeat(num_partial, 1, 1)
            feature_anchor = generator_partial(partial_shapenet)
            # NOTE(review): the "positive" feature also encodes the *partial*
            # cloud here, whereas train() encodes the complete one — confirm
            # this is intended.
            feature_positive = generator_complete(partial_shapenet)
            if args.loss == 'cosine':
                # normalize features before cosine comparison
                if args.normalize == True:
                    feature_anchor = torch.nn.functional.normalize(feature_anchor, dim = 1)
            if args.dataset == 'complete':
                coarse_anchor, fine_anchor = decoder(feature_anchor)
                dis_anchor = get_chamfer_dist_valid(coarse_anchor, fine_anchor, ground_truth_coarse, ground_truth_fine)
                coarse_positive, fine_positive = decoder(feature_positive)
                dis_positive = get_chamfer_dist_valid(coarse_positive, fine_positive, ground_truth_coarse, ground_truth_fine)
                total_dist += min(dis_anchor.item(), dis_positive.item()) * 10000
                total_batch += 1
            else:
                coarse_anchor, fine_anchor = decoder(feature_anchor)
                dis_anchor = get_chamfer_dist_valid(coarse_anchor, fine_anchor, ground_truth_coarse, ground_truth_fine)
                total_dist += dis_anchor.item() * 10000
                total_batch += 1
            if args.dataset == 'complete':
                min_loss = min(dis_anchor.item(), dis_positive.item())
            else:
                min_loss = dis_anchor.item()
            print('Valid:epoch:[{}/{}] batch {}, dis: {:.2f}'.format(epoch + 1, epochs, i+1, min_loss * 10000))
    avg_dist = total_dist / total_batch
    # Append the epoch average; context manager closes the handle (the old
    # code used a bare open()/close() pair).
    with open(result_dir, "a") as file:
        file.write(str(avg_dist) + '\n')
    print('Valid:epoch:[{}/{}] total average dist: {:.2f}'.format(epoch + 1, epochs, avg_dist))
    if avg_dist < best_dist:
        best_dist = avg_dist
        torch.save(generator_partial.state_dict(), model_dir_partial)
        torch.save(generator_complete.state_dict(), model_dir_complete)
        torch.save(decoder.state_dict(), model_dir_decoder)
    return best_dist
if __name__ == "__main__":
    args = build_args()
    device, generator_partial, generator_complete, decoder, pcn, decoder_fused, optimizer_generator_complete, optimizer_generator_partial, optimizer_decoder, optimizer_PCN, optimizer_fused, \
        data_loader_shapenet_train, data_loader_shapenet_val, result_dir_PCN, result_dir, model_dir_PCN, model_dir_partial, model_dir_complete, model_dir_decoder = initialize(args)
    # Start each run with a fresh result log; it may not exist yet.
    try:
        os.remove(result_dir)
    except OSError:
        pass
    # BUG FIX: the literal 14530529 was previously passed to valid() every
    # epoch, so checkpoints were rewritten on every epoch instead of only when
    # the validation distance improved. Track the best value across epochs.
    best_dist = float('inf')
    for epoch in range(constants.num_epochs):
        generator_partial, generator_complete, decoder = train(args, epoch, args.epochs, device, generator_partial, generator_complete, decoder, \
            optimizer_generator_complete, optimizer_generator_partial, optimizer_decoder, data_loader_shapenet_train, result_dir)
        best_dist = valid(args, epoch, args.epochs, device, generator_partial, generator_complete, decoder, data_loader_shapenet_val, best_dist, \
            result_dir, model_dir_partial, model_dir_complete, model_dir_decoder)
|
14,271 | 6ab96bf012ff58aab428551d03e5a1c952d39fb3 | import pytest
from day10.monitoring_station import parse_asteroid_map, can_detect, count_detectable_asteroids, find_best_station, sort_for_laser_round, shoot
from common.io import read
# The parser returns (x, y) coordinates of '#' cells; leading whitespace on
# each map line is ignored (coordinates start at the first map character).
@pytest.mark.parametrize("input,expected", [
    ("""##.
    .#.
    ..#""", [(0, 0), (1, 0), (1, 1), (2, 2)])
])
def test_parse_asteroid_map(input, expected):
    assert parse_asteroid_map(input) == expected
# Line-of-sight checks: a target is detectable only when no asteroid lies
# exactly between the station and the target.
@pytest.mark.parametrize("asteroids,station,target,expected", [
    (parse_asteroid_map(
        """.#..#
    .....
    #####
    ....#
    ...##"""), (3, 4), (1, 0), False),
    (parse_asteroid_map(
        """.#..#
    .....
    ##.##
    ....#
    ...##"""), (3, 4), (1, 0), True),
    (parse_asteroid_map(
        """.#..#
    .....
    #####
    ....#
    ...##"""), (3, 4), (0, 2), True)
])
def test_can_detect(asteroids, station, target, expected):
    assert can_detect(asteroids, station, target) == expected
# Counts of asteroids visible from a candidate station on the 5x5 example map.
@pytest.mark.parametrize("asteroids,station,expected", [
    (parse_asteroid_map(
        """.#..#
    .....
    #####
    ....#
    ...##"""), (3, 4), 8),
    (parse_asteroid_map(
        """.#..#
    .....
    #####
    ....#
    ...##"""), (4, 2), 5)
])
def test_count_detectable_asteroids(asteroids, station, expected):
    assert count_detectable_asteroids(asteroids, station) == expected
# Best-station selection: expected is (station_coords, visible_count).
# The last case runs against the real puzzle input file.
@pytest.mark.parametrize("asteroids,expected", [
    (parse_asteroid_map(
        """.#..#
    .....
    #####
    ....#
    ...##"""), ((3, 4), 8)),
    (parse_asteroid_map(
        """......#.#.
    #..#.#....
    ..#######.
    .#.#.###..
    .#..#.....
    ..#....#.#
    #..#....#.
    .##.#..###
    ##...#..#.
    .#....####"""), ((5, 8), 33)),
    (parse_asteroid_map(
        """#.#...#.#.
    .###....#.
    .#....#...
    ##.#.#.#.#
    ....#.#.#.
    .##..###.#
    ..#...##..
    ..##....##
    ......#...
    .####.###."""), ((1, 2), 35)),
    (parse_asteroid_map(
        """.#..#..###
    ####.###.#
    ....###.#.
    ..###.##.#
    ##.##.#.#.
    ....###..#
    ..#.#..#.#
    #..#.#.###
    .##...##.#
    .....#.#.."""), ((6, 3), 41)),
    (parse_asteroid_map(
        """.#..##.###...#######
    ##.############..##.
    .#.######.########.#
    .###.#######.####.#.
    #####.##.#.##.###.##
    ..#####..#.#########
    ####################
    #.####....###.#.#.##
    ##.#################
    #####.##.###..####..
    ..######..##.#######
    ####.##.####...##..#
    .#####..#.######.###
    ##...#.##########...
    #.##########.#######
    .####.#.###.###.#.##
    ....##.##.###..#####
    .#.#.###########.###
    #.#.#.#####.####.###
    ###.##.####.##.#..##"""), ((11, 13), 210)),
    (parse_asteroid_map(read("src/day10/input.txt")), ((11, 19), 253)),
])
def test_find_best_station(asteroids, expected):
    assert find_best_station(asteroids) == expected
# Ordering by firing angle: 0 degrees is straight up, increasing clockwise;
# expected items are ((x, y), angle) pairs.
@pytest.mark.parametrize("asteroids,station,expected", [
    (parse_asteroid_map(
        """###
    ###
    ###"""), (1, 1), [((1, 0), 0.0), ((2, 0), 45.0), ((2, 1), 90.0), ((2, 2), 135.0), ((1, 2), 180.0), ((0, 2), 225.0), ((0, 1), 270.0), ((0, 0), 315.0)]),
    (parse_asteroid_map(
        """###
    ###
    ###"""), (0, 0), [((1, 0), 90.0), ((2, 0), 90.0), ((2, 1), 116.56505117707799), ((1, 1), 135.0), ((2, 2), 135.0), ((1, 2), 153.43494882292202), ((0, 1), 180.0), ((0, 2), 180.0)])
])
def test_sort_for_laser_round(asteroids, station, expected):
    assert sort_for_laser_round(asteroids, station) == expected
# Full vaporization order: the laser sweeps clockwise, skipping blocked
# asteroids until later rounds.
@pytest.mark.parametrize("asteroids,station,expected", [
    (parse_asteroid_map(
        """###
    ###
    ###"""), (1, 1), [(1, 0), (2, 0), (2, 1), (2, 2), (1, 2), (0, 2), (0, 1), (0, 0)]),
    (parse_asteroid_map(
        """###
    ###
    ###"""), (0, 0), [(1, 0), (2, 1), (1, 1), (1, 2), (0, 1), (2, 0), (2, 2), (0, 2)])
])
def test_shoot(asteroids, station, expected):
    assert shoot(asteroids, station) == expected
# Part-2 answer: the 200th vaporized asteroid, encoded as x * 100 + y.
@pytest.mark.parametrize("asteroids,station,expected", [
    (parse_asteroid_map(
        """.#..##.###...#######
    ##.############..##.
    .#.######.########.#
    .###.#######.####.#.
    #####.##.#.##.###.##
    ..#####..#.#########
    ####################
    #.####....###.#.#.##
    ##.#################
    #####.##.###..####..
    ..######..##.#######
    ####.##.####...##..#
    .#####..#.######.###
    ##...#.##########...
    #.##########.#######
    .####.#.###.###.#.##
    ....##.##.###..#####
    .#.#.###########.###
    #.#.#.#####.####.###
    ###.##.####.##.#..##"""), (11, 13), 802),
    (parse_asteroid_map(read("src/day10/input.txt")), (11, 19), 815),
])
def test_solution_part2(asteroids, station, expected):
    shot_asteroids = shoot(asteroids, station)
    asteroid200 = shot_asteroids[199]
    magic_number = asteroid200[0] * 100 + asteroid200[1]
    assert magic_number == expected
|
14,272 | 2e7a6303deeea59b1d3e55c6c46934f89a647da8 | """
Основаная идея заключалается в отстреле кораблей противника по одному
"""
import json
from dataclasses import dataclass
from enum import Enum
from typing import List
from random import random
# region Primitives
@dataclass
class Vector:
    """3D integer vector with component-wise arithmetic.

    Serialized on the wire as the string "x/y/z".
    """
    x: int
    y: int
    z: int

    def __add__(self, other):
        # FIX: return NotImplemented (instead of raising TypeError directly)
        # so Python can try the reflected operation first; unsupported
        # operands still end up raising TypeError.
        if not isinstance(other, Vector):
            return NotImplemented
        return Vector(self.x + other.x,
                      self.y + other.y,
                      self.z + other.z)

    def __sub__(self, other):
        if not isinstance(other, Vector):
            return NotImplemented
        return Vector(self.x - other.x,
                      self.y - other.y,
                      self.z - other.z)

    def __str__(self):
        # Wire format expected by the game protocol.
        return f'{self.x}/{self.y}/{self.z}'

    @property
    def coords(self):
        """Components as a tuple (x, y, z)."""
        return self.x, self.y, self.z

    @classmethod
    def from_json(cls, data: str):
        """Parse an "x/y/z" string into a Vector."""
        x, y, z = map(int, data.split('/'))
        return cls(x, y, z)
# endregion
# region Utils
class JSONCapability:
    """Mixin: serialize instance attributes to a dict, stringifying Vectors
    and dropping None-valued fields."""
    def to_json(self) -> dict:
        result = {}
        for name, value in self.__dict__.items():
            if value is None:
                continue
            result[name] = str(value) if isinstance(value, Vector) else value
        return result
class Physics:
    """Distance metrics over Vectors."""
    @staticmethod
    def clen(v: Vector) -> int:
        """Chebyshev metric: the largest absolute component."""
        return max(abs(c) for c in v.coords)
    @staticmethod
    def mlen(v: Vector) -> int:
        """Manhattan metric: the sum of absolute components."""
        return sum(abs(c) for c in v.coords)
    @staticmethod
    def get_len_vector(vector_difference: Vector) -> int:
        """Euclidean length of a vector difference."""
        squared = 0
        for component in vector_difference.__dict__.values():
            squared += component ** 2
        return squared ** 0.5
# endregion
# region Equipment
class BlockType(Enum):
    # Equipment block categories as encoded by the game server.
    Energy = 0
    Gun = 1
    Engine = 2
    Health = 3
class EffectType(Enum):
    # Weapon effect types; only the blaster exists in this protocol version.
    Blaster = 0
@dataclass
class Block(JSONCapability):
    """Base class for one piece of ship equipment."""
    Name: str
    Type: BlockType
    @classmethod
    def from_json(cls, data):
        """Instantiate the concrete subclass matching data['Type']."""
        block_type = BlockType(data['Type'])
        if block_type == BlockType.Energy:
            return EnergyBlock(**data)
        if block_type == BlockType.Gun:
            return GunBlock(**data)
        if block_type == BlockType.Engine:
            return EngineBlock(**data)
        if block_type == BlockType.Health:
            return HealthBlock(**data)
@dataclass
class EnergyBlock(Block):
    # Equipment block of type Energy.
    Type = BlockType.Energy
    IncrementPerTurn: int
    MaxEnergy: int
    StartEnergy: int
@dataclass
class GunBlock(Block):
    # Equipment block of type Gun.
    Type = BlockType.Gun
    Damage: int
    EnergyPrice: int
    Radius: int
    EffectType: int
@dataclass
class EngineBlock(Block):
    # Equipment block of type Engine.
    Type = BlockType.Engine
    MaxAccelerate: int
@dataclass
class HealthBlock(Block):
    # Equipment block of type Health.
    Type = BlockType.Health
    MaxHealth: int
    StartHealth: int
# endregion
# region Draft Input
@dataclass
class DraftCompleteShip(JSONCapability):
    # A pre-assembled ship offered during the draft stage.
    Id: str
    Price: int
    Equipment: List[str]
    @classmethod
    def from_json(cls, data):
        return cls(**data)
@dataclass
class DraftEquipment(JSONCapability):
    # Equipment catalogue entry available during the draft stage.
    Size: int
    Equipment: List[Block]
    @classmethod
    def from_json(cls, data):
        # Deserialize each raw equipment dict into its concrete Block subclass.
        data['Equipment'] = list(map(Block.from_json, data['Equipment']))
        return cls(**data)
@dataclass
class MapRegion(JSONCapability):
    # Axis-aligned box on the map, given by two corner vectors.
    From: Vector
    To: Vector
    @classmethod
    def from_json(cls, data):
        data['From'] = Vector.from_json(data['From'])
        data['To'] = Vector.from_json(data['To'])
        return cls(**data)
@dataclass
class DraftOptions(JSONCapability):
    """Draft-stage options received from the server."""
    PlayerId: int
    MapSize: int
    Money: int
    MaxShipsCount: int
    DraftTimeout: int
    BattleRoundTimeout: int
    StartArea: MapRegion
    Equipment: List[DraftEquipment]
    Ships: List[DraftCompleteShip]
    @classmethod
    def from_json(cls, data):
        """Deserialize draft options, converting nested structures.

        BUG FIX: StartArea was previously parsed from the literal list
        ['StartArea'] instead of data['StartArea'], which raised at runtime.
        """
        data['StartArea'] = MapRegion.from_json(data['StartArea'])
        data['Equipment'] = list(map(DraftEquipment.from_json, data['Equipment']))
        data['Ships'] = list(map(DraftCompleteShip.from_json, data['Ships']))
        return cls(**data)
# endregion
# region Draft Output
@dataclass
class DraftShipChoice:
    # One drafted ship; Position is optional (server may place it).
    CompleteShipId: str
    Position: Vector = None
@dataclass
class DraftChoice:
    # Draft response; both fields are optional.
    Ships: List[DraftShipChoice] = None
    Message: str = None
# endregion
# region Battle Input
@dataclass
class Ship(JSONCapability):
    # Battle-state ship; Health/Energy/Equipment are only present for our own
    # ships, not for the opponent's.
    Id: int
    Position: Vector
    Velocity: Vector
    Health: int = None
    Energy: int = None
    Equipment: List[Block] = None
    @classmethod
    def from_json(cls, data):
        if data.get('Equipment'):  # not available for opponent ships
            data['Equipment'] = list(map(Block.from_json, data.get('Equipment', [])))
        data['Position'] = Vector.from_json(data['Position'])
        data['Velocity'] = Vector.from_json(data['Velocity'])
        return cls(**data)
@dataclass
class FireInfo(JSONCapability):
    # One shot fired last turn: from Source toward Target with EffectType.
    Source: Vector
    Target: Vector
    EffectType: EffectType
    @classmethod
    def from_json(cls, data):
        data['Source'] = Vector.from_json(data['Source'])
        data['Target'] = Vector.from_json(data['Target'])
        return cls(**data)
@dataclass
class State(JSONCapability):
    # Full battle state for the current turn.
    My: List[Ship]
    Opponent: List[Ship]
    FireInfos: List[FireInfo]
    @classmethod
    def from_json(cls, data):
        data['My'] = list(map(Ship.from_json, data['My']))
        data['Opponent'] = list(map(Ship.from_json, data['Opponent']))
        data['FireInfos'] = list(map(FireInfo.from_json, data['FireInfos']))
        return cls(**data)
# endregion
# region Battle Output
# Command verbs accepted by the battle protocol.
MOVE = 'MOVE'
ACCELERATE = 'ACCELERATE'
ATTACK = 'ATTACK'
@dataclass
class CommandParameters(JSONCapability):
    # Marker base class for command parameter payloads.
    pass
@dataclass
class MoveParameters(CommandParameters):
    # MOVE payload: send ship Id toward Target.
    Id: int
    Target: Vector
@dataclass
class AccelerateParameters(CommandParameters):
    # ACCELERATE payload: apply Vector to ship Id's velocity.
    Id: int
    Vector: Vector
@dataclass
class AttackParameters(CommandParameters):
    # ATTACK payload: ship Id fires the gun called Name at Target.
    Id: int
    Name: str
    Target: Vector
@dataclass
class Command(JSONCapability):
    # One protocol command: a verb plus its typed parameters.
    Command: str
    Parameters: CommandParameters
@dataclass
class UserOutput(JSONCapability):
    # Per-turn response: list of commands plus an optional debug message.
    UserCommands: List[Command] = None
    Message: str = None
# endregion
class Game:
    """Battle bot: picks off enemy ships one at a time, moving the fleet
    with a particle-swarm style velocity update after an initial formation
    phase."""
    def __init__(self):
        # Current shared target ("victim"); re-chosen every turn.
        self.targeted = None
        # weight of the enemy closest to all our ships (swarm's global best)
        self.main_particle_weight = 0.9
        # weight of the enemy closest to this particular ship (particle's best)
        self.best_particle_weight = 0.1
        # formation build-up finished
        self.ready = False
        # turn counter
        self.ready_commands = 0
    @staticmethod
    def draft(_: dict) -> DraftChoice:
        return DraftChoice()  # ships are drafted automatically
    def velocity_change(self, closest_enemy: Ship, ship: Ship) -> dict:
        # the swarm-algorithm update step itself
        # hoisted the dicts into locals so they are not rebuilt dozens of
        # times inside the dict comprehension
        targeted_position_raw = self.targeted.Position.__dict__
        closest_enemy_position_raw = closest_enemy.Position.__dict__
        ship_position_raw = ship.Position.__dict__
        best_particle_weight_coeff = random()
        main_particle_weight_coeff = random()
        # the textbook PSO velocity update adapted to this task (https://clck.ru/VbhZs)
        return {key: (value + self.best_particle_weight * best_particle_weight_coeff *
                      (closest_enemy_position_raw[key] - ship_position_raw[key]) +
                      main_particle_weight_coeff * random() * (targeted_position_raw[key] - ship_position_raw[key]))
                for key, value in ship.Velocity.__dict__.items()}
    def building_ships(self, ship: Ship) -> Command:
        # not happy with how this works, but could not think of a better way;
        # routes each ship (by hard-coded Id) to its opening-formation slot.
        # Ids 0-4 belong to one starting corner, 10000-10004 to the other.
        if ship.Id == 0:
            return Command(Command=MOVE,
                           Parameters=MoveParameters(Id=ship.Id,
                                                     Target=Vector(0, 0, 7)))
        elif ship.Id == 4:
            return Command(Command=MOVE,
                           Parameters=MoveParameters(Id=ship.Id,
                                                     Target=Vector(8, 0, 8)))
        elif ship.Id == 1:
            return Command(Command=MOVE,
                           Parameters=MoveParameters(Id=ship.Id,
                                                     Target=Vector((2 if self.ready_commands <= 2 else 0), 8, 2)))
        elif ship.Id == 3:
            return Command(Command=MOVE,
                           Parameters=MoveParameters(Id=ship.Id,
                                                     Target=Vector((6 if self.ready_commands <= 2 else 8), 8, 1)))
        elif ship.Id == 2:
            return Command(Command=MOVE,
                           Parameters=MoveParameters(Id=ship.Id,
                                                     Target=Vector(4, 4, 0)))
        if ship.Id == 10000:
            return Command(Command=MOVE,
                           Parameters=MoveParameters(Id=ship.Id,
                                                     Target=Vector(28, 28, 21)))
        elif ship.Id == 10004:
            return Command(Command=MOVE,
                           Parameters=MoveParameters(Id=ship.Id,
                                                     Target=Vector(20, 28, 20)))
        elif ship.Id == 10001:
            return Command(Command=MOVE,
                           Parameters=MoveParameters(Id=ship.Id,
                                                     Target=Vector((26 if self.ready_commands <= 2 else 28), 20, 26)))
        elif ship.Id == 10003:
            return Command(Command=MOVE,
                           Parameters=MoveParameters(Id=ship.Id,
                                                     Target=Vector((22 if self.ready_commands <= 2 else 20), 20, 27)))
        elif ship.Id == 10002:
            return Command(Command=MOVE,
                           Parameters=MoveParameters(Id=ship.Id,
                                                     Target=Vector(24, 24, 28)))
    def battle(self, data: dict) -> UserOutput:
        """Produce this turn's commands: pick a victim, fire in-range guns,
        and either keep building the formation or swarm toward the victim."""
        state = State.from_json(data)
        user_output = UserOutput()
        # since ships move, the target is re-chosen every turn:
        # the victim minimizes the summed distance from all of our ships
        self.targeted = min(state.Opponent,
                            key=lambda x: sum([Physics.get_len_vector(y.Position - x.Position) for y in state.My]))
        user_output.UserCommands = []
        for ship in state.My:
            guns = [x for x in ship.Equipment if isinstance(x, GunBlock)]
            if guns:
                # the ship picks its longest-range gun
                ranged_gun = max(guns, key=lambda x: x.Radius)
                # closest opponent to the current ship
                closest_enemy = min(state.Opponent, key=lambda x: Physics.get_len_vector(ship.Position - x.Position))
                # check the gun can reach the "victim" (generous margin)
                if ranged_gun.Radius * 3 >= Physics.get_len_vector(ship.Position - closest_enemy.Position):
                    user_output.UserCommands.append(Command(Command=ATTACK,
                                                            Parameters=AttackParameters(
                                                                Id=ship.Id,
                                                                Name=ranged_gun.Name,
                                                                Target=closest_enemy.Position)))
            # a crutch, like much of what is here
            # NOTE(review): if a ship has no guns and self.ready is True,
            # closest_enemy below is unbound (NameError) — verify every ship
            # always carries a gun.
            if not self.ready:
                user_output.UserCommands.append(self.building_ships(ship))
            else:
                ship.Velocity.x, ship.Velocity.y, ship.Velocity.z = self.velocity_change(closest_enemy,
                                                                                         ship).values()
                user_output.UserCommands.append(Command(Command=MOVE,
                                                        Parameters=MoveParameters(Id=ship.Id,
                                                                                  Target=self.targeted.Position)))
        self.ready_commands += 1
        if self.ready_commands >= 10:
            self.ready = True
        return user_output
    def main(self):
        """Blocking stdin/stdout loop: one JSON object in, one response out."""
        while True:
            line_in = input()
            data = json.loads(line_in)
            # not ideal, but the organizers gave no better way to tell which
            # stage (draft vs. battle) an input line belongs to
            if 'PlayerId' in data:
                result = self.draft(data)
            else:
                result = self.battle(data)
            # not sure `ensure_ascii=False` is strictly needed, but it was in
            # the organizers' example, so keep it
            line_out = json.dumps(result,
                                  default=JSONCapability.to_json,
                                  ensure_ascii=False)
            print(line_out)
if __name__ == '__main__':
    # Run the stdin/stdout game loop.
    Game().main()
|
14,273 | 118a4483ca1cf645d66de85cad611d4b936981a8 | # SOFTVEROVY NASTROJ PLGP
# Furtkevicova Ludmila, cast diplomovej prace
# script: okno s piatimi zalozkami, funkcie, tlacidla, modely
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import os
import tkMessageBox
import Tkinter
import ttk
from Tkinter import *
from ttk import *
import sys
import subprocess
import ScrolledText
import tkFileDialog
from tkFileDialog import askdirectory, asksaveasfile
# Class that locates GRASS GIS and configures its environment (Python 2)
class GRASS:
    def __init__(self):
        # launching GRASS GIS
        grass7bin_win = r'C:\Program Files (x86)\GRASS GIS 7.0.0\grass70.bat'
        # defining the GRASS DATABASE (GRASS GIS database) directory
        # path comes from the GUI
        self.gisdb = "C:\\DP_LF"
        # SOFTWARE
        grass7bin = grass7bin_win
        # GRASS 7 and GISBASE: ask the launcher for its installation path
        startcmd = [grass7bin, '--config', 'path']
        p = subprocess.Popen(startcmd, shell=False,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate()
        if p.returncode != 0:
            print >>sys.stderr, "ERROR: Cannot find GRASS GIS 7 start script (%s)" % startcmd
            sys.exit(-1)
        self.gisbase = out.strip('\n\r')
        # GISBASE and PATH environment variables
        os.environ['GISBASE'] = self.gisbase
        os.environ['PATH'] += os.pathsep + os.path.join(self.gisbase, 'extrabin')
        # defining the GRASS-Python environment
        gpydir = os.path.join(self.gisbase, "etc", "python")
        sys.path.append(gpydir)
        os.environ['GISDBASE'] = self.gisdb
# Class for redirection (window contents into a given widget)
class Presmerovanie(object):
    def __init__(self, text_ctrl):
        # text_ctrl: the Tkinter text widget that receives redirected output
        self.output = text_ctrl
    def write(self, string):
        # file-like write(): append to the end of the widget
        self.output.insert(Tkinter.END, string)
# Class concerning the user interface
class GUI(Tkinter.Frame):
    """Main application frame of PLGP.

    Five-tab workflow: 1. settings, 2. parametric maps, 3. weight
    calculation, 4. prediction, 5. validation (ROC).  All console-style
    output of the helper methods is redirected into ScrolledText widgets
    via the Presmerovanie adapter.
    """
    # GRASS bootstrap object; note this runs at class-definition time
    Gobj = GRASS()
    cestaV = ""          # path to the folder with input vector data
    cestaI = ""          # path to the folder for reports / info output
    cestaL = ""          # path for the new LOCATION
    recl1 = "recl1"
    cesta = "C:\\DP_LF\\vypocet\\"
    # GUI construction
    def __init__(self,gui):
        """Build the notebook with all five tabs and their widgets."""
        Tkinter.Frame.__init__(self, gui)
        self.gui = gui
        self.gui.title(u"PLGP (Ludmila Furtkevicova, 2015) ")
        note = Notebook(self.gui)
        # five tabs
        tab1 = Tkinter.Frame(note)
        tab2 = Tkinter.Frame(note)
        tab3 = Tkinter.Frame(note)
        tab4 = Tkinter.Frame(note)
        tab5 = Tkinter.Frame(note)
        # style settings for the tabs
        ttk.Style().configure('TLabelframe.Label', foreground='forest green',font="Verdana 8 bold")
        ttk.Style().configure('TButton', foreground='cadet blue',font="Helvetica 8 bold")
        ttk.Style().configure("TNotebook.Tab", foreground="dim gray",font="Helvetica 8 bold")
        # tab captions
        note.add(tab1, text = " 1. Settings ")
        note.add(tab2, text = " 2. Parametric maps ")
        note.add(tab3, text = " 3. Weight calculation ")
        note.add(tab4, text = " 4. Prediction ")
        note.add(tab5, text = " 5. Validation ")
        note.pack()
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~ FIRST TAB ~~~~~~~~~~~~~~~~~~~~~~~~~
        # set up paths and create the files needed by the other tabs
        ttk.Label(tab1, text=u" \n\nHandy software tool for geologists", anchor="s",\
        foreground="forest green", font = "Verdana 9 italic").grid(in_=tab1,column=0, row=0,\
        columnspan=7, sticky="S",padx=70, pady=17)
        ttk.Label(tab1, text=u"\nPredict landslide with GRASS GIS and Python", anchor="n",\
        foreground="dark green", font = "Verdana 13 bold").grid(in_=tab1,column=0, row=0,\
        columnspan=7, sticky="N",padx=30, pady=1)
        # first sub-frame of the first tab (input data)
        self.one = ttk.Labelframe(tab1, text = " 1. Input data: ")
        self.one.grid(row=1, column=0, columnspan=2, sticky='S', padx=5, pady=5, ipadx=4,\
        ipady=1)
        L21 = ttk.Label(self.one, text="  Path to folder with vector data: ")
        L21.grid(row=2, column=0, sticky='W', pady=5)
        self.E21 = ttk.Entry(self.one, width=40)
        self.E21.grid(row=2, column=1, columnspan=2, sticky="WE", pady=5, padx = 5)
        B21 = ttk.Button(self.one, text=" Browse ...",command = self.openV)
        B21.grid(row=2, column=3, sticky='W',pady=5, padx = 2)
        # second sub-frame of the first tab (location name, EPSG code, ...)
        self.two = ttk.Labelframe(tab1, text = " 2. New LOCATION and new MAPSETs:\n ")
        self.two.grid(row=3, column=0, columnspan=2, sticky='S', padx=5, pady=5, ipadx=4,\
        ipady=5)
        L10 = ttk.Label(self.two, text="  LOCATION name: ")
        L10.grid(row=4, column=0, sticky='W', padx=5, pady = 5)
        self.E10 = ttk.Entry(self.two, width=30)
        self.E10.grid(row=4, column=1, columnspan=2, sticky="WE", pady=2)
        self.E10.insert(1,"Mapy")
        self.nameL = self.E10.get()
        L11 = ttk.Label(self.two, text="  EPSG code:")
        L11.grid(row=5, column=0, sticky='W', padx=5, pady=2)
        self.E11 = ttk.Entry(self.two, width=7)
        self.E11.grid(row=5, column=1, columnspan=2, sticky="WE", pady=2)
        self.E11.insert(1,"2065")
        self.epsg = self.E11.get()
        L12 = ttk.Label(self.two, text="  Path for new LOCATION:")
        L12.grid(row=6, column=0, sticky='W', padx=5, pady=2)
        self.E12 = ttk.Entry(self.two, width=10)
        self.E12.grid(row=6, column=1, columnspan=2, sticky="WE", pady=2)
        B12 = ttk.Button(self.two, text=" Browse ...",command = self.openL)
        B12.grid(row=6, column=3, sticky='W', padx=5, pady=2)
        L13 = ttk.Label(self.two, text="  Name of MAPSET for input data: ")
        L13.grid(row=7, column=0, sticky='W', padx=5, pady=2)
        self.E13 = ttk.Entry(self.two, width=10)
        self.E13.grid(row=7, column=1, columnspan=2, sticky="WE", pady=2)
        self.E13.insert(1,"VSTUP")
        self.nameMV = self.E13.get()
        L14 = ttk.Label(self.two, text="  Name of MAPSET for intermediate data: ")
        L14.grid(row=8, column=0, sticky='W', padx=5, pady=2)
        self.E14 = ttk.Entry(self.two, width=10)
        self.E14.grid(row=8, column=1, columnspan=2, sticky="WE", pady=2)
        self.E14.insert(1,"PM")
        self.nameMM = self.E14.get()
        L15 = ttk.Label(self.two, text="  Name of MAPSET for results: ")
        L15.grid(row=9, column=0, sticky='W', padx=5, pady=2)
        self.E15 = ttk.Entry(self.two, width=10)
        self.E15.grid(row=9, column=1, columnspan=2, sticky="WE", pady=2)
        self.E15.insert(1,"PREDIKCIA")
        self.nameM = self.E15.get()
        # third sub-frame of the first tab (results)
        self.three = ttk.Labelframe(tab1, text = " 3. Reports, reclassification rules, information about calculation:\n ")
        self.three.grid(row=10, column=0, columnspan=2, sticky='S', padx=5, pady=1, ipadx=5,\
        ipady=5)
        L31 = ttk.Label(self.three, text="  Path to folder for results: ")
        L31.grid(row=11, column=0, sticky='WE', padx=5, pady=2)
        self.E31 = ttk.Entry(self.three, width=39)
        self.E31.grid(row=11, column=1, columnspan=2, sticky="WE", pady=2)
        B31 = ttk.Button(self.three, text="Browse ...",command = self.openI)
        B31.grid(row=11, column=3, sticky='W', padx=5, pady=2)
        # REFRESH button to clear the pre-filled inputs
        ttk.Button(tab1, text="REFRESH",command=self.refreshALL).grid(row=13, column=0,\
        sticky="WE", padx=5, pady=5,columnspan=1, rowspan=1)
        # button to quit the application
        ttk.Button(tab1, text="QUIT",command=self.gEND).grid(row=13, column=1, \
        sticky='WE', padx=5, pady=5,columnspan=1, rowspan=1)
        # button that reads the filled-in fields and creates the needed files
        ttk.Button(tab1, text="NEXT", command=lambda: self.valueGET(self.E10.get(),\
                                self.E11.get(), self.E13.get(), self.E14.get(),\
                                self.E15.get())).grid(row=14, column=0, \
        sticky='WE', padx=5, columnspan=2, rowspan=1,pady=5)
        # button serving as a HELP alternative
        ttk.Button(tab1, text='INFO',command=tkMessageBox.showinfo).grid(row=12, column=0,\
        sticky="WE", padx=5, pady=5,columnspan=1, rowspan=1)
        # button that launches the GRASS GIS environment
        ttk.Button(tab1, text='RUN GRASS GIS',command=self.RG).grid(row=12, column=1,\
        sticky="WE", padx=5, pady=5,columnspan=1, rowspan=1)
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SECOND TAB ~~~~~~~~~~~~~~~~~~~~~~
        # display the MAPSET content, create parametric maps,
        # show information about the maps and their first reclassification
        ttk.Label(tab2, text=u" \n\nHandy software tool for geologists", anchor="s",\
        foreground="forest green", font = "Verdana 9 italic").grid(in_=tab2,column=0, row=0,\
        columnspan=7, sticky="S",padx=70, pady=17)
        ttk.Label(tab2, text=u"\nPredict landslide with GRASS GIS and Python", anchor="n",\
        foreground="dark green", font = "Verdana 13 bold").grid(in_=tab2,column=0, row=0,\
        columnspan=7, sticky="N",padx=30, pady=1)
        # first sub-frame of the second tab: show the maps in the mapset
        self.four = ttk.Labelframe(tab2, text = " 4. MAPSET content: " )
        self.four.grid(row=1, column=0, columnspan=2, sticky='E', padx=10, pady=5)
        self.txf1 = ScrolledText.ScrolledText(self.four, height = 5, width = 61)
        self.txf1.grid(row=2, column=0,columnspan=2, rowspan=3, sticky='NS', padx=5, pady=5)
        ttk.Button(tab2, text='VIEW CONTENT',command = self.wrZM).grid(row=2,\
        column=1, sticky='E', padx=10, pady=5,columnspan=1, rowspan=1)
        # second sub-frame of the second tab: info about parametric maps
        self.five = ttk.Labelframe(tab2, text = " 5. Information in TXT file: " )
        self.five.grid(row=3, column=0, columnspan=2, sticky='E', padx=10, pady=5)
        self.txf3 = ScrolledText.ScrolledText(self.five, height = 9, width = 61)
        self.txf3.grid(row=4, column=0,columnspan=2, rowspan=3, sticky='NS', padx=5, pady=5)
        ttk.Button(tab2, text='INFO',command=self.showexample).grid(row=7, column=0,\
        sticky="WE", padx=5, pady=5,columnspan=1, rowspan=1)
        ttk.Button(tab2, text='RUN GRASS GIS',command=self.RG).grid(row=8, column=0,\
        sticky="WE", padx=5, pady=5,columnspan=1, rowspan=1)
        ttk.Button(tab2, text='NEXT', command = self.II).grid(row=9, column=1,sticky='WE', padx=5,\
        pady=5,columnspan=1, rowspan=1)
        # button that saves the window content into a specific file
        ttk.Button(tab2, text='SAVE AS',command=self.edit_save).grid(row=8, column=1,sticky='WE', padx=5,\
        pady=5,columnspan=1, rowspan=1)
        ttk.Button(tab2, text="QUIT", command=self.gEND).grid(row=9, column=0,sticky='WE',\
        padx=5, columnspan=1, rowspan=10,pady=5)
        # button that loads a specific txt file's content into the PLGP window
        ttk.Button(tab2, text='LOAD TXT', command = self.open_file).grid(row=7,\
        column=1, sticky='WE', padx=5, pady=5,columnspan=1, rowspan=1)
        # ~~~~~~~~~~~~~~~~~~~~~~~~ THIRD TAB ~~~~~~~~~~~~~~~~~~~~~~~
        # display the calculated weights and related info, show the Y equation
        ttk.Label(tab3, text=u" \n\nHandy software tool for geologists", anchor="s",\
        foreground="forest green", font = "Verdana 9 italic").grid(in_=tab3,column=0, row=0,\
        columnspan=7, sticky="S",padx=70, pady=17)
        ttk.Label(tab3, text=u"\nPredict landslide with GRASS GIS and Python", anchor="n",\
        foreground="dark green", font = "Verdana 13 bold").grid(in_=tab3,column=0, row=0,\
        columnspan=7, sticky="N",padx=30, pady=1)
        self.six = ttk.Labelframe(tab3, text = " 6. Information about calculated weights of all factors : " )
        self.six.grid(row=1, column=0, columnspan=2, sticky='E', padx=10, pady=5)
        self.txf2 = ScrolledText.ScrolledText(self.six, height = 12, width = 61)
        self.txf2.grid(row=2, column=0,columnspan=2, rowspan=3, sticky='NS', padx=5, pady=5)
        self.seven = ttk.Labelframe(tab3, text = " 7. The equation to calculate value Y : " )
        self.seven.grid(row=3, column=0, columnspan=2, sticky='E', padx=10, pady=5)
        self.txf4 = ScrolledText.ScrolledText(self.seven, height = 3.5, width = 61)
        self.txf4.grid(row=4, column=0,columnspan=2, rowspan=3, sticky='NS', padx=5, pady=5)
        ttk.Button(tab3, text='INFO',command=tkMessageBox.showinfo).grid(row=7, column=0,\
        sticky="WE", padx=5, pady=5,columnspan=1, rowspan=1)
        ttk.Button(tab3, text='RUN GRASS GIS',command=self.RG).grid(row=8, column=0,\
        sticky="WE", padx=5, pady=5,columnspan=1, rowspan=1)
        ttk.Button(tab3, text='NEXT', command = self.III).grid(row=9, column=1,\
        sticky='WE', padx=5,pady=5,columnspan=1, rowspan=1)
        # show the Y equation
        ttk.Button(tab3, text='EQUATION',command = self.WrRovnica).grid(row=8, column=1,\
        sticky='WE', padx=5,pady=5,columnspan=1, rowspan=1)
        ttk.Button(tab3, text="QUIT", command=self.gEND).grid(row=9, column=0,\
        sticky='WE',padx=5, columnspan=1, rowspan=1,pady=5)
        # weight calculation
        ttk.Button(tab3, text='CALCULATE WEIGHTS', command=self.CalculateFactors).grid(row=7,\
        column=1, sticky='WE', padx=5, pady=5,columnspan=1, rowspan=1)
        # ~~~~~~~~~~~~~~~~~~~~~~~~~ FOURTH TAB ~~~~~~~~~~~~~~~~~~~~~~~~~
        # display the MIN and MAX cell values of the Y raster
        # reclassification of the continuous interval
        ttk.Label(tab4, text=u" \n\nHandy software tool for geologists", anchor="s",\
        foreground="forest green", font = "Verdana 9 italic").grid(in_=tab4,column=0, row=0,\
        columnspan=7, sticky="S",padx=70, pady=17)
        ttk.Label(tab4, text=u"\nPredict landslide with GRASS GIS and Python", anchor="n",\
        foreground="dark green", font = "Verdana 13 bold").grid(in_=tab4,column=0, row=0,\
        columnspan=7, sticky="N",padx=30, pady=1)
        self.eight = ttk.Labelframe(tab4, text = " 8. The result of equation: " )
        self.eight.grid(row=1, column=0, columnspan=2, sticky='E', padx=5, pady=5)
        self.txf5 = ScrolledText.ScrolledText(self.eight, height = 5, width = 62)
        self.txf5.grid(row=2, column=0,columnspan=2, rowspan=3, sticky='NS', padx=5, pady=5)
        self.eightt = ttk.Labelframe(tab4, text = " is raster map with MIN and MAX value:" )
        self.eightt.grid(row=3, column=0, columnspan=2, sticky='E', padx=5, pady=5)
        self.txf6 = ScrolledText.ScrolledText(self.eightt, height = 3, width = 62)
        self.txf6.grid(row=4, column=0,columnspan=2, rowspan=3, sticky='NS', padx=5, pady=5)
        ttk.Button(tab4, text='INFO',command=tkMessageBox.showinfo).grid(row=4,\
        column=0,sticky="WE", padx=5, pady=5,columnspan=1, rowspan=1)
        ttk.Button(tab4, text='MIN MAX',command = self.open_filey).grid(row=4,\
        column=1, sticky='WE', padx=5, pady=5,columnspan=1, rowspan=1)
        self.nine = ttk.Labelframe(tab4, text = " 9. Reclassification rules for result map: " )
        self.nine.grid(row=5, column=0, columnspan=2, sticky='E', padx=5, pady=5)
        self.txf7 = ScrolledText.ScrolledText(self.nine, height = 5.3, width = 62)
        self.txf7.grid(row=6, column=0,columnspan=2, rowspan=3, sticky='NS', padx=5, pady=5)
        ttk.Button(tab4, text='SAVE AS',command=self.edit_savey).grid(row=6, column=1,\
        sticky="WE", padx=5, pady=5,columnspan=1, rowspan=1)
        ttk.Button(tab4, text='NEXT', command = self.IV).grid(row=7,\
        column=1,sticky='WE',padx=5, columnspan=1, rowspan=1,pady=5)
        ttk.Button(tab4,text='RUN GRASS GIS',command=self.RG ).grid(row=6, column=0,sticky='WE',\
        padx=5, pady = 5, columnspan=1, rowspan=1)
        ttk.Button(tab4, text="QUIT", command=self.gEND).grid(row=7, column=0,sticky='WE',\
        padx=5, columnspan=1, rowspan=10,pady=5)
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~ FIFTH TAB ~~~~~~~~~~~~~~~~~~~~~~~~
        # verification of the results
        # COV1, COV2 and COV3 refer to cutoff values, i.e. the boundary that
        # splits the Y interval into two categories: stable and unstable areas
        # in the thesis there is only one boundary (COV1,2,3 are equal),
        # hence the ROC is angular: plot.png
        ttk.Label(tab5, text=u" \n\nHandy software tool for geologists", anchor="s",\
        foreground="forest green", font = "Verdana 9 italic").grid(in_=tab5,column=0, row=0,\
        columnspan=7, sticky="S",padx=70, pady=17)
        ttk.Label(tab5, text=u"\nPredict landslide with GRASS GIS and Python", anchor="n",\
        foreground="dark green", font = "Verdana 13 bold").grid(in_=tab5,column=0, row=0,\
        columnspan=7, sticky="N",padx=30, pady=1)
        self.ten = ttk.Labelframe(tab5, text = " 10. Validation")
        self.ten.grid(row=1, column=0, columnspan=2, sticky='E', padx=10, pady=5)
        # show the intervals, i.e. the reclassification rules that split the
        # result into categories: stable and unstable areas
        self.tenL = ttk.Label(self.ten,text=" Intervals according to set cutoff value:",foreground="cadet blue")
        self.tenL.grid(row=2, column = 0, columnspan=2, sticky='W', padx=1, pady=1)
        self.txf8 = ScrolledText.ScrolledText(self.ten, height = 8, width = 30)
        self.txf8.grid(row=3, column=0,columnspan=2, rowspan=1, sticky='NS', padx=5, pady=5)
        self.tenL = ttk.Label(self.ten,text=" Receiver operating characteristic :",foreground="cadet blue")
        self.tenL.grid(row=2, column = 2, columnspan=2, sticky='W', padx=1, pady=5)
        self.txf9 = ScrolledText.ScrolledText(self.ten, height = 17, width = 27)
        self.txf9.grid(row=3, column=2,columnspan=2, rowspan=3, sticky='NS', padx=5, pady=5)
        # show the areas of the categories: stable and unstable
        self.tenL = ttk.Label(self.ten,text=" Area according to set cutoff value:",foreground="cadet blue")
        self.tenL.grid(row=4, column = 0, columnspan=2, sticky='W', padx=1, pady=5)
        self.txf10 = ScrolledText.ScrolledText(self.ten, height = 6, width = 30)
        self.txf10.grid(row=5, column=0,columnspan=2, rowspan=1, sticky='NS', padx=5, pady=5)
        # show the values for computing the area under the ROC curve
        ttk.Button(tab5, text="SHOW VALUES ", command = self.valid).grid(row=7,\
        column=0,sticky="WE", padx=5, pady=5,columnspan=1, rowspan=1)
        ttk.Button(tab5, text='RUN GRASS GIS',command=self.RG).grid(row=8,\
        column=0,sticky="WE",padx=5, pady=5,columnspan=1, rowspan=1)
        # show the preview result: without legend, existing landslides, ...
        ttk.Button(tab5, text="SHOW MAP",command = self.showimg).grid(row=8, column=1,sticky='WE',\
        padx=5, pady=5,columnspan=1, rowspan=1)
        # show the ROC curve
        ttk.Button(tab5, text="SHOW ROC", command = self.showROC).grid(row=7, column=1,sticky='WE',\
        padx=5,pady=5,columnspan=1, rowspan=1)
        ttk.Button(tab5, text="QUIT", command=self.gEND).grid(row=9,\
        column=0,sticky='WE',\
        padx=5, columnspan=2, rowspan=1,pady=5)
    # function showing an example of how the reclassification rules should
    # look for use with the r.recode module to reclassify FLOAT maps
    def showexample(self):
        tkMessageBox.showinfo("recl_file", "\nText file for reclassification:\n\n\
MIN : ? : ?\n  ? : ? : ?\n  ? : ? : ?\n  .    .    .  \n \n  ? : ? : ?\n  ? : MAX : ? ")
    # functions that show a dialog about continuing with the next tab
    def II(self):
        tkMessageBox.showinfo("GO NEXT"," Continue with third tab ... ")
    def III(self):
        tkMessageBox.showinfo("GO NEXT"," Continue with fourth tab ... ")
    def IV(self):
        tkMessageBox.showinfo("GO NEXT"," Continue with fifth tab ... ")
    # function to start GRASS GIS
    def RG(self):
        try:
            os.startfile(r'C:\Program Files (x86)\GRASS GIS 7.0.0\grass70.bat')
        except:
            tkMessageBox.showwarning(""," Cannot run GRASS GIS. ")
    # function to obtain the PATH to the main folder
    def openL(self):
        self.E12.delete(0,"end")
        #DEFAULT PATH
        pr = askdirectory(initialdir="C:\\DP_LF")
        self.cestaL = os.path.abspath(pr)
        self.E12.insert(0, self.cestaL)
        self.cestaL = self.cestaL.encode("ascii","ignore")
        return self.cestaL
    # function to obtain the PATH where the input data is stored
    def openV(self):
        self.E21.delete(0,"end")
        #DEFAULT PATH
        priecinok = askdirectory(initialdir="C:\\DP_LF\\data")
        self.cestaV = os.path.abspath(priecinok)
        self.E21.insert(0, self.cestaV)
        self.cestaV = self.cestaV.encode("ascii","ignore")
        return self.cestaV
    # function to obtain the PATH where the calculation INFO will be stored
    def openI(self):
        self.E31.delete(0,"end")
        #DEFAULT PATH
        priecinok = askdirectory(initialdir="C:\\DP_LF\\vypocet")
        self.cestaI = os.path.abspath(priecinok)
        self.E31.insert(0, self.cestaI)
        self.cestaI = self.cestaI.encode("ascii","ignore")
        return self.cestaI
    # function executed after pressing NEXT on the first tab:
    # reads the filled-in fields of the first tab
    def valueGET(self,a,b,c,d,e):
        """Validate the tab-1 inputs and create folders/txt files.

        NOTE(review): when the EPSG code is not numeric the window is
        destroyed but execution continues past the except clause.
        """
        self.createL()
        self.nameL = str(a)
        self.epsg = str(b)
        self.nameMV = str(c)
        self.nameMM = str(d)
        self.nameM = str(e)
        try:
            self.epsg=int(self.epsg)
        except:
            tkMessageBox.showerror( ""," EPSG code must be numeric ! " )
            self.gui.destroy()
        self.epsg=str(self.epsg)
        if ((self.nameL != "") and (self.epsg != "") and (self.nameMV != "")\
        and (self.nameMM != "") and (self.nameM != "") and (self.cestaL != "")\
        and (self.cestaV != "") and (self.cestaI != "")):
            if tkMessageBox.askquestion("Settings", " New LOCATION, new MAPSETs and other\n\
 necessary folders and *.txt files will be created.\n\
 All existing files with the same name will be \n\
 deleted.\n\n Do you really want to continue?")=="yes":
                # create new folders
                nf_info = self.cestaI+"\\info"
                if not os.path.isdir(nf_info):
                    os.makedirs(nf_info)
                nf_recl1 = self.cestaI+"\\recl1" # create new folder
                if not os.path.isdir(nf_recl1):
                    os.makedirs(nf_recl1)
                nf_report = self.cestaI+"\\report" # create new folder
                if not os.path.isdir(nf_report):
                    os.makedirs(nf_report)
                nf_recl2 = self.cestaI+"\\recl2" # create new folder
                if not os.path.isdir(nf_recl2):
                    os.makedirs(nf_recl2)
                # create txt files for the first reclassification
                # NOTE(review): these open() handles are never closed
                r1_G = nf_recl1+"\\recl1_G.txt"
                open(r1_G, 'w')
                r1_DMR = nf_recl1+"\\recl1_DMR.txt"
                open(r1_DMR, 'w')
                r1_S = nf_recl1+"\\recl1_S.txt"
                open(r1_S, 'w')
                r1_E = nf_recl1+"\\recl1_E.txt"
                open(r1_E, 'w')
                r1_DS = nf_recl1+"\\recl1_DS.txt"
                open(r1_DS, 'w')
                r1_M = nf_recl1+"\\recl1_M.txt"
                open(r1_M, 'w')
                r1_K = nf_recl1+"\\recl1_K.txt"
                open(r1_K, 'w')
                r1_VK = nf_recl1+"\\recl1_VK.txt"
                open(r1_VK, 'w')
                # create other needed txt files
                open(self.cesta + "recl_y.txt","wb")
                open(self.cesta + "recl_COV1.txt","wb")
                open(self.cesta + "recl_COV2.txt","wb")
                open(self.cesta + "recl_COV3.txt","wb")
                tkMessageBox.showinfo("New folders", " In %s these folders have already been created:\
                \n 1. info - information about parametric maps\
                \n 2. recl1 - necessary rules for first reclassification\
                \n 3. report - information about classes: areas\
                \n 4. recl2 - necessary rules for second reclassification\n"\
                %self.cestaI)
                tkMessageBox.showinfo("First reclassification", " In %s these *.txt files have already been created:\n\
                \n 1. recl1_G.txt - geology factor\
                \n 2. recl1_DMR.txt - DEM factor\
                \n 3. recl1_S.txt - slope factor\
                \n 4. recl1_E.txt - aspect factor\
                \n 5. recl1_DS.txt - flowlength factor\
                \n 6. recl1_M.txt - accumulation factor\
                \n 7. recl1_K.txt - curvature factor\
                \n 8. recl1_VK.txt - landuse factor\n" %nf_recl1)
                tkMessageBox.showinfo("GO NEXT"," Continue with second tab ... ")
            else:
                self.gui.destroy()
        else:
            tkMessageBox.showerror("", " ERROR \n\n Check the input values !" )
        return self.cestaL
    # function clearing the content of the pre-filled entry fields
    def refreshALL(self):
        self.E10.delete(0,"end")
        self.E11.delete(0,"end")
        self.E12.delete(0,"end")
        self.E13.delete(0,"end")
        self.E14.delete(0,"end")
        self.E15.delete(0,"end")
        self.E21.delete(0,"end")
        self.E31.delete(0,"end")
    # function to quit PLGP
    def gEND(self):
        if tkMessageBox.askyesno('Verification', ' Do you really want to quit? '):
            self.gui.destroy()
        else:
            tkMessageBox.askretrycancel("No", ' Press ENTER to continue ')
    def wrZM(self):
        # clear the widget and print mapsets, rasters and vectors into txf1
        self.txf1.delete(1.0, END)
        redir = Presmerovanie(self.txf1)
        sys.stdout = redir
        self.zm()
        self.zistiR()
        self.zistiV()
#        self.txf1.insert(INSERT,"Existujuce rastrove mapy:\n\nExistujuce vektorove mapy:")
#        print(self.txf1.get(1.0, END))
    def delZM(self):
        self.txf1.delete(1.0, END)
    def open_file(self):
        # make the txt content appear in the window
        self.txf3.delete(1.0, END)
        redir = Presmerovanie(self.txf3)
        sys.stdout = redir
        self.txf3.delete(1.0, END)
        options = {}
        options['defaultextension'] = '.txt'
        options['filetypes'] = [('all files', '.*'), ('text files', '.txt')]
        options['initialdir'] = "C:\\DP_LF\\vypocet\\info"
        options['parent'] = self.gui
        options['title'] = "Open a file"
        # from the INFO txt file read only the MIN and MAX cell values
        with tkFileDialog.askopenfile(mode='r', initialdir = "C:\\DP_LF\\vypocet\\info") as f_handle:
            pr = os.path.curdir
            self.oo = os.path.abspath(pr)
            self.oo = self.oo.encode("ascii","ignore")
            #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ print the file name
            print "Map:"
            print "---------------------------------------"
            print "MIN and MAX cell value in raster of selected factor :\n"
            # print the file content
            for line in f_handle:
                line = line.strip()
                if line == "": continue
                if "max" in line:
                    print line
                if "min" in line:
                    print line
    # save txt file as ...
    def edit_savey(self):
        options = {}
        options['defaultextension'] = '.txt'
        options['filetypes'] = [('all files', '.*'), ('text files', '.txt')]
        options['parent'] = self.gui
        options['title'] = "Save as ..."
        f = asksaveasfile(mode='w+', defaultextension=".txt", initialdir = "C:\\DP_LF\\vypocet")
        if not f:
            return
        f.write(self.txf7.get(1.0, END))
        f.close()
    # open the INFO txt file
    def open_filey(self):
        # make the txt content appear in the window
        self.txf6.delete(1.0, END)
        redir = Presmerovanie(self.txf6)
        sys.stdout = redir
        options = {}
        options['defaultextension'] = '.txt'
        options['filetypes'] = [('all files', '.*'), ('text files', '.txt')]
        options['initialdir'] = "C:\\DP_LF\\vypocet"
        options['parent'] = self.gui
        options['title'] = "Open a file"
        f_handle = "C:\\DP_LF\\vypocet\\info_y.txt"
        file = open(f_handle, 'r')
        # print the file content (only min/max lines)
        for line in file:
            line = line.strip()
            if line == "": continue
            if "max" in line:
                print line
            if "min" in line:
                print line
    def edit_save(self):
        """Save the content of txf3 into a user-chosen txt file."""
        options = {}
        options['defaultextension'] = '.txt'
        options['filetypes'] = [('all files', '.*'), ('text files', '.txt')]
        options['parent'] = self.gui
        options['title'] = "Save as ..."
        f = asksaveasfile(mode='w+', defaultextension=".txt", initialdir = "C:\\DP_LF\\vypocet\\recl1")
        if not f:
            return
        f.write(self.txf3.get(1.0, END))
        f.close()
    # create the LOCATION
    def createL(self):
        import grass.script as gscript
        import grass.script.setup as gsetup
        import grass.script.core as gcore
        cestaL = self.Gobj.gisdb
        nameL = self.nameL
        epsg = self.epsg
        mapset = self.nameMV
        mapset1 = self.nameMM
        mapset2 = self.nameM
        gisbase = self.Gobj.gisbase
        gsetup.init(gisbase, cestaL, nameL, "PERMANENT")
        # create the LOCATION
        gcore.create_location(cestaL, nameL, epsg=epsg, proj4=None, filename=None, wkt=None,\
        datum=None, datum_trans=None, desc=None, overwrite=True)
        # create the MAPSETs
        gscript.run_command("g.mapset",overwrite = True,mapset = mapset, flags="c")
        gscript.run_command("g.mapset",overwrite = True,mapset = mapset1, flags="c")
        gscript.run_command("g.mapset",overwrite = True,mapset = mapset2, flags="c")
    # print the list of mapsets in the location
    def zm(self):
        import grass.script as gscript
        print "MAPSETs:"
        print gscript.read_command("g.mapsets",flags = "l")
    # print the list of rasters
    def zistiR(self):
        import grass.script as gscript
        print "Raster maps:"
        for rast in gscript.list_strings(type = 'rast'):
            print rast,
    # print the list of vectors
    def zistiV(self):
        import grass.script as gscript
        print "\nVector maps:"
        for vect in gscript.list_strings(type = 'vect'):
            print vect,
    # weight calculation for a single factor
    def Faktor(self, faktor):
        """Compute the information-entropy weight W of the given factor.

        Reads report_<faktor>.txt (class areas) and report_<faktor>_z.txt
        (landslide areas per class) and writes the second-reclassification
        rules into recl2\\recl2_<faktor>.txt.  Returns W as float.
        """
        import math
        import scipy
        # function saving the reclassification rules for the II reclassification
        def STL(a,b,c):
            ctxt = self.cesta + "recl2\\" + "recl2_" + str(c) + ".txt"
            file = open(ctxt, 'w+')
            for j,k in zip(a, b):
                file.writelines("%r = %r\n" % (j,k))
            file.close()
        # function reading content from the reports
        def Report(self,F):
            import csv
            tf = open(F, "rb")
            lines = tf.readlines()
            lines1 = lines[4:(len(lines)-3)]
            data = csv.reader(lines1, delimiter="|")
            table = [row for row in data]
            self.recl1 = [None]
            self.P = [None]
            for row in table:
                a = row[1]
                b = row[3]
                # NOTE(review): `is [None]` compares identity with a fresh
                # list and is always False, so the else-branch always runs;
                # the leading None is removed by the del statements below.
                if self.recl1 is [None]:
                    self.recl1 = [a]
                else: self.recl1.append(a)
                if self.P is [None]:
                    self.P = [b]
                else: self.P.append(b)
            del self.recl1[0]
            del self.P[0]
            self.recl1 = [int(i) for i in self.recl1]
            self.P = [float(i) for i in self.P]
            STL(self.recl1, self.P, faktor)
            return (self.recl1,self.P)
        f1 = "report_"
        f2 = str(faktor)
        f3 = ".txt"
        f4 = "_z.txt"
        Ft = self.cesta+"report\\"+f1+f2+f3
        Ftz = self.cesta+"report\\"+f1+f2+f4
        # class area
        pt = Report(self, Ft)
        Pt = pt[1]
        recl1t = pt[0]
        # landslide area within the class
        ptz = Report(self, Ftz)
        Ptz = ptz[1]
        recl1tz = ptz[0]
        # number of classes of the parametric map
        s = len(Pt)
        # probability of slope deformation occurrence in the class
        p = [(Ptzi)/Pti for Ptzi,Pti in zip(Ptz,Pt)]
        # sum of probabilities within the parameter
        p_sum = sum(p)
        # probability density
        pp = [(pi)/p_sum for pi in p]
        # entropy value
        H = (-1)*(sum([(math.log(pi)/math.log(2))*pi for pi in pp]))
        # maximum entropy
        Hmax = math.log(s)/math.log(2)
        # mean probability value
        p_pr = scipy.mean(p)
        # information coefficient
        I = (Hmax - H)/Hmax
        # weight of the parameter
        W = I*p_pr
        recl1_u,pp_u = zip(*sorted(zip(self.recl1,pp), key=lambda x: x[1]))
        recl1_u = list(recl1_u)
        print "Factor", faktor,":"
        print "---------------------------------------"
        print "Weight of factor",faktor, "is %s." % W
        print "Second reclassification is saved in *.txt file in\n%s." % (self.cesta + "recl2\\" + faktor + "_recl2.txt")
        # NOTE(review): this overwrites the recl2 file written by the last
        # Report() call above with the sorted class order
        STL(recl1_u, self.recl1, faktor)
#        print Pt[0], Psd[0], p[0], pp[0], H, s, Hmax, p_pr, I, W
        if len(recl1t) == len(recl1tz):
            print "Landslides occure in all classes.\n"
        else:
            print "Landslides occure not in all classes.\n"
        return float(W)
    def CalculateFactors(self):
        """Compute and display the weights of all eight factors."""
        # make the output appear in the txf2 window
        self.txf2.delete(1.0, END)
        redir = Presmerovanie(self.txf2)
        sys.stdout = redir
        self.Wg = self.Faktor("G")
        self.Wdmr = self.Faktor("DMR")
        self.Ws = self.Faktor("S")
        self.We = self.Faktor("E")
        self.Wds = self.Faktor("DS")
        self.Wk = self.Faktor("K")
        self.Wm = self.Faktor("M")
        self.Wvk = self.Faktor("VK")
    # print the equation into the window
    def WrRovnica(self):
        self.txf4.delete(1.0, END)
        redir = Presmerovanie(self.txf4)
        sys.stdout = redir
        print "y = geology_recl2 * %f + dmr_recl2 * %f + slope_recl2 * %f + aspect_recl2 * %f + curv_m_recl2 * %f + flowlength_recl2 * %f + accumulation_recl2 * %f + landuse_recl2 * %f" % (self.Wg, self.Wdmr, self.Ws, self.We, self.Wk, self.Wds,self.Wm, self.Wvk)
        self.ypsilon()
    # write the equation into a txt file
    def ypsilon(self):
        ctxt = self.cesta + "rovnica.txt"
        file = open(ctxt, 'w+')
        file.write(self.txf4.get(1.0, END))
        file.close()
        self.txf5.delete(1.0, END)
        redir = Presmerovanie(self.txf5)
        sys.stdout = redir
        print self.txf4.get(1.0, END)
    def valid(self):
        """Run the whole validation pipeline (intervals, areas, ROC)."""
        self.valrecl()
        self.bastats()
        self.val()
    def val(self):
        """Read the three stats_COV files, print rates and plot the ROC.

        Expects each stats file to hold 12 whitespace-separated tokens
        with the tn/fn/fp/tp counts at indices 2, 5, 8 and 11.
        """
        import numpy as np
        import pylab as pl
        self.txf9.delete(1.0, END)
        redir = Presmerovanie(self.txf9)
        sys.stdout = redir
        ctxt4 = self.cesta + "stats_COV1.txt"
        try:
            fhand = open(ctxt4)
        except:
            print "File not found:",ctxt4
        lst = list()
        for line in fhand:
            line.rstrip()
            if line == "": continue
            a = line.split()
            for word in a:
                lst.append(word)
        lst=[ lst[i] for i in range(len(lst))]
        tn4 = float(lst[2])
        fn4 = float(lst[5])
        fp4 = float(lst[8])
        tp4 = float(lst[11])
        N4 = tn4+fp4
        P4 = fn4+tp4
        TP4 = 1-tp4/P4
        FP4 = fp4/N4
        ctxt6 = self.cesta + "stats_COV2.txt"
        try:
            fhand = open(ctxt6)
        except:
            print "File not found:",ctxt6
        lst = list()
        for line in fhand:
            line.rstrip()
            if line == "": continue
            a = line.split()
            for word in a:
                lst.append(word)
        lst=[ lst[i] for i in range(len(lst))]
        tn6 = float(lst[2])
        fn6 = float(lst[5])
        fp6 = float(lst[8])
        tp6 = float(lst[11])
        N6 = tn6+fp6
        P6 = fn6+tp6
        TP6 = 1-tp6/P6
        FP6 = fp6/N6
        ctxt8 = self.cesta + "stats_COV3.txt"
        try:
            fhand = open(ctxt8)
        except:
            print "File not found:",ctxt8
        lst = list()
        for line in fhand:
            line.rstrip()
            if line == "": continue
            a = line.split()
            for word in a:
                lst.append(word)
        lst=[ lst[i] for i in range(len(lst))]
        tn8 = float(lst[2])
        fn8 = float(lst[5])
        fp8 = float(lst[8])
        tp8 = float(lst[11])
        N8 = tn8+fp8
        P8 = fn8+tp8
        TP8 = 1-tp8/P8
        FP8 = fp8/N8
        x = 0,FP4,FP6,FP8,1
        y = 0,TP4,TP6,TP8,1
        # AUC (trapezoidal integration of the ROC points)
        self.auc = np.trapz(y,x)
        # ROC curve
        pl.clf()
        pl.plot(x, y, "r", linewidth="1.7", label='ROC curve (area = %0.2f)' % self.auc)
        pl.plot([0, 1], [0, 1], 'r--',alpha=0.57)
        pl.xlim([0.0, 1.0])
        pl.ylim([0.0, 1.0])
        pl.xlabel('False Positive Rate')
        pl.ylabel('True Positive Rate')
        pl.title('Receiver operating characteristic')
        pl.legend(loc="lower right")
        pl.fill_between(x,y,color="red",alpha=0.17)
        pl.grid(True,alpha=0.7)
        pl.savefig(self.cesta + "plot.png")
        areaUC = self.auc*100.00
        print "Area under the ROC curve:\n%0.2f" % areaUC,"%"
        print "\n(I. COV)\n-------------\n*true negative: %0.2f" % (((tn4)/(N4+P4))*100),"%"
        print "*false negative: %0.2f" % (((fn4)/(N4+P4))*100),"%"
        print "*false positive: %0.2f" % (((fp4)/(N4+P4))*100),"%"
        print "*true positive: %0.2f" % (((tp4)/(N4+P4))*100),"%"
        print "*FP = %0.2f" % FP4
        print "*TP = %0.2f" % TP4
        print "\n(II. COV)\n-------------\n*true negative: %0.2f" % (((tn6)/(N6+P6))*100),"%"
        print "*false negative: %0.2f" % (((fn6)/(N6+P6))*100),"%"
        print "*false positive: %0.2f" % (((fp6)/(N6+P6))*100),"%"
        print "*true positive: %0.2f" % (((tp6)/(N6+P6))*100),"%"
        print "*FP = %0.2f" % FP6
        print "*TP = %0.2f" % TP6
        print "\n(III. COV)\n-------------\n*true negative: %0.2f" % (((tn8)/(N8+P8))*100),"%"
        print "*false negative: %0.2f" % (((fn8)/(N8+P8))*100),"%"
        print "*false positive: %0.2f" % (((fp8)/(N8+P8))*100),"%"
        print "*true positive: %0.2f" % (((tp8)/(N8+P8))*100),"%"
        print "*FP = %0.2f" % FP8
        print "*TP = %0.2f" % TP8
    def bastats(self):
        """Print area percentages for the three cutoff values into txf10."""
        self.txf10.delete(1.0, END)
        redir = Presmerovanie(self.txf10)
        sys.stdout = redir
        print "(I. COV):\n-------------"
        self.BA_stats(1)
        print "(II. COV):\n-------------"
        self.BA_stats(2)
        print "(III. COV):\n-------------"
        self.BA_stats(3)
    def BA_stats(self,fstats):
        """Read y_stats_COV<fstats>.txt and print with/without-landslide shares."""
        ctxt = self.cesta + "y_stats_COV" + str(fstats) + ".txt"
        try:
            fhand = open(ctxt)
        except:
            print "File not found:",ctxt
        lst = list()
        for line in fhand:
            line.rstrip()
            if line == "": continue
            a = line.split()
            for word in a:
                lst.append(word)
        lst=[ lst[i] for i in range(len(lst))]
        a = lst[1]
        b = lst[3]
        c = float(a)+ float(b)
        pa = (float(a)/c)*100
        pb = (float(b)/c)*100
        print "*without landslide: %0.2f" % (pa),"%"
        print "*with landslide: %0.2f" % (pb),"%\n"
    def valrecl(self):
        """Print the reclassification intervals for all three cutoffs into txf8."""
        self.txf8.delete(1.0, END)
        redir = Presmerovanie(self.txf8)
        sys.stdout = redir
        print "(I. COV):\n-------------"
        self.VAL_recl(1)
        print "(II. COV):\n-------------"
        self.VAL_recl(2)
        print "(III. COV):\n-------------"
        self.VAL_recl(3)
    def VAL_recl(self,frecl):
        """Read recl_COV<frecl>.txt (colon-separated) and print the intervals."""
        ctxt = self.cesta + "recl_COV" + str(frecl) + ".txt"
        try:
            fhand = open(ctxt)
        except:
            print "File not found:",ctxt
        lst = list()
        for line in fhand:
            line.rstrip()
            if line == "": continue
            a = line.split(":")
            for word in a:
                lst.append(word)
        lst=[ lst[i] for i in range(len(lst))]
#        print lst
        a = lst[0]
        b = lst[1]
        c = lst[3]
        d = lst[4]
        print "*without landslide"
        print a,"-",b
        print "*with landslide"
        print c,"-",d,"\n"
    # show the preview map after pressing the corresponding button
    def showimg(self):
        image = self.cesta + "y.png"
        try:
            os.startfile(image)
        except:
            tkMessageBox.showwarning(""," Cannot open map. ")
    # show the ROC curve after pressing the corresponding button
    def showROC(self):
        ROCg = self.cesta + "plot.png"
        try:
            os.startfile(ROCg)
        except:
            tkMessageBox.showwarning(""," Cannot open map. ")
# ~~~~~~ MAIN GUI ~~~~~~~~ (comment translated from Slovak)
def main():
    # Build the Tk root window, show the static GUI image, attach the GUI
    # frame and enter the event loop.
    gui = Tkinter.Tk()
    # Display the graphical (image) part of the GUI window.
    o1 = PhotoImage(file="files\gui.gif")  # NOTE(review): backslash path only works because \g is not an escape; prefer "files/gui.gif"
    def panelObr(o):
        Label(gui, image=o).pack(side="right", fill="both", expand=True)
    panelObr(o1)
    GUI(gui).pack(side="right", fill="both", expand=True)
    gui.mainloop()
if __name__ == '__main__':
    main()
## PRVY MODEL: import dat, tvorba parametrickych map, export informacii o kazdej z nich)
## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#import sys
#import os
#import atexit
#
#from grass.script import parser, run_command
#
#def cleanup():
# pass
#
#def main():
# run_command("v.in.ogr",
# flags = 'o',
# overwrite = True,
# input = "C:\DP_LF\data",
# layer = "area",
# output = "area",
# min_area = 0.0001,
# snap = -1,
# geometry = "None")
#
# run_command("v.in.ogr",
# flags = 'o',
# overwrite = True,
# input = "C:\DP_LF\data",
# layer = "geology",
# output = "geology",
# min_area = 0.0001,
# snap = -1,
# geometry = "None")
#
# run_command("v.in.ogr",
# flags = 'o',
# overwrite = True,
# input = "C:\DP_LF\data",
# layer = "polohopis",
# output = "polohopis",
# min_area = 0.0001,
# snap = -1,
# geometry = "None")
#
# run_command("v.in.ogr",
# flags = 'o',
# overwrite = True,
# input = "C:\DP_LF\data",
# layer = "vyskopis",
# output = "vyskopis",
# min_area = 0.0001,
# snap = -1,
# geometry = "None")
#
# run_command("v.in.ogr",
# flags = 'o',
# overwrite = True,
# input = "C:\DP_LF\data",
# layer = "zosuvy",
# output = "zosuvy",
# min_area = 0.0001,
# snap = -1,
# geometry = "None")
#
# run_command("g.region",
# overwrite = True,
# vector = "area",
# res = "10")
#
# run_command("v.to.rast",
# overwrite = True,
# input = "area",
# layer = "1",
# type = "point,line,area",
# output = "zu",
# use = "attr",
# attribute_column = "Id",
# value = 1,
# memory = 300)
#
# run_command("v.to.rast",
# overwrite = True,
# input = "geology",
# layer = "1",
# type = "point,line,area",
# output = "geology",
# use = "attr",
# attribute_column = "kat",
# value = 1,
# memory = 300)
#
# run_command("v.to.rast",
# overwrite = True,
# input = "polohopis",
# layer = "1",
# type = "point,line,area",
# output = "landuse",
# use = "attr",
# attribute_column = "Id",
# value = 1,
# memory = 300)
#
# run_command("v.to.rast",
# overwrite = True,
# input = "zosuvy",
# layer = "1",
# type = "point,line,area",
# output = "zosuvy0",
# use = "attr",
# attribute_column = "Id",
# value = 1,
# memory = 300)
#
# run_command("r.mapcalc",
# overwrite = True,
# expression = "zosuvy = if( zosuvy0 == 0, null(), 1)")
#
# run_command("r.mask",
# overwrite = True,
# raster = "zu",
# maskcats = "*",
# layer = "1")
#
# run_command("v.surf.rst",
# overwrite = True,
# input = "vyskopis",
# layer = "1",
# zcolumn = "VYSKA",
# elevation = "dmr",
# slope = "slope",
# aspect = "aspect",
# pcurvature = "curvature_p_rst",
# tcurvature = "curvature_t_rst",
# mcurvature = "curvature_m",
# tension = 40.,
# segmax = 40,
# npmin = 300,
# zscale = 1.0)
#
# run_command("r.flow",
# overwrite = True,
# elevation = "dmr",
# flowlength = "flowlength")
#
# run_command("r.terraflow",
# overwrite = True,
# elevation = "dmr",
# filled = "filled",
# direction = "direction",
# swatershed = "swatershed",
# accumulation = "accumulation",
# tci = "tci",
# memory = 300)
#
# run_command("r.univar",
# flags = 'g',
# overwrite = True,
# map = "geology",
# output = "C:\DP_LF\vypocet\info\info_G.txt",
# percentile = 90,
# separator = "pipe")
#
# run_command("r.univar",
# flags = 'g',
# overwrite = True,
# map = "dmr",
# output = "C:\DP_LF\vypocet\info\info_DMR.txt",
# percentile = 90,
# separator = "pipe")
#
# run_command("r.univar",
# flags = 'g',
# overwrite = True,
# map = "curvature_m",
# output = "C:\DP_LF\vypocet\info\info_K.txt",
# percentile = 90,
# separator = "pipe")
#
# run_command("r.univar",
# flags = 'g',
# overwrite = True,
# map = "flowlength",
# output = "C:\DP_LF\vypocet\info\info_DS.txt",
# percentile = 90,
# separator = "pipe")
#
# run_command("r.univar",
# flags = 'g',
# overwrite = True,
# map = "accumulation",
# output = "C:\DP_LF\vypocet\info\info_M.txt",
# percentile = 90,
# separator = "pipe")
#
# run_command("r.univar",
# flags = 'g',
# overwrite = True,
# map = "landuse",
# output = "C:\DP_LF\vypocet\info\info_VK.txt",
# percentile = 90,
# separator = "pipe")
#
# run_command("r.univar",
# flags = 'g',
# overwrite = True,
# map = "aspect",
# output = "C:\DP_LF\vypocet\info\info_E.txt",
# percentile = 90,
# separator = "pipe")
#
# run_command("r.univar",
# flags = 'g',
# overwrite = True,
# map = "slope",
# output = "C:\DP_LF\vypocet\info\info_S.txt",
# percentile = 90,
# separator = "pipe")
#
# return 0
#
#if __name__ == "__main__":
# options, flags = parser()
# atexit.register(cleanup)
# sys.exit(main())
#
## DRUHY MODEL: prvotna reklasifikacia parametrickych map, export informacii
## o ploche kazdej triedy a ploche zosuvov v tejto triede
## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#import sys
#import os
#import atexit
#
#from grass.script import parser, run_command
#
#def cleanup():
# pass
#
#def main():
# run_command("r.reclass",
# overwrite = True,
# input = "geology",
# output = "geology_recl1",
# rules = "C:\DP_LF\vypocet\recl1\recl1_G.txt")
#
# run_command("r.recode",
# overwrite = True,
# input = "dmr",
# output = "dmr_recl1",
# rules = "C:\DP_LF\vypocet\recl1\recl1_DMR.txt")
#
# run_command("r.recode",
# overwrite = True,
# input = "slope",
# output = "slope_recl1",
# rules = "C:\DP_LF\vypocet\recl1\recl1_S.txt")
#
# run_command("r.recode",
# overwrite = True,
# input = "aspect",
# output = "aspect_recl1",
# rules = "C:\DP_LF\vypocet\recl1\recl1_E.txt")
#
# run_command("r.reclass",
# overwrite = True,
# input = "landuse",
# output = "landuse_recl1",
# rules = "C:\DP_LF\vypocet\recl1\recl1_VK.txt")
#
# run_command("r.recode",
# overwrite = True,
# input = "flowlength",
# output = "flowlength_recl1",
# rules = "C:\DP_LF\vypocet\recl1\recl1_DS.txt")
#
# run_command("r.recode",
# overwrite = True,
# input = "accumulation",
# output = "accumulation_recl1",
# rules = "C:\DP_LF\vypocet\recl1\recl1_M.txt")
#
# run_command("r.recode",
# overwrite = True,
# input = "curvature_m",
# output = "curv_m_recl1",
# rules = "C:\DP_LF\vypocet\recl1\recl1_K.txt")
#
# run_command("r.report",
# flags = 'hn',
# overwrite = True,
# map = "geology_recl1",
# units = "k,p",
# output = "C:\DP_LF\vypocet\report\report_G.txt",
# null_value = "*",
# nsteps = 255)
#
# run_command("r.report",
# flags = 'hn',
# overwrite = True,
# map = "dmr_recl1",
# units = "k,p",
# output = "C:\DP_LF\vypocet\report\report_DMR.txt",
# null_value = "*",
# nsteps = 255)
#
# run_command("r.report",
# flags = 'hn',
# overwrite = True,
# map = "slope_recl1",
# units = "k,p",
# output = "C:\DP_LF\vypocet\report\report_S.txt",
# null_value = "*",
# nsteps = 255)
#
# run_command("r.report",
# flags = 'hn',
# overwrite = True,
# map = "aspect_recl1",
# units = "k,p",
# output = "C:\DP_LF\vypocet\report\report_E.txt",
# null_value = "*",
# nsteps = 255)
#
# run_command("r.report",
# flags = 'hn',
# overwrite = True,
# map = "landuse_recl1",
# units = "k,p",
# output = "C:\DP_LF\vypocet\report\report_VK.txt",
# null_value = "*",
# nsteps = 255)
#
# run_command("r.report",
# flags = 'hn',
# overwrite = True,
# map = "flowlength_recl1",
# units = "k,p",
# output = "C:\DP_LF\vypocet\report\report_DS.txt",
# null_value = "*",
# nsteps = 255)
#
# run_command("r.report",
# flags = 'hn',
# overwrite = True,
# map = "accumulation_recl1",
# units = "k,p",
# output = "C:\DP_LF\vypocet\report\report_M.txt",
# null_value = "*",
# nsteps = 255)
#
# run_command("r.report",
# flags = 'hn',
# overwrite = True,
# map = "curv_m_recl1",
# units = "k,p",
# output = "C:\DP_LF\vypocet\report\report_K.txt",
# null_value = "*",
# nsteps = 255)
#
# run_command("r.mask",
# overwrite = True,
# raster = "zosuvy",
# maskcats = "*",
# layer = "1")
#
# run_command("r.report",
# flags = 'hn',
# overwrite = True,
# map = "geology_recl1",
# units = "k,p",
# output = "C:\DP_LF\vypocet\report\report_G_z.txt",
# null_value = "*",
# nsteps = 255)
#
# run_command("r.report",
# flags = 'hn',
# overwrite = True,
# map = "dmr_recl1",
# units = "k,p",
# output = "C:\DP_LF\vypocet\report\report_DMR_z.txt",
# null_value = "*",
# nsteps = 255)
#
# run_command("r.report",
# flags = 'hn',
# overwrite = True,
# map = "slope_recl1",
# units = "k,p",
# output = "C:\DP_LF\vypocet\report\report_S_z.txt",
# null_value = "*",
# nsteps = 255)
#
# run_command("r.report",
# flags = 'hn',
# overwrite = True,
# map = "aspect_recl1",
# units = "k,p",
# output = "C:\DP_LF\vypocet\report\report_E_z.txt",
# null_value = "*",
# nsteps = 255)
#
# run_command("r.report",
# flags = 'hn',
# overwrite = True,
# map = "landuse_recl1",
# units = "k,p",
# output = "C:\DP_LF\vypocet\report\report_VK_z.txt",
# null_value = "*",
# nsteps = 255)
#
# run_command("r.report",
# flags = 'hn',
# overwrite = True,
# map = "flowlength_recl1",
# units = "k,p",
# output = "C:\DP_LF\vypocet\report\report_DS_z.txt",
# null_value = "*",
# nsteps = 255)
#
# run_command("r.report",
# flags = 'hn',
# overwrite = True,
# map = "accumulation_recl1",
# units = "k,p",
# output = "C:\DP_LF\vypocet\report\report_M_z.txt",
# null_value = "*",
# nsteps = 255)
#
# run_command("r.report",
# flags = 'hn',
# overwrite = True,
# map = "curv_m_recl1",
# units = "k,p",
# output = "C:\DP_LF\vypocet\report\report_K_z.txt",
# null_value = "*",
# nsteps = 255)
#
# run_command("r.mask",
# overwrite = True,
# raster = "zu",
# maskcats = "*",
# layer = "1")
#
# return 0
#
#if __name__ == "__main__":
# options, flags = parser()
# atexit.register(cleanup)
# sys.exit(main())
#
## TRETI MODEL: druhotna reklasifikacia parametrickych map
## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#import sys
#import os
#import atexit
#
#from grass.script import parser, run_command
#
#def cleanup():
# pass
#
#def main():
# run_command("r.reclass",
# overwrite = True,
# input = "geology",
# output = "geology_recl2",
# rules = "C:\DP_LF\vypocet\recl2\recl2_G.txt")
#
# run_command("r.reclass",
# overwrite = True,
# input = "dmr_recl1",
# output = "dmr_recl2",
# rules = "C:\DP_LF\vypocet\recl2\recl2_DMR.txt")
#
# run_command("r.reclass",
# overwrite = True,
# input = "slope_recl1",
# output = "slope_recl2",
# rules = "C:\DP_LF\vypocet\recl2\recl2_S.txt")
#
# run_command("r.reclass",
# overwrite = True,
# input = "aspect_recl1",
# output = "aspect_recl2",
# rules = "C:\DP_LF\vypocet\recl2\recl2_E.txt")
#
# run_command("r.reclass",
# overwrite = True,
# input = "landuse_recl1",
# output = "landuse_recl2",
# rules = "C:\DP_LF\vypocet\recl2\recl2_VK.txt")
#
# run_command("r.reclass",
# overwrite = True,
# input = "flowlength_recl1",
# output = "flowlength_recl2",
# rules = "C:\DP_LF\vypocet\recl2\recl2_DS.txt")
#
# run_command("r.reclass",
# overwrite = True,
# input = "accumulation_recl1",
# output = "accumulation_recl2",
# rules = "C:\DP_LF\vypocet\recl2\recl2_M.txt")
#
# run_command("r.reclass",
# overwrite = True,
# input = "curv_m_recl1",
# output = "curv_m_recl2",
# rules = "C:\DP_LF\vypocet\recl2\recl2_K.txt")
#
# run_command("r.mapcalc",
# overwrite = True,
# file = "C:\DP_LF\vypocet\rovnica.txt")
#
# run_command("r.univar",
# flags = 'g',
# overwrite = True,
# map = "y",
# output = "C:\DP_LF\vypocet\info_y.txt",
# percentile = 90,
# separator = "pipe")
#
# return 0
#
#if __name__ == "__main__":
# options, flags = parser()
# atexit.register(cleanup)
# sys.exit(main())
#
## STVRTY MODEL: rozdelenie spojiteho intervalu do kategorii (viac ako dva alebo dva)
## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#import sys
#import os
#import atexit
#
#from grass.script import parser, run_command
#
#def cleanup():
# pass
#
#def main():
# run_command("r.quantile",
# flags = 'r',
# input = "y",
# quantiles = 5,
# bins = 1000000)
#
# run_command("r.quantile",
# flags = 'r',
# quiet = True,
# input = "y",
# quantiles = -1000000000,
# percentiles = 90,
# bins = 1000000)
#
# return 0
#
#if __name__ == "__main__":
# options, flags = parser()
# atexit.register(cleanup)
# sys.exit(main())
#
## PIATY MODEL: export vyslednej mapy, nastavenie farieb, reklasifikacia mapy Y
## a export informacii o mapach pre validaciu a zostrojenie ROC krivky (tri cutoff values)
## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#import sys
#import os
#import atexit
#
#from grass.script import parser, run_command
#
#def cleanup():
# pass
#
#def main():
# run_command("r.colors",
# flags = 'e',
# map = "y",
# color = "gyr")
#
# run_command("r.recode",
# overwrite = True,
# input = "y",
# output = "ba",
# rules = "C:\DP_LF\vypocet\recl_y.txt",
# title = "kategorie")
#
# run_command("r.out.png",
# flags = 't',
# overwrite = True,
# input = "y",
# output = "C:\DP_LF\vypocet\y.png",
# compression = 7)
#
# run_command("r.recode",
# overwrite = True,
# input = "y",
# output = "y_COV1",
# rules = "C:\DP_LF\vypocet\recl_COV1.txt",
# title = "validation")
#
# run_command("r.stats",
# flags = 'an',
# overwrite = True,
# input = "y_COV1",
# output = "C:\DP_LF\vypocet\y_stats_COV1.txt",
# separator = "space",
# null_value = "*",
# nsteps = 255)
#
# run_command("r.stats",
# flags = 'cn',
# overwrite = True,
# input = "zosuvy0,y_COV1",
# output = "C:\DP_LF\vypocet\stats_COV1.txt",
# separator = "space",
# null_value = "*",
# nsteps = 255)
#
# run_command("r.recode",
# overwrite = True,
# input = "y",
# output = "y_COV2",
# rules = "C:\DP_LF\vypocet\recl_COV2.txt")
#
# run_command("r.stats",
# flags = 'an',
# overwrite = True,
# input = "y_COV2",
# output = "C:\DP_LF\vypocet\y_stats_COV2.txt",
# separator = "space",
# null_value = "*",
# nsteps = 255)
#
# run_command("r.stats",
# flags = 'cn',
# overwrite = True,
# input = "zosuvy0,y_COV2",
# output = "C:\DP_LF\vypocet\stats_COV2.txt",
# separator = "space",
# null_value = "*",
# nsteps = 255)
#
# run_command("r.recode",
# overwrite = True,
# input = "y",
# output = "y_COV3",
# rules = "C:\DP_LF\vypocet\recl_COV3.txt")
#
# run_command("r.stats",
# flags = 'an',
# overwrite = True,
# input = "y_COV3",
# output = "C:\DP_LF\vypocet\y_stats_COV3.txt",
# separator = "space",
# null_value = "*",
# nsteps = 255)
#
# run_command("r.stats",
# flags = 'cn',
# overwrite = True,
# input = "zosuvy0,y_COV3",
# output = "C:\DP_LF\vypocet\stats_COV3.txt",
# separator = "space",
# null_value = "*",
# nsteps = 255)
#
# run_command("r.category",
# map = "ba",
# separator = ":",
# rules = "C:\DP_LF\nastroj\files\display\cat_vysledok.txt")
#
#
# return 0
#
#
#if __name__ == "__main__":
# options, flags = parser()
# atexit.register(cleanup)
# sys.exit(main()) |
14,274 | 4ca8a4b1e20657503a1055125de23b7b0fd90644 | #!/usr/bin/python
# Build main.tex into a PDF: run pdflatex once, run bibtex plus an extra
# pdflatex pass when a .bib file exists, then two final passes so that
# cross-references and the bibliography settle.
import subprocess
import glob

PDFLATEX = ["pdflatex", "-halt-on-error", "-file-line-error", "main.tex"]

subprocess.check_call(PDFLATEX)
if glob.glob("*.bib"):
    subprocess.check_call(["bibtex", "main"])
    subprocess.check_call(PDFLATEX)
subprocess.check_call(PDFLATEX)
subprocess.check_call(PDFLATEX)
|
14,275 | 51ed93bb791fa5bdc51f90c91564870adde027e3 | """""
for c in "Bruno gostoso": # for com string
print (c)
"""
""""
range(0, 10, 1) # (start, stop, step)
x = list(range(0, 10, 1)) # Função range retorna em range, por isso converti em list
print(x) # range é parecido com Progressão Aritmética
"""
""""
for i in range(10, 100, 5):
print(i)
"""
""""
for k in range(100):
print(k)
if(k == 54):
break # Função break interrompe um laço e economiza memória
"""
""""
print()
print("início da rodada")
f = 0
while(f<10):
f += 1
if(f%2==0):
continue
if(f>5):
break
print(f)
else:
print("else")
print("fim")
print()
"""
# NOTE(review): range(100, 2) with the default positive step is EMPTY, so this
# loop body never executes. range(0, 100, 2) or range(100, 2, -1) was probably
# intended -- confirm before changing.
for j in range(100, 2):
    print(j)
14,276 | 95da2d9fcd3c09c343d7f23886bb2f5f45075f2f | from django.contrib import admin
from django.utils.html import format_html
# Register your models here.
from .models import Question
class QuestionsAdmin(admin.ModelAdmin):
    """
    Admin options for forum questions: list columns, search fields, a date
    filter, and per-row Change/Delete shortcut buttons.

    Note: the original inner ``class Meta`` was removed -- ModelAdmin does not
    read a nested Meta, so it was dead code.
    """
    list_display = ["Question", "Answer", "dateAsked", "Question_Status"]
    search_fields = ["dateAsked", "Question", "Answer"]
    list_filter = ["dateAsked"]

    def Question_Status(self, obj):
        # Single format_html call (questionID is escaped per placeholder);
        # also fixes the original's malformed trailing `</>` tag and
        # normalizes the spacing between the two buttons.
        return format_html(
            '<button><a href="/admin/forum/question/{0}/change/">Change Status</a></button>'
            '&nbsp;&nbsp;'
            '<button><a href="/admin/forum/question/{0}/delete/">Delete</a></button>',
            obj.questionID,
        )


admin.site.register(Question, QuestionsAdmin)
|
14,277 | d4c0bb0a03c58a52d4a0c6caa7e20833927adc41 | #
# Copyright 2021 Ocean Protocol Foundation
# SPDX-License-Identifier: Apache-2.0
#
from ocean_lib.web3_internal.currency import to_wei
from ocean_lib.web3_internal.event_filter import EventFilter
def test_transfer_event_filter(alice_ocean, alice_wallet, alice_address, bob_address):
    """Exercise the EventFilter lifecycle against a token Transfer event.

    Mints, approves and transfers a freshly created data token so a Transfer
    event exists in the latest block, then checks the filter can be created,
    uninstalled, recreated, and polled without yielding new entries.
    (Arguments are presumably pytest fixtures supplied by conftest -- verify.)
    """
    token = alice_ocean.create_data_token(
        "DataToken1", "DT1", from_wallet=alice_wallet, blob="foo_blob"
    )
    token.mint(alice_address, to_wei(100), from_wallet=alice_wallet)
    token.approve(bob_address, to_wei(1), from_wallet=alice_wallet)
    token.transfer(bob_address, to_wei(5), from_wallet=alice_wallet)
    block = alice_ocean.web3.eth.block_number
    event_filter = EventFilter(
        token.events.Transfer(), from_block=block, to_block=block
    )
    assert event_filter.filter_id, "Event filter ID is None."
    event_filter.uninstall()
    event_filter.recreate_filter()
    assert (
        event_filter.get_new_entries() == []
    ), "There are new entries for EventFilter."
|
14,278 | f3ee082ab616718afa315b127fdc490da4724ed6 | #!/usr/bin/python3
'''
[-2, -1, 0, 5, 10]
[0, 1, 4, 25, 100]
'''
def sortedSquareArray(array):
    """Return the squares of a sorted array, themselves in sorted order.

    Two-pointer O(n) scan: the largest remaining absolute value sits at one
    of the two ends, so the output is filled from its last slot backwards.
    """
    n = len(array)
    result = [0] * n
    lo, hi = 0, n - 1
    for out in range(n - 1, -1, -1):
        if abs(array[lo]) > abs(array[hi]):
            result[out] = array[lo] ** 2
            lo += 1
        else:
            result[out] = array[hi] ** 2
            hi -= 1
    return result
if __name__ == "__main__":
    # Demo run; prints [0, 1, 4, 25, 100].
    print(sortedSquareArray([-2, -1, 0, 5, 10]))
|
14,279 | 971e6c4637f37cb6cf49ecaf1f54deed70e073a9 | from . import utils
from . import energy
from . import seamops
from . import warnings
from . import advanced
|
14,280 | 5ae54564ce4d7e3930990c687a92499e66dcee41 | def input_tuple_lc(prompt, types, sep):
answer_str = input(prompt)
new_list = answer_str.split(sep)
new_tuple = ()
if len(new_list) != len(types):
print("Number of values inputted do not match number of values intended")
return ()
i = 0
if len(new_list) == len(types):
while i < len(new_list): #or len(types)
if i == len(new_list) - 1: #because bool('False') returns True, we use eval() instead
try:
new_object = eval(new_list[i])
except NameError:
print("Error, incorrect data type inputted. If false/true typed, type Fale/True")
return ()
new_tuple = new_tuple + (new_object,)
i+=1
else:
try:
new_object = types[i](new_list[i])
except ValueError:
print("Error, incorrect data type inputted")
return ()
new_tuple = new_tuple + (new_object,)
i+=1
return new_tuple
else:
return ()
def input_tuple(prompt, types, sep):
    # Character-scanning variant of input_tuple_lc: walks the raw string and
    # cuts off one field each time `sep` (or the end of input) is reached.
    # If the input holds fewer fields than `types`, the result simply has
    # fewer elements (no error), assuming every conversion succeeds.
    answer_str = input(prompt)
    converted_str = ''
    new_tuple = ()
    i = 0
    type_index = 0
    while i < len(answer_str) and type_index < len(types):
        if answer_str[i]==sep or i == len(answer_str) - 1:
            if i == len(answer_str)-1:
                # Final field: include the last character and convert with
                # eval() so 'False' yields a real bool (bool('False') is True).
                converted_str = answer_str[:i+1]
                try:
                    converted_str = eval(converted_str)
                except NameError:
                    print("Error, incorrect data type inputted. If false typed, please type False")
                    return ()
            else:
                converted_str = answer_str[:i]
                try:
                    converted_str = types[type_index](converted_str)
                except ValueError:
                    print("Error, incorrect data type inputted")
                    return ()
            new_tuple = new_tuple + (converted_str,)
            answer_str = answer_str[i+1:]  # drop the consumed field and its separator
            i = 0
            type_index+=1
        else:
            i+=1
    return new_tuple
def read_tuple(file_obj, types, sep):
    # File-based variant: concatenates the file's contents, keeps only the
    # text before the first space, then parses it like input_tuple_lc.
    new_str = ''
    for char in file_obj:  # NOTE(review): iterating a file yields LINES, not chars; name is misleading
        new_str = new_str + char
    new_ls = new_str.split(' ') #get only the values we want
    new_str = new_ls[0] #get the first element of the list
    new_list = new_str.split(sep)
    new_tuple = ()
    if len(new_list) != len(types):
        print("Number of values inputted do not match number of values intended")
        return ()
    i = 0
    if len(new_list) == len(types):
        while i < len(new_list): #or len(types)
            if i == len(new_list) - 1:  # last field: eval() so 'False'/'True' become real bools
                try:
                    new_object = eval(new_list[i])
                except NameError:
                    print("Error, incorrect data type inputted. If false/true typed, type Fale/True")
                    return ()
                new_tuple = new_tuple + (new_object,)
                i+=1
            else:
                try:
                    new_object = types[i](new_list[i])
                except ValueError:
                    print("Error, incorrect data type inputted")
                    return ()
                new_tuple = new_tuple + (new_object,)
                i+=1
        return new_tuple
# Demo driver: read one record from the keyboard with each parsing strategy,
# then one record from a file.
sep = ','
prompt = "Please input a first name, last name, age, ID, and full-time(True or False) separated by '" + sep + "':"
types = (str,str,float,int,bool)
some_tuple = input_tuple(prompt,types,sep)
print(some_tuple)
some_tuple = input_tuple_lc(prompt,types,sep)
print(some_tuple)
file_obj = open("some_text.txt","r")  # NOTE(review): never closed; a `with` block would be safer
some_tuple = read_tuple(file_obj,types,sep)
print(some_tuple)
|
14,281 | cc16b73f8150786547f479358ffcf21b1baf6b10 | # Copyright 2011-2014 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fuselage.argument import Integer, String
from fuselage.policy import NAND, Policy, Present
from fuselage.resource import Resource
class Service(Resource):
    """This represents service startup and shutdown via an init daemon."""
    # Each field below is a declarative fuselage argument descriptor; the bare
    # strings that follow are Sphinx-style attribute docs.
    name = String()
    """ A unique name representing an initd service.
    This would normally match the name as it appears in /etc/init.d.
    """
    priority = Integer(default=99)
    """ Priority of the service within the boot order.
    This attribute will have no effect when using a dependency or event based
    init.d subsystem like upstart or systemd. """
    start = String()
    """ A command that when executed will start the service.
    If not provided, the provider will use the default service start invocation
    for the init.d system in use.
    """
    stop = String()
    """ A command that when executed will stop the service.
    If not provided, the provider will use the default service stop invocation
    for the init.d system in use.
    """
    restart = String()
    """ A command that when executed will restart the service.
    If not provided, the provider will use the default service restart invocation
    for the init.d system in use. If it is not possible to automatically determine
    if the restart script is available the service will be stopped and started instead.
    """
    reconfig = String()
    """ A command that when executed will make the service reload its
    configuration file. """
    running = String()
    """ A command to execute to determine if a service is running. Should have an exit code of 0 for success. """
    pidfile = String()
    """ Where the service creates its pid file.
    This can be provided instead of a status command as an alternative way of checking if
    a service is running or not.
    """
class ServiceStartPolicy(Policy):
    """Start a service if it isn't running."""
    resource = Service
    name = "start"
    default = True
    signature = (
        Present("name"),
        # running and pidfile are alternative liveness checks: NAND means not both.
        NAND(Present("running"), Present("pidfile")),
    )
class ServiceStopPolicy(Policy):
    """Stop a service if it is running."""
    resource = Service
    name = "stop"
    signature = (
        Present("name"),
        # running and pidfile are alternative liveness checks: NAND means not both.
        NAND(Present("running"), Present("pidfile")),
    )
class ServiceRestartPolicy(Policy):
    """Restart a service.
    If a service isn't running it will just be started instead.
    """
    resource = Service
    name = "restart"
    signature = (
        Present("name"),
        # running and pidfile are alternative liveness checks: NAND means not both.
        NAND(Present("running"), Present("pidfile")),
    )
class ServiceReloadPolicy(Policy):
    """Get the service to reload its configuration.
    If a service isn't running it will just be started instead.
    """
    resource = Service
    name = "reconfig"
    signature = (
        Present("name"),
        # running and pidfile are alternative liveness checks: NAND means not both.
        NAND(Present("running"), Present("pidfile")),
    )
|
14,282 | a72fc3f7d5476e7e9ab95d38a5b93dd8f32a0b28 | # #############################################################################
# array.py
# ========
# Author : Sepand KASHANI [kashani.sepand@gmail.com]
# #############################################################################
"""
Tools and utilities for manipulating arrays.
"""
def index(x, axis, index_spec):
    """
    Form indexing tuple for NumPy arrays.

    Produces a tuple with :py:class:`slice` (take-everything) in every
    dimension of `x` except `axis`, where `index_spec` is placed instead.
    Negative `axis` values index from the last dimension, as usual.

    Parameters
    ----------
    x : :py:class:`~numpy.ndarray`
        Array to index.
    axis : int
        Dimension along which to apply `index_spec`.
    index_spec : int or :py:class:`slice`
        Index/slice to use.

    Returns
    -------
    indexer : tuple
        Indexing tuple.
    """
    selector = [slice(None)] * x.ndim
    selector[axis] = index_spec
    return tuple(selector)
|
14,283 | d7f5dbd116e2d4ce77bdfa129ba37e7e8a60959f | from listeners import *
|
14,284 | f1826e55bbc34039e2c0d9e39b980aac4517404d | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-09-09 20:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.manager
class Migration(migrations.Migration):
    """Initial (apparently squashed) migration for the menu app.

    Creates an MPTT-style Menu model that is deleted again at the end, then
    builds the GroupMenu -> MainMenu -> SubMenu hierarchy with per-parent
    ordering constraints and optional generic-FK targets.
    """

    initial = True

    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
    ]

    operations = [
        # Tree-style model with lft/rght/tree_id/level columns; removed by the
        # DeleteModel at the end of this migration.
        migrations.CreateModel(
            name='Menu',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, unique=True)),
                ('slug', models.SlugField(blank=True, null=True, unique=True)),
                ('order', models.PositiveSmallIntegerField()),
                ('lft', models.PositiveIntegerField(db_index=True, editable=False)),
                ('rght', models.PositiveIntegerField(db_index=True, editable=False)),
                ('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
                ('level', models.PositiveIntegerField(db_index=True, editable=False)),
                ('content_type', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType', verbose_name='тип')),
                ('object_id', models.PositiveIntegerField(blank=True, null=True)),
            ],
            options={
                'ordering': ('order',),
            },
            managers=[
                ('_default_manager', django.db.models.manager.Manager()),
            ],
        ),
        migrations.AlterUniqueTogether(
            name='menu',
            unique_together=set([]),
        ),
        migrations.CreateModel(
            name='GroupMenu',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, unique=True)),
                ('order', models.PositiveSmallIntegerField()),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='MainMenu',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, unique=True)),
                ('order', models.PositiveSmallIntegerField()),
                ('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='menu.GroupMenu')),
            ],
        ),
        migrations.CreateModel(
            name='SubMenu',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, unique=True)),
                ('order', models.PositiveSmallIntegerField()),
                ('slug', models.SlugField(blank=True, null=True, unique=True)),
                ('object_id', models.PositiveIntegerField(blank=True, null=True)),
                ('content_type', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType', verbose_name='тип')),
                ('main_menu', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='menu.MainMenu')),
            ],
        ),
        # Each child is ordered uniquely within its parent.
        migrations.AlterUniqueTogether(
            name='submenu',
            unique_together=set([('main_menu', 'order')]),
        ),
        migrations.AlterUniqueTogether(
            name='mainmenu',
            unique_together=set([('group', 'order')]),
        ),
        migrations.AlterModelOptions(
            name='groupmenu',
            options={'ordering': ('order',)},
        ),
        migrations.AddField(
            model_name='groupmenu',
            name='blank',
            field=models.BooleanField(default=False),
        ),
        migrations.AlterField(
            model_name='mainmenu',
            name='group',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='main_menus', to='menu.GroupMenu'),
        ),
        migrations.AlterField(
            model_name='submenu',
            name='main_menu',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sub_menus', to='menu.MainMenu'),
        ),
        migrations.RemoveField(
            model_name='menu',
            name='content_type',
        ),
        # NOTE(review): 'parent' is not among the Menu fields created above --
        # presumably an artifact of squashing; verify this migration applies.
        migrations.RemoveField(
            model_name='menu',
            name='parent',
        ),
        migrations.AlterModelOptions(
            name='submenu',
            options={'ordering': ('order',)},
        ),
        migrations.DeleteModel(
            name='Menu',
        ),
    ]
14,285 | 0bff24da254eaac4d00b66366e57d9222d730130 | # 172. Factorial Trailing Zeroes
class Solution(object):
    def trailingZeroes(self, n):
        """
        Brute force: compute n! explicitly, then count its trailing '0' digits.
        :type n: int
        :rtype: int
        """
        factorial = 1
        for k in range(2, n + 1):
            factorial *= k
        digits = str(factorial)
        return len(digits) - len(digits.rstrip("0"))
class Solution(object):
    def trailingZeroes(self, n):
        """
        Count factors of 5 in n!: n//5 + n//25 + n//125 + ... until the
        quotient reaches zero. (Factors of 2 are always more plentiful, so
        fives alone determine the number of trailing zeros.)
        :type n: int
        :rtype: int
        """
        total = 0
        power = 5
        while n // power:
            total += n // power
            power *= 5
        return total
class Solution(object):
    def trailingZeroes(self, n):
        """
        Recursive count of factors of 5 in n!.
        :type n: int
        :rtype: int

        Bug fix: the original used Python 2 style `n/5`; under Python 3 that
        is float division, so the recursion produced floats and a wrong,
        non-integer result. Floor division restores the intended behavior.
        """
        return 0 if n == 0 else n // 5 + self.trailingZeroes(n // 5)
|
14,286 | 163444d45931432915959c4b7a1d92ac4f34c58e | def findLoop(n,m,arr):
    # NOTE(review): incomplete sketch -- looks like it scans, for each of the
    # first n positions, whether the next m elements equal arr[i]; confirm intent.
    for i in range(n):
        x = arr[i]
        for j in range(m):
            # NOTE(review): arr[i+j] raises IndexError once i+j >= len(arr).
            if arr[i+j] != x:
                break
    # NOTE(review): nothing is recorded or returned; the function yields None.
|
14,287 | 10735cce6ba21ac14d6000da1b139a7509c58ae8 | from . import report_shopping_book
from . import report_sales_book
|
14,288 | f109a6e910faa71376aca52f07c310b089ffa4cc | import pandas as pd
import plotly.express as px
import numpy as np
import csv
def getDataSource(data_path):
    """Read the students CSV and return the two series for correlation.

    Returns {"x": <"Days Present" column as floats>,
             "y": <"Marks In Percentage" column as floats>}.

    Fix (naming only): the original appended the marks column into a list
    named ``Days`` and the days column into ``Marks``; the values returned
    under "x" and "y" are unchanged.
    """
    days_present = []
    marks = []
    with open(data_path) as csv_file:
        for row in csv.DictReader(csv_file):
            marks.append(float(row["Marks In Percentage"]))
            days_present.append(float(row["Days Present"]))
    return {"x": days_present, "y": marks}
def findCorrelation(datasource):
    # Print the Pearson correlation between the two series
    # (off-diagonal element of the 2x2 correlation matrix).
    correlation=np.corrcoef(datasource["x"],datasource["y"])
    print("correlation=",correlation[0,1])
def plot():
    # Scatter-plot the raw CSV columns with plotly express (opens a browser tab).
    df= pd.read_csv("students.csv")
    fig=px.scatter(df,x="Marks In Percentage",y="Days Present")
    fig.show()
def setup():
    # Load the CSV next to this script and print the correlation.
    data_path="./students.csv"
    datasource=getDataSource(data_path)
    findCorrelation(datasource)
# Run the analysis, then show the interactive plot.
setup()
plot()
|
14,289 | c15e41d84389e2838c78157fd336062d3696d023 | '''
Description:
-----------
Generates traces for comparing declarative and procedural Synoptic algorithms.
Generates 100 log files, ranging over event types from an alphabet of 8 events,
each log contains 24 event instances.
$ python gen_traces.py
'''
import sys
import os
import random
import sys
def clear_dir(dirname):
    '''
    Remove and re-create a directory.
    '''
    # Python 2 print statement; this script is not Python 3 compatible.
    print dirname
    # Shells out; dirname is trusted (hard-coded by the caller below).
    os.system("rm -Rf " + dirname)
    os.system("mkdir " + dirname)
def create_trace_etypes(dirname, total_events, etypes, i):
    # Write one synthetic log of `total_events` random event instances,
    # split into total_events/4 executions separated by "--" lines.
    fname = dirname + "/log-%d_etypes-%d-%d.txt" % (total_events, etypes, i)
    f = open(fname, 'w')  # NOTE(review): never closed explicitly
    def logEventFn(e):
        f.write(e + "\n")
    execs = total_events / 4  # Python 2 integer division
    for ex in range(execs):
        etype = 0  # NOTE(review): unused; event types are drawn randomly below
        for ev in range(total_events / execs):
            etype_s = random.randint(0, etypes)  # inclusive bounds: etypes+1 possible types
            logEventFn("e" + str(etype_s))
        logEventFn("--")
    return
def main():
    # vary number of invariants:
    # Generate 100 log files of 24 events each for a single alphabet size (8),
    # matching the experiment described in the module docstring.
    dirname = "vary-etypes"
    total_events = 24
    clear_dir(dirname)
    for etypes in [8]:
        for i in range(100):
            create_trace_etypes(dirname, total_events, etypes, i)
if __name__ == "__main__":
    # raw_input is Python 2; confirm before wiping previously generated traces.
    answer = raw_input("This will delete all the previously generated trace files in, continue? (y/n) ")
    if answer != 'y':
        sys.exit(0)
    main()
|
14,290 | 6c6e3ac5255ec5f69a10f7ba0b3d954ad39dd274 | from django.shortcuts import render
from .models import Clothes, Shoes, Watch, Cap
# Create your views here.
def index(request):
    """Render the storefront home page with every product category."""
    # Querysets are lazy, so building the context first changes nothing at runtime.
    context = {
        'clothes': Clothes.objects.all(),
        'shoes': Shoes.objects.all(),
        'watch': Watch.objects.all(),
        'cap': Cap.objects.all(),
    }
    return render(request, 'tezcoder/index.html', context)
def cart(request):
    """Render the shopping-cart page (no extra context)."""
    return render(request, 'tezcoder/cart.html')
14,291 | 88a0450305c6dcba8d9f1d933e28cc83227b7b5d | #
# Copyright (C) 2020 IBM. All Rights Reserved.
#
# See LICENSE.txt file in the root directory
# of this source tree for licensing information.
#
from test_integration.contract_skills import ContractSkills
class TestSkillFixit(ContractSkills):
    """Contract test for the 'fixit' skill: misspelled commands get corrected."""
    def get_skill_name(self):
        # Name of the skill under test.
        return 'fixit'
    def get_commands_to_execute(self):
        # Deliberately misspelled commands fed to the skill.
        return ['clean', 'puthon']
    def get_commands_expected(self):
        # Corrections the skill is expected to produce, in the same order.
        return ['clear', 'python']
|
14,292 | 645d66fe3db85b131afd1b77312f2709bbcbed39 | import pandas as pd
import os
from sqlalchemy import create_engine
def main():
    """Load the joined molecular-properties dataset into a DataFrame.

    Joins magnetic shielding tensors with per-atom structures, Mulliken
    charges and scalar-coupling contributions, ordered by molecule and atom.
    Requires a local PostgreSQL server -- credentials are hard-coded.
    NOTE(review): 'sheilding' is how the actual table is named, so the SQL
    must keep the typo.
    """
    engine = create_engine('postgresql://postgres:postgres@localhost:5432/molecular_prediction')
    query = '''
    SELECT
        magnetic_sheilding_tensors.*
        , structures.atom
        , structures.x
        , structures.y
        , structures.z
        , mulliken_charges.mulliken_charge
        , scalar_coupling_contributions.atom_index_0
        , scalar_coupling_contributions.atom_index_1
        , scalar_coupling_contributions.type
        , scalar_coupling_contributions.fc
        , scalar_coupling_contributions.sd
        , scalar_coupling_contributions.pso
        , scalar_coupling_contributions.dso
    FROM
        magnetic_sheilding_tensors
    INNER JOIN
        mulliken_charges
    ON
        magnetic_sheilding_tensors.molecule_name = mulliken_charges.molecule_name
    AND
        magnetic_sheilding_tensors.atom_index = mulliken_charges.atom_index
    INNER JOIN
        structures
    ON
        magnetic_sheilding_tensors.molecule_name = structures.molecule_name
    AND
        magnetic_sheilding_tensors.atom_index = structures.atom_index
    INNER JOIN
        scalar_coupling_contributions
    ON
        magnetic_sheilding_tensors.molecule_name = scalar_coupling_contributions.molecule_name
    ORDER BY magnetic_sheilding_tensors.molecule_name ASC, magnetic_sheilding_tensors.atom_index ASC
    '''
    # induvidual_atom_in_molecule = pd.read_sql(query,engine)
    # induvidual_atom_in_molecule
    return pd.read_sql(query,engine)
# Script entry point: fetch the joined dataset and dump it to stdout.
if __name__ == "__main__":
    print(main())
14,293 | c45a45adbb7f34f008480bddc894d3efd2f54649 | # Generated by Django 3.1.3 on 2020-11-14 06:08
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('visitas', '0003_auto_20201114_0005'),
]
operations = [
migrations.RenameModel(
old_name='visita',
new_name='RegistrarVisita',
),
]
|
14,294 | 9579a34b89cd94aafb3ccd6285a5061e2d2e0599 | import io
import os
import shutil
import zipfile
import re
import unicodedata
from PIL import Image
MAX_FILESIZE = 20 * 1024 ** 3 # 20MB
def set_dir(filepath):
    """
    Ensure *filepath* exists as an empty directory.

    Any pre-existing directory (and all of its contents) is removed first.
    :param filepath: path
    :return:
    """
    if os.path.exists(filepath):
        shutil.rmtree(filepath)
    os.mkdir(filepath)
def ensure_dir(dir_path):
    """
    Create a folder if the given path does not exist.
    :param dir_path:
    :return:
    """
    # EAFP: attempt the mkdir and ignore "already exists"; this avoids the
    # check-then-create race of testing os.path.exists first. Parent
    # directories must already exist (os.mkdir is not recursive).
    try:
        os.mkdir(dir_path)
    except FileExistsError:
        pass
def slugify(value, allow_unicode=False):
    """
    Turn *value* into a URL-friendly slug.

    Converts to ASCII unless ``allow_unicode`` is True, drops every character
    that is not alphanumeric, underscore, hyphen or whitespace, lowercases,
    strips surrounding whitespace, and collapses whitespace/hyphen runs into
    single hyphens.
    """
    text = str(value)
    if allow_unicode:
        text = unicodedata.normalize('NFKC', text)
    else:
        ascii_bytes = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore')
        text = ascii_bytes.decode('ascii')
    text = re.sub(r'[^\w\s-]', '', text)
    text = text.strip().lower()
    return re.sub(r'[-\s]+', '-', text)
def zip_file(src_dir):
    """
    Compress the given folder into '<slug>.zip' in the current directory.
    :param src_dir:
    :return:
    """
    archive_name = slugify(src_dir) + '.zip'
    with zipfile.ZipFile(archive_name, 'w', zipfile.ZIP_DEFLATED) as archive:
        for dirpath, dirnames, filenames in os.walk(src_dir):
            # Store entries relative to src_dir so the archive root is clean.
            rel = dirpath.replace(src_dir, '')
            if rel:
                rel += os.sep
            for filename in filenames:
                archive.write(os.path.join(dirpath, filename), rel + filename)
def secure_filetype(file):
    """
    Check that the uploaded file claims to be a PNG/JPEG image, by both
    filename extension and declared MIME type.
    :param file:
    :return: bool
    """
    allowed_exts = ['png', 'jpg', 'jpeg']
    allowed_mimetypes = ["image/jpeg", "image/jpg", "image/png"]
    extension = file.filename.rsplit('.', 1)[-1]
    return extension in allowed_exts and file.mimetype in allowed_mimetypes
def secure_filesize(filepath):
    """
    Check that the file at *filepath* does not exceed MAX_FILESIZE.
    :param filepath:
    :return: bool
    """
    size = os.path.getsize(filepath)
    return size <= MAX_FILESIZE
def check_and_save_img(file, path):
    """
    save file as an image if valid
    :param file:
    :param path:
    :return: '' on success, otherwise a human-readable error message
    """
    img_bytes = file.read()
    if not secure_filetype(file):
        return "Not an image. "
    # Re-encode through Pillow so only parseable image data reaches disk.
    input_image = Image.open(io.BytesIO(img_bytes))
    input_image.save(path)
    # NOTE(review): the size check runs only after the file has been written;
    # an oversized upload is reported but left on disk -- TODO confirm intended.
    if not secure_filesize(path):
        return "Too large given file. "
    return ''
|
14,295 | 0bd869b078824465d917f71e4f5e75ed36cde85c | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='DjangoAdminLog',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('action_time', models.DateTimeField()),
('object_id', models.TextField(null=True, blank=True)),
('object_repr', models.CharField(max_length=200)),
('action_flag', models.SmallIntegerField()),
('change_message', models.TextField()),
],
options={
'db_table': 'django_admin_log',
'managed': False,
},
),
migrations.CreateModel(
name='DjangoContentType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('app_label', models.CharField(max_length=100)),
('model', models.CharField(max_length=100)),
],
options={
'db_table': 'django_content_type',
'managed': False,
},
),
migrations.CreateModel(
name='DjangoMigrations',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('app', models.CharField(max_length=255)),
('name', models.CharField(max_length=255)),
('applied', models.DateTimeField()),
],
options={
'db_table': 'django_migrations',
'managed': False,
},
),
migrations.CreateModel(
name='DjangoSession',
fields=[
('session_key', models.CharField(max_length=40, serialize=False, primary_key=True)),
('session_data', models.TextField()),
('expire_date', models.DateTimeField()),
],
options={
'db_table': 'django_session',
'managed': False,
},
),
migrations.CreateModel(
name='Breedchicken',
fields=[
('id', models.IntegerField(serialize=False, primary_key=True)),
('title', models.CharField(max_length=50, null=True, blank=True)),
('abstract', models.CharField(max_length=255, null=True, blank=True)),
('content', models.TextField(null=True, blank=True)),
('publish_time', models.DateTimeField(null=True, blank=True)),
('click_times', models.IntegerField(null=True, blank=True)),
('src_img', models.CharField(max_length=255, null=True, blank=True)),
('type', models.CharField(max_length=20, null=True, blank=True)),
],
),
migrations.CreateModel(
name='Breedfish',
fields=[
('id', models.IntegerField(serialize=False, primary_key=True)),
('title', models.CharField(max_length=50, null=True, blank=True)),
('abstract', models.CharField(max_length=255, null=True, blank=True)),
('content', models.TextField(null=True, blank=True)),
('publish_time', models.DateTimeField(null=True, blank=True)),
('click_times', models.IntegerField(null=True, blank=True)),
('src_img', models.CharField(max_length=255, null=True, blank=True)),
('type', models.CharField(max_length=20, null=True, blank=True)),
],
),
migrations.CreateModel(
name='BreedImprovement',
fields=[
('bi_id', models.IntegerField(serialize=False, primary_key=True)),
('title', models.CharField(max_length=50, null=True, blank=True)),
('abstract', models.CharField(max_length=255, null=True, blank=True)),
('content', models.TextField(null=True, blank=True)),
('publish_time', models.DateTimeField(null=True, blank=True)),
('click_times', models.IntegerField(null=True, blank=True)),
('src_img', models.CharField(max_length=255, null=True, blank=True)),
('type', models.CharField(max_length=20, null=True, blank=True)),
],
),
migrations.CreateModel(
name='Breedpig',
fields=[
('id', models.IntegerField(serialize=False, primary_key=True)),
('title', models.CharField(max_length=50, null=True, blank=True)),
('abstract', models.CharField(max_length=255, null=True, blank=True)),
('content', models.TextField(null=True, blank=True)),
('publish_time', models.DateTimeField(null=True, blank=True)),
('click_times', models.IntegerField(null=True, blank=True)),
('src_img', models.CharField(max_length=255, null=True, blank=True)),
('type', models.CharField(max_length=20, null=True, blank=True)),
],
),
migrations.CreateModel(
name='Camphor',
fields=[
('camphort_id', models.IntegerField(serialize=False, primary_key=True)),
('timestp', models.CharField(max_length=20, null=True, blank=True)),
('price', models.FloatField(null=True, blank=True)),
('scale', models.FloatField(null=True, blank=True)),
('sales', models.FloatField(null=True, blank=True)),
],
),
migrations.CreateModel(
name='Cedar',
fields=[
('ced_id', models.IntegerField(serialize=False, primary_key=True)),
('timestp', models.CharField(max_length=20, null=True, blank=True)),
('price', models.FloatField(null=True, blank=True)),
('scale', models.FloatField(null=True, blank=True)),
('sales', models.FloatField(null=True, blank=True)),
],
),
migrations.CreateModel(
name='Climate',
fields=[
('cli_id', models.IntegerField(serialize=False, primary_key=True)),
('timestp', models.CharField(max_length=20, null=True, blank=True)),
('temperature', models.FloatField(null=True, blank=True)),
('humid', models.FloatField(null=True, blank=True)),
('weather', models.CharField(max_length=50, null=True, blank=True)),
],
),
migrations.CreateModel(
name='Corn',
fields=[
('corn_id', models.IntegerField(serialize=False, primary_key=True)),
('timestp', models.CharField(max_length=20, null=True, blank=True)),
('price', models.FloatField(null=True, blank=True)),
('scale', models.FloatField(null=True, blank=True)),
('production', models.FloatField(null=True, blank=True)),
],
),
migrations.CreateModel(
name='DayePrivet',
fields=[
('day_id', models.IntegerField(serialize=False, primary_key=True)),
('timestp', models.CharField(max_length=20, null=True, blank=True)),
('price', models.FloatField(null=True, blank=True)),
('scale', models.FloatField(null=True, blank=True)),
('sales', models.FloatField(null=True, blank=True)),
],
),
migrations.CreateModel(
name='Environment',
fields=[
('env_id', models.IntegerField(serialize=False, primary_key=True)),
('title', models.CharField(max_length=50, null=True, blank=True)),
('abstract', models.CharField(max_length=255, null=True, blank=True)),
('content', models.TextField(null=True, blank=True)),
('publish_time', models.DateTimeField(null=True, blank=True)),
('click_times', models.IntegerField(null=True, blank=True)),
('src_img', models.CharField(max_length=255, null=True, blank=True)),
('type', models.CharField(max_length=20, null=True, blank=True)),
],
),
migrations.CreateModel(
name='FeedCorn',
fields=[
('fc_id', models.IntegerField(serialize=False, primary_key=True)),
('timestp', models.CharField(max_length=20, null=True, blank=True)),
('price', models.FloatField(null=True, blank=True)),
],
),
migrations.CreateModel(
name='FranceGreen',
fields=[
('fg_id', models.IntegerField(serialize=False, primary_key=True)),
('timestp', models.CharField(max_length=20, null=True, blank=True)),
('price', models.FloatField(null=True, blank=True)),
('scale', models.FloatField(null=True, blank=True)),
('sales', models.FloatField(null=True, blank=True)),
],
),
migrations.CreateModel(
name='FranceGreen2',
fields=[
('fg_id', models.IntegerField(serialize=False, primary_key=True)),
('timestp', models.CharField(max_length=20, null=True, blank=True)),
('price', models.FloatField(null=True, blank=True)),
('scale', models.FloatField(null=True, blank=True)),
('sales', models.FloatField(null=True, blank=True)),
],
),
migrations.CreateModel(
name='Ginkgo',
fields=[
('ginkgo_id', models.IntegerField(serialize=False, primary_key=True)),
('timestp', models.CharField(max_length=20, null=True, blank=True)),
('price', models.FloatField(null=True, blank=True)),
('scale', models.FloatField(null=True, blank=True)),
('sales', models.FloatField(null=True, blank=True)),
],
),
migrations.CreateModel(
name='Hackberry',
fields=[
('hac_id', models.IntegerField(serialize=False, primary_key=True, db_column='Hac_id')),
('timestp', models.CharField(max_length=20, null=True, blank=True)),
('price', models.FloatField(null=True, blank=True)),
('scale', models.FloatField(null=True, blank=True)),
('sales', models.FloatField(null=True, blank=True)),
],
),
migrations.CreateModel(
name='InsidePig',
fields=[
('in_id', models.IntegerField(serialize=False, primary_key=True)),
('timestp', models.CharField(max_length=20, null=True, blank=True)),
('price', models.FloatField(null=True, blank=True)),
],
),
migrations.CreateModel(
name='LagerstroemiaIndica',
fields=[
('lid', models.IntegerField(serialize=False, primary_key=True)),
('timestp', models.CharField(max_length=20, null=True, blank=True)),
('price', models.FloatField(null=True, blank=True)),
('scale', models.FloatField(null=True, blank=True)),
('sales', models.FloatField(null=True, blank=True)),
],
),
migrations.CreateModel(
name='MixedPig',
fields=[
('mp_id', models.IntegerField(serialize=False, primary_key=True)),
('timestp', models.CharField(max_length=20, null=True, blank=True)),
('price', models.FloatField(null=True, blank=True)),
],
),
migrations.CreateModel(
name='NationalPolicy',
fields=[
('np_id', models.IntegerField(serialize=False, primary_key=True)),
('title', models.CharField(max_length=50, null=True, blank=True)),
('content', models.TextField(null=True, blank=True)),
],
),
migrations.CreateModel(
name='Osmanthus',
fields=[
('osmanthus_id', models.IntegerField(serialize=False, primary_key=True)),
('timestp', models.CharField(max_length=20, null=True, blank=True)),
('price', models.FloatField(null=True, blank=True)),
('scale', models.FloatField(null=True, blank=True)),
('sales', models.FloatField(null=True, blank=True)),
],
),
migrations.CreateModel(
name='OutsidePig',
fields=[
('op_id', models.IntegerField(serialize=False, primary_key=True)),
('timestp', models.CharField(max_length=20, null=True, blank=True)),
('price', models.FloatField(null=True, blank=True)),
],
),
migrations.CreateModel(
name='Pea',
fields=[
('pea_id', models.IntegerField(serialize=False, primary_key=True)),
('timestp', models.CharField(max_length=20, null=True, blank=True)),
('price', models.FloatField(null=True, blank=True)),
('scale', models.FloatField(null=True, blank=True)),
('production', models.FloatField(null=True, blank=True)),
],
),
migrations.CreateModel(
name='PhotiniaFruticosa',
fields=[
('pho_id', models.IntegerField(serialize=False, primary_key=True)),
('timestp', models.CharField(max_length=20, null=True, blank=True)),
('price', models.FloatField(null=True, blank=True)),
('scale', models.FloatField(null=True, blank=True)),
('sales', models.FloatField(null=True, blank=True)),
],
),
migrations.CreateModel(
name='Pittosporum',
fields=[
('pit_id', models.IntegerField(serialize=False, primary_key=True)),
('timestp', models.CharField(max_length=20, null=True, blank=True)),
('price', models.FloatField(null=True, blank=True)),
('scale', models.FloatField(null=True, blank=True)),
('sales', models.FloatField(null=True, blank=True)),
],
),
migrations.CreateModel(
name='Podocarpus',
fields=[
('pod_id', models.IntegerField(serialize=False, primary_key=True)),
('timestp', models.CharField(max_length=20, null=True, blank=True)),
('price', models.FloatField(null=True, blank=True)),
('scale', models.FloatField(null=True, blank=True)),
('sales', models.FloatField(null=True, blank=True)),
],
),
migrations.CreateModel(
name='PrivetOfKingson',
fields=[
('pok_id', models.IntegerField(serialize=False, primary_key=True)),
('timestp', models.CharField(max_length=20, null=True, blank=True)),
('price', models.FloatField(null=True, blank=True)),
('scale', models.FloatField(null=True, blank=True)),
('sales', models.FloatField(null=True, blank=True)),
],
),
migrations.CreateModel(
name='ProductionControl',
fields=[
('pc_id', models.IntegerField(serialize=False, primary_key=True)),
('title', models.CharField(max_length=50, null=True, blank=True)),
('abstract', models.CharField(max_length=255, null=True, blank=True)),
('content', models.TextField(null=True, blank=True)),
('publish_time', models.DateTimeField(null=True, blank=True)),
('click_times', models.IntegerField(null=True, blank=True)),
('src_img', models.CharField(max_length=255, null=True, blank=True)),
('type', models.CharField(max_length=20, null=True, blank=True)),
],
),
migrations.CreateModel(
name='Redwood',
fields=[
('red_id', models.IntegerField(serialize=False, primary_key=True)),
('timestp', models.CharField(max_length=20, null=True, blank=True)),
('price', models.FloatField(null=True, blank=True)),
('scale', models.FloatField(null=True, blank=True)),
('sales', models.FloatField(null=True, blank=True)),
],
),
migrations.CreateModel(
name='SoybeanMeal',
fields=[
('soy_id', models.IntegerField(serialize=False, primary_key=True)),
('timestp', models.CharField(max_length=20, null=True, blank=True)),
('price', models.FloatField(null=True, blank=True)),
],
),
migrations.CreateModel(
name='Trend',
fields=[
('tr_id', models.IntegerField(serialize=False, primary_key=True)),
('title', models.CharField(max_length=50, null=True, blank=True)),
('abstract', models.CharField(max_length=255, null=True, blank=True)),
('content', models.TextField(null=True, blank=True)),
('publish_time', models.DateTimeField(null=True, blank=True)),
('click_times', models.IntegerField(null=True, blank=True)),
('src_img', models.CharField(max_length=255, null=True, blank=True)),
('type', models.CharField(max_length=20, null=True, blank=True)),
],
),
migrations.CreateModel(
name='Visits',
fields=[
('id', models.IntegerField(serialize=False, primary_key=True)),
('times', models.IntegerField(null=True, blank=True)),
],
),
migrations.CreateModel(
name='Wheat',
fields=[
('wheat_id', models.IntegerField(serialize=False, primary_key=True)),
('timestp', models.CharField(max_length=20, null=True, blank=True)),
('price', models.FloatField(null=True, blank=True)),
('scale', models.FloatField(null=True, blank=True)),
('production', models.FloatField(null=True, blank=True)),
],
),
]
|
14,296 | bcd03881a30a88ad5423446e8b2b803cb5e71a96 |
'''
ABCCDEABAA
ABCDE
'''
t, s = input(), input()  # NOTE(review): `t` is read but never used -- TODO confirm
for i in range(len(s)):
    print()  # blank line separating consecutive rotations
    for j in range(len(s)):
        # Print the i-th left rotation of s, one character per line.
        print(s[(i + j) % len(s)])
14,297 | bfe75dc32398b320767822176eb6e806d0b9a63f | import paho.mqtt.client as mqtt
import time
import ssl
import json
from sense_hat import SenseHat
sense = SenseHat()
from cpu_temp import CPUTemp
cput = CPUTemp()
cput.__init__()
cput.open()
host = "node02.myqtthub.com"
port = 1883
clean_session = True
client_id = "set client id from client.json"
user_name = "insert myqtthub username"
password = "insert myqtthub password"
def on_connect (client, userdata, flags, rc):
    """ Callback called when connection/reconnection is detected.

    Subscribes (always do this in on_connect so the subscription is restored
    after a reconnect), and sets `client.connected_flag` on success so the
    main script's wait loop can proceed. On failure, reports and exits.
    """
    print ("Connect %s result is: %s" % (host, rc))
    client.subscribe("some/message/to/publish")
    if rc == 0:
        client.connected_flag = True
        print ("Connection good!")
        return
    # Fixed two defects on the failure path: the format string had two
    # placeholders but was given a single value ("% rc"), raising TypeError,
    # and `sys` was never imported anywhere in the script.
    import sys
    print ("Failed to connect to %s, error was, rc=%s" % (host, rc))
    sys.exit (-1)
# Define clientId, host, user and password
client = mqtt.Client (client_id = client_id, clean_session = clean_session)
client.username_pw_set (user_name, password)
client.on_connect = on_connect
# configure TLS connection
# client.tls_set (cert_reqs=ssl.CERT_REQUIRED, tls_version=ssl.PROTOCOL_TLSv1_2)
# client.tls_insecure_set (False)
# port = 8883
# connect using standard unsecure MQTT with keepalive to 60
client.connect (host, port, keepalive = 60)
client.connected_flag = False
while not client.connected_flag: #wait in loop
client.loop()
time.sleep (1)
# publish message (optionally configuring qos=1, qos=2 and retain=True/client.loop ()
# Publish a corrected temperature reading every 15 seconds, forever.
while True:
    try:
        #get cpu temperature
        c = cput.get_temperature()
        #get temperature and humidity from sensehat
        p = sense.get_temperature_from_pressure()
        h = sense.get_temperature_from_humidity()
        # factor = 3 appears to work if the RPi is in a case
        # change to factor = 5 appears to work for RPi's not in a case
        factor = 5
        # temp_calc is accurate to +/- 2 deg C.
        # Average the two SenseHat readings and subtract a CPU-heat correction.
        temp_calc = ((p+h)/2) - (c/factor)
        #temp = temp_calc
        temp = temp_calc
        client.publish ("some/message/to/publish", '{"%s" : "%s"}' % (client_id, temp))
        print('Output: {"%s" : "%s"}' % (client_id, temp))
        client.loop ()
    except Exception:
        # NOTE(review): swallows every error with a generic message; consider
        # logging the actual exception.
        print("error")
    time.sleep(15)
# NOTE(review): unreachable (the loop above never breaks), and `ret` is never
# defined -- this line would raise NameError if it were ever reached.
print ("Publish operation finished with ret=%s" % ret)
# close connection
client.disconnect ()
14,298 | 2bf9f845e953f7a44150b978a1394d3dbedd585d | n = int(input())
# Split the 4-digit number into its first two and last two digits,
# then classify which halves could be a month (1..12): MMYY, YYMM, both, or neither.
r = n // 100
l = n % 100
if r <= 12 and 1 <= r:
    if l <= 12 and 1 <= l:
        print("AMBIGUOUS")
    else:
        print("MMYY")
else:
    if l <= 12 and 1 <= l:
        print("YYMM")
    else:
        print("NA")
14,299 | 010839e0fd81c437ce130c4b86f2ece358ec207f | from fastai.text import *
import sklearn.feature_extraction.text as sklearn_text
import pickle
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.feature_extraction.text import CountVectorizer
from dataclasses import asdict, dataclass, field, fields
from sklearn.metrics import mean_squared_error
class BaselineModel:
    """Bag-of-ngrams baseline over a fastai `LabelLists` rating dataset.

    Vectorizes the pre-tokenized train/valid documents with 1-3 gram counts,
    then fits either LinearRegression (regression=True, reports RMSE) or
    LogisticRegression (reports accuracy) on the binarized counts.
    """
    def __init__(self, rating: LabelLists=None, regression:bool = False):
        self.rating = rating
        self.regression = regression;
        # `noop` (fastai) disables preprocessing/tokenization: the documents
        # are already tokenized lists, so the vectorizer only counts n-grams.
        vectorizer = CountVectorizer(ngram_range=(1,3), preprocessor=noop, tokenizer=noop, max_features=800000)
        train_docs = self.rating.train.x
        # Map numericalized token ids back to token strings via the vocab.
        train_words = [[self.rating.vocab.itos[o] for o in doc.data] for doc in train_docs]
        valid_docs = self.rating.valid.x
        valid_words = [[self.rating.vocab.itos[o] for o in doc.data] for doc in valid_docs]
        self.train_veczr = vectorizer.fit_transform(train_words)
        self.valid_veczr = vectorizer.transform(valid_words)
        self.vocab = vectorizer.get_feature_names()
    def train(self):
        """Fit the baseline model and print validation RMSE or accuracy."""
        y=self.rating.train.y
        if self.regression:
            # fit model
            m = LinearRegression()
            # .sign() binarizes the sparse count matrix (presence/absence).
            m.fit(self.train_veczr.sign(), y.items);
            # get predictions
            preds = m.predict(self.valid_veczr.sign())
            error = mean_squared_error(self.rating.valid.y.items, preds, squared=False)
            print("RMSE: ", error)
        else:
            # fit model
            yes = y.c2i['yes']
            no = y.c2i['no']  # NOTE(review): unused
            m = LogisticRegression(C=0.1, dual=False, solver = 'liblinear')
            m.fit(self.train_veczr.sign(), y.items);
            # get predictions
            preds = m.predict(self.valid_veczr.sign())
            # Validation labels as booleans: True where the class is 'yes'.
            valid_labels = [label == yes for label in self.rating.valid.y.items]
            # check accuracy
            accuracy = (preds==valid_labels).mean()
            print(f'Accuracy = {accuracy} for Logistic Regression, with binarized trigram counts from `CountVectorizer`' )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.