index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
5,700 | a17c448b068b28881f9d0c89be6037503eca3974 | import tensorflow.keras
from preprocessing_and_training.train import reshape_and_predict
import glob
""" Script for prediction - testing and importing the trained model from train.py """
def make_predictions(file_list, model, is_game=False):
""" Predictions with model that is locally saved
:param file_list: path to files we want to predict
:param model: Trained model for our predictions
:return: None """
temp_list = []
for wav_file in glob.glob(file_list):
temp_list.append(reshape_and_predict(filepath=wav_file, saved_model=model, is_game=is_game))
return temp_list
def make_single_prediction(wav_file, model, is_game):
""" Predictions with model that is locally saved
:param wav_file: wav-file we want to predict
:param model: Trained model for our predictions
:return: None """
return reshape_and_predict(filepath=wav_file, saved_model=model, is_game=is_game)
|
5,701 | b3a2db38e2074b02c8837bfce85d06598a7b194d | #!/usr/bin/env python
import rospy
from op3_utils.op3_utils import *
from vision import *
import cv2
import sys
import rosnode
#Yellow >> Right
#Red >> Left
class States:
INIT = -1
GET_READY = 1
FIND_BAR = 2
WALK_2_BAR = 3
WALK_SIDEWAYS = 4
PICK_BAR = 5
WALK_WITH_BAR = 6
LIFT_BAR = 7
WALK_2_FINISH = 8
END = 99
# Iinitialize Node
rospy.init_node('fira_weightlifting')
# Create robot ('package_name')
robot = Robot('fira_weightlifting')
while not rospy.is_shutdown():
if '/op3_manager' in rosnode.get_node_names():
rospy.loginfo('Found op3 manager')
break
else:
rospy.loginfo('Waiting for op3 manager')
rospy.Rate(20).sleep()
# Make sure every publisher has registered to their topic,
# avoiding lost messages
rospy.sleep(4)
DEGREE2RADIAN = np.pi / 180
def init():
# Set ctrl modules of all actions to joint, so we can reset robot position
robot.setGeneralControlModule("action_module")
robot.moveGripper(left=100.0,right=100.0)
#robot.setGrippersPos(left=0.0, right=0.0)
# >0 is opened
# Call initial robot position
robot.playMotion(1, wait_for_end=True)
# Set ctrl module to walking, this actually only sets the legs
robot.walk_set_param_pub.publish(robot.walking_params[0])
robot.setGeneralControlModule("walking_module")
# Set joint modules of head joints to none so we can control them directly
robot.setJointsControlModule(["head_pan", "head_tilt"], ["none", "none"])
robot.setJointPos(["head_tilt"], [-0.7])
#0 is looking straight forward, <0 is looking down
rospy.sleep(1.0)
tickrate = 30
rate = rospy.Rate(tickrate)
currState = States.INIT
cap = cv2.VideoCapture(0)
current_head_tilt = -0.7
while not rospy.is_shutdown():
ret, frame = cap.read()
hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
frame = cv2.resize(frame, (0,0),fx=0.5,fy=0.5, interpolation=cv2.INTER_CUBIC)
hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
cnts_yellow = findYellowCnts(hsv_frame)
cnts_red = findRedCnts(hsv_frame)
delta_head = 0
delta_lr = 0
bar_slope = 0
if (cnts_yellow is not None and cnts_red is not None):
cx_y, cy_y = findCentroid(cnts_yellow)
cx_r, cy_r = findCentroid(cnts_red)
delta_lr = focusCenter(hsv_frame, cx_y, cx_r)
#print('delta_lr: ' + str(delta_lr))
delta_head = headTilt(hsv_frame, cy_y, cy_r)
bar_slope = slope(cx_y, cy_y, cx_r, cy_r)
cv2.drawContours(hsv_frame, cnts_yellow, -1, (255,0,0), 2)
cv2.drawContours(hsv_frame, cnts_red, -1, (10,235,290), 2)
cv2.circle(hsv_frame, (int((cx_y + cx_r) / 2), int((cy_y + cy_r) / 2)),5,(130, 40, 255), -1)
cv2.circle(hsv_frame, (int(frame.shape[1]/2), int(frame.shape[0]/2)),5,(130, 40, 255), -1)
cv2.circle(hsv_frame, (cx_y, cy_y),5,(130, 40, 255), -1)
cv2.circle(hsv_frame, (cx_r, cy_r),5,(130, 40, 255), -1)
#cv2.imshow('Current view',hsv_frame)
#cv2.waitKey(33)
if currState == States.INIT:
init()
currState = States.GET_READY
elif currState == States.GET_READY:
print("[GET_READY]")
if robot.get_pressed_button() == 'start':
currState = States.FIND_BAR
#if cv2.waitKey(33) &0xFF == ord('f'):
# currState = States.FIND_BAR
elif currState == States.FIND_BAR:
print("[FIND_BAR]")
robot.walking_params.append(robot.loadWalkingParams('param.yaml'))
robot.setGeneralControlModule("walking_module")
robot.walking_params[1].x_move_amplitude = 0.005
robot.walking_params[1].balance_enable = False
robot.walking_params[1].y_move_amplitude = 0.003
#robot.walking_params[1].angle_move_amplitude = 1.75 * DEGREE2RADIAN
robot.walk_set_param_pub.publish(robot.walking_params[1])
rospy.sleep(2)
robot.walkStart()
currState = States.WALK_2_BAR
elif currState == States.WALK_2_BAR:
print("[WALK_2_BAR]")
#if(delta_head < -10):
head_tilt_delta = delta_head * 0.01
current_head_tilt += head_tilt_delta
current_head_tilt = max(current_head_tilt,-1.2)
print('current head: {}, head_tilt_delta: {}'.format(current_head_tilt,head_tilt_delta))
robot.moveHead(None, current_head_tilt)
print("delta_lr: {}".format(delta_lr))
ratio = 1
angle_delta = delta_lr * ratio
print("*********************************************")
robot.walking_params[1].angle_move_amplitude = angle_delta
robot.walk_set_param_pub.publish(robot.walking_params[1])
print("angle_move_amp: ", angle_delta)
'''
if(delta_lr > 20):
print("GO LEFT")
robot.walking_params[1].angle_move_amplitude = angle_delta
robot.walk_set_param_pub.publish(robot.walking_params[1])
print("angle_move_amp: ", angle_delta)
elif(delta_lr < -20):
print("GO RIGHT")
robot.walking_params[1].angle_move_amplitude = angle_delta
robot.walk_set_param_pub.publish(robot.walking_params[1])
print("angle_move_amp: ", angle_delta)
else:
print("GO FORWARD")
robot.walking_params[1].angle_move_amplitude = 0
robot.walk_set_param_pub.publish(robot.walking_params[1])
print("angle_move_amp: ", angle_delta)
'''
if(current_head_tilt == -1.2):
robot.walkStop()
robot.onlineWalkSetup(x=0.02, z=-0.025, foot_dist=0.08, foot_height=0.05)
currState = States.WALK_SIDEWAYS
continue
elif currState == States.WALK_SIDEWAYS:
ret, frame = cap.read()
print("bar_slope: {}".format(bar_slope))
bar_x = (cx_y + cx_r) / 2
bar_y = (cy_y + cy_r) / 2
print("bar_location: ({},{})".format(bar_x,bar_y))
x_err = bar_x - hsv_frame.shape[1] / 2
y_err = bar_y - hsv_frame.shape[0] *2 / 3
print("bar_error: ({},{})".format(x_err,y_err))
'''
if y_err > 20:
print('back')
robot.onlineWalkCommand(direction="backward", start_leg="right", step_num=2,
front_length=0.02, step_time=0.5)
rospy.sleep(2)
'''
if bar_slope <= -0.07:
print('turn left')
robot.onlineWalkCommand(direction="turn_left", start_leg="left", step_num=2,
front_length=0.0, step_angle=10.0,step_time=0.4)
rospy.sleep(2)
elif bar_slope > 0.07:
print('turn right')
robot.onlineWalkCommand(direction="turn_right", start_leg="right", step_num=2,
front_length=0.0, step_angle=10.0,step_time=0.4)
rospy.sleep(2)
'''
elif x_err > 30:
print('shift right')
robot.onlineWalkCommand(direction="right", start_leg="right", step_num=2,
side_length=0.01, step_time=0.4)
rospy.sleep(2.5)
elif x_err < -30:
print('shift left')
robot.onlineWalkCommand(direction="left", start_leg="left", step_num=2,
side_length=0.01, step_time=0.4)
rospy.sleep(2.5)
elif y_err < -20:
print('forward')
robot.onlineWalkCommand(direction="forward", start_leg="right", step_num=2,
front_length=0.02, step_time=0.4)
rospy.sleep(2)
'''
else:
print('success!!!')
# TODO removed sleep here
#rospy.sleep(6)
currState = States.PICK_BAR
ret, frame = cap.read()
ret, frame = cap.read()
ret, frame = cap.read()
ret, frame = cap.read()
ret, frame = cap.read()
ret, frame = cap.read()
ret, frame = cap.read()
ret, frame = cap.read()
ret, frame = cap.read()
ret, frame = cap.read()
ret, frame = cap.read()
ret, frame = cap.read()
'''
print("[WALK_SIDEWAYS]")
print("bar_slope: {}".format(bar_slope))
if(bar_slope > 0.1):
print("Turn facing right")
robot.walking_params[1].x_move_amplitude = 0
robot.walking_params[1].y_move_amplitude = -0.01
robot.walk_set_param_pub.publish(robot.walking_params[1])
rospy.sleep(2)
robot.walkStart()
rospy.sleep(2)
robot.walkStop()
elif(bar_slope < -0.1):
print("Turn facing left")
robot.walking_params[1].x_move_amplitude = 0
robot.walking_params[1].y_move_amplitude = 0.01
robot.walk_set_param_pub.publish(robot.walking_params[1])
rospy.sleep(2)
robot.walkStart()
rospy.sleep(2)
robot.walkStop()
else:
print("Keep facing forward")
currState = States.PICK_BAR
'''
elif currState == States.PICK_BAR:
rospy.loginfo("[PICK_BAR]")
# TODO testing
#rospy.sleep(2)
robot.setGeneralControlModule("none")
rospy.sleep(2)
robot.setGeneralControlModule("action_module")
robot.playMotion(86, wait_for_end=True)
robot.playMotion(87, wait_for_end=True)
rospy.sleep(1.0)
robot.moveGripper(left=40.0,right=40.0)
rospy.sleep(0.5)
robot.moveGripper(left=20.0,right=20.0)
rospy.sleep(1.0)
robot.playMotion(90, wait_for_end=True)
rospy.sleep(1.0)
currState = States.WALK_WITH_BAR
elif currState == States.WALK_WITH_BAR:
print("[WALK_WITH_BAR]")
robot.walking_params.append(robot.loadWalkingParams('pickup_param.yaml'))
#robot.walking_params[2].hip_pitch_offset = -5
robot.walking_params[2].x_move_amplitude = 0.005
robot.walking_params[2].y_move_amplitude = 0.000
#TODO change the a move amplitude to 1
robot.walking_params[2].angle_move_amplitude = 0 * DEGREE2RADIAN
robot.walk_set_param_pub.publish(robot.walking_params[2])
# Set ctrl module to walking, this actually only sets the legs
robot.setJointsControlModule(["r_hip_yaw","l_hip_yaw","r_hip_roll","l_hip_roll","r_hip_pitch",
"l_hip_pitch","r_knee","l_knee","r_ank_pitch","l_ank_pitch","r_ank_roll","l_ank_roll"],
["walking_module"])
print(robot.walking_params[2])
rospy.sleep(3)
robot.walkStart()
rospy.sleep(3)
robot.moveGripper(left=15.0,right=15.0)
rospy.sleep(9)
robot.walkStop()
currState = States.LIFT_BAR
elif currState == States.LIFT_BAR:
print("[LIFT_BAR]")
robot.setGeneralControlModule("none")
robot.setGeneralControlModule("action_module")
robot.playMotion(89, wait_for_end=True)
robot.setJointsControlModule(['head_pan', 'head_tilt'],['none','none'])
robot.moveHead(0,1.5)
currState = States.WALK_2_FINISH
elif currState == States.WALK_2_FINISH:
print("WALK_2_FINISH")
robot.walking_params.append(robot.loadWalkingParams('pickup_param.yaml'))
robot.walking_params[3].hip_pitch_offset = 1 * DEGREE2RADIAN #1.5
robot.walking_params[3].x_move_amplitude = 0
robot.walking_params[3].balance_enable = True
robot.walk_set_param_pub.publish(robot.walking_params[3])
# Set ctrl module to walking, this actually only sets the legs
robot.setJointsControlModule(["r_hip_yaw","l_hip_yaw","r_hip_roll","l_hip_roll","r_hip_pitch",
"l_hip_pitch","r_knee","l_knee","r_ank_pitch","l_ank_pitch","r_ank_roll","l_ank_roll"],
["walking_module"])
rospy.sleep(5)
robot.walkStart()
rospy.sleep(3)
robot.walking_params[3].x_move_amplitude = 0.005
robot.walk_set_param_pub.publish(robot.walking_params[3])
rospy.sleep(1117)
robot.walkStop()
currState = States.END
rate.sleep()
elif currState == States.END:
print("[END]")
#robot.walkStop()
rate.sleep()
|
5,702 | b5160a2574dd2c4eec542d7aca8288da0feadaba | # Кицела Каролина ИВТ 3 курс
# Вариант 6
# Найти сумму всех чисел с плавающей точкой
b = ("name", " DeLorean DMC-12", "motor_pos", "rear", "n_of_wheels", 4,
"n_of_passengers", 2, "weight", 1.230, "height", 1.140, "length", 4.216,
"width", 1.857, "max_speed", 177)
print sum(b[9:16:2])
|
5,703 | 32fc0db68c32c2e644f9c1c2318fbeff41a0543d | import pygame
from pygame import Rect, Color
from pymunk import Body, Poly
from config import WIDTH, HEIGHT
class Ground:
def __init__ (self, space):
# size
self.w = WIDTH - 20
self.h = 25
# position
self.x = 10
self.y = HEIGHT - self.h
# pygame rectangle
self.rect = Rect (self.x, self.y, self.w, self.h)
self.color = Color (100, 6, 107)
# physics
self.rigidbody = Body (body_type=Body.STATIC)
self.rigidbody.position = self.x + self.w / 2, self.y
self.hitbox = Poly.create_box (self.rigidbody, (self.w, self.h))
self.hitbox.elasticity = 0
self.hitbox.mass = 1
self.hitbox.friction = 0
space.add (self.rigidbody, self.hitbox)
def update (self, dt):
return
def draw (self, window):
pygame.draw.rect (window, self.color, self.rect)
return |
5,704 | aa17e22bc13436333b1db4aee41eeced373119a8 | from selenium import webdriver
import math
import time
browser = webdriver.Chrome()
website = 'http://suninjuly.github.io/find_link_text'
link_text = str(math.ceil(math.pow(math.pi, math.e)*10000))
browser.get(website)
find_link = browser.find_element_by_link_text(link_text)
find_link.click()
input_first_name = browser.find_element_by_tag_name('input')
input_first_name.send_keys('Timur')
input_last_name = browser.find_element_by_name('last_name')
input_last_name.send_keys('Atabaev')
input_city = browser.find_element_by_class_name('city')
input_city.send_keys('Tashkent')
input_country = browser.find_element_by_id('country')
input_country.send_keys('Uzbekistan')
button = browser.find_element_by_css_selector('button.btn')
button.click()
|
5,705 | 2df679fc3407c15f5d0c006e9da8d1fc74bcf875 | from __future__ import unicode_literals
import json, alice_static
import logging
from random import choice
# Импортируем подмодули Flask для запуска веб-сервиса.
from flask import Flask, request
app = Flask(__name__)
logging.basicConfig(level=logging.DEBUG)
# Хранилище данных о сессиях.
sessionStorage = {}
# Задаем параметры приложения Flask.
@app.route("/", methods=['POST'])
def format_new_question(quest):
question = choice(alice_static.questions)
return question.format(quest=quest)
def timerout()
user_storage['try'] += 1
timeup = choice(alice_static.answer_timeup)
right_answer=choice(alice_static.right_answer)
real_answer=quest_data[user_storage['answer']]
again=choice(alice_static.again)
response.set_text('{timeup}\n{right_answer}{real_answer}\n{again}'.format(
timeup=timeup,
right_answer=right_answer
real_answer=real_answer
again=again
))
buttons = [{
"title": "Да",
"hide": True
}, {
"title": "Нет",
"hide": True
}]
response.set_buttons(buttons)
#Выводим кнопочки
user_storage['state'] = REPLY
#Меняем состояние пользователя
def handle_dialog(request, response, user_storage):
if request.is_new_session:
# Это новый пользователь.
# Инициализируем сессию и поприветствуем его.
greetings = choice(alice_static.greetings)
user_storage = {
'quest': quest,
'state': STOP,
#STOP-вопрос не задан Wait-ожидается ответ Reply - ответ получен
'wins': 0,
'tries':0
}
response.set_text('{greetings}'.format(
greetings=greetings
))
if user_storage.get('state') == STOP:
newquest = choice(alice_static.newquest)
response.set_text('{newquest}'.format(
newquest=newquest,
))
buttons = [{
"title": "Да",
"hide": True
}, {
"title": "Нет",
"hide": True
}]
response.set_buttons(buttons)
if request.command.lower() == 'да':
quest = choice(list(quest_data.keys()))
user_storage = {
'quest': quest,
'state': WAIT,
'wins': user_storage['wins'],
'tries': user_storage ['tries']
}
response.set_text(format_new_question(quest))
elif request.command.lower() == 'нет':
response.set_text("Желаете выйти?")
buttons = [{
"title": "Да",
"hide": True
}, {
"title": "Нет",
"hide": True
}]
response.set_buttons(buttons)
if request.command.lower() == 'нет':
user_storage['state'] = STOP
elif request.command.lower() == 'да':
response.set_end_session(True)
goodbye = choice(alice_static.goodbye)
response.set_text(goodbye)
else:
response.set_buttons(buttons)
response.set_text ('Извините я не понимаю, вы хотите выйти?')
else:
buttons = [{
"title": "Да",
"hide": True
}, {
"title": "Нет",
"hide": True
}]
response.set_buttons(buttons)
response.set_text('Выбери один из двух вариантов - Да или Нет')
if user_storage.get('state') == WAIT:
# Обрабатываем ответ пользователя.
timer = Timer(30.0, timerout)
timer.start()
if request.command.lower() == quest_data[user_storage['answer']].lower():
# Пользователь угадал.
user_storage['wins'] += 1
user_storage['try'] +=1
#Добавляем победу и попытку
correct = choice(alice_static.answer_correct)
again=choice(alice_static.again)
#Выбираем реплику для поздравления
response.set_text('{correct}\n{again}'.format(
correct=correct
again=again
))
#Поздравляем и спрашиваем хочет ли пользователь сыграть ещё раз
buttons = [{
"title": "Да",
"hide": True
}, {
"title": "Нет",
"hide": True
}]
response.set_buttons(buttons)
#Выводим кнопочки
user_storage['state'] = REPLY
#Меняем состояние пользователя
else:
user_storage['try'] += 1
incorrect = choice(alice_static.answer_incorrect)
right_answer=choice(alice_static.right_answer)
real_answer=quest_data[user_storage['answer']]
again=choice(alice_static.again)
response.set_text('{incorrect}\n{right_answer}{real_answer}\n{again}'.format(
incorrect=incorrect,
right_answer=right_answer
real_answer=real_answer
again=again
))
buttons = [{
"title": "Да",
"hide": True
}, {
"title": "Нет",
"hide": True
}]
response.set_buttons(buttons)
#Выводим кнопочки
user_storage['state'] = REPLY
#Меняем состояние пользователя
elif user_storage.get('state') == REPLY:
if request.command.lower() == 'да':
quest = choice(list(quest_data.keys()))
user_storage = {
'quest': quest,
'state': WAIT,
'wins': user_storage['wins'],
'tries': user_storage ['tries']
}
response.set_text(format_new_question(quest))
elif request.command.lower() == 'нет':
response.set_end_session(True)
goodbye = choice(alice_static.goodbye)
response.set_text(goodbye)
else:
buttons = [{
"title": "Да",
"hide": True
}, {
"title": "Нет",
"hide": True
}]
response.set_buttons(buttons)
response.set_text('Выбери один из двух вариантов - Да или Нет')
|
5,706 | fe5050fdf010ce1c4d458b8a52ac92485a7d8cea | '''
Problem Description
Given two numbers n1 and n2
1. Find prime numbers between n1 and n2, then
2. Make all possible unique combinations of numbers from the prime
numbers list you found in step 1.
3. From this new list, again find all prime numbers.
4. Find smallest (a) and largest (b) number from the 2nd generated
list, also count of this list.
5. Consider smallest and largest number as the 1st and 2nd number
to generate Fibonacci series respectively till the count
(number of primes in the 2nd list).
6. Print the last number of a Fibonacci series as an output
Constraints
2 <= n1, n2 <= 100
n2 - n1 >= 35
Input Format
One line containing two space separated integers n1 and n2.
Output
Last number of a generated Fibonacci series.
Timeout
1
Test Case
Example 1
Input : 2 40
Output : 13158006689
Explanation :
1st prime list = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37]
Combination of all the primes = [23, 25, 27, 211, 213, 217, 219,
223, 229, 231, 32, 35, 37, 311, 313, 319, 323, 329, 331, 337, 52,
53, 57, 511, 513, 517, 519, 523, 529, 531, 537, 72, 73, 75, 711,
713, 717, 719, 723, 729, 731, 737, 112, 113, 115, 117, 1113, 1117,
1119, 1123, 1129, 1131, 1137, 132, 133, 135, 137, 1311, 1317, 1319,
1323, 1329, 1331, 1337, 172, 173, 175, 177, 1711, 1713, 1719, 1723,
1729, 1731, 1737, 192, 193, 195, 197, 1911, 1913, 1917, 1923, 1929,
1931, 1937, 232, 233, 235, 237, 2311, 2313, 2317, 2319, 2329, 2331,
2337, 292, 293, 295, 297, 2911, 2913, 2917, 2919, 2923, 2931, 2937,
312, 315, 317, 3111, 3113, 3117, 3119, 3123, 3129, 3137, 372, 373,
375, 377, 3711, 3713, 3717, 3719, 3723, 3729, 3731]
2nd prime list=[193, 3137, 197, 2311, 3719, 73, 137, 331, 523,
1931, 719, 337, 211, 23, 1117, 223, 1123, 229, 37, 293, 2917,
1319, 1129, 233, 173, 3119, 113, 53, 373, 311, 313, 1913, 1723,
317]
smallest (a) = 23
largest (b) = 3719
Therefore, the last number of a Fibonacci series i.e. 34th
Fibonacci number in the series that has 23 and 3719 as the first
2 numbers is 13158006689
Example 2
Input : 30 70
Output : 2027041
Explanation
1st prime list=[31, 37, 41, 43, 47, 53, 59, 61, 67]
2nd prime list generated form combination of 1st prime list = [3137,
5953, 5347, 6761, 3761, 4337, 6737, 6131, 3767, 4759, 4153, 3167,
4159, 6143]
smallest prime in 2nd list=3137
largest prime in 2nd list=6761
Therefore, the last number of a Fibonacci series i.e. 14th
Fibonacci number in the series that has 3137 and 6761 as the first
2 numbers is 2027041
'''
# test cases passed , private cases failed
# https://www.rookieslab.com/posts/fastest-way-to-check-if-a-number-is-prime-or-not
# seive of Eratosthenes method
# N = 100
# is_prime = [1]*N
# is_prime[0] = 0
# is_prime[1] = 0
# https://www.geeksforgeeks.org/python-program-to-check-whether-a-number-is-prime-or-not/
def isPrime(n): # use to find if number is prime in 2nd list
# Corner cases
if (n <= 1) :
return False
if (n <= 3) :
return True
# This is checked so that we can skip
# middle five numbers in below loop
if (n % 2 == 0 or n % 3 == 0) :
return False
i = 5
while(i * i <= n) :
if (n % i == 0 or n % (i + 2) == 0) :
return False
i = i + 6
return True
def primeList(n1, n2):
l = []
for n in range(n1, n2+1):
if isPrime(n):
l.append(n)
return l
n1, n2 = map(int, input().split())
l1 = primeList(n1,n2)
# print(l1) - check if first list of prime numbers matches
#combining
l2 = list()
l = len(l1)
for i in range(l):
for j in range(l):
if i == j:
continue
l2.append(str(l1[i])+str(l1[j]))
l3 = primeList(int(l2[0]),int(l2[-1]))
# list of primes from the second list
l4 = []
for i in l3:
if str(i) in l2:
l4.append(i)
# print(l4) - check if secin list of prime numbers matches
x = min(l4)
y = max(l4)
count = len(l4)
# print(x,y,count) - check if smallest, largest prime and count match
for i in range(2,count):
f = x + y
x = y
y = f
print(y) |
5,707 | 55e743cb027d27cc6b668424c1584f27a8e8c51a | # Formatters example
#
# Requirements:
# Go to the ../hello_world directory and do: python prepare_data.py
#
# Instructions:
#
# Just run this file:
#
# python table.py
# Output:
# * standard input – text table
# * table.html
# * cross_table.html
#
from cubes import Workspace, create_formatter
workspace = Workspace("slicer.ini")
# Create formatters
text_formatter = create_formatter("text_table")
html_formatter = create_formatter("simple_html_table")
html_cross_formatter = create_formatter("html_cross_table")
# Get the browser and data
browser = workspace.browser("irbd_balance")
result = browser.aggregate(drilldown=["item"])
result = result.cached()
#
# 1. Create text output
#
print "Text output"
print "-----------"
print text_formatter(result, "item")
#
# 2. Create HTML output (see table.html)
#
with open("table.html", "w") as f:
data = html_formatter(result, "item")
f.write(data)
#
# 3. Create cross-table to cross_table.html
#
result = browser.aggregate(drilldown=["item", "year"])
with open("cross_table.html", "w") as f:
data = html_cross_formatter(result,
onrows=["year"],
oncolumns=["item.category_label"])
f.write(data)
print "Check also table.html and cross_table.html files"
|
5,708 | 97ca134ffce404f4b2bc7352d4aac73a7bb764bd | # Copyright Materialize, Inc. and contributors. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.
import re
from pathlib import Path
from typing import Dict, List, Optional, Type
from parameterized import parameterized_class # type: ignore
import materialize.optbench
import materialize.optbench.sql
from materialize.feature_benchmark.action import Action
from materialize.feature_benchmark.executor import Executor
from materialize.feature_benchmark.measurement_source import (
MeasurementSource,
Timestamp,
)
from materialize.feature_benchmark.scenario import Scenario
class OptbenchInit(Action):
def __init__(self, scenario: str, no_indexes: bool = False) -> None:
self._executor: Optional[Executor] = None
self._scenario = scenario
self._no_indexes = no_indexes
def run(self, executor: Optional[Executor] = None) -> None:
e = executor or self._executor
statements = materialize.optbench.sql.parse_from_file(
Path(f"misc/python/materialize/optbench/schema/{self._scenario}.sql")
)
if self._no_indexes:
idx_re = re.compile(r"(create|create\s+default|drop)\s+index\s+")
statements = [
statement
for statement in statements
if not idx_re.match(statement.lower())
]
e._composition.sql("\n".join(statements)) # type: ignore
class OptbenchRun(MeasurementSource):
def __init__(self, optbench_scenario: str, query: int):
self._executor: Optional[Executor] = None
self._optbench_scenario = optbench_scenario
self._query = query
def run(self, executor: Optional[Executor] = None) -> List[Timestamp]:
assert not (executor is None and self._executor is None)
assert not (executor is not None and self._executor is not None)
e = executor or self._executor
queries = materialize.optbench.sql.parse_from_file(
Path(
f"misc/python/materialize/optbench/workload/{self._optbench_scenario}.sql"
)
)
assert 1 <= self._query <= len(queries)
query = queries[self._query - 1]
explain_query = materialize.optbench.sql.Query(query).explain(timing=True)
explain_output = materialize.optbench.sql.ExplainOutput(
e._composition.sql_query(explain_query)[0][0] # type: ignore
)
# Optimization time is in microseconds, divide by 3 to get a more readable number (still in wrong unit)
timestamps = [0, float(explain_output.optimization_time()) / 3] # type: ignore
return timestamps
def name_with_query(cls: Type["OptbenchTPCH"], num: int, params_dict: Dict) -> str:
return f"OptbenchTPCHQ{params_dict['QUERY']:02d}"
@parameterized_class(
[{"QUERY": i} for i in range(1, 23)], class_name_func=name_with_query
)
class OptbenchTPCH(Scenario):
"""Run optbench TPCH for optimizer benchmarks"""
QUERY = 1
def init(self) -> List[Action]:
return [OptbenchInit("tpch")]
def benchmark(self) -> MeasurementSource:
return OptbenchRun("tpch", self.QUERY)
|
5,709 | bfc6f6acef26e3dc4f6bf2b76363daec68c53cd1 | # This file is part of the Adblock Plus web scripts,
# Copyright (C) 2006-present eyeo GmbH
#
# Adblock Plus is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# Adblock Plus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Adblock Plus. If not, see <http://www.gnu.org/licenses/>.
import hashlib
import hmac
import base64
import MySQLdb
import os
import re
import marshal
import subprocess
from sitescripts.utils import get_config, cached, get_template, anonymizeMail, sendMail
def getReportSubscriptions(guid):
cursor = get_db().cursor(MySQLdb.cursors.DictCursor)
executeQuery(cursor,
'''SELECT url, hasmatches FROM #PFX#sublists INNER JOIN
#PFX#subscriptions ON (#PFX#sublists.list = #PFX#subscriptions.id)
WHERE report = %s''',
guid)
rows = cursor.fetchall()
cursor.close()
return rows
def getReports(startTime):
count = 10000
offset = 0
while True:
cursor = get_db().cursor(MySQLdb.cursors.DictCursor)
executeQuery(cursor,
'''SELECT guid, type, UNIX_TIMESTAMP(ctime) AS ctime, status, site, contact,
comment, hasscreenshot, knownissues
FROM #PFX#reports WHERE ctime >= FROM_UNIXTIME(%s) LIMIT %s OFFSET %s''',
(startTime, count, offset))
rows = cursor.fetchall()
cursor.close()
if len(rows) == 0:
break
for row in rows:
yield row
offset += len(rows)
def getReportsForUser(contact):
cursor = get_db().cursor(MySQLdb.cursors.DictCursor)
executeQuery(cursor,
'''SELECT guid, type, UNIX_TIMESTAMP(ctime) AS ctime, status, site, contact,
comment, hasscreenshot, knownissues
FROM #PFX#reports WHERE contact = %s ORDER BY ctime DESC LIMIT 100''',
contact)
rows = cursor.fetchall()
cursor.close()
return rows
def getReport(guid):
cursor = get_db().cursor()
executeQuery(cursor, 'SELECT dump FROM #PFX#reports WHERE guid = %s', guid)
report = cursor.fetchone()
if report == None:
return None
reportData = marshal.loads(report[0])
return reportData
def saveReport(guid, reportData, isNew=False):
cursor = get_db().cursor()
screenshot = reportData.get('screenshot', None)
if screenshot != None:
reportData['hasscreenshot'] = 2 if reportData.get('screenshotEdited', False) else 1
try:
saveScreenshot(guid, screenshot)
except (TypeError, UnicodeEncodeError):
reportData['hasscreenshot'] = 0
del reportData['screenshot']
knownIssues = len(reportData.get('knownIssues', []))
contact = getUserId(reportData.get('email', None)) if reportData.get('email', None) else None
dumpstr = marshal.dumps(reportData)
if contact != None and isNew:
executeQuery(cursor, 'INSERT INTO #PFX#users (id, reports) VALUES (%s, 1) ON DUPLICATE KEY UPDATE reports = reports + 1', contact)
executeQuery(cursor,
'''INSERT INTO #PFX#reports (guid, type, ctime, site, comment, status, contact, hasscreenshot, knownissues, dump)
VALUES (%(guid)s, %(type)s, FROM_UNIXTIME(%(ctime)s), %(site)s, %(comment)s, %(status)s, %(contact)s,
%(hasscreenshot)s, %(knownissues)s, _binary %(dump)s) ON DUPLICATE KEY
UPDATE type = %(type)s, site = %(site)s, comment = %(comment)s, status = %(status)s,
hasscreenshot = %(hasscreenshot)s, knownissues = %(knownissues)s, dump = _binary %(dump)s''',
{'guid': guid, 'type': reportData.get('type', None), 'ctime': reportData['time'], 'site': reportData.get('siteName', None),
'comment': reportData.get('comment', None), 'status': reportData.get('status', None), 'contact': contact,
'hasscreenshot': reportData.get('hasscreenshot', 0), 'knownissues': knownIssues, 'dump': dumpstr})
if len(reportData['subscriptions']) > 0:
for sn in reportData['subscriptions']:
executeQuery(cursor, 'SELECT id FROM #PFX#subscriptions WHERE url = %s', sn['id'])
id = cursor.fetchone()
if id != None:
def filterMatch(f):
return any(u == sn['id'] for u in f.get('subscriptions', []))
hasMatches = any(filterMatch(f) for f in reportData.get('filters', []))
executeQuery(cursor, 'INSERT IGNORE INTO #PFX#sublists (report, list, hasmatches) VALUES (%s, %s, %s)', (guid, id[0], hasMatches))
get_db().commit()
reportData['guid'] = guid
if contact:
# TODO: The mail anonymization should happen in the template, not here
origEmail = reportData['email']
email = reportData['email']
email = re.sub(r' at ', r'@', email)
email = re.sub(r' dot ', r'.', email)
reportData['email'] = anonymizeMail(email)
reportData['uid'] = contact
file = os.path.join(get_config().get('reports', 'dataPath'), guid[0], guid[1], guid[2], guid[3], guid + '.html')
dir = os.path.dirname(file)
if not os.path.exists(dir):
os.makedirs(dir)
template = get_template(get_config().get('reports', 'webTemplate'))
template.stream(reportData).dump(file, encoding='utf-8')
if contact:
reportData['email'] = origEmail
def removeReport(guid):
cursor = get_db().cursor()
executeQuery(cursor, 'DELETE FROM #PFX#reports WHERE guid = %s', guid)
get_db().commit()
file = os.path.join(get_config().get('reports', 'dataPath'), guid[0], guid[1], guid[2], guid[3], guid + '.html')
if os.path.isfile(file):
os.remove(file)
file = os.path.join(get_config().get('reports', 'dataPath'), guid[0], guid[1], guid[2], guid[3], guid + '.png')
if os.path.isfile(file):
os.remove(file)
def getUser(contact):
cursor = get_db().cursor(MySQLdb.cursors.DictCursor)
executeQuery(cursor, 'SELECT reports, positive, negative FROM #PFX#users WHERE id = %s', contact)
user = cursor.fetchone()
return user
@cached(3600)
def getUserUsefulnessScore(contact):
if contact == None:
return 0
cursor = get_db().cursor()
# source from http://www.evanmiller.org/how-not-to-sort-by-average-rating.html
executeQuery(cursor,
'''SELECT ((positive + 1.9208) / (positive + negative)
- 1.96 * SQRT((positive * negative) / (positive + negative) + 0.9604) / (positive + negative))
/ (1 + 3.8416 / (positive + negative)) AS score FROM #PFX#users WHERE id = %s''',
contact)
score = cursor.fetchone()
if score == None:
return 0
if score[0] == None: # no score yet
return 0.3
else:
return 4 * score[0]
def updateUserUsefulness(contact, newusefulness, oldusefulness):
new = int(newusefulness)
old = int(oldusefulness)
if new == old:
return
positive = 0
negative = 0
if old > 0:
positive -= 1
elif old < 0:
negative -= 1
if new > 0:
positive += 1
elif new < 0:
negative += 1
cursor = get_db().cursor()
executeQuery(cursor, 'UPDATE #PFX#users SET negative = negative + %s, positive = positive + %s WHERE id = %s', (negative, positive, contact))
get_db().commit()
def saveScreenshot(guid, screenshot):
prefix = 'data:image/png;base64,'
if not screenshot.startswith(prefix):
raise TypeError('Screenshot is not a PNG image')
data = base64.b64decode(screenshot[len(prefix):])
file = os.path.join(get_config().get('reports', 'dataPath'), guid[0], guid[1], guid[2], guid[3], guid + '.png')
dir = os.path.dirname(file)
if not os.path.exists(dir):
os.makedirs(dir)
f = open(file, 'wb')
f.write(data)
f.close()
if get_config().has_option('reports', 'pngOptimizerPath'):
cmd = get_config().get('reports', 'pngOptimizerPath').split()
cmd.append(file)
subprocess.call(cmd)
def mailDigest(templateData):
sendMail(get_config().get('reports', 'mailDigestTemplate'), templateData)
def sendUpdateNotification(templateData):
sendMail(get_config().get('reports', 'notificationTemplate'), templateData)
def calculateReportSecret(guid):
return hmac.new(get_config().get('reports', 'secret'), guid).hexdigest()
def calculateReportSecret_compat(guid):
hash = hashlib.md5()
hash.update(get_config().get('reports', 'secret'))
hash.update(guid)
return hash.hexdigest()
def getUserId(email):
return hmac.new(get_config().get('reports', 'secret'), email.encode('utf-8')).hexdigest()
def getDigestId(email):
hash = hashlib.md5()
hash.update(email.encode('utf-8'))
return hash.hexdigest()
def getDigestPath(dir, email):
return os.path.join(dir, getDigestId(email) + '.html')
def getDigestSecret(id, (year, week, weekday)):
mac = hmac.new(get_config().get('reports', 'secret'), id)
mac.update(str(year))
mac.update(str(week))
return mac.hexdigest()
def getDigestSecret_compat(id, (year, week, weekday)):
hash = hashlib.md5()
hash.update(get_config().get('reports', 'secret'))
hash.update(id)
hash.update(str(year))
hash.update(str(week))
return hash.hexdigest()
@cached(600)
def get_db():
database = get_config().get('reports', 'database')
dbuser = get_config().get('reports', 'dbuser')
dbpasswd = get_config().get('reports', 'dbpassword')
if os.name == 'nt':
return MySQLdb.connect(user=dbuser, passwd=dbpasswd, db=database, use_unicode=True, charset='utf8', named_pipe=True)
else:
return MySQLdb.connect(user=dbuser, passwd=dbpasswd, db=database, use_unicode=True, charset='utf8')
def executeQuery(cursor, query, args=None):
tablePrefix = get_config().get('reports', 'dbprefix')
query = re.sub(r'#PFX#', tablePrefix, query)
cursor.execute(query, args)
|
5,710 | 002b795f61645ba2023cdb359167d2a65535d768 | /home/runner/.cache/pip/pool/f6/0b/37/37d1907955d15568c921a952a47d6e8fcc905cf4f36ab6f99f5fc7315a |
5,711 | 5fe81a6143642d671686c6623a9ecc93e04a82bf | try:
from setuptools import setup, find_packages
except ImportError:
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup, find_packages
setup(
name = "pip-utils",
version = "0.0.1",
url = 'https://github.com/mattpaletta/pip-utils',
packages = find_packages(),
include_package_data = True,
install_requires = ["threadlru", "beautifulsoup4"],
setup_requires = [],
author = "Matthew Paletta",
author_email = "mattpaletta@gmail.com",
description = "Programatic Utils for pip management",
license = "BSD",
dependency_links=[
'git+git://github.com/mattpaletta/pynotstdlib.git@master#egg=pynotstdlib-0'
],
) |
5,712 | 46b51f46f6ed73e3b9dc2f759535ba71facd2aae | import pandas as pd
import random
import math
# takes 2 row series and calculates the distances between them
def euclidean_dist(a: pd.Series, b: pd.Series):
diff = a.sub(other=b)
squares = diff ** 2
dist = 0
for feature_distance in squares:
if not math.isnan(feature_distance):
dist += feature_distance
return math.sqrt(dist)
# takes copy of dataframe; returns initialized centroid array
def choose_centroids(data_copy: pd.DataFrame):
new_centroids = []
# randomly picks k centroids
for i in range(0, k):
distance_scores = []
# picks furthest centroid from each other if the first one has been picked; else picks a random initial point
if i != 0:
for j in new_centroids:
distances = []
# for j existing centroids, compare to all other points and selects from all of j for next centroid
for row in data_copy.iterrows():
distances.append((euclidean_dist(j, row[1]), row[0]))
distances.sort()
distance_scores.append(distances[-1])
distance_scores.sort()
centroid_index = distance_scores[-1][1]
else:
centroid_index = random.randrange(num_rows)
# drops centroid from copied dataframe to avoid duplicates
data_copy.drop(labels=centroid_index, axis=0, inplace=True)
# appends the newly selected centroid to the list
new_centroids.append(data.iloc[centroid_index])
return new_centroids
def assign_centroids():
cluster_ids = [] # array for storing column output
cluster_dict = {} # dict for mapping centroid IDs (i.e. 89, 102, 34, etc.) to (0, 1, 2, ..., k)
counter = 0
for i in centroids:
if i.name is None:
i.name = counter
cluster_dict[i.name] = counter
counter += 1 # crude way of assigning centroid IDs
for row in data.iterrows():
distances = []
for j in centroids:
dist = euclidean_dist(row[1], j)
if dist != 0:
distances.append((dist, j.name))
distances.sort()
cluster_ids.append(cluster_dict[distances[0][1]])
# inserts cluster assignment column;
# if column already exists, catches exception and removes the column before insertion
try:
data.insert(6, "ClusterID", cluster_ids)
except ValueError:
data.drop(columns="ClusterID", axis=1, inplace=True)
data.insert(6, "ClusterID", cluster_ids)
except IndexError:
data.drop(columns="ClusterID", axis=1, inplace=True)
data.insert(6, "ClusterID", cluster_ids)
return cluster_ids
def recalculate_clusters():
# for k centroids, take the mean of all values belonging to the centroid and make that point the new centroid
for i in range(0, k):
cluster = pd.DataFrame()
for item in data.iterrows():
if item[1].loc['ClusterID'] == i:
cluster = cluster.append(other=item[1])
centroids[i] = cluster.mean()
data = pd.read_csv("data/fire_data_2011.csv")
# uses a dict to convert from tree genus i.e. "Pinu", "Pice",... to 0, 1,...
counter = 0
tree_count_dict = {}
for i in data.iterrows():
try:
tree_count_dict[i[1]["tree_genus"]]
except KeyError:
tree_count_dict[i[1]["tree_genus"]] = counter
counter += 1
data = data.copy().replace(to_replace=tree_count_dict)
print(data)
k = 7
num_rows = data.iloc[-1].name # gets label of the last row to figure out how many instances are in the data
# giving temporary copy of data so selected values can be removed so there aren't duplicate centroids
centroids = choose_centroids(data.copy())
cluster_assignments = []
unchanged_iteration_count = 0
for iterations in range(0, 100):
print("Clustering Progress: [", iterations + 1, "/ 100 ]")
# update previous cluster assignments; reassign cluster IDs and recalculate centroids
previous_assignments = cluster_assignments.copy()
cluster_assignments = assign_centroids()
recalculate_clusters()
# checks if cluster assignments have changed from one iteration to another
if previous_assignments == cluster_assignments and len(previous_assignments) > 0:
unchanged_iteration_count += 1
else:
unchanged_iteration_count = 0
# if cluster assignments haven't changed in 3 iterations, break from loop and exit
if unchanged_iteration_count > 3:
print("Exiting early: cluster assignments haven't changed in 3 iterations")
break
print("\nCluster Counts ( k =", k, "):")
for i in range(0, k):
print("Cluster", i + 1, ": ", cluster_assignments.count(i))
print("\n\n", data)
data.to_csv("./data/fire_data_2011_clustered.csv")
|
5,713 | 6dfd59bbab74a3a657d2200d62964578c296ee54 |
from ..utils import Object
class ChatMembersFilterAdministrators(Object):
"""
Returns the owner and administrators
Attributes:
ID (:obj:`str`): ``ChatMembersFilterAdministrators``
No parameters required.
Returns:
ChatMembersFilter
Raises:
:class:`telegram.Error`
"""
ID = "chatMembersFilterAdministrators"
def __init__(self, **kwargs):
pass
@staticmethod
def read(q: dict, *args) -> "ChatMembersFilterAdministrators":
return ChatMembersFilterAdministrators()
|
5,714 | e7bec9018f25ba9e3c3ae8a5bbe11f8bc4b54a04 | import logging, os, zc.buildout, sys, shutil
class ZipEggs:
def __init__(self, buildout, name, options):
self.name, self.options = name, options
if options['target'] is None:
raise zc.buildout.UserError('Invalid Target')
if options['source'] is None:
raise zc.buildout.UserError('Invalid Source')
def zipit(self):
target = self.options['target']
if not os.path.exists(target):
os.mkdir(target)
path = self.options['source']
for dirs in os.listdir(path):
try:
source = os.path.join(path, dirs)
dist = "%s/%s" % (target, dirs)
print "%s > %s" % (source, dist)
shutil.make_archive(dist, "zip", source)
os.rename(dist+".zip", dist)
except OSError:
print "ignore %s" % dirs
return []
def install(self):
return self.zipit()
def update(self):
return self.zipit()
|
5,715 | 0b0ae6101fd80bdbcf37b935268f3e49230599fb | import cv2
print(cv2.__version__)
image = cv2.imread("download.jpeg", 1)
print(image)
print(image.shape)
print(image[0])
print("~~~~~~~~~~~~~~~")
print(image.shape[0])
print("~~~~~~~~~~~~~~~")
print(len(image)) |
5,716 | d957fd5fbcdcf2e549323677185eabb8a50536c6 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from contextlib import suppress
import asyncio
import shutil
from aiohttp import web
from bot import app
from var import var
from logger import update_logging_files
loop = asyncio.get_event_loop()
def import_handlers():
from deezer import handlers, callback_handlers
from spotify import handlers, integration, callback_handlers
from vk import handlers, callback_handlers
from soundcloud import handlers, callback_handlers
import handlers
import inline_handlers
import callback_handlers
import error_handlers
if __name__ == '__main__':
with suppress(FileNotFoundError):
shutil.rmtree('downloads')
logging = asyncio.ensure_future(update_logging_files())
import_handlers()
web.run_app(app, port=8081)
loop.close()
|
5,717 | 8a4269f2094fa8ab8f6a93e653183dafb141232e | import re
from pathlib import Path
RAW_DUMP_XML = Path("raw_data/Wikipedia.xml")
def count_regexp():
"""Counts the occurences of the regular expressions you will write.
"""
# Here's an example regular expression that roughly matches a valid email address.
# The ones you write below should be shorter than this
email = re.compile("[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+\.[a-zA-Z]{2,5}")
###### Write below #########
subheading = re.compile("\=\=+.*\=\=+")
link_to_subheading = re.compile("\[\[[\w\'*\-*\:*\(*\)*\_*\s*]+[#][\s*\w\\'*\-*\:*\(*\)*\_*s*]+\|*")
doi_citation = re.compile("\{\{[c][ite](?!{{).*[dD][oO][iI]\s*[:|,=\/]*\s*[0-9]+\.[0-9]+.*\}\}")
###### End of your work #########
patterns = {
"emails": email,
"subheadings": subheading,
"links to subheadings": link_to_subheading,
"citations with DOI numbers": doi_citation,
}
with open(RAW_DUMP_XML, encoding="utf-8") as f:
dump_text = f.read()
for name, pattern in patterns.items():
if pattern is None:
continue
matches = pattern.findall(dump_text)
count = len(matches)
example_matches = [matches[i * (count // 5)] for i in range(5)]
print("Found {} occurences of {}".format(count, name))
print("Here are examples:")
print("\n".join(example_matches))
print("\n")
if __name__ == "__main__":
count_regexp()
|
5,718 | 88dfb422b1c9f9a9a8f497e1dbba5598c2710e9b | import pygame
# import random
# import text_scroll
from os import path
img_dir = path.join(path.dirname(__file__), 'img')
# define screen and refresh rate
WIDTH = 720
HEIGHT = 720
FPS = 30
# define colors
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
BLACK = (0, 0, 0)
YELLOW = (255, 255, 0)
BROWN = (165, 42, 42)
WHITE = (255, 255, 255)
# define runtime settings
screen = pygame.display.set_mode((WIDTH, HEIGHT))
background = pygame.Surface(screen.get_size())
pygame.display.set_caption('Space Force Prime')
clock = pygame.time.Clock() |
5,719 | ccfcc5b644d592090786ceb35a85124c9d3275ad | # USAGE
# python predict_video.py --model model/activity.model --label-bin model/lb.pickle --input example_clips/lifting.mp4 --output output/lifting_128avg.avi --size 128
# python predict_video.py --model model/road_activity.model --label-bin model/rd.pickle --input example_clips/fire_footage.mp4 --ou
# tput output/fire_footage2.avi --size 128
# import the necessary packages
from tensorflow.keras.models import load_model
from collections import deque
import numpy as np
import argparse
from mail import sendmail
import pickle
import imutils
import cv2
import datetime
import time
from flask import Flask, render_template, request
app = Flask(__name__)
@app.route('/')
def index():
return render_template('Main_page.html')
@app.route('/prediction.html')
def predict():
return render_template('prediction.html')
@app.route('/About_us.html')
def about_us():
return render_template('About_us.html')
@app.route('/Result1.html', methods=['POST'])
def Result1():
global annotation
if request.method == 'POST':
MODEL_PATH = 'model/final.model'
PICKLE_PATH = 'model/final.pickle'
#MODEL_PATH = 'model/real_time.model'
#PICKLE_PATH = 'model/real_time.pickle'
INPUT_VIDEO = request.form['inp_video']
out = INPUT_VIDEO.split('.')
INPUT_VIDEO = 'example_clips/'+request.form['inp_video']
out = out[0]
OUTPUT_VIDEO = 'output/' + out + '.avi'
SIZE = 128
print(MODEL_PATH,PICKLE_PATH,INPUT_VIDEO,OUTPUT_VIDEO,SIZE)
#load the trained model and label binarizer from disk
print("[INFO] loading model and label binarizer...")
model = load_model(MODEL_PATH)
lb = pickle.loads(open(PICKLE_PATH, "rb").read())
# initialize the image mean for mean subtraction along with the
# predictions queue
mean = np.array([123.68, 116.779, 103.939][::1], dtype="float32")
Q = deque(maxlen=SIZE)
# initialize the video stream, pointer to output video file, and
# frame dimensions
vs = cv2.VideoCapture(INPUT_VIDEO)
#vs = cv2.VideoCapture(0)
writer = None
(W, H) = (None, None)
count = 0.0
flag = 0
start_frame = 0
end_frame = 0
status = {}
annotation = ""
que = deque()
# loop over frames from the video file stream
while True:
# read the next frame from the file
(grabbed, frame) = vs.read()
count += 1.0
# if the frame was not grabbed, then we have reached the end
# of the stream
if not grabbed:
break
# if the frame dimensions are empty, grab them
if W is None or H is None:
(H, W) = frame.shape[:2]
# clone the output frame, then convert it from BGR to RGB
# ordering, resize the frame to a fixed 224x224, and then
# perform mean subtraction
output = frame.copy()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = cv2.resize(frame, (224, 224)).astype("float32")
frame -= mean
# make predictions on the frame and then update the predictions
# queue
preds = model.predict(np.expand_dims(frame, axis=0))[0]
Q.append(preds)
# perform prediction averaging over the current history of
# previous predictions
results = np.array(Q).mean(axis=0)
i = np.argmax(results)
label = lb.classes_[i]
if len(que) == 30:
que.popleft()
if len(que) != 30:
que.append(label)
noOfAlerts = que.count("fire") + que.count("accident")
if que.count("fire") > que.count("accident"):
caseDetect = "fire"
else:
caseDetect = "accident"
# draw the activity on the output frame
text = "Alert!!: {}".format(label)
# Changes starts here
alert = ["fire", "accident"]
#currentFrame = 0
#print(label, flag)
if len(que) == 30:
if caseDetect in alert and noOfAlerts > 20:
cv2.putText(output, text, (35, 50), cv2.FONT_HERSHEY_SIMPLEX,
1.25, (0, 0, 255), 5)
if flag == 0:
annotation = caseDetect
start_frame = count - 20
flag = 1
else:
if flag == 1:
end_frame = count - 10
flag = 2
#name = './frame/frame'+str(currentFrame)+'.jpg'
#cv2.imwrite(name,output)
# check if the video writer is None
if writer is None:
# initialize our video writer
fourcc = cv2.VideoWriter_fourcc(*"MJPG")
writer = cv2.VideoWriter(OUTPUT_VIDEO, fourcc, 30,
(W, H), True)
# write the output frame to disk
writer.write(output)
# show the output image
cv2.imshow("Output", output)
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
# changes made here
if annotation != "":
status = sendmail("harshpatel682@gmail.com", "Anomaly Detected!!!", "yes")
status = status['email_status']
#total_time = end_time - start_time
#print("Time is: {}".format(str(datetime.timedelta(seconds=(total_time)))))
print("count: {}".format(count))
#print("Frame count: {}".format(f_start))
# release the file pointers
print("[INFO] cleaning up...")
writer.release()
vs.release()
start_frame = start_frame//30
end_frame = end_frame // 30
if flag == 1:
end_frame = count
end_frame = end_frame // 30
flag = 2
print(start_frame, end_frame)
return render_template('Result1.html', label=annotation, count=count, start_time=start_frame, end_time=end_frame,
status = status)
if __name__ == '__main__':
app.run(debug=False) |
5,720 | 4015078ee9640c4558a4f29ebbb89f9098a31014 | from collections import Counter
import numpy as np
import random
import torch
import BidModel
from douzero.env.game import GameEnv
env_version = "3.2"
env_url = "http://od.vcccz.com/hechuan/env.py"
Card2Column = {3: 0, 4: 1, 5: 2, 6: 3, 7: 4, 8: 5, 9: 6, 10: 7,
11: 8, 12: 9, 13: 10, 14: 11, 17: 12}
NumOnes2Array = {0: np.array([0, 0, 0, 0]),
1: np.array([1, 0, 0, 0]),
2: np.array([1, 1, 0, 0]),
3: np.array([1, 1, 1, 0]),
4: np.array([1, 1, 1, 1])}
deck = []
for i in range(3, 15):
deck.extend([i for _ in range(4)])
deck.extend([17 for _ in range(4)])
deck.extend([20, 30])
class Env:
"""
Doudizhu multi-agent wrapper
"""
def __init__(self, objective):
"""
Objective is wp/adp/logadp. It indicates whether considers
bomb in reward calculation. Here, we use dummy agents.
This is because, in the orignial game, the players
are `in` the game. Here, we want to isolate
players and environments to have a more gym style
interface. To achieve this, we use dummy players
to play. For each move, we tell the corresponding
dummy player which action to play, then the player
will perform the actual action in the game engine.
"""
self.objective = objective
# Initialize players
# We use three dummy player for the target position
self.players = {}
for position in ['landlord', 'landlord_up', 'landlord_down']:
self.players[position] = DummyAgent(position)
# Initialize the internal environment
self._env = GameEnv(self.players)
self.total_round = 0
self.force_bid = 0
self.infoset = None
def reset(self, model, device, flags=None):
"""
Every time reset is called, the environment
will be re-initialized with a new deck of cards.
This function is usually called when a game is over.
"""
self._env.reset()
# Randomly shuffle the deck
if model is None:
_deck = deck.copy()
np.random.shuffle(_deck)
card_play_data = {'landlord': _deck[:20],
'landlord_up': _deck[20:37],
'landlord_down': _deck[37:54],
'three_landlord_cards': _deck[17:20],
}
for key in card_play_data:
card_play_data[key].sort()
self._env.card_play_init(card_play_data)
self.infoset = self._game_infoset
return get_obs(self.infoset)
else:
self.total_round += 1
bid_done = False
card_play_data = []
landlord_cards = []
last_bid = 0
bid_count = 0
player_ids = {}
bid_info = None
bid_obs_buffer = []
multiply_obs_buffer = []
bid_limit = 3
force_bid = False
while not bid_done:
bid_limit -= 1
bid_obs_buffer.clear()
multiply_obs_buffer.clear()
_deck = deck.copy()
np.random.shuffle(_deck)
card_play_data = [
_deck[:17],
_deck[17:34],
_deck[34:51],
]
for i in range(3):
card_play_data[i].sort()
landlord_cards = _deck[51:54]
landlord_cards.sort()
bid_info = np.array([[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1]])
bidding_player = random.randint(0, 2)
# bidding_player = 0 # debug
first_bid = -1
last_bid = -1
bid_count = 0
if bid_limit <= 0:
force_bid = True
for r in range(3):
bidding_obs = _get_obs_for_bid(bidding_player, bid_info, card_play_data[bidding_player])
with torch.no_grad():
action = model.forward("bidding", torch.tensor(bidding_obs["z_batch"], device=device),
torch.tensor(bidding_obs["x_batch"], device=device), flags=flags)
if bid_limit <= 0:
wr = BidModel.predict_env(card_play_data[bidding_player])
if wr >= 0.7:
action = {"action": 1} # debug
bid_limit += 1
bid_obs_buffer.append({
"x_batch": bidding_obs["x_batch"][action["action"]],
"z_batch": bidding_obs["z_batch"][action["action"]],
"pid": bidding_player
})
if action["action"] == 1:
last_bid = bidding_player
bid_count += 1
if first_bid == -1:
first_bid = bidding_player
for p in range(3):
if p == bidding_player:
bid_info[r][p] = 1
else:
bid_info[r][p] = 0
else:
bid_info[r] = [0, 0, 0]
bidding_player = (bidding_player + 1) % 3
one_count = np.count_nonzero(bid_info == 1)
if one_count == 0:
continue
elif one_count > 1:
r = 3
bidding_player = first_bid
bidding_obs = _get_obs_for_bid(bidding_player, bid_info, card_play_data[bidding_player])
with torch.no_grad():
action = model.forward("bidding", torch.tensor(bidding_obs["z_batch"], device=device),
torch.tensor(bidding_obs["x_batch"], device=device), flags=flags)
bid_obs_buffer.append({
"x_batch": bidding_obs["x_batch"][action["action"]],
"z_batch": bidding_obs["z_batch"][action["action"]],
"pid": bidding_player
})
if action["action"] == 1:
last_bid = bidding_player
bid_count += 1
for p in range(3):
if p == bidding_player:
bid_info[r][p] = 1
else:
bid_info[r][p] = 0
break
card_play_data[last_bid].extend(landlord_cards)
card_play_data = {'landlord': card_play_data[last_bid],
'landlord_up': card_play_data[(last_bid - 1) % 3],
'landlord_down': card_play_data[(last_bid + 1) % 3],
'three_landlord_cards': landlord_cards,
}
card_play_data["landlord"].sort()
player_ids = {
'landlord': last_bid,
'landlord_up': (last_bid - 1) % 3,
'landlord_down': (last_bid + 1) % 3,
}
player_positions = {
last_bid: 'landlord',
(last_bid - 1) % 3: 'landlord_up',
(last_bid + 1) % 3: 'landlord_down'
}
for bid_obs in bid_obs_buffer:
bid_obs.update({"position": player_positions[bid_obs["pid"]]})
# Initialize the cards
self._env.card_play_init(card_play_data)
multiply_map = [
np.array([1, 0, 0]),
np.array([0, 1, 0]),
np.array([0, 0, 1])
]
for pos in ["landlord", "landlord_up", "landlord_down"]:
pid = player_ids[pos]
self._env.info_sets[pos].player_id = pid
self._env.info_sets[pos].bid_info = bid_info[:, [(pid - 1) % 3, pid, (pid + 1) % 3]]
self._env.bid_count = bid_count
# multiply_obs = _get_obs_for_multiply(pos, self._env.info_sets[pos].bid_info, card_play_data[pos],
# landlord_cards)
# action = model.forward(pos, torch.tensor(multiply_obs["z_batch"], device=device),
# torch.tensor(multiply_obs["x_batch"], device=device), flags=flags)
# multiply_obs_buffer.append({
# "x_batch": multiply_obs["x_batch"][action["action"]],
# "z_batch": multiply_obs["z_batch"][action["action"]],
# "position": pos
# })
action = {"action": 0}
self._env.info_sets[pos].multiply_info = multiply_map[action["action"]]
self._env.multiply_count[pos] = action["action"]
self.infoset = self._game_infoset
if force_bid:
self.force_bid += 1
if self.total_round % 100 == 0:
print("发牌情况: %i/%i %.1f%%" % (self.force_bid, self.total_round, self.force_bid / self.total_round * 100))
self.force_bid = 0
self.total_round = 0
return get_obs(self.infoset), {
"bid_obs_buffer": bid_obs_buffer,
"multiply_obs_buffer": multiply_obs_buffer
}
def step(self, action):
"""
Step function takes as input the action, which
is a list of integers, and output the next obervation,
reward, and a Boolean variable indicating whether the
current game is finished. It also returns an empty
dictionary that is reserved to pass useful information.
"""
assert action in self.infoset.legal_actions
self.players[self._acting_player_position].set_action(action)
self._env.step()
self.infoset = self._game_infoset
done = False
reward = 0.0
if self._game_over:
done = True
reward = {
"play": {
"landlord": self._get_reward("landlord"),
"landlord_up": self._get_reward("landlord_up"),
"landlord_down": self._get_reward("landlord_down")
},
"bid": {
"landlord": self._get_reward_bidding("landlord")*2,
"landlord_up": self._get_reward_bidding("landlord_up"),
"landlord_down": self._get_reward_bidding("landlord_down")
}
}
obs = None
else:
obs = get_obs(self.infoset)
return obs, reward, done, {}
def _get_reward(self, pos):
"""
This function is called in the end of each
game. It returns either 1/-1 for win/loss,
or ADP, i.e., every bomb will double the score.
"""
winner = self._game_winner
bomb_num = self._game_bomb_num
self_bomb_num = self._env.pos_bomb_num[pos]
if winner == 'landlord':
if self.objective == 'adp':
return (1.1 - self._env.step_count * 0.0033) * 1.3 ** (bomb_num +self._env.multiply_count[pos]) /8
elif self.objective == 'logadp':
return (1.0 - self._env.step_count * 0.0033) * 1.3**self_bomb_num * 2**self._env.multiply_count[pos] / 4
else:
return 1.0 - self._env.step_count * 0.0033
else:
if self.objective == 'adp':
return (-1.1 - self._env.step_count * 0.0033) * 1.3 ** (bomb_num +self._env.multiply_count[pos]) /8
elif self.objective == 'logadp':
return (-1.0 + self._env.step_count * 0.0033) * 1.3**self_bomb_num * 2**self._env.multiply_count[pos] / 4
else:
return -1.0 + self._env.step_count * 0.0033
def _get_reward_bidding(self, pos):
"""
This function is called in the end of each
game. It returns either 1/-1 for win/loss,
or ADP, i.e., every bomb will double the score.
"""
winner = self._game_winner
bomb_num = self._game_bomb_num
if winner == 'landlord':
return 1.0 * 2**(self._env.bid_count-1) / 8
else:
return -1.0 * 2**(self._env.bid_count-1) / 8
@property
def _game_infoset(self):
"""
Here, inforset is defined as all the information
in the current situation, incuding the hand cards
of all the players, all the historical moves, etc.
That is, it contains perferfect infomation. Later,
we will use functions to extract the observable
information from the views of the three players.
"""
return self._env.game_infoset
@property
def _game_bomb_num(self):
"""
The number of bombs played so far. This is used as
a feature of the neural network and is also used to
calculate ADP.
"""
return self._env.get_bomb_num()
@property
def _game_winner(self):
""" A string of landlord/peasants
"""
return self._env.get_winner()
@property
def _acting_player_position(self):
"""
The player that is active. It can be landlord,
landlod_down, or landlord_up.
"""
return self._env.acting_player_position
@property
def _game_over(self):
""" Returns a Boolean
"""
return self._env.game_over
class DummyAgent(object):
"""
Dummy agent is designed to easily interact with the
game engine. The agent will first be told what action
to perform. Then the environment will call this agent
to perform the actual action. This can help us to
isolate environment and agents towards a gym like
interface.
"""
def __init__(self, position):
self.position = position
self.action = None
def act(self, infoset):
"""
Simply return the action that is set previously.
"""
assert self.action in infoset.legal_actions
return self.action
def set_action(self, action):
"""
The environment uses this function to tell
the dummy agent what to do.
"""
self.action = action
def get_obs(infoset, use_general=True):
"""
This function obtains observations with imperfect information
from the infoset. It has three branches since we encode
different features for different positions.
This function will return dictionary named `obs`. It contains
several fields. These fields will be used to train the model.
One can play with those features to improve the performance.
`position` is a string that can be landlord/landlord_down/landlord_up
`x_batch` is a batch of features (excluding the hisorical moves).
It also encodes the action feature
`z_batch` is a batch of features with hisorical moves only.
`legal_actions` is the legal moves
`x_no_action`: the features (exluding the hitorical moves and
the action features). It does not have the batch dim.
`z`: same as z_batch but not a batch.
"""
if use_general:
if infoset.player_position not in ["landlord", "landlord_up", "landlord_down"]:
raise ValueError('')
return _get_obs_general(infoset, infoset.player_position)
else:
if infoset.player_position == 'landlord':
return _get_obs_landlord(infoset)
elif infoset.player_position == 'landlord_up':
return _get_obs_landlord_up(infoset)
elif infoset.player_position == 'landlord_down':
return _get_obs_landlord_down(infoset)
else:
raise ValueError('')
def _get_one_hot_array(num_left_cards, max_num_cards):
"""
A utility function to obtain one-hot endoding
"""
one_hot = np.zeros(max_num_cards)
if num_left_cards > 0:
one_hot[num_left_cards - 1] = 1
return one_hot
def _cards2array(list_cards):
"""
A utility function that transforms the actions, i.e.,
A list of integers into card matrix. Here we remove
the six entries that are always zero and flatten the
the representations.
"""
if len(list_cards) == 0:
return np.zeros(54, dtype=np.int8)
matrix = np.zeros([4, 13], dtype=np.int8)
jokers = np.zeros(2, dtype=np.int8)
counter = Counter(list_cards)
for card, num_times in counter.items():
if card < 20:
matrix[:, Card2Column[card]] = NumOnes2Array[num_times]
elif card == 20:
jokers[0] = 1
elif card == 30:
jokers[1] = 1
return np.concatenate((matrix.flatten('F'), jokers))
# def _action_seq_list2array(action_seq_list):
# """
# A utility function to encode the historical moves.
# We encode the historical 15 actions. If there is
# no 15 actions, we pad the features with 0. Since
# three moves is a round in DouDizhu, we concatenate
# the representations for each consecutive three moves.
# Finally, we obtain a 5x162 matrix, which will be fed
# into LSTM for encoding.
# """
# action_seq_array = np.zeros((len(action_seq_list), 54))
# for row, list_cards in enumerate(action_seq_list):
# action_seq_array[row, :] = _cards2array(list_cards)
# # action_seq_array = action_seq_array.reshape(5, 162)
# return action_seq_array
def _action_seq_list2array(action_seq_list, new_model=True):
"""
A utility function to encode the historical moves.
We encode the historical 15 actions. If there is
no 15 actions, we pad the features with 0. Since
three moves is a round in DouDizhu, we concatenate
the representations for each consecutive three moves.
Finally, we obtain a 5x162 matrix, which will be fed
into LSTM for encoding.
"""
if new_model:
position_map = {"landlord": 0, "landlord_up": 1, "landlord_down": 2}
action_seq_array = np.ones((len(action_seq_list), 54)) * -1 # Default Value -1 for not using area
for row, list_cards in enumerate(action_seq_list):
if list_cards != []:
action_seq_array[row, :54] = _cards2array(list_cards[1])
else:
action_seq_array = np.zeros((len(action_seq_list), 54))
for row, list_cards in enumerate(action_seq_list):
if list_cards != []:
action_seq_array[row, :] = _cards2array(list_cards[1])
action_seq_array = action_seq_array.reshape(5, 162)
return action_seq_array
# action_seq_array = np.zeros((len(action_seq_list), 54))
# for row, list_cards in enumerate(action_seq_list):
# if list_cards != []:
# action_seq_array[row, :] = _cards2array(list_cards[1])
# return action_seq_array
def _process_action_seq(sequence, length=15, new_model=True):
"""
A utility function encoding historical moves. We
encode 15 moves. If there is no 15 moves, we pad
with zeros.
"""
sequence = sequence[-length:].copy()
if new_model:
sequence = sequence[::-1]
if len(sequence) < length:
empty_sequence = [[] for _ in range(length - len(sequence))]
empty_sequence.extend(sequence)
sequence = empty_sequence
return sequence
def _get_one_hot_bomb(bomb_num):
"""
A utility function to encode the number of bombs
into one-hot representation.
"""
one_hot = np.zeros(15)
one_hot[bomb_num] = 1
return one_hot
def _get_obs_landlord(infoset):
"""
Obttain the landlord features. See Table 4 in
https://arxiv.org/pdf/2106.06135.pdf
"""
num_legal_actions = len(infoset.legal_actions)
my_handcards = _cards2array(infoset.player_hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_handcards = _cards2array(infoset.other_hand_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
last_action = _cards2array(infoset.last_move)
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j, action in enumerate(infoset.legal_actions):
my_action_batch[j, :] = _cards2array(action)
landlord_up_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord_up'], 17)
landlord_up_num_cards_left_batch = np.repeat(
landlord_up_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_down_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord_down'], 17)
landlord_down_num_cards_left_batch = np.repeat(
landlord_down_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_up_played_cards = _cards2array(
infoset.played_cards['landlord_up'])
landlord_up_played_cards_batch = np.repeat(
landlord_up_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
landlord_down_played_cards = _cards2array(
infoset.played_cards['landlord_down'])
landlord_down_played_cards_batch = np.repeat(
landlord_down_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(
infoset.bomb_num)
bomb_num_batch = np.repeat(
bomb_num[np.newaxis, :],
num_legal_actions, axis=0)
x_batch = np.hstack((my_handcards_batch,
other_handcards_batch,
last_action_batch,
landlord_up_played_cards_batch,
landlord_down_played_cards_batch,
landlord_up_num_cards_left_batch,
landlord_down_num_cards_left_batch,
bomb_num_batch,
my_action_batch))
x_no_action = np.hstack((my_handcards,
other_handcards,
last_action,
landlord_up_played_cards,
landlord_down_played_cards,
landlord_up_num_cards_left,
landlord_down_num_cards_left,
bomb_num))
z = _action_seq_list2array(_process_action_seq(
infoset.card_play_action_seq, 15, False), False)
z_batch = np.repeat(
z[np.newaxis, :, :],
num_legal_actions, axis=0)
obs = {
'position': 'landlord',
'x_batch': x_batch.astype(np.float32),
'z_batch': z_batch.astype(np.float32),
'legal_actions': infoset.legal_actions,
'x_no_action': x_no_action.astype(np.int8),
'z': z.astype(np.int8),
}
return obs
def _get_obs_landlord_up(infoset):
"""
Obttain the landlord_up features. See Table 5 in
https://arxiv.org/pdf/2106.06135.pdf
"""
num_legal_actions = len(infoset.legal_actions)
my_handcards = _cards2array(infoset.player_hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_handcards = _cards2array(infoset.other_hand_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
last_action = _cards2array(infoset.last_move)
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j, action in enumerate(infoset.legal_actions):
my_action_batch[j, :] = _cards2array(action)
last_landlord_action = _cards2array(
infoset.last_move_dict['landlord'])
last_landlord_action_batch = np.repeat(
last_landlord_action[np.newaxis, :],
num_legal_actions, axis=0)
landlord_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord'], 20)
landlord_num_cards_left_batch = np.repeat(
landlord_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_played_cards = _cards2array(
infoset.played_cards['landlord'])
landlord_played_cards_batch = np.repeat(
landlord_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
last_teammate_action = _cards2array(
infoset.last_move_dict['landlord_down'])
last_teammate_action_batch = np.repeat(
last_teammate_action[np.newaxis, :],
num_legal_actions, axis=0)
teammate_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord_down'], 17)
teammate_num_cards_left_batch = np.repeat(
teammate_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
teammate_played_cards = _cards2array(
infoset.played_cards['landlord_down'])
teammate_played_cards_batch = np.repeat(
teammate_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(
infoset.bomb_num)
bomb_num_batch = np.repeat(
bomb_num[np.newaxis, :],
num_legal_actions, axis=0)
x_batch = np.hstack((my_handcards_batch,
other_handcards_batch,
landlord_played_cards_batch,
teammate_played_cards_batch,
last_action_batch,
last_landlord_action_batch,
last_teammate_action_batch,
landlord_num_cards_left_batch,
teammate_num_cards_left_batch,
bomb_num_batch,
my_action_batch))
x_no_action = np.hstack((my_handcards,
other_handcards,
landlord_played_cards,
teammate_played_cards,
last_action,
last_landlord_action,
last_teammate_action,
landlord_num_cards_left,
teammate_num_cards_left,
bomb_num))
z = _action_seq_list2array(_process_action_seq(
infoset.card_play_action_seq, 15, False), False)
z_batch = np.repeat(
z[np.newaxis, :, :],
num_legal_actions, axis=0)
obs = {
'position': 'landlord_up',
'x_batch': x_batch.astype(np.float32),
'z_batch': z_batch.astype(np.float32),
'legal_actions': infoset.legal_actions,
'x_no_action': x_no_action.astype(np.int8),
'z': z.astype(np.int8),
}
return obs
def _get_obs_landlord_down(infoset):
"""
Obttain the landlord_down features. See Table 5 in
https://arxiv.org/pdf/2106.06135.pdf
"""
num_legal_actions = len(infoset.legal_actions)
my_handcards = _cards2array(infoset.player_hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_handcards = _cards2array(infoset.other_hand_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
last_action = _cards2array(infoset.last_move)
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j, action in enumerate(infoset.legal_actions):
my_action_batch[j, :] = _cards2array(action)
last_landlord_action = _cards2array(
infoset.last_move_dict['landlord'])
last_landlord_action_batch = np.repeat(
last_landlord_action[np.newaxis, :],
num_legal_actions, axis=0)
landlord_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord'], 20)
landlord_num_cards_left_batch = np.repeat(
landlord_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_played_cards = _cards2array(
infoset.played_cards['landlord'])
landlord_played_cards_batch = np.repeat(
landlord_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
last_teammate_action = _cards2array(
infoset.last_move_dict['landlord_up'])
last_teammate_action_batch = np.repeat(
last_teammate_action[np.newaxis, :],
num_legal_actions, axis=0)
teammate_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord_up'], 17)
teammate_num_cards_left_batch = np.repeat(
teammate_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
teammate_played_cards = _cards2array(
infoset.played_cards['landlord_up'])
teammate_played_cards_batch = np.repeat(
teammate_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
landlord_played_cards = _cards2array(
infoset.played_cards['landlord'])
landlord_played_cards_batch = np.repeat(
landlord_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(
infoset.bomb_num)
bomb_num_batch = np.repeat(
bomb_num[np.newaxis, :],
num_legal_actions, axis=0)
x_batch = np.hstack((my_handcards_batch,
other_handcards_batch,
landlord_played_cards_batch,
teammate_played_cards_batch,
last_action_batch,
last_landlord_action_batch,
last_teammate_action_batch,
landlord_num_cards_left_batch,
teammate_num_cards_left_batch,
bomb_num_batch,
my_action_batch))
x_no_action = np.hstack((my_handcards,
other_handcards,
landlord_played_cards,
teammate_played_cards,
last_action,
last_landlord_action,
last_teammate_action,
landlord_num_cards_left,
teammate_num_cards_left,
bomb_num))
z = _action_seq_list2array(_process_action_seq(
infoset.card_play_action_seq, 15, False), False)
z_batch = np.repeat(
z[np.newaxis, :, :],
num_legal_actions, axis=0)
obs = {
'position': 'landlord_down',
'x_batch': x_batch.astype(np.float32),
'z_batch': z_batch.astype(np.float32),
'legal_actions': infoset.legal_actions,
'x_no_action': x_no_action.astype(np.int8),
'z': z.astype(np.int8),
}
return obs
def _get_obs_landlord_withbid(infoset):
"""
Obttain the landlord features. See Table 4 in
https://arxiv.org/pdf/2106.06135.pdf
"""
num_legal_actions = len(infoset.legal_actions)
my_handcards = _cards2array(infoset.player_hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_handcards = _cards2array(infoset.other_hand_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
last_action = _cards2array(infoset.last_move)
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j, action in enumerate(infoset.legal_actions):
my_action_batch[j, :] = _cards2array(action)
landlord_up_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord_up'], 17)
landlord_up_num_cards_left_batch = np.repeat(
landlord_up_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_down_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord_down'], 17)
landlord_down_num_cards_left_batch = np.repeat(
landlord_down_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_up_played_cards = _cards2array(
infoset.played_cards['landlord_up'])
landlord_up_played_cards_batch = np.repeat(
landlord_up_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
landlord_down_played_cards = _cards2array(
infoset.played_cards['landlord_down'])
landlord_down_played_cards_batch = np.repeat(
landlord_down_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(
infoset.bomb_num)
bomb_num_batch = np.repeat(
bomb_num[np.newaxis, :],
num_legal_actions, axis=0)
x_batch = np.hstack((my_handcards_batch,
other_handcards_batch,
last_action_batch,
landlord_up_played_cards_batch,
landlord_down_played_cards_batch,
landlord_up_num_cards_left_batch,
landlord_down_num_cards_left_batch,
bomb_num_batch,
my_action_batch))
x_no_action = np.hstack((my_handcards,
other_handcards,
last_action,
landlord_up_played_cards,
landlord_down_played_cards,
landlord_up_num_cards_left,
landlord_down_num_cards_left,
bomb_num))
z = _action_seq_list2array(_process_action_seq(
infoset.card_play_action_seq, 15, False), False)
z_batch = np.repeat(
z[np.newaxis, :, :],
num_legal_actions, axis=0)
obs = {
'position': 'landlord',
'x_batch': x_batch.astype(np.float32),
'z_batch': z_batch.astype(np.float32),
'legal_actions': infoset.legal_actions,
'x_no_action': x_no_action.astype(np.int8),
'z': z.astype(np.int8),
}
return obs
def _get_obs_general1(infoset, position):
num_legal_actions = len(infoset.legal_actions)
my_handcards = _cards2array(infoset.player_hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_handcards = _cards2array(infoset.other_hand_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
position_map = {
"landlord": [1, 0, 0],
"landlord_up": [0, 1, 0],
"landlord_down": [0, 0, 1]
}
position_info = np.array(position_map[position])
position_info_batch = np.repeat(position_info[np.newaxis, :],
num_legal_actions, axis=0)
bid_info = np.array(infoset.bid_info).flatten()
bid_info_batch = np.repeat(bid_info[np.newaxis, :],
num_legal_actions, axis=0)
multiply_info = np.array(infoset.multiply_info)
multiply_info_batch = np.repeat(multiply_info[np.newaxis, :],
num_legal_actions, axis=0)
three_landlord_cards = _cards2array(infoset.three_landlord_cards)
three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis, :],
num_legal_actions, axis=0)
last_action = _cards2array(infoset.last_move)
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j, action in enumerate(infoset.legal_actions):
my_action_batch[j, :] = _cards2array(action)
landlord_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord'], 20)
landlord_num_cards_left_batch = np.repeat(
landlord_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_up_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord_up'], 17)
landlord_up_num_cards_left_batch = np.repeat(
landlord_up_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_down_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord_down'], 17)
landlord_down_num_cards_left_batch = np.repeat(
landlord_down_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
other_handcards_left_list = []
for pos in ["landlord", "landlord_up", "landlord_up"]:
if pos != position:
other_handcards_left_list.extend(infoset.all_handcards[pos])
landlord_played_cards = _cards2array(
infoset.played_cards['landlord'])
landlord_played_cards_batch = np.repeat(
landlord_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
landlord_up_played_cards = _cards2array(
infoset.played_cards['landlord_up'])
landlord_up_played_cards_batch = np.repeat(
landlord_up_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
landlord_down_played_cards = _cards2array(
infoset.played_cards['landlord_down'])
landlord_down_played_cards_batch = np.repeat(
landlord_down_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(
infoset.bomb_num)
bomb_num_batch = np.repeat(
bomb_num[np.newaxis, :],
num_legal_actions, axis=0)
x_batch = np.hstack((position_info_batch, # 3
my_handcards_batch, # 54
other_handcards_batch, # 54
three_landlord_cards_batch, # 54
last_action_batch, # 54
landlord_played_cards_batch, # 54
landlord_up_played_cards_batch, # 54
landlord_down_played_cards_batch, # 54
landlord_num_cards_left_batch, # 20
landlord_up_num_cards_left_batch, # 17
landlord_down_num_cards_left_batch, # 17
bomb_num_batch, # 15
bid_info_batch, # 12
multiply_info_batch, # 3
my_action_batch)) # 54
x_no_action = np.hstack((position_info,
my_handcards,
other_handcards,
three_landlord_cards,
last_action,
landlord_played_cards,
landlord_up_played_cards,
landlord_down_played_cards,
landlord_num_cards_left,
landlord_up_num_cards_left,
landlord_down_num_cards_left,
bomb_num,
bid_info,
multiply_info))
z = _action_seq_list2array(_process_action_seq(
infoset.card_play_action_seq, 32))
z_batch = np.repeat(
z[np.newaxis, :, :],
num_legal_actions, axis=0)
obs = {
'position': position,
'x_batch': x_batch.astype(np.float32),
'z_batch': z_batch.astype(np.float32),
'legal_actions': infoset.legal_actions,
'x_no_action': x_no_action.astype(np.int8),
'z': z.astype(np.int8),
}
return obs
def _get_obs_general(infoset, position):
num_legal_actions = len(infoset.legal_actions)
my_handcards = _cards2array(infoset.player_hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_handcards = _cards2array(infoset.other_hand_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
position_map = {
"landlord": [1, 0, 0],
"landlord_up": [0, 1, 0],
"landlord_down": [0, 0, 1]
}
position_info = np.array(position_map[position])
position_info_batch = np.repeat(position_info[np.newaxis, :],
num_legal_actions, axis=0)
bid_info = np.array(infoset.bid_info).flatten()
bid_info_batch = np.repeat(bid_info[np.newaxis, :],
num_legal_actions, axis=0)
multiply_info = np.array(infoset.multiply_info)
multiply_info_batch = np.repeat(multiply_info[np.newaxis, :],
num_legal_actions, axis=0)
three_landlord_cards = _cards2array(infoset.three_landlord_cards)
three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis, :],
num_legal_actions, axis=0)
last_action = _cards2array(infoset.last_move)
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j, action in enumerate(infoset.legal_actions):
my_action_batch[j, :] = _cards2array(action)
landlord_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord'], 20)
landlord_num_cards_left_batch = np.repeat(
landlord_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_up_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord_up'], 17)
landlord_up_num_cards_left_batch = np.repeat(
landlord_up_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_down_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord_down'], 17)
landlord_down_num_cards_left_batch = np.repeat(
landlord_down_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
other_handcards_left_list = []
for pos in ["landlord", "landlord_up", "landlord_up"]:
if pos != position:
other_handcards_left_list.extend(infoset.all_handcards[pos])
landlord_played_cards = _cards2array(
infoset.played_cards['landlord'])
landlord_played_cards_batch = np.repeat(
landlord_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
landlord_up_played_cards = _cards2array(
infoset.played_cards['landlord_up'])
landlord_up_played_cards_batch = np.repeat(
landlord_up_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
landlord_down_played_cards = _cards2array(
infoset.played_cards['landlord_down'])
landlord_down_played_cards_batch = np.repeat(
landlord_down_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(
infoset.bomb_num)
bomb_num_batch = np.repeat(
bomb_num[np.newaxis, :],
num_legal_actions, axis=0)
num_cards_left = np.hstack((
landlord_num_cards_left, # 20
landlord_up_num_cards_left, # 17
landlord_down_num_cards_left))
x_batch = np.hstack((
bid_info_batch, # 12
multiply_info_batch)) # 3
x_no_action = np.hstack((
bid_info,
multiply_info))
z =np.vstack((
num_cards_left,
my_handcards, # 54
other_handcards, # 54
three_landlord_cards, # 54
landlord_played_cards, # 54
landlord_up_played_cards, # 54
landlord_down_played_cards, # 54
_action_seq_list2array(_process_action_seq(infoset.card_play_action_seq, 32))
))
_z_batch = np.repeat(
z[np.newaxis, :, :],
num_legal_actions, axis=0)
my_action_batch = my_action_batch[:,np.newaxis,:]
z_batch = np.zeros([len(_z_batch),40,54],int)
for i in range(0,len(_z_batch)):
z_batch[i] = np.vstack((my_action_batch[i],_z_batch[i]))
obs = {
'position': position,
'x_batch': x_batch.astype(np.float32),
'z_batch': z_batch.astype(np.float32),
'legal_actions': infoset.legal_actions,
'x_no_action': x_no_action.astype(np.int8),
'z': z.astype(np.int8),
}
return obs
def gen_bid_legal_actions(player_id, bid_info):
self_bid_info = bid_info[:, [(player_id - 1) % 3, player_id, (player_id + 1) % 3]]
curr_round = -1
for r in range(4):
if -1 in self_bid_info[r]:
curr_round = r
break
bid_actions = []
if curr_round != -1:
self_bid_info[curr_round] = [0, 0, 0]
bid_actions.append(np.array(self_bid_info).flatten())
self_bid_info[curr_round] = [0, 1, 0]
bid_actions.append(np.array(self_bid_info).flatten())
return np.array(bid_actions)
def _get_obs_for_bid_legacy(player_id, bid_info, hand_cards):
all_cards = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12,
12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30]
num_legal_actions = 2
my_handcards = _cards2array(hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_cards = []
other_cards.extend(all_cards)
for card in hand_cards:
other_cards.remove(card)
other_handcards = _cards2array(other_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
position_info = np.array([0, 0, 0])
position_info_batch = np.repeat(position_info[np.newaxis, :],
num_legal_actions, axis=0)
bid_legal_actions = gen_bid_legal_actions(player_id, bid_info)
bid_info = bid_legal_actions[0]
bid_info_batch = bid_legal_actions
multiply_info = np.array([0, 0, 0])
multiply_info_batch = np.repeat(multiply_info[np.newaxis, :],
num_legal_actions, axis=0)
three_landlord_cards = _cards2array([])
three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis, :],
num_legal_actions, axis=0)
last_action = _cards2array([])
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j in range(2):
my_action_batch[j, :] = _cards2array([])
landlord_num_cards_left = _get_one_hot_array(0, 20)
landlord_num_cards_left_batch = np.repeat(
landlord_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_up_num_cards_left = _get_one_hot_array(0, 17)
landlord_up_num_cards_left_batch = np.repeat(
landlord_up_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_down_num_cards_left = _get_one_hot_array(0, 17)
landlord_down_num_cards_left_batch = np.repeat(
landlord_down_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_played_cards = _cards2array([])
landlord_played_cards_batch = np.repeat(
landlord_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
landlord_up_played_cards = _cards2array([])
landlord_up_played_cards_batch = np.repeat(
landlord_up_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
landlord_down_played_cards = _cards2array([])
landlord_down_played_cards_batch = np.repeat(
landlord_down_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(0)
bomb_num_batch = np.repeat(
bomb_num[np.newaxis, :],
num_legal_actions, axis=0)
x_batch = np.hstack((position_info_batch,
my_handcards_batch,
other_handcards_batch,
three_landlord_cards_batch,
last_action_batch,
landlord_played_cards_batch,
landlord_up_played_cards_batch,
landlord_down_played_cards_batch,
landlord_num_cards_left_batch,
landlord_up_num_cards_left_batch,
landlord_down_num_cards_left_batch,
bomb_num_batch,
bid_info_batch,
multiply_info_batch,
my_action_batch))
x_no_action = np.hstack((position_info,
my_handcards,
other_handcards,
three_landlord_cards,
last_action,
landlord_played_cards,
landlord_up_played_cards,
landlord_down_played_cards,
landlord_num_cards_left,
landlord_up_num_cards_left,
landlord_down_num_cards_left,
bomb_num))
z = _action_seq_list2array(_process_action_seq([], 32))
z_batch = np.repeat(
z[np.newaxis, :, :],
num_legal_actions, axis=0)
obs = {
'position': "",
'x_batch': x_batch.astype(np.float32),
'z_batch': z_batch.astype(np.float32),
'legal_actions': bid_legal_actions,
'x_no_action': x_no_action.astype(np.int8),
'z': z.astype(np.int8),
"bid_info_batch": bid_info_batch.astype(np.int8),
"multiply_info": multiply_info.astype(np.int8)
}
return obs
def _get_obs_for_bid(player_id, bid_info, hand_cards):
all_cards = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12,
12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30]
num_legal_actions = 2
my_handcards = _cards2array(hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
bid_legal_actions = gen_bid_legal_actions(player_id, bid_info)
bid_info = bid_legal_actions[0]
bid_info_batch = np.hstack([bid_legal_actions for _ in range(5)])
x_batch = np.hstack((my_handcards_batch,
bid_info_batch))
x_no_action = np.hstack((my_handcards))
obs = {
'position': "",
'x_batch': x_batch.astype(np.float32),
'z_batch': np.array([0,0]),
'legal_actions': bid_legal_actions,
'x_no_action': x_no_action.astype(np.int8),
"bid_info_batch": bid_info_batch.astype(np.int8)
}
return obs
def _get_obs_for_multiply(position, bid_info, hand_cards, landlord_cards):
all_cards = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12,
12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30]
num_legal_actions = 3
my_handcards = _cards2array(hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_cards = []
other_cards.extend(all_cards)
for card in hand_cards:
other_cards.remove(card)
other_handcards = _cards2array(other_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
position_map = {
"landlord": [1, 0, 0],
"landlord_up": [0, 1, 0],
"landlord_down": [0, 0, 1]
}
position_info = np.array(position_map[position])
position_info_batch = np.repeat(position_info[np.newaxis, :],
num_legal_actions, axis=0)
bid_info = np.array(bid_info).flatten()
bid_info_batch = np.repeat(bid_info[np.newaxis, :],
num_legal_actions, axis=0)
multiply_info = np.array([0, 0, 0])
multiply_info_batch = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
three_landlord_cards = _cards2array(landlord_cards)
three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis, :],
num_legal_actions, axis=0)
last_action = _cards2array([])
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j in range(num_legal_actions):
my_action_batch[j, :] = _cards2array([])
landlord_num_cards_left = _get_one_hot_array(0, 20)
landlord_num_cards_left_batch = np.repeat(
landlord_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_up_num_cards_left = _get_one_hot_array(0, 17)
landlord_up_num_cards_left_batch = np.repeat(
landlord_up_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_down_num_cards_left = _get_one_hot_array(0, 17)
landlord_down_num_cards_left_batch = np.repeat(
landlord_down_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_played_cards = _cards2array([])
landlord_played_cards_batch = np.repeat(
landlord_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
landlord_up_played_cards = _cards2array([])
landlord_up_played_cards_batch = np.repeat(
landlord_up_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
landlord_down_played_cards = _cards2array([])
landlord_down_played_cards_batch = np.repeat(
landlord_down_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(0)
bomb_num_batch = np.repeat(
bomb_num[np.newaxis, :],
num_legal_actions, axis=0)
x_batch = np.hstack((position_info_batch,
my_handcards_batch,
other_handcards_batch,
three_landlord_cards_batch,
last_action_batch,
landlord_played_cards_batch,
landlord_up_played_cards_batch,
landlord_down_played_cards_batch,
landlord_num_cards_left_batch,
landlord_up_num_cards_left_batch,
landlord_down_num_cards_left_batch,
bomb_num_batch,
bid_info_batch,
multiply_info_batch,
my_action_batch))
x_no_action = np.hstack((position_info,
my_handcards,
other_handcards,
three_landlord_cards,
last_action,
landlord_played_cards,
landlord_up_played_cards,
landlord_down_played_cards,
landlord_num_cards_left,
landlord_up_num_cards_left,
landlord_down_num_cards_left,
bomb_num))
z = _action_seq_list2array(_process_action_seq([], 32))
z_batch = np.repeat(
z[np.newaxis, :, :],
num_legal_actions, axis=0)
obs = {
'position': "",
'x_batch': x_batch.astype(np.float32),
'z_batch': z_batch.astype(np.float32),
'legal_actions': multiply_info_batch,
'x_no_action': x_no_action.astype(np.int8),
'z': z.astype(np.int8),
"bid_info": bid_info.astype(np.int8),
"multiply_info_batch": multiply_info.astype(np.int8)
}
return obs
|
5,721 | de1262da699a18266ad8673597391f625783a44d | # #writing a file
# fout = open('Session14/output.txt', 'w')
# line1 = "How many roads must a man walk down\n"
# fout.write(line1)
# line2 = "Before you call him a man?\n"
# fout.write(line2)
# #when you are done writing, you should close the file.
# fout.close()
# #if you dont close the file, it gets closed for you when the program dies
#exercise 1
# def sed(pattern, replace, source, dest):
# with open(source, 'r') as f_r:
# with open(dest, 'w') as f_w:
# for line in f_r:
# new_line = line.replace(pattern, replace)
# f_w.write(new_line)
# pattern = " man "
# replace = " woman "
# source = "Session14/output.txt"
# dest = "Session14/output2.txt"
# sed(pattern, replace, source, dest)
import os
cwd = os.getcwd()
#cwd stands for "current working directory"
# print(cwd)
#os.path provides other functions for working with filenames and paths
# os.path.abspath('output.txt')
# os.path.exists('output.txt')
# os.path.isdir('output.txt')
# os.path.isdir('/exercises')
# os.path.isfile('output.txt')
# os.listdir(cwd)
def walk(dirname):
"""Prints the names of all files in
dirname and its subdirectories.
dirname: string name of directory
"""
for name in os.listdir(dirname):
path = os.path.join(dirname, name)
if os.path.isfile(path):
print(path)
else:
walk(path)
#os.path.join takes a directory and a file name and joins them inot a complete path
def walk2(dirname):
"""Prints the names of all files in
dirname and its subdirectories.
dirname: string name of directory
"""
for root, dirs, files in os.walk(dirname):
for filename in files:
print(os.path.join(root, filename))
|
5,722 | a3382c3e6e04ccb87b1d55f072ce959b137f9fdd | # Generated by Django 2.2.7 on 2019-11-22 21:09
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Product', '0003_productimage'),
]
operations = [
migrations.RemoveField(
model_name='productimage',
name='comment',
),
]
|
5,723 | c0e349be45cd964e8e398baaed64eae792189dd1 | sentence = "Practice Problems to Drill List Comprehension in Your Head."
sentence = sentence.split()
sentence = [i.replace(".", "") for i in sentence]
[print(i) for i in sentence if len(i)<5] |
5,724 | b94392c9c6547415326d80ff0923cb8ba9251783 | # V0
class Codec:
def encode(self, strs):
s = ""
for i in strs:
s += str(len(i)) + "#" + i
return s
def decode(self, s):
i, str = 0, []
while i < len(s):
sharp = s.find("#", i)
l = int(s[i:sharp])
str.append(s[sharp + 1:sharp + l + 1])
i = sharp + l + 1
return str
# V1
# http://www.voidcn.com/article/p-hpbzcdjd-zo.html
class Codec:
def encode(self, strs):
"""Encodes a list of strings to a single string.
:type strs: List[str]
:rtype: str
"""
s = ""
for i in strs:
s += str(len(i)) + "#" + i
return s
def decode(self, s):
"""Decodes a single string to a list of strings.
:type s: str
:rtype: List[str]
"""
i, str = 0, []
while i < len(s):
sharp = s.find("#", i)
l = int(s[i:sharp])
str.append(s[sharp + 1:sharp + l + 1])
i = sharp + l + 1
return str
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.decode(codec.encode(strs))
### Test case : dev
# V1'
# https://medium.com/leetcode-%E6%BC%94%E7%AE%97%E6%B3%95%E6%95%99%E5%AD%B8/024-leetcode-271-%E6%BC%94%E7%AE%97%E6%B3%95-encode-and-decode-strings-%E5%AD%97%E4%B8%B2%E5%8A%A0%E8%A7%A3%E5%AF%86-722cafd6238
# IDEA :
# ABC -> 3/ABC
# ABCD -> 4/ABCD
# A B C D ->1/A1/B1/C1/D
#
# JAVA
# // Encodes a list of strings to a single string.
# public String encode(List<String> strs) {
# StringBuilder sb = new StringBuilder();
# for(String s : strs) {
# sb.append(s.length()).append('/').append(s);
# }
# return sb.toString();
# }
#
# // Decodes a single string to a list of strings.
# public List<String> decode(String s) {
# List<String> ret = new ArrayList<String>();
# int i = 0;
# while(i < s.length()) {
# int slash = s.indexOf('/', i);// return the 1st '/' index from i
# int size = Integer.valueOf(s.substring(i, slash)); // the length of encode
# ret.add(s.substring(slash + 1, slash + size + 1)); // cut it off
# i = slash + size + 1;// redefine the i index
# }
# return ret;
# }
# V2
# Time: O(n)
# Space: O(1)
class Codec(object):
def encode(self, strs):
"""Encodes a list of strings to a single string.
:type strs: List[str]
:rtype: str
"""
encoded_str = ""
for s in strs:
encoded_str += "%0*x" % (8, len(s)) + s
return encoded_str
def decode(self, s):
"""Decodes a single string to a list of strings.
:type s: str
:rtype: List[str]
"""
i = 0
strs = []
while i < len(s):
l = int(s[i:i+8], 16)
strs.append(s[i+8:i+8+l])
i += 8+l
return strs |
5,725 | 2e075c3ee6b245b1ffd0bb8c4e205199f794da76 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'ghou'
from datetime import datetime
bGameValid = True
dAskUserInfo = {}
gAccMode = 0
#============UserSyncResource2.py===================
#============前端资源热更白名单测试功能================
#============去读配置表config.xml==================
#============大于配置标号的热更内容只有白名单可见=======
gWhiteTestResourceVersion = None
#============评审版本热更过滤========================
#============去读配置表config.xml==================
#============等于配置标号的热更内容都不可见=============
gInvalidClientVersion = None # 非法的客户端版本号 |
5,726 | 5bbaffb35a89558b5cf0b4364f78d68ff2d69a01 | # from django.urls import path,include
from django.conf.urls import include, url
from . import views
urlpatterns = [
url('buy',views.BuyPage,name='BuyPage'),
url('sell',views.SellPage,name='SellPage'),
url('',views.TradePage,name='TradePage'),
]
|
5,727 | 445e91edbeb88a3e300761342b28369fd9833fbb | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
from __future__ import absolute_import
from __future__ import unicode_literals
import unittest
# import Python so we can mock the parts we need to here.
import IPython.core.display
import IPython.core.magic
import datalab.utils.commands
def noop_decorator(func):
return func
IPython.core.magic.register_line_cell_magic = noop_decorator
IPython.core.magic.register_line_magic = noop_decorator
IPython.core.magic.register_cell_magic = noop_decorator
IPython.core.display.HTML = lambda x: x
IPython.core.display.JSON = lambda x: x
class TestCases(unittest.TestCase):
def test_chart_cell(self):
t = [{'country': 'US', 'quantity': 100}, {'country': 'ZA', 'quantity': 50}]
IPython.get_ipython().user_ns = {}
chart = datalab.utils.commands._chart._chart_cell({'chart': 'geo', 'data': t, 'fields': None},
'')
self.assertTrue(chart.find('charts.render(') > 0)
self.assertTrue(chart.find('\'geo\'') > 0)
self.assertTrue(chart.find('"fields": "*"') > 0)
self.assertTrue(chart.find('{"c": [{"v": "US"}, {"v": 100}]}') > 0 or
chart.find('{"c": [{"v": 100}, {"v": "US"}]}') > 0)
self.assertTrue(chart.find('{"c": [{"v": "ZA"}, {"v": 50}]}') > 0 or
chart.find('{"c": [{"v": 50}, {"v": "ZA"}]}') > 0)
def test_chart_magic(self):
# TODO(gram): complete this test
pass
|
5,728 | f6b38698dbed6c1a48faa86183b601f855a7f737 | {"filter":false,"title":"settings.py","tooltip":"/mysite/settings.py","undoManager":{"mark":53,"position":53,"stack":[[{"start":{"row":107,"column":13},"end":{"row":107,"column":16},"action":"remove","lines":["UTC"],"id":2},{"start":{"row":107,"column":13},"end":{"row":107,"column":23},"action":"insert","lines":["Asia/Tokyo"]}],[{"start":{"row":105,"column":17},"end":{"row":105,"column":22},"action":"remove","lines":["en-us"],"id":3},{"start":{"row":105,"column":17},"end":{"row":105,"column":18},"action":"insert","lines":["j"]},{"start":{"row":105,"column":18},"end":{"row":105,"column":19},"action":"insert","lines":["a"]}],[{"start":{"row":120,"column":0},"end":{"row":120,"column":46},"action":"insert","lines":["STATIC_ROOT = os.path.join(BASE_DIR, 'static')"],"id":4}],[{"start":{"row":27,"column":17},"end":{"row":27,"column":51},"action":"insert","lines":["'127.0.0.1', '.pythonanywhere.com'"],"id":5}],[{"start":{"row":27,"column":51},"end":{"row":27,"column":52},"action":"insert","lines":[","],"id":6}],[{"start":{"row":27,"column":52},"end":{"row":27,"column":53},"action":"insert","lines":[" "],"id":7}],[{"start":{"row":27,"column":53},"end":{"row":27,"column":55},"action":"insert","lines":["''"],"id":8}],[{"start":{"row":27,"column":54},"end":{"row":27,"column":55},"action":"insert","lines":["."],"id":9},{"start":{"row":27,"column":55},"end":{"row":27,"column":56},"action":"insert","lines":["a"]},{"start":{"row":27,"column":56},"end":{"row":27,"column":57},"action":"insert","lines":["m"]},{"start":{"row":27,"column":57},"end":{"row":27,"column":58},"action":"insert","lines":["a"]},{"start":{"row":27,"column":58},"end":{"row":27,"column":59},"action":"insert","lines":["z"]},{"start":{"row":27,"column":59},"end":{"row":27,"column":60},"action":"insert","lines":["o"]}],[{"start":{"row":27,"column":60},"end":{"row":27,"column":61},"action":"insert","lines":["n"],"id":10}],[{"start":{"row":27,"column":61},"end":{"row":27,"column":62},"action":"insert","lines":["a"],"id":11},{"start":{"row":27,"column":62},"end":{"row":27,"column":63},"action":"insert","lines":["w"]},{"start":{"row":27,"column":63},"end":{"row":27,"column":64},"action":"insert","lines":["s"]},{"start":{"row":27,"column":64},"end":{"row":27,"column":65},"action":"insert","lines":["."]},{"start":{"row":27,"column":65},"end":{"row":27,"column":66},"action":"insert","lines":["c"]},{"start":{"row":27,"column":66},"end":{"row":27,"column":67},"action":"insert","lines":["o"]}],[{"start":{"row":27,"column":67},"end":{"row":27,"column":68},"action":"insert","lines":["m"],"id":12}],[{"start":{"row":27,"column":30},"end":{"row":27,"column":53},"action":"remove","lines":["'.pythonanywhere.com', "],"id":13}],[{"start":{"row":27,"column":46},"end":{"row":27,"column":47},"action":"insert","lines":[","],"id":14}],[{"start":{"row":27,"column":47},"end":{"row":27,"column":48},"action":"insert","lines":[" "],"id":15}],[{"start":{"row":27,"column":48},"end":{"row":27,"column":69},"action":"insert","lines":["'.pythonanywhere.com'"],"id":16}],[{"start":{"row":39,"column":0},"end":{"row":40,"column":0},"action":"insert","lines":["",""],"id":17}],[{"start":{"row":39,"column":0},"end":{"row":39,"column":4},"action":"insert","lines":[" "],"id":18}],[{"start":{"row":39,"column":4},"end":{"row":39,"column":6},"action":"insert","lines":["''"],"id":19}],[{"start":{"row":39,"column":5},"end":{"row":39,"column":6},"action":"insert","lines":["b"],"id":20},{"start":{"row":39,"column":6},"end":{"row":39,"column":7},"action":"insert","lines":["l"]},{"start":{"row":39,"column":7},"end":{"row":39,"column":8},"action":"insert","lines":["o"]},{"start":{"row":39,"column":8},"end":{"row":39,"column":9},"action":"insert","lines":["g"]},{"start":{"row":39,"column":9},"end":{"row":39,"column":10},"action":"insert","lines":["."]},{"start":{"row":39,"column":10},"end":{"row":39,"column":11},"action":"insert","lines":["a"]},{"start":{"row":39,"column":11},"end":{"row":39,"column":12},"action":"insert","lines":["p"]}],[{"start":{"row":39,"column":12},"end":{"row":39,"column":13},"action":"insert","lines":["p"],"id":21},{"start":{"row":39,"column":13},"end":{"row":39,"column":14},"action":"insert","lines":["s"]},{"start":{"row":39,"column":14},"end":{"row":39,"column":15},"action":"insert","lines":["."]}],[{"start":{"row":39,"column":15},"end":{"row":39,"column":16},"action":"insert","lines":["B"],"id":22},{"start":{"row":39,"column":16},"end":{"row":39,"column":17},"action":"insert","lines":["l"]},{"start":{"row":39,"column":17},"end":{"row":39,"column":18},"action":"insert","lines":["o"]},{"start":{"row":39,"column":18},"end":{"row":39,"column":19},"action":"insert","lines":["g"]}],[{"start":{"row":39,"column":19},"end":{"row":39,"column":20},"action":"insert","lines":["C"],"id":23},{"start":{"row":39,"column":20},"end":{"row":39,"column":21},"action":"insert","lines":["o"]},{"start":{"row":39,"column":21},"end":{"row":39,"column":22},"action":"insert","lines":["n"]},{"start":{"row":39,"column":22},"end":{"row":39,"column":23},"action":"insert","lines":["f"]},{"start":{"row":39,"column":23},"end":{"row":39,"column":24},"action":"insert","lines":["i"]},{"start":{"row":39,"column":24},"end":{"row":39,"column":25},"action":"insert","lines":["g"]}],[{"start":{"row":39,"column":26},"end":{"row":39,"column":27},"action":"insert","lines":[","],"id":24}],[{"start":{"row":27,"column":47},"end":{"row":27,"column":69},"action":"remove","lines":[" '.pythonanywhere.com'"],"id":25}],[{"start":{"row":121,"column":14},"end":{"row":121,"column":46},"action":"remove","lines":["os.path.join(BASE_DIR, 'static')"],"id":26},{"start":{"row":121,"column":14},"end":{"row":121,"column":15},"action":"insert","lines":["'"]}],[{"start":{"row":121,"column":15},"end":{"row":121,"column":17},"action":"insert","lines":["''"],"id":27}],[{"start":{"row":121,"column":15},"end":{"row":121,"column":17},"action":"remove","lines":["''"],"id":28}],[{"start":{"row":121,"column":14},"end":{"row":121,"column":15},"action":"remove","lines":["'"],"id":29}],[{"start":{"row":121,"column":14},"end":{"row":121,"column":16},"action":"insert","lines":["''"],"id":30}],[{"start":{"row":121,"column":15},"end":{"row":121,"column":16},"action":"remove","lines":["'"],"id":31},{"start":{"row":121,"column":14},"end":{"row":121,"column":15},"action":"remove","lines":["'"]}],[{"start":{"row":121,"column":14},"end":{"row":121,"column":47},"action":"insert","lines":["os.path.join(BASE_DIR, \"static/\")"],"id":32}],[{"start":{"row":27,"column":30},"end":{"row":27,"column":32},"action":"insert","lines":["''"],"id":33}],[{"start":{"row":27,"column":31},"end":{"row":27,"column":33},"action":"remove","lines":["''"],"id":34},{"start":{"row":27,"column":30},"end":{"row":27,"column":31},"action":"remove","lines":["'"]}],[{"start":{"row":27,"column":30},"end":{"row":27,"column":32},"action":"insert","lines":["''"],"id":35}],[{"start":{"row":27,"column":31},"end":{"row":27,"column":32},"action":"insert","lines":[","],"id":36}],[{"start":{"row":27,"column":31},"end":{"row":27,"column":32},"action":"remove","lines":[","],"id":37}],[{"start":{"row":27,"column":31},"end":{"row":27,"column":32},"action":"remove","lines":["'"],"id":38},{"start":{"row":27,"column":30},"end":{"row":27,"column":31},"action":"remove","lines":["'"]}],[{"start":{"row":27,"column":30},"end":{"row":27,"column":32},"action":"insert","lines":["''"],"id":39}],[{"start":{"row":27,"column":30},"end":{"row":27,"column":32},"action":"remove","lines":["''"],"id":40}],[{"start":{"row":27,"column":30},"end":{"row":27,"column":32},"action":"insert","lines":["''"],"id":41}],[{"start":{"row":27,"column":30},"end":{"row":27,"column":32},"action":"insert","lines":["''"],"id":42}],[{"start":{"row":27,"column":32},"end":{"row":27,"column":34},"action":"remove","lines":["''"],"id":43}],[{"start":{"row":27,"column":32},"end":{"row":27,"column":33},"action":"insert","lines":[" "],"id":44}],[{"start":{"row":27,"column":33},"end":{"row":27,"column":35},"action":"insert","lines":["''"],"id":45}],[{"start":{"row":27,"column":33},"end":{"row":27,"column":35},"action":"remove","lines":["''"],"id":46}],[{"start":{"row":27,"column":30},"end":{"row":27,"column":33},"action":"remove","lines":["'' "],"id":47}],[{"start":{"row":27,"column":30},"end":{"row":27,"column":32},"action":"insert","lines":["''"],"id":48}],[{"start":{"row":27,"column":32},"end":{"row":27,"column":33},"action":"insert","lines":[" "],"id":49}],[{"start":{"row":27,"column":31},"end":{"row":27,"column":32},"action":"remove","lines":["'"],"id":50}],[{"start":{"row":27,"column":30},"end":{"row":27,"column":51},"action":"insert","lines":["'.pythonanywhere.com'"],"id":51}],[{"start":{"row":27,"column":51},"end":{"row":27,"column":52},"action":"insert","lines":[","],"id":52}],[{"start":{"row":27,"column":52},"end":{"row":27,"column":53},"action":"insert","lines":[" "],"id":53}],[{"start":{"row":27,"column":54},"end":{"row":27,"column":55},"action":"remove","lines":[" "],"id":54}],[{"start":{"row":27,"column":69},"end":{"row":27,"column":70},"action":"remove","lines":[","],"id":55}]]},"ace":{"folds":[],"scrolltop":1421.5,"scrollleft":0,"selection":{"start":{"row":121,"column":47},"end":{"row":121,"column":47},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1624160953179,"hash":"4d0060b102ea75450e9a622253c7edd2a29aa301"} |
5,729 | 0ddac0aac5bd001504ed37d31b74c6442304e350 | # coding: utf-8
"""
Adobe Experience Manager OSGI config (AEM) API
Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API # noqa: E501
OpenAPI spec version: 1.0.0-pre.0
Contact: opensource@shinesolutions.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'token_expiration': 'ConfigNodePropertyString',
'token_length': 'ConfigNodePropertyString',
'token_refresh': 'ConfigNodePropertyBoolean',
'token_cleanup_threshold': 'ConfigNodePropertyInteger',
'password_hash_algorithm': 'ConfigNodePropertyString',
'password_hash_iterations': 'ConfigNodePropertyInteger',
'password_salt_size': 'ConfigNodePropertyInteger'
}
attribute_map = {
'token_expiration': 'tokenExpiration',
'token_length': 'tokenLength',
'token_refresh': 'tokenRefresh',
'token_cleanup_threshold': 'tokenCleanupThreshold',
'password_hash_algorithm': 'passwordHashAlgorithm',
'password_hash_iterations': 'passwordHashIterations',
'password_salt_size': 'passwordSaltSize'
}
def __init__(self, token_expiration=None, token_length=None, token_refresh=None, token_cleanup_threshold=None, password_hash_algorithm=None, password_hash_iterations=None, password_salt_size=None): # noqa: E501
"""OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties - a model defined in OpenAPI""" # noqa: E501
self._token_expiration = None
self._token_length = None
self._token_refresh = None
self._token_cleanup_threshold = None
self._password_hash_algorithm = None
self._password_hash_iterations = None
self._password_salt_size = None
self.discriminator = None
if token_expiration is not None:
self.token_expiration = token_expiration
if token_length is not None:
self.token_length = token_length
if token_refresh is not None:
self.token_refresh = token_refresh
if token_cleanup_threshold is not None:
self.token_cleanup_threshold = token_cleanup_threshold
if password_hash_algorithm is not None:
self.password_hash_algorithm = password_hash_algorithm
if password_hash_iterations is not None:
self.password_hash_iterations = password_hash_iterations
if password_salt_size is not None:
self.password_salt_size = password_salt_size
@property
def token_expiration(self):
"""Gets the token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:return: The token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:rtype: ConfigNodePropertyString
"""
return self._token_expiration
@token_expiration.setter
def token_expiration(self, token_expiration):
"""Sets the token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.
:param token_expiration: The token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:type: ConfigNodePropertyString
"""
self._token_expiration = token_expiration
@property
def token_length(self):
"""Gets the token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:return: The token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:rtype: ConfigNodePropertyString
"""
return self._token_length
@token_length.setter
def token_length(self, token_length):
"""Sets the token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.
:param token_length: The token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:type: ConfigNodePropertyString
"""
self._token_length = token_length
@property
def token_refresh(self):
"""Gets the token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:return: The token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:rtype: ConfigNodePropertyBoolean
"""
return self._token_refresh
@token_refresh.setter
def token_refresh(self, token_refresh):
"""Sets the token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.
:param token_refresh: The token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:type: ConfigNodePropertyBoolean
"""
self._token_refresh = token_refresh
@property
def token_cleanup_threshold(self):
"""Gets the token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:return: The token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:rtype: ConfigNodePropertyInteger
"""
return self._token_cleanup_threshold
@token_cleanup_threshold.setter
def token_cleanup_threshold(self, token_cleanup_threshold):
"""Sets the token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.
:param token_cleanup_threshold: The token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:type: ConfigNodePropertyInteger
"""
self._token_cleanup_threshold = token_cleanup_threshold
@property
def password_hash_algorithm(self):
"""Gets the password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:return: The password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:rtype: ConfigNodePropertyString
"""
return self._password_hash_algorithm
@password_hash_algorithm.setter
def password_hash_algorithm(self, password_hash_algorithm):
"""Sets the password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.
:param password_hash_algorithm: The password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:type: ConfigNodePropertyString
"""
self._password_hash_algorithm = password_hash_algorithm
@property
def password_hash_iterations(self):
"""Gets the password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:return: The password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:rtype: ConfigNodePropertyInteger
"""
return self._password_hash_iterations
@password_hash_iterations.setter
def password_hash_iterations(self, password_hash_iterations):
"""Sets the password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.
:param password_hash_iterations: The password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:type: ConfigNodePropertyInteger
"""
self._password_hash_iterations = password_hash_iterations
@property
def password_salt_size(self):
"""Gets the password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:return: The password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:rtype: ConfigNodePropertyInteger
"""
return self._password_salt_size
@password_salt_size.setter
def password_salt_size(self, password_salt_size):
"""Sets the password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.
:param password_salt_size: The password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:type: ConfigNodePropertyInteger
"""
self._password_salt_size = password_salt_size
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
5,730 | 58c7e81d1b3cf1cff7d91bf40641e5a03b9f19ac | import argparse
import json
import os
import warnings
import numpy as np
import pandas as pd
import src.data_loaders as module_data
import torch
from sklearn.metrics import roc_auc_score
from src.data_loaders import JigsawDataBias, JigsawDataMultilingual, JigsawDataOriginal
from torch.utils.data import DataLoader
from tqdm import tqdm
from train import ToxicClassifier
def test_classifier(config, dataset, checkpoint_path, device="cuda:0"):
model = ToxicClassifier(config)
checkpoint = torch.load(checkpoint_path, map_location=device)
model.load_state_dict(checkpoint["state_dict"])
model.eval()
model.to(device)
def get_instance(module, name, config, *args, **kwargs):
return getattr(module, config[name]["type"])(*args, **config[name]["args"], **kwargs)
config["dataset"]["args"]["test_csv_file"] = dataset
test_dataset = get_instance(module_data, "dataset", config, train=False)
test_data_loader = DataLoader(
test_dataset,
batch_size=int(config["batch_size"]),
num_workers=20,
shuffle=False,
)
scores = []
targets = []
ids = []
for *items, meta in tqdm(test_data_loader):
if "multi_target" in meta:
targets += meta["multi_target"]
else:
targets += meta["target"]
ids += meta["text_id"]
with torch.no_grad():
out = model.forward(*items)
# TODO: save embeddings
sm = torch.sigmoid(out).cpu().detach().numpy()
scores.extend(sm)
binary_scores = [s >= 0.5 for s in scores]
binary_scores = np.stack(binary_scores)
scores = np.stack(scores)
targets = np.stack(targets)
auc_scores = []
for class_idx in range(scores.shape[1]):
mask = targets[:, class_idx] != -1
target_binary = targets[mask, class_idx]
class_scores = scores[mask, class_idx]
try:
auc = roc_auc_score(target_binary, class_scores)
auc_scores.append(auc)
except Exception:
warnings.warn(
"Only one class present in y_true. ROC AUC score is not defined in that case. Set to nan for now."
)
auc_scores.append(np.nan)
mean_auc = np.mean(auc_scores)
results = {
"scores": scores.tolist(),
"targets": targets.tolist(),
"auc_scores": auc_scores,
"mean_auc": mean_auc,
"ids": [i.tolist() for i in ids],
}
return results
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="PyTorch Template")
parser.add_argument(
"-c",
"--config",
default=None,
type=str,
help="config file path (default: None)",
)
parser.add_argument(
"-ckpt",
"--checkpoint",
type=str,
help="path to a saved checkpoint",
)
parser.add_argument(
"-d",
"--device",
default="cuda:0",
type=str,
help="device name e.g., 'cpu' or 'cuda' (default cuda:0)",
)
parser.add_argument(
"-t",
"--test_csv",
default=None,
type=str,
help="path to test dataset",
)
args = parser.parse_args()
config = json.load(open(args.config))
if args.device is not None:
config["gpus"] = args.device
results = test_classifier(config, args.test_csv, args.checkpoint, args.device)
test_set_name = args.test_csv.split("/")[-1:][0]
with open(args.checkpoint[:-4] + f"results_{test_set_name}.json", "w") as f:
json.dump(results, f)
|
5,731 | 407f549cf68660c8f8535ae0bed373e2f54af877 | from odoo import models, fields, api, _
import odoo.addons.decimal_precision as dp
class netdespatch_config(models.Model):
_name = 'netdespatch.config'
name = fields.Char(String='Name')
url = fields.Char(string='URL')
# Royal Mail
rm_enable = fields.Boolean('Enable Royal Mail')
domestic_name = fields.Char(string='Username', help="Netdespatch Username")
domestic_pwd = fields.Char(string='Password', size=256, copy=False, help="Netdespatch Password")
domestic_accountid = fields.Char(string='Account ID', size=256, help="Netdespatch Account Number")
category = fields.Selection([('is_domestic', 'Is Domestic'),
('is_international', 'Is International')
], string='Category', default='is_domestic')
in_name = fields.Char(string='Username', help="Netdespatch Username")
in_pwd = fields.Char(string='Password', size=256, copy=False, help="Netdespatch Password")
in_accountid = fields.Char(string='Account ID', size=256, help="Netdespatch Account Number")
#Apc
apc_enable = fields.Boolean('Enable APC')
apc_name = fields.Char(string='Username', help="Netdespatch Username")
apc_pwd = fields.Char(string='Password', size=256, copy=False, help="Netdespatch Password")
apc_accountid = fields.Char(string='Account ID', size=256, help="Netdespatch Account Number")
#ukMail
ukmail_enable = fields.Boolean('Enable UKmail')
ukmail_name = fields.Char(string='Username', help="Netdespatch Username")
ukmail_pwd = fields.Char(string='Password', size=256, copy=False, help="Netdespatch Password")
ukmail_accountid = fields.Char(string='Account ID', size=256, help="Netdespatch Account Number")
#YODEL
yodel_enable = fields.Boolean('Enable Yodel')
yodel_name = fields.Char(string='Username', help="Netdespatch Username")
yodel_pwd = fields.Char(string='Password', size=256, copy=False, help="Netdespatch Password")
yodel_accountid = fields.Char(string='Account ID', size=256, help="Netdespatch Account Number")
|
5,732 | 6d92b944ab8503d3635626c0c23021fc2b40dce3 | import random
def main():
#print('You rolled a die')
return random.randint(1,6)
if __name__== "__main__":
main()
|
5,733 | fbac2d66f4d69a52c3df5d665b622659e4d8dacd | """
All rights reserved to cnvrg.io
http://www.cnvrg.io
cnvrg.io - Projects Example
last update: Nov 07, 2019.
-------------
rnn.py
==============================================================================
"""
import argparse
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from cnvrg import Experiment
from sklearn.model_selection import train_test_split
def cast_types(args):
# epochs.
args.epochs = int(args.epochs)
# batch_size.
args.batch_size = int(args.batch_size)
# input_shape.
args.input_shape = args.input_shape.split(' ')
for num in args.input_shape:
if num != '':
num = int(num)
args.input_shape = tuple(args.input_shape)
# ----- #
return args
def init_model(input_shape):
model = keras.Sequential()
model.add(keras.layers.Embedding(input_shape[0], 16))
model.add(keras.layers.GlobalAveragePooling1D())
model.add(keras.layers.Dense(16, activation=tf.nn.relu))
model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
def main(args):
args = cast_types(args)
df = pd.read_csv(args.data)
X, y = df.iloc[:, :-1], df.iloc[:, -1]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
model = init_model(X.shape) # <--- Doesn't work with the shape.
train_metrics = model.fit(X_train, y_train, epochs=args.epochs, batch_size=args.batch_size, validation_split=0.2)
test_metrics = model.evaluate(X_test, y_test)
# train_loss = list(np.round(train_metrics.history['loss'], 3))
# train_acc = list(np.round(train_metrics.history['accuracy'], 3))
# val_loss = list(np.round(train_metrics.history['val_loss'], 3))
# val_acc = list(np.round(train_metrics.history['val_accuracy'], 3))
test_loss = float(test_metrics[0])
test_acc = float(test_metrics[1])
exp = Experiment()
exp.log_param("test_loss", test_loss)
exp.log_param("test_acc", test_acc)
model.save("model.h5")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="""RNN Classifier""")
parser.add_argument('--data', action='store', dest='data', required=True, help="""String. path to csv file: The data set for the classifier. Assumes the last column includes the labels. """)
parser.add_argument('--project_dir', action='store', dest='project_dir', help="""String.""")
parser.add_argument('--output_dir', action='store', dest='output_dir', help="""String.""")
parser.add_argument('--input_shape', action='store', dest='input_shape', default="10000", help="""The shape of the input. Look like: a b c.""")
parser.add_argument('--epochs', action='store', default="10", dest='epochs', help="Number of epochs when training.")
parser.add_argument('--batch_size', action='store', default="64", dest='batch_size', help="batch size when training.")
args = parser.parse_args()
main(args)
|
5,734 | 892dd4259950c66669b21c5dbc7b738ddb5aa586 |
___ findmissingnumberusingxor(myarray
print "Given Array:", myarray
#print "Len of the Array:", len(myarray)
arraylen _ l..(myarray)
xorval _ 0
#print "In the First loop"
___ i __ r..(1, arraylen + 2
#print xorval,"^",i,"is",xorval^i
xorval _ xorval ^ i
#print "In the Next loop"
___ i __ r..(0, arraylen
#print xorval,"^",myarray[i],"is",xorval^myarray[i]
xorval _ xorval ^ myarray[i]
print "Missing number is ", xorval
myarray _ [12, 8, 2, 1, 4, 6, 5, 7, 9, 3, 11]
findmissingnumberusingxor(myarray)
|
5,735 | 9c9005acb40e4b89ca215345361e21f08f984847 | def h1_wrap(func):
def func_wrapper(param):
return "<h1>"+func(param) + "</h1>"
return func_wrapper
@h1_wrap
def say_hi(name):
return "Hello, " + name.capitalize()
print(say_hi("Stephan"))
|
5,736 | 66c2d73c100f7fc802e66f2762c92664e4b93fcd | from sklearn.model_selection import train_test_split
from azureml.core import Run
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
import argparse
import os
import joblib
import numpy as np
# Get the experiment run context
run = Run.get_context()
# Get arguments
parser = argparse.ArgumentParser()
parser.add_argument('--in_n_estimator', type=int, default=8)
parser.add_argument('--in_criterion', type=str, default="gini")
parser.add_argument('--in_max_depth', type=int, default=2)
args = parser.parse_args()
in_n_estimators = args.in_n_estimator
in_criterion = args.in_criterion
in_max_depth = args.in_max_depth
# read prepared data
df = pd.read_csv("prepared_data.csv")
columns = df.iloc[1:2, :-1].columns
x = df[columns]
y = df.iloc[:, -1:]
# split data into train and test
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=2)
# “gini”, “entropy”
model = RandomForestClassifier(n_estimators=in_n_estimators, criterion=in_criterion, max_depth=in_max_depth)
model.fit(x_train, y_train)
accuracy = model.score(x_test, y_test)
run.log("Accuracy", float(accuracy))
os.makedirs('outputs', exist_ok=True)
joblib.dump(model, 'outputs/model_forest.joblib')
|
5,737 | 21b9844fce10d16a14050a782ce7e15e3f6fb657 | from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Country, TouristPlaces, Users
# Create database and create a shortcut for easier to update database
engine = create_engine('sqlite:///country_catalog.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# Creating an user
user_1 = Users(name="admin", email="admin@admin.com")
session.add(user_1)
session.commit()
# India
country_1 = Country(user_id=1, name="India")
session.add(country_1)
session.commit()
# Australia
country_2 = Country(user_id=1, name="Australia")
session.add(country_2)
session.commit()
# England
country_3 = Country(user_id=1, name="England")
session.add(country_3)
session.commit()
# Paris
country_4 = Country(user_id=1, name="Paris")
session.add(country_4)
session.commit()
# USA
country_5 = Country(user_id=1, name="USA")
session.add(country_5)
session.commit()
# Mexico
country_6 = Country(user_id=1, name="Mexico")
session.add(country_6)
session.commit()
# SriLanka
country_7 = Country(user_id=1, name="Srilanka")
session.add(country_7)
session.commit()
# MAldives
country_8 = Country(user_id=1, name="Maldives")
session.add(country_8)
session.commit()
# Adding touristAttractions to Countries
places = TouristPlaces(user_id=1, name="Taj Mahal",
description="Taj Mahal is mausolem by Mughal ruler Shah Jahan for his Wife Mumtaz Mahal "
"It is bultby using white marbel",
country=country_1)
session.add(places)
session.commit()
places = TouristPlaces(user_id=1, name="Red Fort",
description="Red fort is the histroric fort in the city of Delhi,India."
"It is the main residence of the emperors of mughal Dynasty.",
country=country_1)
session.add(places)
session.commit()
places = TouristPlaces(user_id=1, name="Canberra",
description="It is the home for National GAllery of Australia"
"and a wide varierty of cultural and historic sites",
country=country_2)
session.add(places)
session.commit()
places = TouristPlaces(user_id=1, name="Perth",
description="The west side ofAustralia is home to the city of Perth"
"It is bordered by Indian Ocean",
country=country_2)
session.add(places)
session.commit()
places = TouristPlaces(user_id=1, name="Tower Of London",
description="It is one of the world Heritage site"
"Other highlights are Crown Jewels Exhibition",
country=country_3)
session.add(places)
session.commit()
places = TouristPlaces(user_id=1, name="British Museum",
description="It contains the collection of worlds finest antiquites"
"The famous artifacts are Eglin marbles",
country=country_3)
session.add(places)
session.commit()
places = TouristPlaces(user_id=1, name="Eiffel Tower",
description="The Eiffel-tower is wrought iron lattice"
"It is named after the Engineer Gustav Eiffel",
country=country_4)
session.add(places)
session.commit()
places = TouristPlaces(user_id=1, name="places of Versallies",
description="The Palce of Versallies is the Principle Royal"
"residence.",
country=country_4)
session.add(places)
session.commit()
places = TouristPlaces(user_id=1, name="Grand Canyon Village",
description="Grand Canyon is located in south Rim of Grand Canyon"
"It is focussed on accomadating tourists visiting Grand Canyon",
country=country_5)
session.add(places)
session.commit()
places = TouristPlaces(user_id=1, name="Statue if Liberty",
description="Statue of Liberty is Colossal neo-classical sculpture"
"In New-york Hourbor Newyork",
country=country_5)
session.add(places)
session.commit()
places = TouristPlaces(user_id=1, name="Mexico City",
description="Mexico city is densely populated and high altitude capital Of Mexico"
"It is the home for zoo,Muesuem of modern Art.",
country=country_6)
session.add(places)
session.commit()
places = TouristPlaces(user_id=1, name="Tulum",
description="Tulum is a town in the Carribean coatline of Mexico"
"It is well-known for beaches and ruins of Ancient Mayan port city",
country=country_6)
session.add(places)
session.commit()
places = TouristPlaces(user_id=1, name="Colombo",
description="It is the Capital city of Srilanka"
"It sheritage is reflected in its Architecture",
country=country_7)
session.add(places)
session.commit()
places = TouristPlaces(user_id=1, name="Kandy",
description="Kandy is the largest city of central Sri Lanka."
"It is surrounded by mountains which is home to tea Plantations.",
country=country_7)
session.add(places)
session.commit()
places = TouristPlaces(user_id=1, name="Male",
description="It is among the tooped tourist Attractions of Maldives"
"It has considerably moderate tempaerature through out the year",
country=country_8)
session.add(places)
session.commit()
places = TouristPlaces(user_id=1, name="Sun Island",
description="It is adorned with some sparkling beaches"
"beuatigul flowers and lavish greenary that pulls a great number of tourists",
country=country_8)
session.add(places)
session.commit()
print("added countries and Tourist Places added")
|
5,738 | 2506c5b042f04d1490ba2199a71e38829d4a0adc | from dataclasses import dataclass
from typing import Optional
@dataclass
class Music(object):
url: str
title: Optional[str] = None
|
5,739 | a61132d2d504ed31d4e1e7889bde670853968559 | #!/usr/bin/env python
# -------------------------------------------------------------------------
# Copyright (c) Microsoft, Intel Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import abc
import itertools
import os
import uuid
from enum import Enum
from pathlib import Path
from typing import Dict, Optional, Sequence, Tuple, Union
import numpy as np
import onnx
from onnx import ModelProto, TensorProto, helper, numpy_helper
import onnxruntime
from .quant_utils import apply_plot, load_model_with_shape_infer, smooth_distribution
class TensorData:
_allowed = frozenset(["avg", "std", "lowest", "highest", "hist", "hist_edges"])
def __init__(self, **kwargs):
for k, v in kwargs.items():
if k not in TensorData._allowed:
raise ValueError(f"Unexpected value {k!r} not in {TensorData._allowed}.")
setattr(self, k, v)
@property
def range_value(self):
if not hasattr(self, "lowest") or not hasattr(self, "highest"):
raise AttributeError(f"Attributes 'lowest' and/or 'highest' missing in {dir(self)}.")
return (self.lowest, self.highest)
@property
def avg_std(self):
if not hasattr(self, "avg") or not hasattr(self, "std"):
raise AttributeError(f"Attributes 'avg' and/or 'std' missing in {dir(self)}.")
return (self.avg, self.std)
class TensorsData:
def __init__(self, calibration_method, data: Dict[str, Union[TensorData, Tuple]]):
self.calibration_method = calibration_method
self.data = {}
for k, v in data.items():
if not isinstance(k, str):
raise TypeError(f"Keys must be strings not {type(k)}.")
if isinstance(v, tuple):
if calibration_method == CalibrationMethod.MinMax and len(v) == 2:
self.data[k] = TensorData(lowest=v[0], highest=v[1])
continue
if len(v) == 4:
self.data[k] = TensorData(lowest=v[0], highest=v[1], histogram=v[2], bins=v[3])
continue
raise TypeError(f"Unexpected tuple for {k:r}, it has {len(v)} elements: {v}.")
if not isinstance(v, TensorData):
raise TypeError(f"Values must be TensorData not {type(v)}.")
self.data[k] = v
def __iter__(self):
yield from self.data
def __contains__(self, key):
return key in self.data
def __getitem__(self, key):
return self.data[key]
def __setitem__(self, key, value):
if key not in self.data:
raise RuntimeError(f"Only an existing tensor can be modified, {key!r} is not.")
self.data[key] = value
def values(self):
return self.data.values()
class CalibrationMethod(Enum):
MinMax = 0
Entropy = 1
Percentile = 2
Distribution = 3
class CalibrationDataReader(metaclass=abc.ABCMeta):
@classmethod
def __subclasshook__(cls, subclass):
return hasattr(subclass, "get_next") and callable(subclass.get_next) or NotImplemented
@abc.abstractmethod
def get_next(self) -> dict:
"""generate the input data dict for ONNXinferenceSession run"""
raise NotImplementedError
def __iter__(self):
return self
def __next__(self):
result = self.get_next()
if result is None:
raise StopIteration
return result
class CalibraterBase:
def __init__(
self,
model_path: Union[str, Path],
op_types_to_calibrate: Optional[Sequence[str]] = None,
augmented_model_path="augmented_model.onnx",
symmetric=False,
use_external_data_format=False,
):
"""
:param model_path: ONNX model to calibrate. It should be a model file path
:param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
:param augmented_model_path: save augmented model to this path.
:param symmetric: make range of tensor symmetric (central point is 0).
:param use_external_data_format: use external data format to store model which size is >= 2Gb
"""
if isinstance(model_path, str):
self.model = load_model_with_shape_infer(Path(model_path))
elif isinstance(model_path, Path):
self.model = load_model_with_shape_infer(model_path)
else:
raise ValueError("model_path should be model path.")
self.op_types_to_calibrate = op_types_to_calibrate
self.augmented_model_path = augmented_model_path
self.symmetric = symmetric
self.use_external_data_format = use_external_data_format
self.augment_model = None
self.infer_session = None
self.execution_providers = ["CPUExecutionProvider"]
def set_execution_providers(self, execution_providers=["CPUExecutionProvider"]): # noqa: B006
"""
reset the execution providers to execute the collect_data. It triggers to re-creating inference session.
"""
self.execution_providers = execution_providers
self.create_inference_session()
def create_inference_session(self):
"""
create an OnnxRuntime InferenceSession.
"""
sess_options = onnxruntime.SessionOptions()
sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_DISABLE_ALL
self.infer_session = onnxruntime.InferenceSession(
self.augmented_model_path,
sess_options=sess_options,
providers=self.execution_providers,
)
def select_tensors_to_calibrate(self, model: ModelProto):
"""
select input/output tensors of candidate nodes to calibrate.
returns:
tensors (set): set of tensor name.
value_infos (dict): tensor name to value info.
"""
value_infos = {vi.name: vi for vi in model.graph.value_info}
value_infos.update({ot.name: ot for ot in model.graph.output})
value_infos.update({it.name: it for it in model.graph.input})
initializer = {init.name for init in model.graph.initializer}
tensors_to_calibrate = set()
tensor_type_to_calibrate = {TensorProto.FLOAT}
for node in model.graph.node:
if not self.op_types_to_calibrate or node.op_type in self.op_types_to_calibrate:
for tensor_name in itertools.chain(node.input, node.output):
if tensor_name in value_infos:
vi = value_infos[tensor_name]
if (
vi.type.HasField("tensor_type")
and (vi.type.tensor_type.elem_type in tensor_type_to_calibrate)
and (tensor_name not in initializer)
):
tensors_to_calibrate.add(tensor_name)
return tensors_to_calibrate, value_infos
def get_augment_model(self):
"""
return: augmented onnx model. Call after calling augment_graph
"""
return self.model
def augment_graph(self):
"""
abstract method: augment the input model to prepare for collecting data. It will:
1. augment the model to be able to collect desired statistics data
2. save augmented model to augmented_model_paths
"""
raise NotImplementedError
def collect_data(self, data_reader: CalibrationDataReader):
"""
abstract method: collect the tensors that will be used for range computation. It can be called multiple times.
"""
raise NotImplementedError
def compute_data(self) -> TensorsData:
"""
abstract method: compute data based on the calibration method stored in TensorsData
"""
raise NotImplementedError
class MinMaxCalibrater(CalibraterBase):
def __init__(
self,
model_path: Union[str, Path],
op_types_to_calibrate: Optional[Sequence[str]] = None,
augmented_model_path="augmented_model.onnx",
symmetric=False,
use_external_data_format=False,
moving_average=False,
averaging_constant=0.01,
):
"""
:param model_path: ONNX model to calibrate. It is a model path
:param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
:param augmented_model_path: save augmented model to this path.
:param symmetric: make range of tensor symmetric (central point is 0).
:param use_external_data_format: use external data format to store model which size is >= 2Gb
:param moving_average: compute the moving average of the minimum and maximum values instead of the global minimum and maximum.
:param averaging_constant: constant smoothing factor to use when computing the moving average.
"""
super().__init__(
model_path,
op_types_to_calibrate=op_types_to_calibrate,
augmented_model_path=augmented_model_path,
symmetric=symmetric,
use_external_data_format=use_external_data_format,
)
self.intermediate_outputs = []
self.calibrate_tensors_range = None
self.num_model_outputs = len(self.model.graph.output)
self.model_original_outputs = {output.name for output in self.model.graph.output}
self.moving_average = moving_average
if moving_average and (averaging_constant < 0 or averaging_constant > 1):
raise ValueError("Invalid averaging constant, which should not be < 0 or > 1.")
self.averaging_constant = averaging_constant
def augment_graph(self):
"""
Adds ReduceMin and ReduceMax nodes to all quantization_candidates op type nodes in
model and ensures their outputs are stored as part of the graph output
:return: augmented ONNX model
"""
tensors, _ = self.select_tensors_to_calibrate(self.model)
reshape_shape_name = str(uuid.uuid4())
reshape_shape = numpy_helper.from_array(np.array([1], dtype=np.int64), reshape_shape_name)
self.model.graph.initializer.append(reshape_shape)
def add_reduce_min_max(tensor_name, reduce_op_name):
# When doing ReduceMax/ReduceMin, ORT can't reduce on dim with value of 0 if 'keepdims' is false.
# To make the code simple, we always let keepdims to be 1.
keepdims = 1
# Adding ReduceMin/ReduceMax nodes: ReduceMin/ReduceMax -> Reshape-> (output)
reduce_output = tensor_name + "_" + reduce_op_name
intermediate_output = reduce_output + "_Reshape"
reduce_node = onnx.helper.make_node(
reduce_op_name, [tensor_name], [intermediate_output], keepdims=keepdims, name=reduce_output
)
reshape_node = onnx.helper.make_node(
"Reshape",
inputs=[intermediate_output, reshape_shape_name],
outputs=[reduce_output],
name=intermediate_output,
)
self.model.graph.node.extend([reduce_node, reshape_node])
self.model.graph.output.append(helper.make_tensor_value_info(reduce_output, TensorProto.FLOAT, [1]))
for tensor in tensors:
add_reduce_min_max(tensor, "ReduceMin")
add_reduce_min_max(tensor, "ReduceMax")
onnx.save(
self.model,
self.augmented_model_path,
save_as_external_data=self.use_external_data_format,
)
def clear_collected_data(self):
self.intermediate_outputs = []
def collect_data(self, data_reader: CalibrationDataReader):
while True:
inputs = data_reader.get_next()
if not inputs:
break
self.intermediate_outputs.append(self.infer_session.run(None, inputs))
if len(self.intermediate_outputs) == 0:
raise ValueError("No data is collected.")
t = self.compute_data()
if not isinstance(t, TensorsData):
raise TypeError(f"compute_data must return a TensorsData not {type(t)}.")
self.clear_collected_data()
def merge_range(self, old_range, new_range):
if not old_range:
return new_range
for key, value in old_range.items():
if self.moving_average:
min_value = value[0] + self.averaging_constant * (new_range[key][0] - value[0])
max_value = value[1] + self.averaging_constant * (new_range[key][1] - value[1])
else:
min_value = min(value[0], new_range[key][0])
max_value = max(value[1], new_range[key][1])
new_range[key] = (min_value, max_value)
return new_range
def compute_data(self) -> TensorsData:
"""
Compute the min-max range of tensor
:return: dictionary mapping: {added node names: (ReduceMin, ReduceMax) pairs }
"""
if len(self.intermediate_outputs) == 0:
return self.calibrate_tensors_range
output_names = [self.infer_session.get_outputs()[i].name for i in range(len(self.intermediate_outputs[0]))]
output_dicts_list = [
dict(zip(output_names, intermediate_output)) for intermediate_output in self.intermediate_outputs
]
merged_output_dict = {}
for d in output_dicts_list:
for k, v in d.items():
merged_output_dict.setdefault(k, []).append(v)
added_output_names = output_names[self.num_model_outputs :]
calibrate_tensor_names = [
added_output_names[i].rpartition("_")[0] for i in range(0, len(added_output_names), 2)
] # output names
merged_added_output_dict = {
i: merged_output_dict[i] for i in merged_output_dict if i not in self.model_original_outputs
}
pairs = []
for i in range(0, len(added_output_names), 2):
min_value = 0
max_value = 0
if self.moving_average:
min_value_array = np.mean(merged_added_output_dict[added_output_names[i]], axis=0)
max_value_array = np.mean(merged_added_output_dict[added_output_names[i + 1]], axis=0)
else:
min_value_array = min(merged_added_output_dict[added_output_names[i]])
max_value_array = max(merged_added_output_dict[added_output_names[i + 1]])
if type(min_value_array) == int or min_value_array.size > 0:
min_value = float(min_value_array)
if type(max_value_array) == int or max_value_array.size > 0:
max_value = float(max_value_array)
if self.symmetric:
max_absolute_value = max(abs(min_value), abs(max_value))
pairs.append(tuple([-max_absolute_value, max_absolute_value]))
else:
pairs.append(tuple([min_value, max_value]))
new_calibrate_tensors_range = TensorsData(CalibrationMethod.MinMax, dict(zip(calibrate_tensor_names, pairs)))
if self.calibrate_tensors_range:
self.calibrate_tensors_range = self.merge_range(self.calibrate_tensors_range, new_calibrate_tensors_range)
else:
self.calibrate_tensors_range = new_calibrate_tensors_range
return self.calibrate_tensors_range
class HistogramCalibrater(CalibraterBase):
def __init__(
self,
model_path: Union[str, Path],
op_types_to_calibrate: Optional[Sequence[str]] = None,
augmented_model_path="augmented_model.onnx",
use_external_data_format=False,
method="percentile",
symmetric=False,
num_bins=128,
num_quantized_bins=2048,
percentile=99.999,
scenario="same",
):
"""
:param model_path: ONNX model to calibrate. It is a model path.
:param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
:param augmented_model_path: save augmented model to this path.
:param use_external_data_format: use external data format to store model which size is >= 2Gb
:param method: A string. One of ['entropy', 'percentile'].
:param symmetric: make range of tensor symmetric (central point is 0).
:param num_bins: number of bins to create a new histogram for collecting tensor values.
:param num_quantized_bins: number of quantized bins. Default 128.
:param percentile: A float number between [0, 100]. Default 99.99.
:param scenario: see :class:`DistributionCalibrater`
"""
super().__init__(
model_path,
op_types_to_calibrate=op_types_to_calibrate,
augmented_model_path=augmented_model_path,
symmetric=symmetric,
use_external_data_format=use_external_data_format,
)
self.intermediate_outputs = []
self.calibrate_tensors_range = None
self.num_model_outputs = len(self.model.graph.output)
self.model_original_outputs = {output.name for output in self.model.graph.output}
self.collector = None
self.method = method
self.num_bins = num_bins
self.num_quantized_bins = num_quantized_bins
self.percentile = percentile
self.tensors_to_calibrate = None
self.scenario = scenario
def augment_graph(self):
"""
make all quantization_candidates op type nodes as part of the graph output.
:return: augmented ONNX model
"""
self.tensors_to_calibrate, value_infos = self.select_tensors_to_calibrate(self.model)
for tensor in self.tensors_to_calibrate:
if tensor not in self.model_original_outputs:
self.model.graph.output.append(value_infos[tensor])
onnx.save(
self.model,
self.augmented_model_path,
save_as_external_data=self.use_external_data_format,
)
def clear_collected_data(self):
self.intermediate_outputs = []
def collect_data(self, data_reader: CalibrationDataReader):
"""
Entropy Calibrator collects operators' tensors as well as generates tensor histogram for each operator.
"""
while True:
inputs = data_reader.get_next()
if not inputs:
break
self.intermediate_outputs.append(self.infer_session.run(None, inputs))
if len(self.intermediate_outputs) == 0:
raise ValueError("No data is collected.")
output_names = [self.infer_session.get_outputs()[i].name for i in range(len(self.intermediate_outputs[0]))]
output_dicts_list = [
dict(zip(output_names, intermediate_output)) for intermediate_output in self.intermediate_outputs
]
merged_dict = {}
for d in output_dicts_list:
for k, v in d.items():
merged_dict.setdefault(k, []).append(v)
clean_merged_dict = {i: merged_dict[i] for i in merged_dict if i in self.tensors_to_calibrate}
if not self.collector:
self.collector = HistogramCollector(
method=self.method,
symmetric=self.symmetric,
num_bins=self.num_bins,
num_quantized_bins=self.num_quantized_bins,
percentile=self.percentile,
scenario=self.scenario,
)
self.collector.collect(clean_merged_dict)
self.clear_collected_data()
def compute_data(self) -> TensorsData:
"""
Compute the min-max range of tensor
:return: dictionary mapping: {tensor name: (min value, max value)}
"""
if not self.collector:
raise ValueError("No collector created and can't generate calibration data.")
if isinstance(self, EntropyCalibrater):
cal = CalibrationMethod.Entropy
elif isinstance(self, PercentileCalibrater):
cal = CalibrationMethod.Percentile
elif isinstance(self, DistributionCalibrater):
cal = CalibrationMethod.Distribution
else:
raise TypeError(f"Unknown calibrater {type(self)}. This method must be overwritten.")
return TensorsData(cal, self.collector.compute_collection_result())
class EntropyCalibrater(HistogramCalibrater):
def __init__(
self,
model_path: Union[str, Path],
op_types_to_calibrate: Optional[Sequence[str]] = None,
augmented_model_path="augmented_model.onnx",
use_external_data_format=False,
method="entropy",
symmetric=False,
num_bins=128,
num_quantized_bins=128,
):
"""
:param model_path: ONNX model to calibrate. It is a model path
:param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
:param augmented_model_path: save augmented model to this path.
:param use_external_data_format: use external data format to store model which size is >= 2Gb
:param method: A string. One of ['entropy', 'percentile', 'distribution'].
:param symmetric: make range of tensor symmetric (central point is 0).
:param num_bins: number of bins to create a new histogram for collecting tensor values.
:param num_quantized_bins: number of quantized bins. Default 128.
"""
super().__init__(
model_path,
op_types_to_calibrate,
augmented_model_path,
use_external_data_format,
method=method,
symmetric=symmetric,
num_bins=num_bins,
num_quantized_bins=num_quantized_bins,
)
class PercentileCalibrater(HistogramCalibrater):
def __init__(
self,
model_path: Union[str, Path],
op_types_to_calibrate: Optional[Sequence[str]] = None,
augmented_model_path="augmented_model.onnx",
use_external_data_format=False,
method="percentile",
symmetric=False,
num_bins=2048,
percentile=99.999,
):
"""
:param model_path: ONNX model to calibrate. It is a model path
:param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
:param augmented_model_path: save augmented model to this path.
:param use_external_data_format: use external data format to store model which size is >= 2Gb
:param method: A string. One of ['entropy', 'percentile', 'distribution'].
:param symmetric: make range of tensor symmetric (central point is 0).
:param num_quantized_bins: number of quantized bins. Default 128.
:param percentile: A float number between [0, 100]. Default 99.99.
"""
super().__init__(
model_path,
op_types_to_calibrate,
augmented_model_path,
use_external_data_format,
method=method,
symmetric=symmetric,
num_bins=num_bins,
percentile=percentile,
)
class DistributionCalibrater(HistogramCalibrater):
def __init__(
self,
model_path: Union[str, Path],
op_types_to_calibrate: Optional[Sequence[str]] = None,
augmented_model_path="augmented_model.onnx",
use_external_data_format=False,
method="distribution",
num_bins=128,
scenario="same",
):
"""
:param model_path: ONNX model to calibrate. It is a model path
:param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
:param augmented_model_path: save augmented model to this path.
:param use_external_data_format: use external data format to store model which size is >= 2Gb
:param method: A string. One of ['entropy', 'percentile', 'distribution'].
:param symmetric: make range of tensor symmetric (central point is 0).
:param num_bins: number of bins to create a new histogram for collecting tensor values.
:param scenario: for float 8 only, if `scenario="same"`,
the algorithm weights and float 8 follow the same distribution,
if `scenario="p3"`, it assumes the weights follow
a gaussian law and float 8 ~ X^3 where X is a gaussian law
"""
super().__init__(
model_path,
op_types_to_calibrate,
augmented_model_path,
use_external_data_format,
method=method,
num_bins=num_bins,
scenario=scenario,
)
class CalibrationDataCollector(metaclass=abc.ABCMeta):
"""
Base class for collecting data for calibration-based quantization.
"""
@abc.abstractmethod
def collect(self, name_to_arr):
"""
Generate informative data based on given data.
name_to_arr : dict
tensor name to NDArray data
"""
raise NotImplementedError
@abc.abstractmethod
def compute_collection_result(self):
"""
Get the optimal result among collection data.
"""
raise NotImplementedError
class HistogramCollector(CalibrationDataCollector):
"""
Collecting histogram for each tensor. Percentile and Entropy method are supported.
ref: https://github.com//apache/incubator-mxnet/blob/master/python/mxnet/contrib/quantization.py
ref: https://docs.nvidia.com/deeplearning/tensorrt/pytorch-quantization-toolkit/docs/_modules/
pytorch_quantization/calib/histogram.html
"""
def __init__(self, method, symmetric, num_bins, num_quantized_bins, percentile, scenario):
self.histogram_dict = {}
self.method = method
self.symmetric = symmetric
self.num_bins = num_bins
self.num_quantized_bins = num_quantized_bins
self.percentile = percentile
self.scenario = scenario
def get_histogram_dict(self):
return self.histogram_dict
def collect(self, name_to_arr):
print("Collecting tensor data and making histogram ...")
# TODO: Currently we have different collect() for entropy and percentile method respectively.
# Need unified collect in the future.
if self.method in {"distribution", "entropy"}:
return self.collect_value(name_to_arr)
elif self.method == "percentile":
if self.symmetric:
return self.collect_absolute_value(name_to_arr)
else:
return self.collect_value(name_to_arr)
else:
raise ValueError("Only 'entropy', 'percentile' or 'distribution' methods are supported")
def collect_absolute_value(self, name_to_arr):
"""
Collect histogram on absolute value
"""
for tensor, data_arr in name_to_arr.items():
data_arr = np.asarray(data_arr) # noqa: PLW2901
data_arr = data_arr.flatten() # noqa: PLW2901
if data_arr.size > 0:
min_value = np.min(data_arr)
max_value = np.max(data_arr)
else:
min_value = 0
max_value = 0
data_arr = np.absolute(data_arr) # only consider absolute value # noqa: PLW2901
if tensor not in self.histogram_dict:
# first time it uses num_bins to compute histogram.
hist, hist_edges = np.histogram(data_arr, bins=self.num_bins)
self.histogram_dict[tensor] = (hist, hist_edges, min_value, max_value)
else:
old_histogram = self.histogram_dict[tensor]
old_min = old_histogram[2]
old_max = old_histogram[3]
old_hist = old_histogram[0]
old_hist_edges = old_histogram[1]
temp_amax = np.max(data_arr)
if temp_amax > old_hist_edges[-1]:
# increase the number of bins
width = old_hist_edges[1] - old_hist_edges[0]
# NOTE: np.arange may create an extra bin after the one containing temp_amax
new_bin_edges = np.arange(old_hist_edges[-1] + width, temp_amax + width, width)
old_hist_edges = np.hstack((old_hist_edges, new_bin_edges))
hist, hist_edges = np.histogram(data_arr, bins=old_hist_edges)
hist[: len(old_hist)] += old_hist
self.histogram_dict[tensor] = (hist, hist_edges, min(old_min, min_value), max(old_max, max_value))
def collect_value(self, name_to_arr):
"""
Collect histogram on real value
"""
for tensor, data_arr in name_to_arr.items():
data_arr = np.asarray(data_arr) # noqa: PLW2901
data_arr = data_arr.flatten() # noqa: PLW2901
if data_arr.size > 0:
min_value = np.min(data_arr)
max_value = np.max(data_arr)
else:
min_value = 0
max_value = 0
threshold = max(abs(min_value), abs(max_value))
if tensor in self.histogram_dict:
old_histogram = self.histogram_dict[tensor]
self.histogram_dict[tensor] = self.merge_histogram(
old_histogram, data_arr, min_value, max_value, threshold
)
else:
hist, hist_edges = np.histogram(data_arr, self.num_bins, range=(-threshold, threshold))
self.histogram_dict[tensor] = (
hist,
hist_edges,
min_value,
max_value,
threshold,
)
def merge_histogram(self, old_histogram, data_arr, new_min, new_max, new_threshold):
(old_hist, old_hist_edges, old_min, old_max, old_threshold) = old_histogram
if new_threshold <= old_threshold:
new_hist, _ = np.histogram(data_arr, len(old_hist), range=(-old_threshold, old_threshold))
return (
new_hist + old_hist,
old_hist_edges,
min(old_min, new_min),
max(old_max, new_max),
old_threshold,
)
else:
if old_threshold == 0:
hist, hist_edges = np.histogram(data_arr, len(old_hist), range=(-new_threshold, new_threshold))
hist += old_hist
else:
old_num_bins = len(old_hist)
old_stride = 2 * old_threshold / old_num_bins
half_increased_bins = int((new_threshold - old_threshold) // old_stride + 1)
new_num_bins = old_num_bins + 2 * half_increased_bins
new_threshold = half_increased_bins * old_stride + old_threshold
hist, hist_edges = np.histogram(data_arr, new_num_bins, range=(-new_threshold, new_threshold))
hist[half_increased_bins : new_num_bins - half_increased_bins] += old_hist
return (
hist,
hist_edges,
min(old_min, new_min),
max(old_max, new_max),
new_threshold,
)
def compute_collection_result(self):
if not self.histogram_dict or len(self.histogram_dict) == 0:
raise ValueError("Histogram has not been collected. Please run collect() first.")
print(f"Finding optimal threshold for each tensor using {self.method} algorithm ...")
if self.method == "entropy":
return self.compute_entropy()
elif self.method == "percentile":
return self.compute_percentile()
elif self.method == "distribution":
return self.compute_distribution()
else:
raise ValueError("Only 'entropy', 'percentile' or 'distribution' methods are supported")
def compute_percentile(self):
if self.percentile < 0 or self.percentile > 100:
raise ValueError("Invalid percentile. Must be in range 0 <= percentile <= 100.")
histogram_dict = self.histogram_dict
percentile = self.percentile
thresholds_dict = {} # per tensor thresholds
print(f"Number of tensors : {len(histogram_dict)}")
print(f"Number of histogram bins : {self.num_bins}")
print(f"Percentile : ({100.0 - percentile},{percentile})")
for tensor, histogram in histogram_dict.items():
hist = histogram[0]
hist_edges = histogram[1]
total = hist.sum()
cdf = np.cumsum(hist / total)
if self.symmetric:
idx_right = np.searchsorted(cdf, percentile / 100.0)
thresholds_dict[tensor] = (
-float(hist_edges[idx_right]),
float(hist_edges[idx_right]),
)
else:
percent_to_cut_one_side = (100.0 - percentile) / 200.0
idx_right = np.searchsorted(cdf, 1.0 - percent_to_cut_one_side)
idx_left = np.searchsorted(cdf, percent_to_cut_one_side)
thresholds_dict[tensor] = (
float(hist_edges[idx_left]),
float(hist_edges[idx_right]),
)
min_value = histogram[2]
max_value = histogram[3]
if thresholds_dict[tensor][0] < min_value:
thresholds_dict[tensor] = (min_value, thresholds_dict[tensor][1])
if thresholds_dict[tensor][1] > max_value:
thresholds_dict[tensor] = (thresholds_dict[tensor][0], max_value)
thresholds_dict[tensor] = (*thresholds_dict[tensor], *hist[:2])
# Plot histogram for debug only
if os.environ.get("QUANTIZATION_DEBUG", 0) in (1, "1"):
apply_plot(hist, hist_edges)
return thresholds_dict
def compute_entropy(self):
histogram_dict = self.histogram_dict
num_quantized_bins = self.num_quantized_bins
thresholds_dict = {} # per tensor thresholds
print(f"Number of tensors : {len(histogram_dict)}")
print(
"Number of histogram bins : {} (The number may increase depends on the data it collects)".format(
self.num_bins
)
)
print(f"Number of quantized bins : {self.num_quantized_bins}")
for tensor, histogram in histogram_dict.items():
optimal_threshold = self.get_entropy_threshold(histogram, num_quantized_bins)
thresholds_dict[tensor] = optimal_threshold
thresholds_dict[tensor] = (*optimal_threshold, *histogram[:2])
# Plot histogram for debug only
if os.environ.get("QUANTIZATION_DEBUG", 0) in (1, "1"):
apply_plot(histogram[0], histogram[1])
return thresholds_dict
@staticmethod
def _avg_std(hist, hist_edges, power=1):
if power <= 0:
raise ValueError(f"power={power} <= 0 is invalid.")
values = (hist_edges[:-1] + hist_edges[1:]) * 0.5
if power == 1:
avg = (hist * values).sum() / hist.sum()
std = ((hist * values**2).sum() / hist.sum() - avg**2) ** 0.5
return avg, std
if int(power) == power and int(power) % 2 == 1:
avg = (hist * values**power).sum() / hist.sum()
std = ((hist * (values**power - avg) ** 2).sum() / hist.sum()) ** 0.5
return avg, std
fact = np.abs(values) / values
fact[np.isnan(fact)] = 1
fact[np.isinf(fact)] = 1
values = np.abs(values) ** power * fact
avg = (hist * values).sum() / hist.sum()
std = ((hist * values**2).sum() / hist.sum() - avg**2) ** 0.5
return avg, std
def compute_distribution(self):
if self.num_bins < 512:
raise ValueError("Invalid num_bins. Must be in range 512 <= num_bins.")
histogram_dict = self.histogram_dict
thresholds_dict = {} # per tensor thresholds
print(f"Number of tensors : {len(histogram_dict)}")
print(f"Number of histogram bins : {self.num_bins}")
print(f"Scenario : {self.scenario!r})")
for tensor, histogram in histogram_dict.items():
hist = histogram[0]
hist_edges = histogram[1]
if self.scenario == "same":
avg_coef, std_coef = self._avg_std(hist, hist_edges, power=1)
elif self.scenario == "p3":
avg_coef, std_coef = self._avg_std(hist, hist_edges, power=1.0 / 3.0)
else:
raise ValueError("Invalid scenario. Must be in {'same', 'p3'}.")
thresholds_dict[tensor] = TensorData(avg=avg_coef, std=std_coef, hist=hist, hist_edges=hist_edges)
# Plot histogram for debug only
if os.environ.get("QUANTIZATION_DEBUG", 0) in (1, "1"):
apply_plot(hist, hist_edges)
return thresholds_dict
def get_entropy_threshold(self, histogram, num_quantized_bins):
"""Given a dataset, find the optimal threshold for quantizing it.
The reference distribution is `q`, and the candidate distribution is `p`.
`q` is a truncated version of the original distribution.
Ref: http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf
"""
import copy
from scipy.stats import entropy
hist = histogram[0]
hist_edges = histogram[1]
num_bins = hist.size
zero_bin_index = num_bins // 2
num_half_quantized_bin = num_quantized_bins // 2
kl_divergence = np.zeros(zero_bin_index - num_half_quantized_bin + 1)
thresholds = [(0, 0) for i in range(kl_divergence.size)]
# <------------ num bins ---------------->
# <--- quantized bins ---->
# |======|===========|===========|=======|
# zero bin index
# ^ ^
# | |
# start index end index (start of iteration)
# ^ ^
# | |
# start index end index ...
# ^ ^
# | |
# start index end index (end of iteration)
for i in range(num_half_quantized_bin, zero_bin_index + 1, 1):
start_index = zero_bin_index - i
end_index = zero_bin_index + i + 1 if (zero_bin_index + i + 1) <= num_bins else num_bins
thresholds[i - num_half_quantized_bin] = (
float(hist_edges[start_index]),
float(hist_edges[end_index]),
)
sliced_distribution = copy.deepcopy(hist[start_index:end_index])
# reference distribution p
p = sliced_distribution.copy() # a copy of np array
left_outliers_count = sum(hist[:start_index])
right_outliers_count = sum(hist[end_index:])
p[0] += left_outliers_count
p[-1] += right_outliers_count
# nonzeros[i] incidates whether p[i] is non-zero
nonzeros = (p != 0).astype(np.int64)
# quantize p.size bins into quantized bins (default 128 bins)
quantized_bins = np.zeros(num_quantized_bins, dtype=np.int64)
num_merged_bins = sliced_distribution.size // num_quantized_bins
# merge bins into quantized bins
for index in range(num_quantized_bins):
start = index * num_merged_bins
end = start + num_merged_bins
quantized_bins[index] = sum(sliced_distribution[start:end])
quantized_bins[-1] += sum(sliced_distribution[num_quantized_bins * num_merged_bins :])
# in order to compare p and q, we need to make length of q equals to length of p
# expand quantized bins into p.size bins
q = np.zeros(p.size, dtype=np.int64)
for index in range(num_quantized_bins):
start = index * num_merged_bins
end = start + num_merged_bins
norm = sum(nonzeros[start:end])
if norm != 0:
q[start:end] = float(quantized_bins[index]) / float(norm)
p = smooth_distribution(p)
q = smooth_distribution(q)
if isinstance(q, np.ndarray):
kl_divergence[i - num_half_quantized_bin] = entropy(p, q)
else:
kl_divergence[i - num_half_quantized_bin] = float("inf")
min_kl_divergence_idx = np.argmin(kl_divergence)
optimal_threshold = thresholds[min_kl_divergence_idx]
min_value = histogram[2]
max_value = histogram[3]
if optimal_threshold[0] < min_value:
optimal_threshold = (min_value, optimal_threshold[1])
if optimal_threshold[1] > max_value:
optimal_threshold = (optimal_threshold[0], max_value)
return optimal_threshold
def create_calibrator(
model: Union[str, Path],
op_types_to_calibrate: Optional[Sequence[str]] = None,
augmented_model_path="augmented_model.onnx",
calibrate_method=CalibrationMethod.MinMax,
use_external_data_format=False,
extra_options={}, # noqa: B006
):
calibrator = None
if calibrate_method == CalibrationMethod.MinMax:
# default settings for min-max algorithm
symmetric = False if "symmetric" not in extra_options else extra_options["symmetric"]
moving_average = False if "moving_average" not in extra_options else extra_options["moving_average"]
averaging_constant = 0.01 if "averaging_constant" not in extra_options else extra_options["averaging_constant"]
calibrator = MinMaxCalibrater(
model,
op_types_to_calibrate,
augmented_model_path,
use_external_data_format=use_external_data_format,
symmetric=symmetric,
moving_average=moving_average,
averaging_constant=averaging_constant,
)
elif calibrate_method == CalibrationMethod.Entropy:
# default settings for entropy algorithm
num_bins = 128 if "num_bins" not in extra_options else extra_options["num_bins"]
num_quantized_bins = 128 if "num_quantized_bins" not in extra_options else extra_options["num_quantized_bins"]
symmetric = False if "symmetric" not in extra_options else extra_options["symmetric"]
calibrator = EntropyCalibrater(
model,
op_types_to_calibrate,
augmented_model_path,
use_external_data_format=use_external_data_format,
symmetric=symmetric,
num_bins=num_bins,
num_quantized_bins=num_quantized_bins,
)
elif calibrate_method == CalibrationMethod.Percentile:
# default settings for percentile algorithm
num_bins = 2048 if "num_bins" not in extra_options else extra_options["num_bins"]
percentile = 99.999 if "percentile" not in extra_options else extra_options["percentile"]
symmetric = True if "symmetric" not in extra_options else extra_options["symmetric"]
calibrator = PercentileCalibrater(
model,
op_types_to_calibrate,
augmented_model_path,
use_external_data_format=use_external_data_format,
symmetric=symmetric,
num_bins=num_bins,
percentile=percentile,
)
elif calibrate_method == CalibrationMethod.Distribution:
# default settings for percentile algorithm
num_bins = 2048 if "num_bins" not in extra_options else extra_options["num_bins"]
scenario = "same" if "scenario" not in extra_options else extra_options["scenario"]
calibrator = DistributionCalibrater(
model,
op_types_to_calibrate,
augmented_model_path,
use_external_data_format=use_external_data_format,
num_bins=num_bins,
scenario=scenario,
)
if calibrator:
calibrator.augment_graph()
calibrator.create_inference_session()
return calibrator
raise ValueError(f"Unsupported calibration method {calibrate_method}")
|
5,740 | 32db21ed7f57f29260d70513d8c34de53adf12d7 | import time,pickle
from CNN_GPU.CNN_C_Wrapper import *
from pathlib import Path
FSIGMOIG = 0
FTANH = 2
FRELU = 4
REQUEST_INPUT = 0
REQUEST_GRAD_INPUT = 1
REQUEST_OUTPUT = 2
REQUEST_WEIGTH = 3
class CNN:
def __init__(self, inputSize, hitLearn=.1, momentum=.9, weigthDecay=.5, multip=1.0):
file = '%s/%s' % (DIR_LIBRARY, 'gpu_function.cl')
file = file.encode('utf-8')
self.cnn = c_Pointer()
clib.createCnnWrapper(c.addressof(self.cnn), c.create_string_buffer(file),
hitLearn, momentum, weigthDecay, multip, inputSize[0], inputSize[1], inputSize[2])
clib.initRandom(time.time_ns())
def __del__(self):
clib.releaseCnnWrapper(c.addressof(self.cnn))
print('end')
def addConvLayer(self, passo, tamanhoFitro, numeroFiltro):
clib.CnnAddConvLayer(self.cnn.p, passo, tamanhoFitro, numeroFiltro)
def addPoolLayer(self, passo, tamanhoFitro):
clib.CnnAddPoolLayer(self.cnn.p, passo, tamanhoFitro)
def addReluLayer(self):
clib.CnnAddReluLayer(self.cnn.p)
def addDropOutLayer(self, pontoAtivacao, seed):
clib.CnnAddDropOutLayer(self.cnn.p, pontoAtivacao, seed)
def addFullConnectLayer(self, saida, funcaoAtivacao):
clib.CnnAddFullConnectLayer(self.cnn.p, saida, funcaoAtivacao)
def predict(self, input):
tinput = self.createInp(*input)
clib.CnnCall(self.cnn.p, tinput)
def learn(self, target):
ttarg = self.targ(*target)
clib.CnnLearn(self.cnn.p, ttarg)
def getData(self, layer, request, nfilter=0):
size = self.getSizeData(layer, request)
if size is None: return None
data = c.c_double * (size[0] * size[1] * size[2])
data = data(0)
err = clib.CnnGetTensorData(self.cnn.p, layer, request, nfilter, data)
if err < 0:
self.lastERROR = err
return None
return list(data)
def getSizeData(self, layer, request):
inx, iny, inz, n = c.c_int(0), c.c_int(0), c.c_int(0), c.c_int(0)
err = clib.CnnGetSize(self.cnn.p, layer, request, c.addressof(inx), c.addressof(iny), c.addressof(inz),
c.addressof(n))
if err < 0:
self.lastERROR = err
return None
return inx.value, iny.value, inz.value, n.value
@property
def output(self):
err = clib.CnnGetTensorData(self.cnn.p, -1, REQUEST_OUTPUT, 0, self.out)
if err < 0:
self.lastERROR = err
return None
return list(self.out)
def compile(self):
if self.error: raise Exception("ERROR")
inx, iny, inz = c.c_int(0), c.c_int(0), c.c_int(0)
err = clib.CnnGetSize(self.cnn.p, 0, REQUEST_INPUT, c.addressof(inx), c.addressof(iny), c.addressof(inz),
c.cast(0, c.c_void_p))
if err != 0: raise Exception('Error when request input size', err)
self.createInp = c.c_double * (inx.value * iny.value * inz.value)
err = clib.CnnGetSize(self.cnn.p, -1, REQUEST_OUTPUT, c.addressof(inx), c.addressof(iny), c.addressof(inz),
c.cast(0, c.c_void_p))
if err != 0: raise Exception('Error when request output size', err)
self.out = c.c_double * (inx.value * iny.value * inz.value)
self.targ = self.out
self.out = self.out(0)
def info(self):
clib.CnnInfo(self.cnn.p)
def save(self, fileName:str):
filedesc = Path(fileName).with_suffix('.cdc')
self.salveCnnDescriptor(filedesc)
fileName = fileName.encode('utf-8')
return clib.CnnSaveInFile(self.cnn.p, c.create_string_buffer(fileName))
@staticmethod
def load(fileName):
self = CNN([2,2,1])
fileName = fileName.encode('utf-8')
clib.CnnLoadByFile(self.cnn.p, c.create_string_buffer(fileName))
self.compile()
return self
@property
def error(self):
return clib.getCnnError(self.cnn.p)
@property
def errorMsg(self):
buff = c.create_string_buffer(''.encode('utf-8'),255)
clib.getCnnErrormsg(self.cnn.p,buff)
return buff.value.decode('utf-8')
def salveCnnDescriptor(self,file):
desc_c = c_Pointer()
clib.generateDescriptor(c.addressof(desc_c),self.cnn.p)
msg = c.cast(desc_c.p,c.c_char_p)
msg = msg.value.decode('utf-8')
clib.freeP(desc_c.p)
desc = eval(msg)
with open(file,'wb') as f:
pickle.dump(desc,f)
# AUXILIAR FUNCTION
def getOutputAsIndexMax(self):
ans = clib.CnnGetIndexMax(self.cnn.p)
return ans
def normalizeVector(self,vector:list,maxOutput,minOutput):
out_TYPE =c.c_double * len(vector)
inp = out_TYPE(*vector)
out = out_TYPE()
clib.normalizeGPU(self.cnn.p,inp,out,len(vector),maxOutput,minOutput)
return list(out)
def normalizeVectorKnowedSpace(self,vector:list,maxInput,minInput,maxOutput,minOutput):
out_TYPE =c.c_double * len(vector)
tmp_inp = out_TYPE(*vector)
tmp_out = out_TYPE(*vector)
clib.normalizeGPUSpaceKnow(self.cnn.p,tmp_inp,tmp_out,len(vector),maxInput,minInput,maxOutput,minOutput)
return list(tmp_out)
def getOutPutAsPPM(self):
p = c_Pointer()
h = c.c_size_t()
w = c.c_size_t()
clib.Py_getCnnOutPutAsPPM(self.cnn.p, c.addressof(p), c.addressof(h), c.addressof(w))
h = h.value
w = w.value
out = (c.c_ubyte*(w*h))()
c.memmove(out,p.p,w*h)
a = bytes(out)
clib.freeP(p.p)
return (h,w,a) |
5,741 | 0de27101675eb8328d9a2831ed468a969b03e7d3 | import sys
prop = float(sys.argv[1])
def kind(n):
s = str(n)
l = len(s)
i = 0
j = i + 1
decr, bouncy, incr = False, False, False
while j < l:
a = int(s[i])
b = int(s[j])
if s[i] > s[j]:
decr = True
elif s[i] < s[j]:
incr = True
i += 1
j += 1
if decr and incr:
return True
return False
def calc(prop):
currentProp = 0
i = 100
countBouncy = 0
while currentProp < prop:
if kind(i):
countBouncy += 1
currentProp = (countBouncy * 100) / i
if currentProp >= prop:
return i
i += 1
return "Proportion was not reached."
calc(prop)
|
5,742 | 92529c4d4c33a7473773f081f730e64bae4d7f54 | # This Python file uses the following encoding: utf-8
import json
import os
import logging
from .utility_helper import (
check_path,
)
from .formats import (
OUTPUT_FORMATS,
FORMATS
)
class OptionsManager(object):
"""
This clas is responsible for storing & retrieving the options.
Args:
config_path (string): Absolute path where OptionsManager
should store the settings file.
Note:
See load_default() method for available options.
Example:
Access the options using the 'options' variable.
opt_manager = OptionsManager('.')
opt_manager.options['save_path'] = '~/Downloads'
"""
SETTINGS_FILENAME = 'settings.json'
SENSITIVE_KEYS = ('sudo_password', 'password', 'video_password')
format = "%(asctime)s: %(message)s"
logging.basicConfig(format=format, level=logging.INFO, datefmt='%H:%M:%S')
logging.getLogger().setLevel(logging.DEBUG)
def __init__(self, config_path):
self.config_path = config_path
self.settings_file = os.path.join(config_path, self.SETTINGS_FILENAME)
self.options = dict()
self.load_default()
self.load_from_file()
def load_default(self):
"""Load the default options.
Note:
This method is automatically called by the constructor.
Options Description:
'save_path' (string): Path where youtube-dl shoult store
the downloaded file. default is $HOME (~\Downloads)
'save_path_dirs' (list): List that contains temporary save paths.
'video_format' (string): Video format to download.
When this option is '0' youtube-dl will choose
the best video format for given URL.
'second_video_format' (string): Video format to mix with the
one (-f 18+17)
'to_audio' (boolean): If True youtube-dl will post process the
video file.
'keep_video' (boolean): If True youtube-dl will keep the video
after post processing it.
'audio_format' (string): Audio format of the post processed file.
values are: mp3, wav, aac, m4a, vorbis, opus.
'audio_quality' (string): Audio quality of the post processed file.
values are: 9, 5, 0. The lowest value the better the quality.
'restrict_filenames' (boolean): If True youtube-dl will restrict
the downloaded file filename to ASCII characters only.
'output_format' (int): This options sets the downloaded file
output template. See formats.OUTPUT_FORMATS for mor info.
'output_template' (string) : Can be any output template supported
by youtube-dl
'playlist_start' (int): Playlist index to start downloading
'playlist_end' (int): Playlist index to stop downloading.
'max_downloads' (int): Maximun number of video files to download
from the given playlist.
'min_filesize' (float): Min file size of the video file.
if the video is smaller than the given size then
youtube-dl will abort the download process.
'max_filesize' (float): Min file size of the video file.
if the video is larger than the given size then
youtube-dl will abort the download process.
'min_filesize_unit' (string): Minimum file size unit.
values are: '', k, m, g, y, p, e, z, y.
'max_filesize_unit' (string): Maximum file size unit.
values are: '', k, m, g, y, p, e, z, y.
'write_subs' (boolean): If True youtube-dl will try to download
the subtitles file for the given URL.
'write_all_subs' (boolean): If True youtube-dl will try to download
all the available subtitles for the given URL.
'write_auto_subs' (boolean): If True youtube-dl will try to download
the automatic subtitlees file for the given URL.
'embed_subs' (boolean): If True youtube-dl will try to merge the
subtitles file with the video. (ONLY mp4 files)
'subs_lang' (string): Language of the subtitles file to download.
Needs 'write_subs' option.
'ignore_errors' (boolean): If True youtube-dl will ignore the errors
and continue the download process.
'open_dl_dir' (boolean): If True youtube-dl will open the destination
folder after download process has been completed.
'write_description' (boolean): If True youtube-dl will the video
description to a *.description file.
'write_info' (boolean): If True youtube-dl will write
video metadata to a *.info.json file.
'write_thumbnail' (boolean): If True youtube-dl will write a
thumbnail image to disk.
'retries' (int): Number of youtube-dl retries.
'user_agent' (string): Specify a custom user agent for youtube-dl
'referer' (string): Specify a custom referer to user if the video
access is restricted to one domain.
'proxy' (string): Use the specified HTTP/HTTPS proxy.
'shutdown' (boolean): Shutdown PC after download process completed.
'sudo_password' (string): SUDO password for the shutdown process
if the user does not have elevated privileges.
'username' (string): Username to login with.
'password' (string): Password to login with.
'video_password' (string): Video Password for the given URL.
'youtubedl_path' (string): Absolute the path to the youtube-dl binary.
Default is the self.config_path. You can change this position
to point anywhere if you want to use the youtube-dl binary on your system.
This is also the directory where youtube-dlg will auto download the
youtube-dl if not exists so you should make sure you have write access
if you want to update the youtube-dl binary from within youtube-dlg.
'cmd_args' (string): String that contains extra youtube-dl options
seperated by spaces.
'enable_log' (boolean): If True youtube-dlg will enable
the LogManager, see main() function under __init__().
'log_time' (boolean): See logmanager.LogManager add_time attribute.
'workers_number' (int): Number of download workers that download manager
will spawn. Must be greater than zero.
'locale_name' (string): Locale name (en_US)
'main_win_size' (tuple): Main window size (width x height).
if window becomes to small the program will reset its size.
see _settings_are_valid method MIN_FRAME_SIZE.
'opts_win_size' (tuple): Main window size (width x height).
'selected_video_formats' (list): List that contains the selected
video formats to display on the main window
'selected_audio_formats' (list): List that contains the selected
audio formats to display on the main window
'selected_format' (string): Current format selected on the main window
'youtube_dl_debug' (boolean): When True will pass '-v' flag to youtube-dl
config file options.
'ignore_config' (boolean): When True will ignore youtube-dl config file option.
'confirm_exit' (boolean): When True create message to confirm exist youtube-dlg
'native_hls' (boolean): When True youtube-dl will use the natives HLS implementation.
'show_completion_popup' (boolean): When True youtube-dl-dlg will create message to inform
the user for the download completion
'confirm_deletion' (boolean): When True ask user before item removal.
'nomtime' (boolean): When True will not use the last-modified header to
set the file modification time.
'embed_thumbnail' (boolean): When True will embed the thumbnail in
the audio file as cover art.
'add_metadata' (boolean): When True will write metadata to file.
"""
#+++++<DEBUG_LOG>
logging.debug("load_options default___________________")
#-----<DEBUG_LOG>
self.options = {
'save_path' : os.path.expanduser('~'),
'save_path_dirs': [
os.path.expanduser('~'),
os.path.join(os.path.expanduser('~'), "Downloads"),
os.path.join(os.path.expanduser('~'), "Desktop"),
os.path.join(os.path.expanduser('~'), "Videos"),
os.path.join(os.path.expanduser('~'), "Music"),
],
'video_format': '0',
'second_video_format': '0',
'to_audio': False,
'keep_video': False,
'audio_format': '',
'audio_quality': '5',
'restrict_filenames': False,
'output_format': 1,
'output_template': os.path.join('%(uploader)s', '%(title)s.%(ext)s'),
'playlist_start': 1,
'playlist_end': 0,
'max_downloads': 0,
'min_filesize': 0,
'max_filesize': 0,
'min_filesize_unit': '',
'max_filesize_unit': '',
'write_subs': True,
'write_all_subs': False,
'write_auto_subs': False,
'embed_subs': False,
'subs_lang': 'en',
'ignore_errors': True,
'open_dl_dir': False,
'write_description': False,
'write_info': False,
'write_thumbnail': False,
'retries': 10,
'user_agent': '',
'referer': '',
'proxy': '',
'shutdown': False,
'sudo_password': '',
'username': '',
'password': '',
'video_password': '',
'youtubedl_path': self.config_path,
'cmd_args': '',
'enable_log': True,
'log_time': True,
'workers_number': 3,
'locale_name': 'en_US',
'main_win_size': (740, 490),
'opts_win_size': (640, 490),
'selected_video_formats': ['default', 'mp4', 'webm'],
'selected_audio_formats': ['mp3', 'm4a', 'vorbis'],
'selected_format': '0',
'youtube_dl_debug': False,
'ignore_config': True,
'confirm_exit': True,
'native_hls': True,
'show_completion_popup': True,
'confirm_deletion': True,
'nomtime': False,
'embed_thumbnail': False,
'add_metadata': False
}
def load_from_file(self):
"""
Load options from settings file.
"""
if not os.path.exists(self.settings_file):
return
with open(self.settings_file, 'rb') as settings_file:
try:
options = json.load(settings_file)
if self._settings_coordinate(options):
self.options = options
except:
self.load_default()
def save_to_file(self):
"""Save options to settings file.
"""
check_path(self.config_path)
with open(self.settings_file, 'w') as settings_file:
options = self._get_options()
json.dump(options,
settings_file,
indent=4,
separators=(',', ': '))
def _settings_coordinate(self, settings_dict):
"""
Check settings.json dictionary
Args:
settings_dict: Options dict loaded. See load_from_file() method.
Return:
True if settings.json is valid / else False
"""
VALID_VIDEO_FORMAT = ('0', '17', '36', '5', '34', '35', '43', '44', '45',
'46', '18', '22', '37', '38', '160', '133', '134', '135', '136','137',
'264', '138', '242', '243', '244', '247', '248', '271', '272', '82',
'83', '84', '85', '100', '101', '102', '139', '140', '141', '171', '172')
VALID_AUDIO_FORMAT = ('mp3', 'wav', 'aac', 'm4a', 'vorbis', 'opus', '')
VALID_AUDIO_QUALITY = ('0', '5', '9')
VALID_FILESIZE_UNIT = ('', 'k', 'm', 'g', 't', 'p', 'e', 'z', 'y')
VALID_SUB_LANGUAGE = ('en', 'el', 'pt', 'fr', 'it', 'ru', 'es', 'de', 'he', 'sv', 'tr')
MIN_FRAME_SIZE = 100
for key in self.options:
if key not in settings_dict:
return False
if type(self.options[key]) != type(settings_dict[key]):
return False
# Check if each key has a valid value
rules_dict = {
'video_format': FORMATS.keys(),
'second_video_format': VALID_VIDEO_FORMAT,
'audio_format': VALID_AUDIO_FORMAT,
'audio_quality': VALID_AUDIO_QUALITY,
'output_format': OUTPUT_FORMATS.keys(),
'min_filesize_unit': VALID_FILESIZE_UNIT,
'max_filesize_unit': VALID_FILESIZE_UNIT,
'subs_lang': VALID_SUB_LANGUAGE
}
for key, valid_list in rules_dict.items():
if settings_dict[key] not in valid_list:
return False
if settings_dict['workers_number'] < 1:
return False
return True
def _get_options(self):
"""
Return options dictionary.
"""
tmp_options = self.options.copy()
for key in self.SENSITIVE_KEYS:
tmp_options[key] = ''
return tmp_options
|
5,743 | 464980a2f17aeedfa08548d6c4e247f8c047e2cb | # Generated by Django 3.2.3 on 2021-07-24 12:14
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('profiles', '0018_userprofile_membership_fee_pending'),
]
operations = [
migrations.RenameField(
model_name='userprofile',
old_name='membership_fee_pending',
new_name='membership_fee_paid',
),
]
|
5,744 | 0b42f458097d11d66160bcb8e706ccb9b5c4682a | def K_Wilson(w, Tr, Pr):
# Inserting necessary libraries
import numpy as np
# Calculating K-value using Wilson correlation
K_value_Output = (1 / Pr) * np.exp(5.37 * (1 + w) * (1 - 1 / Tr))
# Returning output value
return K_value_Output |
5,745 | 76420ec1b37d4b9b85f35764a7f8a0e1f19a15dd | import boring.dialog
import boring.form
FORMSTRING = '''
Project name@string
Width@int|Height@int
Background color@color
Fullscreen@check
'''
class NewProjectWindow(boring.dialog.DefaultDialog):
def __init__(self, master, _dict=None):
self._dict = _dict
self.output = None
boring.dialog.DefaultDialog.__init__(self, master)
def body(self, master):
initial_values = [
'',
640, 480,
'#dadada',
False
]
if self._dict:
initial_values = [
self._dict.get('name'),
self._dict.get('width'), self._dict.get('height'),
self._dict.get('bgcolor'),
self._dict.get('fullscreen')
]
self.form = boring.form.FormFrame(master, FORMSTRING, initial_values=initial_values, title='%s Project' % ('Edit' if self._dict else 'New'))
self.form.grid(pady=10, padx=10)
return self.form.inputs[0]
def apply(self):
'''
called when ok button is pressed
'''
self.output = {
'name': self.form.values[0],
'width': self.form.values[1],
'height': self.form.values[2],
'bgcolor': self.form.values[3],
'fullscreen': self.form.values[4]
}
def validate(self):
width = self.form.values[1]
height = self.form.values[2]
if width <= 0 or height <= 0:
boring.dialog.MessageBox.warning(parent=self,
title='Wrong data',
message='Invalid width/height')
return False
if not self.form.values[0]:
boring.dialog.MessageBox.warning(parent=self,
title='Project title',
message='Invalid project name')
return False
return True |
5,746 | 2d444c00e4dbdcb143d19752cd1a751169de73d3 | import sys
import os
import csv
import urllib2, socket, time
import gzip, StringIO
import re, random, types
from bs4 import BeautifulSoup
from datetime import datetime
import json
from HTMLParser import HTMLParser
class MLStripper(HTMLParser):
def __init__(self):
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
def extractData(url,title):
data=""
req=urllib2.Request(url)
response=urllib2.urlopen(req)
html_data=response.read()
soup=BeautifulSoup(html_data)
[s.extract() for s in soup('script')]
d=re.compile(r'.*%s.*' % title)
last_elem=0
for elem in soup(text=d):
last_elem=elem
if last_elem!=0:
p1=last_elem.parent
try1=1
while len(data)<1000:
parent=p1.parent
p1=parent
data=""
for each_child in parent.findChildren():
data+=each_child.get_text().strip().replace('\n','')
print try1
try1+=1
else:
data=""
for each_child in soup.body.findChildren():
data+=each_child.get_text().strip().replace('\n','')
return data
def readData(input_file):
data=json.loads(input_file.read())
for each_r in data:
if each_r['ID']>=1:
s = MLStripper()
s.feed(each_r['title'])
title =s.get_data()
val=len(title)/2
val=val/2
print title[:-val]
article_data=extractData(each_r['url'],title)
print 'url',each_r['url']
print article_data
print '##############################################'
raw_input()
if __name__=="__main__":
if sys.argv>=2:
input_file=open(sys.argv[1],"r")
readData(input_file)
else:
print "Usage: python extractnew.py <data_file_location>"
|
5,747 | d5acde6c6139833c6631a2d88a181cd019d3d2da | #Charlie Quinn if.py
#Check < in an 'if' statement
#use a 'while' loop to make testing easier
def income_input(prompt_message):
prompt = prompt_message + ' '
temp = input(prompt)
#get input from user
return float(temp)
do_again = 'y'
while do_again =='y':
income = income_input("\nHow much did you make this year?: ")
if income < 15001:
rate = .005
elif income < 17501:
rate = .006
elif income < 22401:
rate = .010
elif income < 47301:
rate = .014
elif income < 75001:
rate = .018
elif income < 112450:
rate = .022
elif income < 277000:
rate = .027
else:
rate = .03
taxesdue = income *rate
print("Income: ",income,"\nRate: ",rate,"\nTaxes Due: ",taxesdue)
#loop will end when you type in an n
do_again = input("\nAnother one? (y or no) ")
|
5,748 | 8d8c211895fd43b1e2a38216693b0c00f6f76756 | #Main thread for starting the gui
import cv2
import PIL
from PIL import Image,ImageTk
from tkinter import *
from matplotlib import pyplot as pt
from matplotlib.image import imread
from control.control import Control
control=Control()
#gives the indtruction for saving the current frame
def takePicture():
global setImage
setImage=True
#add the rectangles to the image where the IA found a products
#**ARGS: List with the triangles
def addRectangles (locations):
_, axe = pt.subplots()
img=imread("hola.jpg")
cv2image = cv2.cvtColor(img, cv2.COLOR_BGR2RGBA)
axe.imshow(cv2image)
alto, ancho, _ = img.shape
for item in locations:
axe.add_patch(item)
pt.savefig('result.png')
#window metric
width, height = 800, 700
#we use open cv to take the frames
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
#UI elementes
root = Tk()
root.bind('<Escape>', lambda e: root.quit())
lmain = Label(root)
lmain.pack()
lProccessedData = Label(root)
lProccessedData.pack()
B = Button( text ="Start", command = takePicture)
B.pack()
textEditor = Text(root, width=43, height=5)
textEditor.pack()
textEditor.place(x=400, y=400)
#Initial wigets state
setImage=False
selectedImage=None
root.geometry("900x600")
root.resizable(False, False)
#set the total price
#**ARGS: List with all the products found
def set_count(products):
div={"Harina de Trigo La Nieve":1700,"Papitas margarita":1000,"Lentejas":1800,"Shampoo":13900,"Tarrito rojo":13000,"Polvo de bizcocho":2000}
div_temp=[]
a=""
b=0
print(products)
for item in products:
if item in div_temp:
continue
div_temp.append(item)
c=products.count(item)*div[item]
b=b+c
print (item)
a=a+item+ " "+str(products.count(item))+ " " +str(c)+" \n"
textEditor.insert('1.0', "")
a=a+" \n\n\n Toral:"+str(b)+"\n\n"
textEditor.insert('1.0', a)
#show the frame captured by the camera
def show_frame():
global setImage
global selectedImage
_, frame = cap.read()
frame = cv2.flip(frame, 1)
cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
orig=cv2image
img = PIL.Image.fromarray(cv2image)
img=img.resize((400, 300))
imgtk = ImageTk.PhotoImage(image=img)
lmain.imgtk = imgtk
lmain.configure(image=imgtk)
if setImage:
selectedImage=frame
setImage = False
cv2.imwrite('hola.jpg', selectedImage)
res=control.get_results('hola.jpg')
addRectangles(res[0])
set_count(res[1])
selectedImage=cv2.imread('result.png')
selectedImage=PIL.Image.fromarray(selectedImage)
selectedImage=selectedImage.resize((400, 300))
imgtk = ImageTk.PhotoImage(image=selectedImage)
selectedImage=imgtk
B["state"] = NORMAL
lmain.after(10, show_frame)
lmain.place(x=10, y=40)
B.place(x=10, y=500)
lProccessedData.place(x=470, y=40)
lProccessedData.configure(image=selectedImage)
#Start UI
show_frame()
root.mainloop()
|
5,749 | 276d7ac493ddcb327dbce279d9f4bc8a74c98245 | __author__ = 'Jager'
from equipment import Equipment
class Weapon (Equipment):
def __init__(self, name, power):
super(Weapon, self).__init__(name)
self.power = power
@staticmethod
def fromJSON(jsonstr):
obj = Equipment.fromJSON(jsonstr)
return Weapon(obj["name"], obj["power"])
def __str__(self):
return "{}: Power({})".format(self.name, self.power) |
5,750 | c847e7abe36b62c4518bb535789064e22b5f1db7 | import pymel.core as pm
from alShaders import *
class AEalLayerColorTemplate(alShadersTemplate):
controls = {}
params = {}
def setup(self):
self.params.clear()
self.params["layer1"] = Param("layer1", "Layer 1", "The background layer (will be blended over black if its alpha is not 1.", "rgb", presets=None)
self.params["layer1a"] = Param("layer1a", "Layer 1 Alpha", "The alpha of the background layer", "float", presets=None)
self.params["layer1blend"] = Param("layer1blend", "Mode", "Blend mode for the background layer.", "enum", presets=None)
self.params["layer2"] = Param("layer2", "Layer 2", "The color plugged in here will be blended over the layers below according to its alpha and blend mode.", "rgb", presets=None)
self.params["layer2a"] = Param("layer2a", "Layer 2 Alpha", "The alpha used to blend this layer over the layers below.", "float", presets=None)
self.params["layer2blend"] = Param("layer2blend", "Mode", "The blend mode used to blend this layer over the layers below.", "enum", presets=None)
self.params["layer3"] = Param("layer3", "Layer 3", "The color plugged in here will be blended over the layers below according to its alpha and blend mode.", "rgb", presets=None)
self.params["layer3a"] = Param("layer3a", "Layer 3 Alpha", "The alpha used to blend this layer over the layers below.", "float", presets=None)
self.params["layer3blend"] = Param("layer3blend", "Mode", "The blend mode used to blend this layer over the layers below.", "enum", presets=None)
self.params["layer4"] = Param("layer4", "Layer 4", "The color plugged in here will be blended over the layers below according to its alpha and blend mode.", "rgb", presets=None)
self.params["layer4a"] = Param("layer4a", "Layer 4 Alpha", "The alpha used to blend this layer over the layers below.", "float", presets=None)
self.params["layer4blend"] = Param("layer4blend", "Mode", "The blend mode used to blend this layer over the layers below.", "enum", presets=None)
self.params["layer5"] = Param("layer5", "Layer 5", "The color plugged in here will be blended over the layers below according to its alpha and blend mode.", "rgb", presets=None)
self.params["layer5a"] = Param("layer5a", "Layer 5 Alpha", "The alpha used to blend this layer over the layers below.", "float", presets=None)
self.params["layer5blend"] = Param("layer5blend", "Mode", "The blend mode used to blend this layer over the layers below.", "enum", presets=None)
self.params["layer6"] = Param("layer6", "Layer 6", "The color plugged in here will be blended over the layers below according to its alpha and blend mode.", "rgb", presets=None)
self.params["layer6a"] = Param("layer6a", "Layer 6 Alpha", "The alpha used to blend this layer over the layers below.", "float", presets=None)
self.params["layer6blend"] = Param("layer6blend", "Mode", "The blend mode used to blend this layer over the layers below.", "enum", presets=None)
self.params["layer7"] = Param("layer7", "Layer 7", "The color plugged in here will be blended over the layers below according to its alpha and blend mode.", "rgb", presets=None)
self.params["layer7a"] = Param("layer7a", "Layer 7 Alpha", "The alpha used to blend this layer over the layers below.", "float", presets=None)
self.params["layer7blend"] = Param("layer7blend", "Mode", "The blend mode used to blend this layer over the layers below.", "enum", presets=None)
self.params["layer8"] = Param("layer8", "Layer 8", "The color plugged in here will be blended over the layers below according to its alpha and blend mode.", "rgb", presets=None)
self.params["layer8a"] = Param("layer8a", "Layer 8 Alpha", "The alpha used to blend this layer over the layers below.", "float", presets=None)
self.params["layer8blend"] = Param("layer8blend", "Mode", "The blend mode used to blend this layer over the layers below.", "enum", presets=None)
self.addSwatch()
self.beginScrollLayout()
self.addCustomRgb("layer1")
self.addCustomFlt("layer1a")
self.addControl("layer1blend", label="Mode", annotation="Blend mode for the background layer.")
self.addCustomRgb("layer2")
self.addCustomFlt("layer2a")
self.addControl("layer2blend", label="Mode", annotation="The blend mode used to blend this layer over the layers below.")
self.addCustomRgb("layer3")
self.addCustomFlt("layer3a")
self.addControl("layer3blend", label="Mode", annotation="The blend mode used to blend this layer over the layers below.")
self.addCustomRgb("layer4")
self.addCustomFlt("layer4a")
self.addControl("layer4blend", label="Mode", annotation="The blend mode used to blend this layer over the layers below.")
self.addCustomRgb("layer5")
self.addCustomFlt("layer5a")
self.addControl("layer5blend", label="Mode", annotation="The blend mode used to blend this layer over the layers below.")
self.addCustomRgb("layer6")
self.addCustomFlt("layer6a")
self.addControl("layer6blend", label="Mode", annotation="The blend mode used to blend this layer over the layers below.")
self.addCustomRgb("layer7")
self.addCustomFlt("layer7a")
self.addControl("layer7blend", label="Mode", annotation="The blend mode used to blend this layer over the layers below.")
self.addCustomRgb("layer8")
self.addCustomFlt("layer8a")
self.addControl("layer8blend", label="Mode", annotation="The blend mode used to blend this layer over the layers below.")
pm.mel.AEdependNodeTemplate(self.nodeName)
self.addExtraControls()
self.endScrollLayout()
|
5,751 | b07d042c61e9e6647822989444e72db2e01c64d0 | # Generated by Django 3.0.3 on 2020-02-09 06:29
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('devices_collect', '0004_auto_20200209_1304'),
]
operations = [
migrations.AlterField(
model_name='collectdevices',
name='generated_time',
field=models.DateTimeField(default=datetime.datetime(2020, 2, 9, 6, 28, 34, 547300, tzinfo=utc)),
),
]
|
5,752 | da2b946238b429188fe3fa50286658d4b5cdbf41 | import krait
from ctrl import ws
krait.mvc.set_init_ctrl(ws.WsPageController())
|
5,753 | d6213698423902771011caf6b5206dd4e3b27450 | import numpy as np
a = np.ones((3,4))
b = np.ones((4,1))
# a.shape = (3,4)
# b.shape = (4,1)
c = np.zeros_like(a)
for i in range(3):
for j in range(4):
c[i][j] = a[i][j] + b[j]
print(c)
d = a+b.T
print(d)
|
5,754 | ab343f88c84d45cf90bddd52623362f047c72d3c | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-18 07:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.CreateModel(
name='Member',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=50, unique=True, verbose_name='\u7528\u6237\u540d')),
('password', models.CharField(max_length=200, verbose_name='\u5bc6\u7801')),
('email', models.EmailField(blank=True, max_length=254, null=True, unique=True, verbose_name='\u7535\u5b50\u90ae\u4ef6')),
('phone', models.CharField(blank=True, max_length=20, null=True, unique=True, verbose_name='phone')),
('gender', models.SmallIntegerField(choices=[(0, 'unset'), (1, 'male'), (2, 'female')], default=0, null=True, verbose_name='gender')),
('real_name', models.CharField(blank=True, max_length=100, null=True, verbose_name='real name')),
('birth_of_date', models.DateField(null=True, verbose_name='birth of date')),
('is_superuser', models.BooleanField(default=False, verbose_name='whether super user or not')),
('is_staff', models.BooleanField(default=False, verbose_name='whether enter backend or not')),
('last_login', models.DateTimeField(null=True, verbose_name='last login datetime')),
('create', models.DateTimeField(auto_now_add=True, verbose_name='create datetime')),
('modify', models.DateTimeField(auto_now=True, verbose_name='modify datetime')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'db_table': 'member',
},
),
]
|
5,755 | 00c57e7e26a3181ab23697a25257aca479d9ee05 | frase = "todos somos promgramadores"
palabras = frase.split()
for p in palabras:
print(palabras[p])
#if p[-2] == "o":
|
5,756 | 468b5bd8d7b045ca8dd46c76a1829fc499e16950 | import time
import ephem
import serial
import nmea
import orientation
import sys
import threading
from geomag import geomag
#Constants
initial_az = 180
initial_alt = 90
min_elevation = 10.0
sleep_time = 1.0
unwind_threshold = 180
sleep_on_unwind = 45.0
last_lon = '-88.787'
last_lat = '41.355'
last_heading = 0.0
mount_port = '/dev/ttyUSB0'
arduino_port = '/dev/ttyACM0'
class SerialTester:
def write(self,line):
print(line)
def read(self, num):
return
class Antenna:
azimuth = initial_az
altitude = initial_alt
parked = True
def set_position(self, az, alt):
self.azimuth = az
self.altitude = alt
az_int = round(az)
alt_int = round(alt)
ser.write(":Sz " + str(az_int) + "*00:00#")
ser.write(":Sa +" + str(alt_int) + "*00:00#")
ser.write(":MS#")
ser.read(64)
def park(self):
if (self.parked):
print('Antenna Parked')
else:
print('Parking Antenna')
self.set_position(initial_az, initial_alt)
self.parked = True
def move(self, az, alt):
if (self.parked):
self.parked = False
# Unwrap Cable if Azimuth will cross through True North
# In the above case, Set Azimuth to 180 Degrees, then pick up
# normal tracking
# Then sleep 45 seconds to give the positioner time to
# reposition
if ((self.azimuth - az) > unwind_threshold):
self.set_position(initial_az, self.altitude)
print('Repositioning to unwrap cable')
time.sleep(sleep_on_unwind)
else:
print('Tracking Mode')
self.set_position(az, alt)
def reset():
obs = ephem.Observer()
#Set LAT/LON Coordinates to IMSA's location
obs.date = ephem.now()
obs.lon = last_lon
obs.lat = last_lat
obs.elevation = 0.0
return obs
def update_gps(gprmc, obs):
obsc = obs.copy()
try:
if gprmc.is_fixed() and gprmc.checksum():
datetime = gprmc.get_date() + " " + gprmc.get_time()
obsc.date = datetime
obsc.lat = str(gprmc.get_lat())
last_lat = str(gprmc.get_lat())
obsc.lon = str(gprmc.get_lon())
last_lon = str(gprmc.get_lon())
return obsc
except:
return obs
def setup_serial(port, baud):
# Set Serial Port - USB0
ser = serial.Serial(port, baud)
print("Port used:" + ser.name)
return ser
# return SerialTester()
def setup_satellite():
# Read in TLE for target satellite ICO F2
icof2 = ephem.readtle('ICO F2',
'1 26857U 01026A 16172.60175106 -.00000043 00000-0 00000+0 0 9997',
'2 26857 044.9783 5.1953 0013193 227.2968 127.4685 03.92441898218058')
return icof2
def to_degrees(radians):
return radians / ephem.degree
def get_sat_position(icof2, home):
icof2.compute(home)
icof2_az = to_degrees(icof2.az)
icof2_alt = to_degrees(icof2.alt)
print('Current Satellite Location: Azimuth %3.2f deg, Altitude %3.2f deg' % (icof2_az, icof2_alt))
return icof2_az, icof2_alt
def read_message(port):
while True:
try:
line = port.readline().decode("ascii").replace('\r', '').replace('\n', '')
except:
line = ""
if len(line) > 0 and line[0] == "$":
return line
def nmea_tester(sentence):
mes = nmea.nmea(sentence)
print("Checksum: ")
print(mes.checksum())
print("Reformatted Date & Time: ")
print(mes.get_date())
print(mes.get_time())
print("Lat, Lon: ")
print(str(mes.get_lat()) + ", " + str(mes.get_lon()))
print("Heading, MagVar")
print(str(mes.get_magnetic_heading()) + ", " + str(mes.get_magnetic_var()))
def arduino_tester():
ard = setup_serial(arduino_port, 115200)
icof2 = setup_satellite()
while True:
try:
line = read_nmea(ard)
home = reset()
home, heading = update(nmea.nmea(line))
print(home.lat)
print(home.lon)
print(home.date)
print(heading)
except:
break
def display_stats(orient, position, obs):
try:
print("\n"*65)
magvar = get_magnetic_var(float(last_lat), float(last_lon))
print(''' _.:::::._
.:::'_|_':::.
/::' --|-- '::\\
|:" .---"---. ':|
|: ( O R E O ) :|
|:: `-------' ::|
\:::.......:::/
':::::::::::'
`'"""'`\n\n''')
print("Time: {}\n".format(ephem.now()))
print('GPS\n===\nFix: {fix}, Lat: {lat}, Lon: {lon}'
.format(fix = position.is_fixed(), lat = obs.lat, lon = obs.lon))
print(position.unparsed)
print("Sensor\n===")
print('Heading: {heading:7.2f}, Pitch: {pitch:7.2f}, '\
'Roll: {roll:7.2f}\n---'.format(heading = orient.get_heading(),
pitch = orient.get_pitch(),
roll = orient.get_roll()))
print('CALIBRATION Sys: {cal[0]}, Gyr: {cal[1]},'\
' Acc: {cal[2]}, Mag: {cal[3]}\n'
.format(cal=orient.get_calibration()))
print("\nMagnetic Declination: {magvar:7.2f}, "
"Adjusted Heading: {true_heading:7.2f}"
.format(magvar = magvar,
true_heading= (orient.get_heading() +
magvar+720)%360))
print('Bearing: {bearing:7.2f}, Speed: {speed:4.2f}'
.format(bearing = position.get_bearing(),
speed = position.get_speed()))
except:
pass
def get_magnetic_var(lat, lon):
gm = geomag.GeoMag()
magobj = gm.GeoMag(lat, lon)
return magobj.dec
home = reset()
ard = setup_serial(arduino_port, 115200)
counter = time.time()
f = open("logs/log_"+str(float(ephem.now()))+".csv", 'w')
f.write("Epoch Time,Speed,Sensor,GPS,Waypoint\n")
orient = orientation.orientation("$IMU,0,0,0,0,0,0,0,0,0")
position = nmea.nmea("$GPRMC,0,V,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0")
magvar = get_magnetic_var(float(last_lat), float(last_lon))
class myThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
global val
global ii
val = '@'
ii = ''
while True:
ii = input()
if ii == "q":
break
val = chr(ord(val) + 1)
pass
thread1 = myThread()
thread1.start()
while True:
mes = (read_message(ard))
if mes[:2] == "$G":
try:
position = nmea.nmea(mes)
except:
pass
elif mes[:2] == "$I":
try:
orient = orientation.orientation(mes)
except:
pass
# home.date = "2016-06-28 12:00:00"
# Operate the antenna if the satellite's elevation is greater than 10
# degrees
# If the elevation IS above 10 degrees and the antenna is parked, then
# unlatch the park_latch variable
home = update_gps(position, home)
home.date = ephem.now()
magvar = get_magnetic_var(float(last_lat), float(last_lon))
display_stats(orient, position, home)
print(val)
if time.time() - counter >= 1.0:
counter = time.time()
try:
f.write(str(ephem.now())+",")
f.write(str(position.get_speed())+",")
f.write(str(orient.get_heading())+",")
f.write(str(position.get_bearing())+",")
f.write(val+"\n")
except:
f.write("x\n")
if ii == "q":
f.close()
break
''' icof2_az, icof2_alt = get_sat_position(icof2, home)
if (icof2_alt >= min_elevation):
antenna.set_position(icof2_az - heading, icof2_alt)
else:
antenna.park()'''
|
5,757 | e82b9aa0f7dc669b3d5622c093b766c7e168221c | import mxnet as mx
import numpy as np
import logging
# Example performance:
# INFO:root:Epoch[34] Train-accuracy=0.601388
# INFO:root:Epoch[34] Validation-accuracy=0.620949
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# running device
dev = mx.gpu()
# batch size and input shape
batch_size = 64
data_shape = (3, 36, 36)
# training data info for learning rate reduction
num_examples = 20000
epoch_size = num_examples / batch_size
lr_factor_epoch = 15
# model saving parameter
model_prefix = "./models/sample_net"
# train data iterator
train = mx.io.ImageRecordIter(
path_imgrec = "tr.rec",
mean_r = 128,
mean_g = 128,
mean_b = 128,
scale = 0.0078125,
max_aspect_ratio = 0.35,
data_shape = data_shape,
batch_size = batch_size,
rand_crop = True,
rand_mirror = True)
# validate data iterator
val = mx.io.ImageRecordIter(
path_imgrec = "va.rec",
mean_r = 128,
mean_b = 128,
mean_g = 128,
scale = 0.0078125,
rand_crop = False,
rand_mirror = False,
data_shape = data_shape,
batch_size = batch_size)
# network definition
# stage 1
net = mx.sym.Variable("data")
net = mx.sym.Convolution(data=net, kernel=(5, 5), num_filter=32, pad=(2, 2))
net = mx.sym.Activation(data=net, act_type="relu")
net = mx.sym.Convolution(data=net, kernel=(5, 5), num_filter=64, pad=(2, 2))
net = mx.sym.Activation(data=net, act_type="relu")
net = mx.sym.Pooling(data=net, pool_type="max", kernel=(3, 3), stride=(2, 2))
# stage 2
net = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=64, pad=(1, 1))
net = mx.sym.Activation(data=net, act_type="relu")
net = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=64, pad=(1, 1))
net = mx.sym.Activation(data=net, act_type="relu")
net = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=128, pad=(1, 1))
net = mx.sym.Activation(data=net, act_type="relu")
net = mx.sym.Pooling(data=net, pool_type="max", kernel=(3, 3), stride=(2, 2))
# stage 3
net = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=256, pad=(1, 1))
net = mx.sym.Activation(data=net, act_type="relu")
net = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=256, pad=(1, 1))
net = mx.sym.Activation(data=net, act_type="relu")
net = mx.sym.Pooling(data=net, pool_type="avg", kernel=(9, 9), stride=(1, 1))
# stage 4
net = mx.sym.Flatten(data=net)
net = mx.sym.Dropout(data=net, p=0.25)
net = mx.sym.FullyConnected(data=net, num_hidden=121)
net = mx.symbol.SoftmaxOutput(data=net, name='softmax')
# Model parameter
# This model will reduce learning rate by factor 0.1 for every 15 epoch
model = mx.model.FeedForward(
ctx = dev,
symbol = net,
num_epoch = 35,
learning_rate = 0.01,
momentum = 0.9,
wd = 0.0001,
clip_gradient = 5,
lr_scheduler = mx.lr_scheduler.FactorScheduler(step=epoch_size * lr_factor_epoch, factor = 0.1),
initializer = mx.init.Xavier(factor_type="in", magnitude=2.34))
# fit the model
model.fit(
X = train,
eval_data = val,
batch_end_callback = mx.callback.Speedometer(batch_size, 50),
epoch_end_callback = mx.callback.do_checkpoint(model_prefix))
|
5,758 | d1200006b8d7a18b11b01eff4fbf38d9dfd8958e | t = int(input())
while t:
x = list(map(int, input().split()))
x.sort()
if(x[0]+x[1]==x[2]):
print("YES")
else:
print("NO")
t-=1 |
5,759 | f563bb5bb32d3653d8a4115c75eda80b676ae3c6 | import pathlib
from setuptools import setup
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
# Version: major.minor.patch
VERSION = "1.0.1"
REQUIREMENTS = (HERE / "requirements.txt").read_text()
REQUIREMENTS = REQUIREMENTS.split('\n')
# This call to setup() does all the work
setup(
name = "Antennass",
version = VERSION,
description = "A class project that plots far field antenna array patterns",
long_description = README,
long_description_content_type = "text/markdown",
url = "https://github.com/MdeVillefort/Antennass",
author = "Monsieur de Villefort",
author_email = "ethanmross92@gmail.com",
classifiers = [
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8"
],
packages = ["Antennass"],
install_requires = REQUIREMENTS,
entry_points = {
"console_scripts" : [
"antennass-cli=Antennass.antennass_cli:main",
"antennass-gui=Antennass.antennass_gui:main",
]
},
)
|
5,760 | d5d31920f7fd4ed2913c5880dba61c2015181be9 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 17 22:28:30 2019
@author: donsdev
"""
arr = []
sub = []
n = int(input())
while n > 0:
arr.append(n)
n-=1
while len(arr) + len(sub) > 1:
while len(arr) > 1:
arr.pop()
sub.append(arr.pop())
arr = sub[::-1] + arr
sub = []
print(arr[0]) |
5,761 | b25e9374458ead85535495e77a5c64117a8b1808 | """
You have a number and you need to determine which digit in this number is the biggest.
Input: A positive int.
Output: An Int (0-9).
Example:
max_digit(0) == 0
max_digit(52) == 5
max_digit(634) == 6
max_digit(10000) == 1
"""
def max_digit(number: int) -> int:
return max(int(i) for i in str(number))
print(max_digit(634))
print(max_digit(102475))
|
5,762 | 7fb568880c40895870a0c541d9a88a8070a79e5b | import datetime
# weightloss script
currentWeight = 73
goalWeight = 67
avgKgPerWeek = 0.45
startDate = datetime.date.today()
endDate = startDate
while currentWeight > goalWeight:
# adding 7 days to simulate a week passing
endDate += datetime.timedelta(days=7)
currentWeight -= avgKgPerWeek
print(endDate, round(currentWeight, 2))
print(f"Start date: {startDate.month.no}, end date: {endDate} ")
print(f"Weeks to achieve weight goal: {(endDate - startDate).days // 7}, {(endDate - startDate).days} days") |
5,763 | 93b00b5c1bec38d2a4ac109f1533d3c0d9e99044 | n = input("Enter a number: ")
def fact(num):
factorial = 1
if int(num) >= 1:
for i in range (1,int(n)+1):
factorial = factorial * i
return factorial
print(fact(n)) |
5,764 | db9919ab15988828d24b4430a124841f225860cc | from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
import cryptography.hazmat.primitives.ciphers as ciphers
import struct
import secrets
import random
from typing import List
LOCO_PUBLICKEY = serialization.load_pem_public_key(b"""
-----BEGIN PUBLIC KEY-----
MIIBIDANBgkqhkiG9w0BAQEFAA
OCAQ0AMIIBCAKCAQEApElgRBx+
g7sniYFW7LE8ivrwXShKTRFV8l
XNItMXbN5QSC8vJ/cTSOTS619X
v5Zx7xXJIk4EKxtWesEGbgZpEU
P2xQ+IeH9oz0JxayEMvvD1nVNA
WgpWE4pociEoArsK7qY3YwXb1C
iDHo9hojLv7djbo3cwXvlyMh4T
UrX2RjCZPlVJxk/LVjzcl9ohJL
kl3eoSrf0AE4kQ9mk3+raEhq5D
v+IDxKYX+fIytUWKmrQJusjtre
9oVUX5sBOYZ0dzez/XapusEhUW
ImmB6mciVXfRXQ8IK4IH6vfNyx
MSOTfLEhRYN2SMLzplAYFiMV53
6tLS3VmG5GJRdkpDubqPeQIBAw==
-----END PUBLIC KEY-----"""
)
class V2SLClient:
"""
V2SL Socket Client
"""
def __init__(self):
self._aeskey = secrets.randbits(128).to_bytes(16, "little")
self._readbuf = bytearray()
self._handshaked = False
def handshake(self):
encrypted_key = LOCO_PUBLICKEY.encrypt(
self._aeskey,
padding.OAEP(
padding.MGF1(hashes.SHA1()),
hashes.SHA1(), None
)
)
handshake_pkt = struct.pack("<III", len(encrypted_key), 12, 2) + encrypted_key
return handshake_pkt
def _send(self, data: bytes) -> bytes:
iv = random.randbytes(16)
self._aes = ciphers.Cipher(
ciphers.algorithms.AES(self._aeskey),
ciphers.modes.CFB(iv)
)
enc = self._aes.encryptor()
enc_data = enc.update(data) + enc.finalize()
enc_pkt = struct.pack("<I", len(enc_data)+16) + iv + enc_data
return enc_pkt
def _recv(self) -> bytes:
if len(self._readbuf) < 4:
return None
enc_len, = struct.unpack("<I", self._readbuf[:4])
if len(self._readbuf[4:]) < enc_len:
return None
dec = self._aes.decryptor()
data = dec.update(self._readbuf[4:4+enc_len]) + dec.finalize()
del self._readbuf[:4+enc_len]
iv = data[:16]
return data[16:]
def send(self, data: bytes, split=2048) -> List[bytes]:
segments = []
if not self._handshaked:
self._handshaked = True
segments.append(self.handshake())
sentbytes = 0
while sentbytes < len(data):
segments.append(self._send(data[sentbytes:sentbytes+split]))
sentbytes += split
return segments
def recv(self, data) -> List[bytes]:
segments = []
self._readbuf += data
while (segment := self._recv()):
segments.append(segment)
return segments
|
5,765 | 81cec5c1f28e92bf8e4adc2e2c632e072ed1f901 | # 1장 말뭉치와 워드넷 - 외부 말뭉치 다운로드, 로드하고 액세스하기
from nltk.corpus import CategorizedPlaintextCorpusReader
from random import randint
# 말뭉치 읽기
reader = CategorizedPlaintextCorpusReader(r'/workspace/NLP_python/tokens', r'.*\.txt', cat_pattern=r'(\w+)/*')
print(reader.categories())
print(reader.fileids())
# 샘플 문서 출력
# pos, neg 카테고리의 샘플 목록
posFiles = reader.fileids(categories='pos')
negFiles = reader.fileids(categories='neg')
# pos, neg 카테고리에서 각각 임의의 파일 선택
fileP = posFiles[randint(0, len(posFiles)-1)]
fileN = negFiles[randint(0, len(negFiles)-1)]
print(fileP)
print(fileN)
# 액세스한 임의 파일을 문장으로 출력
for w in reader.words(fileP):
print(w + ' ', end='')
if w is '.':
print()
for w in reader.words(fileN):
print(w + ' ', end='')
if w is '.':
print()
|
5,766 | fe1c499efe492dbd4f5c9b99bd6339c503c7902b | import os
from conan import ConanFile
from conan.tools.build import check_min_cppstd
from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout
from conan.tools.files import copy, get, replace_in_file, rmdir
from conan.tools.scm import Version
from conan.errors import ConanInvalidConfiguration
required_conan_version = ">=1.57.0"
class RuyConan(ConanFile):
name = "ruy"
description = "ruy is a matrix multiplication library.\n" \
"Its focus is to cover the matrix multiplication needs of neural network inference engines\n"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/google/ruy"
license = "Apache-2.0"
topics = ("matrix", "multiplication", "neural", "network", "AI", "tensorflow")
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
}
@property
def _minimum_compilers_version(self):
return {
"Visual Studio": "15",
"msvc": "191",
"gcc": "5",
"clang": "3.4",
"apple-clang": "5.1",
}
def validate(self):
if self.settings.compiler.get_safe("cppstd"):
check_min_cppstd(self, 14)
minimum_version = self._minimum_compilers_version.get(str(self.settings.compiler), False)
if not minimum_version:
self.output.warning("Compiler is unknown. Assuming it supports C++14.")
elif Version(self.settings.compiler.version) < minimum_version:
raise ConanInvalidConfiguration("Build requires support for C++14. Minimum version for {} is {}"
.format(str(self.settings.compiler), minimum_version))
if str(self.settings.compiler) == "clang" and Version(self.settings.compiler.version) <= 5 and self.settings.build_type == "Debug":
raise ConanInvalidConfiguration("Debug builds are not supported on older versions of Clang (<=5)")
def config_options(self):
if self.settings.os == "Windows":
self.options.rm_safe("fPIC")
def configure(self):
if self.options.shared:
self.options.rm_safe("fPIC")
def requirements(self):
self.requires("cpuinfo/cci.20220228")
def layout(self):
cmake_layout(self, src_folder="src")
def source(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)
def generate(self):
tc = CMakeToolchain(self)
tc.cache_variables["RUY_MINIMAL_BUILD"] = True
tc.cache_variables["RUY_FIND_CPUINFO"] = True
# Ruy public headers don't have API decorators,
# export everything to support shared libraries on Windows
tc.variables["CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS"] = True
tc.generate()
deps = CMakeDeps(self)
deps.generate()
def _patch_sources(self):
cmakelists = os.path.join(self.source_folder, "CMakeLists.txt")
patches = {
#Remove the invocation after project(), see https://github.com/google/ruy/issues/328
"cmake_minimum_required(VERSION 3.13)": "",
# Ensure `cmake_minimum_required` is called first
"# Copyright 2021 Google LLC": "# Copyright 2021 Google LLC\ncmake_minimum_required(VERSION 3.13)",
}
for pattern, patch in patches.items():
replace_in_file(self, cmakelists, pattern, patch)
# 1. Allow Shared builds
replace_in_file(self, os.path.join(self.source_folder, "cmake", "ruy_cc_library.cmake"),
"add_library(${_NAME} STATIC",
"add_library(${_NAME}"
)
def build(self):
self._patch_sources()
cmake = CMake(self)
cmake.configure()
cmake.build()
def package(self):
cmake = CMake(self)
cmake.install()
copy(self, "LICENSE", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder)
rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))
def package_info(self):
self.cpp_info.libs = ["ruy_frontend",
"ruy_context",
"ruy_trmul",
"ruy_thread_pool",
"ruy_blocking_counter",
"ruy_prepare_packed_matrices",
"ruy_ctx",
"ruy_allocator",
"ruy_prepacked_cache",
"ruy_tune",
"ruy_wait",
"ruy_apply_multiplier",
"ruy_block_map",
"ruy_context_get_ctx",
"ruy_cpuinfo",
"ruy_denormal",
"ruy_have_built_path_for_avx",
"ruy_have_built_path_for_avx2_fma",
"ruy_have_built_path_for_avx512",
"ruy_kernel_arm",
"ruy_kernel_avx",
"ruy_kernel_avx2_fma",
"ruy_kernel_avx512",
"ruy_pack_arm",
"ruy_pack_avx",
"ruy_pack_avx2_fma",
"ruy_pack_avx512",
"ruy_system_aligned_alloc",
"ruy_profiler_instrumentation",
"ruy_profiler_profiler"
]
if self.settings.os in ["Linux", "FreeBSD"]:
self.cpp_info.system_libs.extend(["m", "pthread"])
|
5,767 | 9f760c0cf2afc746a1fc19ac68d1b2f406c7efe1 | from elasticsearch import Elasticsearch, helpers
from bso.server.main.config import ES_LOGIN_BSO_BACK, ES_PASSWORD_BSO_BACK, ES_URL
from bso.server.main.decorator import exception_handler
from bso.server.main.logger import get_logger
client = None
logger = get_logger(__name__)
@exception_handler
def get_client():
global client
if client is None:
client = Elasticsearch(ES_URL, http_auth=(ES_LOGIN_BSO_BACK, ES_PASSWORD_BSO_BACK))
return client
@exception_handler
def get_doi_not_in_index(index, dois):
es = get_client()
results = es.search(
index=index,
body={"query": {"bool": {"filter": [{'terms': {'doi.keyword': dois}}]}}, "fields": ['doi'], "size": len(dois),
"_source": False},
request_timeout=60*5
)
existing_dois = set([e['fields']['doi'][0] for e in results['hits']['hits']])
not_indexed_dois = set(dois) - existing_dois
res = []
for doi in list(not_indexed_dois):
res += get_doi_not_in_index_one(index, doi)
logger.debug(f'{len(res)} dois not in index detected')
return res
@exception_handler
def get_doi_not_in_index_one(index, doi):
es = get_client()
results = es.search(
index=index,
request_cache=False,
body={"query": {"bool": {"filter": [{'term': {'doi.keyword': doi}}]}}, "fields": ['doi'], "_source": True},
request_timeout=60*5
)
existing_dois = set([e['fields']['doi'][0] for e in results['hits']['hits']])
not_indexed_dois = set([doi]) - existing_dois
return list(not_indexed_dois)
@exception_handler
def update_local_affiliations(index, current_dois, local_affiliations):
es = get_client()
logger.debug(f'updating with local affiliations {local_affiliations} for {len(current_dois)} dois')
body = {
"script": {
"lang": "painless",
"refresh": True,
"conflicts": "proceed",
"inline": "if (ctx._source.bso_local_affiliations == null) {ctx._source.bso_local_affiliations ="
" new ArrayList();} ctx._source.bso_local_affiliations.addAll(params.local_affiliations);"
"ctx._source.bso_local_affiliations = ctx._source.bso_local_affiliations.stream().distinct()"
".sorted().collect(Collectors.toList())",
"params": {"local_affiliations": local_affiliations}
},
"query": {
"bool": {
"filter": [{
"terms": {
"doi.keyword": current_dois
}
}]
}
}
}
es.update_by_query(index=index, body=body, request_timeout=60*5)
@exception_handler
def delete_index(index: str) -> None:
logger.debug(f'Deleting {index}')
es = get_client()
response = es.indices.delete(index=index, ignore=[400, 404])
logger.debug(response)
@exception_handler
def update_alias(alias: str, old_index: str, new_index: str) -> None:
es = get_client()
logger.debug(f'updating alias {alias} from {old_index} to {new_index}')
response = es.indices.update_aliases({
'actions': [
{'remove': {'index': old_index, 'alias': alias}},
{'add': {'index': new_index, 'alias': alias}}
]
})
logger.debug(response)
def get_analyzers() -> dict:
return {
'light': {
'tokenizer': 'icu_tokenizer',
'filter': [
'lowercase',
'french_elision',
'icu_folding'
]
}
}
def get_filters() -> dict:
return {
'french_elision': {
'type': 'elision',
'articles_case': True,
'articles': ['l', 'm', 't', 'qu', 'n', 's', 'j', 'd', 'c', 'jusqu', 'quoiqu', 'lorsqu', 'puisqu']
}
}
@exception_handler
def reset_index(index: str) -> None:
es = get_client()
delete_index(index)
settings = {
'analysis': {
'filter': get_filters(),
'analyzer': get_analyzers()
}
}
dynamic_match = None
if 'bso-publications' in index:
# dynamic_match = "*oa_locations"
dynamic_match = None
elif 'publications-' in index:
dynamic_match = "*authors"
mappings = { 'properties': {} }
# attention l'analyzer .keyword ne sera pas présent pour ce champs !
for f in ['title', 'affiliations.name', 'authors.first_name', 'authors.last_name', 'authors.full_name', 'authors.affiliations.name']:
mappings['properties'][f] = {
'type': 'text',
'analyzer': 'light'
}
if dynamic_match:
mappings["dynamic_templates"] = [
{
"objects": {
"match": dynamic_match,
"match_mapping_type": "object",
"mapping": {
"type": "nested"
}
}
}
]
response = es.indices.create(
index=index,
body={'settings': settings, 'mappings': mappings},
ignore=400 # ignore 400 already exists code
)
if 'acknowledged' in response and response['acknowledged']:
response = str(response['index'])
logger.debug(f'Index mapping success for index: {response}')
@exception_handler
def load_in_es(data: list, index: str) -> list:
es = get_client()
actions = [{'_index': index, '_source': datum} for datum in data]
ix = 0
indexed = []
for success, info in helpers.parallel_bulk(client=es, actions=actions, chunk_size=500, request_timeout=60,
raise_on_error=False):
if not success:
logger.debug(f'A document failed: {info}')
else:
indexed.append(data[ix])
ix += 1
logger.debug(f'{len(data)} elements imported into {index}')
return indexed
|
5,768 | ea4a55ed17c5cc2c6f127112af636ca885159c86 | n=int(input("please enter the number : "))
for i in range(11):
print(n," X ",i," = ",n*i) |
5,769 | 86e97e7eaf0d23ccf4154b5ffc853c5aee966326 | import random
from datetime import datetime
from slackbot.bot import respond_to
from .term_model import Term, Response
from ..botmessage import botsend, botwebapi
# すでに存在するコマンドは無視する
RESERVED = (
'drive', 'manual', 'jira', 'wikipedia', 'plusplus',
'translate', '翻訳',
'weather', '天気',
'term',
'shuffle', 'help', 'choice', 'ping', 'version', 'random', 'cal',
'google', 'image', 'map', 'gadmin',
'github',
'suddendeath',
'pycamp',
'lgtm',
)
# コマンド一覧を初期化
commands = {term.command for term in Term.select()}
@respond_to('^term\s+([\w-]+)$')
@respond_to('^term\s+create\s+([\w-]+)$')
@respond_to('^term\s+add\s+([\w-]+)$')
def term_create(message, command):
"""
指定されたコマンドを生成する
"""
if command in ('list', 'help'):
return
# コマンドは小文字に統一
command = command.lower()
# 予約語の場合は実行しない
if command in RESERVED:
botsend(message, 'コマンド `${}` は予約語なので登録できません'.format(command))
return
creator = message.body['user']
term, created = Term.get_or_create(command=command, creator=creator)
if not created:
# すでに登録してあるコマンドは登録しない
botsend(message, 'コマンド `${}` はすでに登録されています'.format(command))
else:
msg = 'コマンド `${}` を作成しました。\n'.format(command)
msg += '`${} add (レスポンス)` でレスポンスを追加できます'.format(command)
botsend(message, msg)
# コマンド一覧の set に追加
commands.add(command)
@respond_to('^term\s+(drop|del|delete)\s+([\w-]+)$')
def term_drop(message, subcommand, command):
"""
指定されたコマンドを消去する
"""
# コマンドは小文字に統一
command = command.lower()
# コマンドの存在チェック
if not _available_command(message, command):
return
# 用語コマンドと応答をまとめて削除
term = Term.get(command=command)
term.delete_instance(recursive=True)
term.save()
# コマンド一覧の set から削除
commands.remove(command)
botsend(message, 'コマンド `${}` を消去しました'.format(command))
def _create_attachments_for_list(pretext, data, command=True):
"""
指定されたリストの一覧を message.send_webapi で送信するための
attachments を生成する
"""
if command:
# ['foo', 'bar', 'baz'] -> '`$far`, `$bar`, `$baz`'
list_text = ', '.join(['`${}`'.format(x) for x in data])
else:
list_text = '\n'.join([x for x in data])
attachments = [{
'pretext': pretext,
'text': list_text,
'mrkdwn_in': ['pretext', 'text'],
}]
return attachments
@respond_to('^term\s+search\s+([\w-]+)$')
def term_search(message, keyword):
"""
指定したキーワードを含む用語コマンドの一覧を返す
"""
pretext = '`{}` を含む用語コマンドの一覧です'.format(keyword)
data = []
for command in sorted(commands):
if keyword in command:
data.append(command)
attachments = _create_attachments_for_list(pretext, data)
botwebapi(message, attachments)
@respond_to('^term\s+list$')
def term_list(message):
"""
現在使用可能な用語コマンドの一覧を返す
"""
pretext = '用語コマンドの一覧です'
attachments = _create_attachments_for_list(pretext, sorted(commands))
botwebapi(message, attachments)
def _available_command(message, command):
"""
指定されたコマンドが有効化どうかを返す
"""
result = True
if command in RESERVED:
result = False
elif command not in commands:
botsend(message, 'コマンド `${}` は登録されていません'.format(command))
result = False
return result
def _send_markdown_text(message, text):
"""
指定されたtextをmarkdown形式で送信する
"""
attachments = [{
'pretext': text,
'mrkdwn_in': ['pretext'],
}]
botwebapi(message, attachments)
@respond_to('^([\w-]+)$')
def return_response(message, command):
"""
用語コマンドに登録されている応答をランダムに返す
"""
if not _available_command(message, command):
return
response_set = Term.get(command=command).response_set
if len(response_set) == 0:
msg = 'コマンド `${}` には応答が登録されていません\n'.format(command)
msg += '`${} add (レスポンス)` で応答を登録してください'.format(command)
botsend(message, msg)
else:
response = random.choice(response_set)
_send_markdown_text(message, response.text)
@respond_to('^([\w-]+)\s+(.*)')
def response(message, command, params):
"""
用語コマンドの処理をする
"""
if not _available_command(message, command):
return
data = params.split(maxsplit=1)
subcommand = data[0]
try:
if subcommand == 'pop':
# 最後に登録された応答を削除
pop_response(message, command)
elif subcommand == 'list':
# 応答の一覧を返す
get_responses(message, command)
elif subcommand == 'search':
# 応答を検索
search_responses(message, command, data[1])
elif subcommand in ('del', 'delete', 'remove'):
# 応答を削除
del_response(message, command, data[1])
elif subcommand == 'add':
# 応答を追加
add_response(message, command, data[1])
else:
# サブコマンドが存在しない場合も追加
add_response(message, command, params)
except IndexError:
# ヘルプを返す
term_help(message)
pass
def _exist_response(command, text):
"""
指定されたコマンドに応答が登録されているかを調べて返す
"""
term = Term.get(command=command)
count = Response.select().where(Response.term == term,
Response.text == text).count()
if count == 0:
return False
else:
return True
def add_response(message, command, text):
"""
用語コマンドに応答を追加する
"""
# 登録済かどうかを確認する
if _exist_response(command, text):
reply = 'コマンド `${}` に「{}」は登録済みです'.format(command, text)
_send_markdown_text(message, reply)
return
term = Term.get(command=command)
creator = message.body['user']
# 用語を登録する
resp, created = Response.get_or_create(term=term, text=text,
creator=creator,
created=datetime.now())
resp.save()
text = 'コマンド `${}` に「{}」を追加しました'.format(command, text)
_send_markdown_text(message, text)
def del_response(message, command, text):
"""
用語コマンドから応答を削除する
"""
term = Term.get(command=command)
try:
response = Response.get(term=term, text=text)
except Response.DoesNotExist:
reply = 'コマンド `${}` に「{}」は登録されていません'.format(command, text)
_send_markdown_text(message, reply)
return
# 応答を削除する
response.delete_instance()
reply = 'コマンド `${}` から「{}」を削除しました'.format(command, text)
_send_markdown_text(message, reply)
def pop_response(message, command):
"""
用語コマンドで最後に登録された応答を削除する
"""
response_set = Term.get(command=command).response_set
# 応答が登録されていない
if len(response_set) == 0:
msg = 'コマンド `${}` には応答が登録されていません\n'.format(command)
msg += '`${} add (レスポンス)` で応答を登録してください'.format(command)
botsend(message, msg)
return
last_response = response_set.order_by(Response.created.desc())[0]
text = last_response.text
last_response.delete_instance()
reply = 'コマンド `${}` から「{}」を削除しました'.format(command, text)
_send_markdown_text(message, reply)
def search_responses(message, command, keyword):
"""
用語コマンドに登録されている応答のうち、キーワードにマッチするものを返す
"""
term = Term.get(command=command)
pat = '%{}%'.format(keyword)
responses = Response.select().where(term == term, Response.text ** pat)
if len(responses) == 0:
botsend(message, 'コマンド `${}` に `{}` を含む応答はありません'.format(command, keyword))
else:
pretext = 'コマンド `${}` の `{}` を含む応答は {} 件あります\n'.format(
command, keyword, len(responses))
data = [x.text for x in responses]
attachments = _create_attachments_for_list(pretext, data, False)
botwebapi(message, attachments)
def get_responses(message, command):
"""
用語コマンドに登録されている応答の一覧を返す
"""
response_set = Term.get(command=command).response_set
if len(response_set) == 0:
msg = 'コマンド `${}` には応答が登録されていません\n'.format(command)
msg += '`${} add (レスポンス)` で応答を登録してください'.format(command)
botsend(message, msg)
else:
pretext = 'コマンド `${}` の応答は {} 件あります\n'.format(
command, len(response_set))
data = [x.text for x in response_set]
attachments = _create_attachments_for_list(pretext, data, False)
botwebapi(message, attachments)
@respond_to('term\s+help')
def term_help(message):
"""
term pluginのヘルプを返す
"""
botsend(message, '''- `$term (用語)`: 用語コマンドを作成する
- `$term create (用語)`: 用語コマンドを作成する
- `$term drop (用語)`: 用語コマンドを消去する
- `$term search (キーワード)`: キーワードを含む用語コマンドの一覧を返す
- `$term list`: 用語コマンドの一覧を返す
- `$(用語)`: 用語コマンドに登録してある応答からランダムに一つ返す
- `$(用語) add (応答)`: 用語コマンドに応答を追加する
- `$(用語) del (応答)`: 用語コマンドから応答を削除する
- `$(用語) pop`: 用語コマンドの最後に登録した応答を削除する
- `$(用語) list`: 用語コマンドの応答一覧を返す
- `$(用語) search (キーワード)`: 用語コマンドのうちキーワードを含む応答一覧を返す
```
> $term create 酒
コマンド `$酒` を作成しました。
`$酒 add (レスポンス)` でレスポンスを追加できます
> $酒 add ビール
コマンド `$酒` に `ビール` を追加しました
> $酒 add ワイン
コマンド `$酒` に `ワイン` を追加しました
> $酒
ビール
```
''')
|
5,770 | 43dc69c66d94d85337c11eb4cfed48d7fdef2074 | # Generated by Django 3.0.8 on 2020-08-11 13:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('recipe', '0006_recipe_description'),
]
operations = [
migrations.AddField(
model_name='recipe',
name='portions',
field=models.FloatField(default=1),
),
]
|
5,771 | 1a66e7f59ada43deb8e28b9806dc4fb9be4ae247 | # 同一目录下的引用调用还是随意导入使用的
# 跨包使用就需要使用TwoUsage里面的两种方式。
import Importex
Importex.atest()
|
5,772 | d145f4c061c8f364756012832a07adc305e35e5c | # Generated by Django 3.2.5 on 2021-07-27 17:12
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Task',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50, null=True)),
('description', models.CharField(max_length=500)),
('priority', models.IntegerField(choices=[(0, 'unimportant'), (1, 'insignificant'), (2, 'important'), (3, 'Necessary')], default=0)),
('status', models.CharField(choices=[('deleted', 'deleted'), ('doing', 'doing'), ('done', 'done'), ('expire', 'expire'), ('archive', 'archive')], default='doing', max_length=10)),
('expired', models.DateTimeField(blank=True, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('category', models.ManyToManyField(default='unknown', to='todo.Category')),
],
),
]
|
5,773 | 4e715ccb4f95e7fe7e495a1181ad5df530f5a53f | from torchtext import data
from torchtext import datasets
import re
import spacy
spacy_de = spacy.load('de')
spacy_en = spacy.load('en')
url = re.compile('(<url>.*</url>)')
def tokenize_de(text):
return [tok.text for tok in spacy_de.tokenizer(url.sub('@URL@', text))]
def tokenize_en(text):
return [tok.text for tok in spacy_en.tokenizer(url.sub('@URL@', text))]
DE = data.Field(tokenize=tokenize_de)
EN = data.Field(tokenize=tokenize_en)
train, val = datasets.TranslationDataset.splits(
path='~/iwslt2016/de-en/', train='train.tags.de-en',
validation='IWSLT16.TED.tst2013.de-en', exts=('.de', '.en'),
fields=(DE, EN))
print(train.fields)
print(len(train))
print(vars(train[0]))
print(vars(train[100]))
DE.build_vocab(train.src, min_freq=3)
EN.build_vocab(train.trg, max_size=50000)
train_iter, val_iter = data.BucketIterator.splits(
(train, val), batch_size=3, device=0)
print(DE.vocab.freqs.most_common(10))
print(DE.vocab.size)
print(EN.vocab.freqs.most_common(10))
print(EN.vocab.size)
batch = next(iter(train_iter))
print(batch.src)
print(batch.trg)
|
5,774 | 1651865f120ba4fe440549567a8d9903e5455788 | #!/usr/bin/env python
import argparse
import sys
import logging
import vafator
from vafator.power import DEFAULT_FPR, DEFAULT_ERROR_RATE
from vafator.hatchet2bed import run_hatchet2bed
from vafator.ploidies import PloidyManager
from vafator.annotator import Annotator
from vafator.multiallelic_filter import MultiallelicFilter
from vafator.vafator2decifer import run_vafator2decifer
epilog = "Copyright (c) 2019-2021 TRON gGmbH (See LICENSE for licensing details)"
def annotator():
# set up logger
parser = argparse.ArgumentParser(description="vafator v{}".format(vafator.VERSION),
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
epilog=epilog)
parser.add_argument("--input-vcf", dest="input_vcf", action="store", help="The VCF to annotate", required=True)
parser.add_argument("--output-vcf", dest="output_vcf", action="store", help="The annotated VCF", required=True)
parser.add_argument('--bam', action='append', nargs=2,
metavar=('sample_name', 'bam_file'), default=[],
help='A sample name and a BAM file. Can be used multiple times to input multiple samples and '
'multiple BAM files. The same sample name can be used multiple times with different BAMs, '
'this will treated as replicates.')
parser.add_argument("--mapping-quality", dest="mapping_quality", action="store", type=int, default=1,
help="All reads with a mapping quality below this threshold will be filtered out")
parser.add_argument("--base-call-quality", dest="base_call_quality", action="store", type=int, default=30,
help="All bases with a base call quality below this threshold will be filtered out")
parser.add_argument('--purity', action='append', nargs=2,
metavar=('sample_name', 'purity'), default=[],
help='A sample name and a tumor purity value. Can be used multiple times to input multiple '
'samples in combination with --bam. If no purity is provided for a given sample the '
'default value is 1.0')
parser.add_argument("--tumor-ploidy", action='append', nargs=2,
metavar=('sample_name', 'tumor_ploidy'), default=[],
help='A sample name and a tumor ploidy. Can be used multiple times to input multiple '
'samples in combination with --bam. The tumor ploidy can be provided as a genome-wide '
'value (eg: --tumor-ploidy primary 2) or as local copy numbers in a BED file '
'(eg: --tumor-ploidy primary /path/to/copy_numbers.bed), see the documentation for '
'expected BED format (default: 2)')
parser.add_argument("--normal-ploidy", dest="normal_ploidy", required=False, default=2, type=int,
help="Normal ploidy for the power calculation (default: 2)")
parser.add_argument("--fpr", dest="fpr", required=False, default=DEFAULT_FPR, type=float,
help="False Positive Rate (FPR) to use in the power calculation")
parser.add_argument("--error-rate", dest="error_rate", required=False, default=DEFAULT_ERROR_RATE, type=float,
help="Error rate to use in the power calculation")
parser.add_argument("--include-ambiguous-bases", dest="include_ambiguous_bases", action='store_true',
help="Flag indicating to include ambiguous bases from the DP calculation")
args = parser.parse_args()
logging.info("Vafator starting...")
bams = {}
for sample_name, bam in args.bam:
if sample_name in bams:
bams[sample_name].append(bam)
else:
bams[sample_name] = [bam]
purities = {}
for sample_name, purity in args.purity:
if sample_name in purities:
raise ValueError('Multiple purity values provided for sample: {}'.format(sample_name))
if sample_name not in bams:
raise ValueError('Provided a purity value for a sample for which no BAM is provided: {}'.format(sample_name))
purities[sample_name] = float(purity)
tumor_ploidies = {}
for sample_name, tumor_ploidy in args.tumor_ploidy:
if sample_name in tumor_ploidies:
raise ValueError('Multiple tumor ploidy values provided for sample: {}'.format(sample_name))
if sample_name not in bams:
raise ValueError(
'Provided a tumor ploidy value for a sample for which no BAM is provided: {}'.format(sample_name))
try:
# checks if a genome-wide purity value was passed
tumor_ploidies[sample_name] = PloidyManager(genome_wide_ploidy=float(tumor_ploidy))
except ValueError:
# checks if the non float-like value is a path to an existing file
tumor_ploidies[sample_name] = PloidyManager(local_copy_numbers=tumor_ploidy)
if len(bams) == 0:
raise ValueError("Please, provide at least one bam file with '--bam sample_name /path/to/file.bam'")
try:
annotator = Annotator(
input_vcf=args.input_vcf,
output_vcf=args.output_vcf,
input_bams=bams,
mapping_qual_thr=args.mapping_quality,
base_call_qual_thr=args.base_call_quality,
purities=purities,
tumor_ploidies=tumor_ploidies,
normal_ploidy=int(args.normal_ploidy),
fpr=args.fpr,
error_rate=args.error_rate,
include_ambiguous_bases=args.include_ambiguous_bases
)
annotator.run()
except Exception as e:
logging.error(str(e))
sys.exit(-1)
logging.info("Vafator finished!")
def multiallelics_filter():
# set up logger
parser = argparse.ArgumentParser(description="vafator v{}".format(vafator.VERSION),
formatter_class=argparse.ArgumentDefaultsHelpFormatter, epilog=epilog)
parser.add_argument("--input-vcf", dest="input_vcf", action="store", help="The VCF to annotate", required=True)
parser.add_argument("--output-vcf", dest="output_vcf", action="store", help="The annotated VCF", required=True)
parser.add_argument("--tumor-sample-name", dest="tumor_sample_name", action="store",
help='The tumor sample name (will look for annotation ${SAMPLE_NAME}_af)', default='tumor')
args = parser.parse_args()
logging.info("Vafator multiallelic filter starting...")
try:
filter = MultiallelicFilter(
input_vcf=args.input_vcf,
output_vcf=args.output_vcf,
tumor_sample_name=args.tumor_sample_name
)
filter.run()
except Exception as e:
logging.error(str(e))
sys.exit(-1)
logging.info("Vafator multiallelic filter finished!")
def vafator2decifer():
parser = argparse.ArgumentParser(description='Generate input for Decifer using VCF file and HATCHet CNA file')
parser.add_argument("-V", "--vcf_file", required=True, type=str, help="single or multi-sample VCF file")
parser.add_argument("-S", "--samples", required=True, type=str,
help="comma separated list of sample name prefixes to use for VAFator annotations, "
"eg: primary_tumor,metastasis_tumor; the annotations primary_tumor_ac, primary_tumor_dp, "
"etc. will be expected to exist")
parser.add_argument("-C", "--cna_file", required=True, type=str, help="HATCHet CNA file: best.seg.ucn ")
parser.add_argument("-O", "--out_dir", required=True, default="./", type=str,
help="directory for printing files; please make unique for each patient!")
parser.add_argument("-M", "--min_depth", required=True, type=int, help="minimum depth PER sample")
parser.add_argument("-A", "--min_alt_depth", required=True, type=int,
help="minimum depth of ALT allele in at least one sample")
parser.add_argument("-F", "--min_vaf", required=True, type=float,
help="minimum VAF of ALT allele in at least one sample")
parser.add_argument("-N", "--max_CN", required=False, default=6, type=int,
help="maximum total copy number for each observed clone")
parser.add_argument("-B", "--exclude_list", required=False, default=None, type=str,
help="BED file of genomic regions to exclude")
parser.add_argument("-p", "--min_purity", required=False, default=0.0, type=float,
help="minimum purity to consider samples")
parser.add_argument("--snp_file", required=False, default=None, type=str,
help="HATCHet file containing germline SNP counts in tumor samples, baf/tumor.1bed")
args = parser.parse_args()
run_vafator2decifer(args)
def hatchet2bed():
parser = argparse.ArgumentParser(description='Generate input for Decifer using VCF file and HATCHet CNA file')
parser.add_argument("-i", "--input-file", required=True, type=str, help="input *.ucn hatchet file")
parser.add_argument("-o", "--output-prefix", required=True, type=str,
help="output BED file prefix, one file will be created per sample in the input with the "
"average tumor copy number in each segment")
args = parser.parse_args()
run_hatchet2bed(input_file=args.input_file, output_prefix=args.output_prefix)
|
5,775 | 31f302775ef19a07137622ef9d33495cc2a8eed2 | #pymongo and mongo DB search is like by line inside in a document then it moves to the other document
from enum import unique
import pymongo
from pymongo import MongoClient
MyClient = MongoClient() # again this is connecting to deault host and port
db = MyClient.mydatabase #db is a variable to store the database my database
users = db.users #this is the table
db.users.create_index([("names" ,pymongo.ASCENDING)]) #to create an in dex for a whole row
|
5,776 | 0bc72a558b9bd3b5f74ce5dfce586dd66c579710 | import sys
import os
import json
from collections import OrderedDict
from config import folder, portfolio_value
from datetime import datetime
import logging
# Logger setup
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def valid_date(datestring):
""" Determine if something is a valid date """
try:
datetime.strptime(datestring, '%Y-%m-%d')
return True
except ValueError as e:
logger.info('not a valid date: ' + e)
return False
def portfolio_value_on_date(date):
""" Retrieve the total portfolio value on a given data """
if valid_date(date):
try:
with open(folder + 'portfolio_balance.json', encoding='utf-8') as read_file:
data = json.loads(read_file.read(),
object_pairs_hook=OrderedDict)
return data[date]['daily_value']
except Exception:
logger.critical('couldnt read portfolio.json')
return 'something went horribly wrong trying to open the portfolio.json'
else:
return 'error on date format or date not in range'
def net_gain_loss_percentage():
""" Retrieve the net gain percentage in total value of portfolio at the end of the backtest """
try:
with open(folder + 'portfolio_balance.json', encoding='utf-8') as read_file:
data = json.loads(read_file.read(),
object_pairs_hook=OrderedDict)
net_gain_loss = data['final_portfolio'] / portfolio_value
logger.info('net gain loss is ' + net_gain_loss)
if net_gain_loss > 0:
return 'Your net gain is ' + str(net_gain_loss) + '%'
elif net_gain_loss == 0:
return 'You broke even'
else:
return 'Your net loss is ' + str(net_gain_loss) + '%'
except Exception:
logger.critical('couldnt read portfolio.json')
return 'something went horribly wrong trying to open the portfolio.json'
def max_drawdown():
""" Maximum percentage drawdown experienced in the backtest """
try:
with open(folder + 'portfolio_balance.json', encoding='utf-8') as read_file:
data = json.loads(read_file.read(),
object_pairs_hook=OrderedDict)
def daily_price():
""" Record daily volume in a generator """
for item in data:
if valid_date(item):
yield data[item]['daily_value']
# since the daily portfolio is already a running tally
# we just need to find the max and the min between them
max_price = max(daily_price())
min_price = min(daily_price())
draw = max_price / min_price
logger.info('draw percent: ' + draw)
return 'Max Drawdown is ' + str(draw) + '%'
except Exception:
logger.critical('couldnt read portfolio.json')
return 'something went horribly wrong trying to open the portfolio.json'
|
5,777 | 23a4ca8eec50e6ab72be3f1b1077c61f676b3cce | """Command 'run' module."""
import click
from loguru import logger
from megalus.main import Megalus
@click.command()
@click.argument("command", nargs=1, required=True)
@click.pass_obj
def run(meg: Megalus, command: str) -> None:
"""Run selected script.
:param meg: Megalus instance
:param command: command/script to execute
:return: None
"""
line_to_run = meg.config_data["defaults"].get("scripts", {}).get(command, None)
if not line_to_run:
logger.warning('Command "{}" not found in configuration file.'.format(command))
else:
meg.run_command(line_to_run)
|
5,778 | 488c111c051796b481794678cb04108fcf11ac39 | import sys
import bisect
t = int(raw_input())
for i in xrange(1, t+1):
n, k = map(int, raw_input().strip().split())
s = [n]
for j in xrange(k):
num = s.pop()
if num % 2 != 0:
ls = num/2
lr = num/2
if ls != 0:
bisect.insort_left(s,ls)
bisect.insort_left(s,lr)
else:
ls = num/2 -1
lr = num/2
if ls != 0:
bisect.insort_left(s,ls)
bisect.insort_left(s,lr)
else:
bisect.insort_left(s,lr)
print "Case #{}: {} {}".format(i, lr, ls)
|
5,779 | 31304c3b0f41b848a36115f1ef098a2104c170ac | import itertools
import math
def score(stack):
syrup = math.pi * max(x[0] for x in stack)**2
for item in stack:
syrup += 2*math.pi*item[0]*item[1]
return syrup
def ring_score(item):
return 2*math.pi*item[0]*item[1]
def solve(pancakes, k):
return max(itertools.combinations(pancakes, k), key=score)
"""
x = [(1, 10), (2, 9), (3, 8), (4, 7), (5, 6), (6, 5), (7, 4), (8, 3), (9, 2), (10, 1)]
r = solve(x, 5)
print(r, score(r))
"""
with open('A-large.in') as infile:
with open('A-large.out', 'w') as outfile:
cases = int(next(infile))
for case in range(1, cases+1):
n, k = map(int, next(infile).split())
pancakes = []
for _ in range(n):
pancakes.append(tuple(map(int, next(infile).split())))
pancakes.sort(key=ring_score, reverse=True)
preliminary = pancakes[:k]
remaining = pancakes[k:]
curr_score = score(preliminary)
if remaining:
other_score = score(preliminary[:-1] + [max(remaining)])
if other_score > curr_score:
curr_score = other_score
print(case, curr_score)
print("Case #{}: {}".format(case, curr_score), file=outfile)
|
5,780 | cb9ea8791009a29a24a76bc2b161e7f8599fec1b | """
definition of a sensor
"""
import datetime
import pytz
class tlimit:
def __init__(self, name, text):
self.name = name
self.text = text
time_limit = [
tlimit("All", "All Data"),
tlimit("day", "Current day"),
tlimit("24hours", "Last 24 hours"),
tlimit("3days", "Three last days"),
tlimit("7days", "Seven last days"),
tlimit("month", "Current month"),
tlimit("30days", "Last 30 days"),
tlimit("year", "Current year"),
]
tz = pytz.timezone("Europe/Paris")
utz = pytz.timezone("UTC")
def request_meteodata(request: str):
"""
execute a request in the MeteoData database
:param request: the request to execute
:return: the feteched result
"""
import MySQLdb
import platform
if platform.system() == "Windows":
MySQLParams = {
'host' : "192.168.5.1",
'user' : "MeteoRobot",
'passwd': "robot",
'db' : "MeteoData"
}
else:
MySQLParams = {
'host' : "localhost",
'user' : "MeteoRobot",
'passwd': "robot",
'db' : "MeteoData"
}
try:
con = MySQLdb.connect(**MySQLParams)
cur = con.cursor()
cur.execute(request)
con.commit()
data = cur.fetchall()
except MySQLdb.Error as err:
print(str(err))
return []
except Exception as err:
print(str(err))
return []
con.close()
return data
class SensorData:
date = datetime.datetime(1970, 1, 1, 0, 0, 0)
server_room_temperature = 0.0
server_room_humidity = 0.0
def __init__(self, d, t, h):
self.date = d
self.server_room_temperature = t
self.server_room_humidity = h
def __str__(self):
return str(self.date) + " {:.2f}°C {:.1f}%".format(self.server_room_temperature, self.server_room_humidity)
def get_data(last):
"""
get the database data on the last period
:param last: duration of the period
:return: the data
"""
Table = "ServerRoom"
filter = ""
if last == "lastone":
data = request_meteodata("SELECT * from `ServerRoom` ORDER BY id DESC LIMIT 1 ")
if len(data) == 0:
return [SensorData(datetime.datetime.now(), 0, 0)]
res = []
for d in data:
res.append(SensorData(d[1], d[2], d[3]))
return res
if last != "All":
limit = datetime.datetime.now().astimezone(utz)
if last == "24hours":
limit -= datetime.timedelta(hours=24)
else:
limit = limit.replace(hour=0, minute=0, second=0, microsecond=0)
if last == "3days":
limit -= datetime.timedelta(days=3)
elif last == "7days":
limit -= datetime.timedelta(days=7)
elif last == "month":
limit = limit.replace(day=1)
elif last == "30days":
limit -= datetime.timedelta(days=30)
elif last == "year":
limit = limit.replace(day=1, month=1)
filter = " WHERE `date` > '" + str(limit) + "'"
order = " ORDER BY `date` ASC"
req = "SELECT * FROM `" + Table + "`" + filter + order
data = request_meteodata(req)
if len(data) == 0:
print("no data: get all")
req = "SELECT * FROM `" + Table + "`" + order
data = request_meteodata(req)
res = []
for d in data:
res.append(SensorData(d[1], d[2], d[3]))
return res
def smooth_data(data, smooth_width):
"""
smooth the curve plotted by data
:param data: the input data
:param smooth_width: the width of the mobile average
:return: the smoothed data
"""
out = []
for i, dat in enumerate(data):
low = max(0, i - smooth_width)
high = min((len(data) - 1), low + 2 * smooth_width)
n = 0
s_temperature = 0
s_humidity = 0
for d in data[low:high]:
n += 1
s_temperature += d.server_room_temperature
s_humidity += d.server_room_humidity
s_temperature /= float(max(1, n))
s_humidity /= float(max(1, n))
out.append(SensorData(dat.date, s_temperature, s_humidity))
return out
def resample_data(data, entity_number):
"""
limit the amount of dat
:param data: input data
:param entity_number: maximum number of entity in output
:return: he resampled data
"""
if len(data) <= entity_number:
# not that many entity: nothing to do
return data
interval = int(len(data)/entity_number + 1)
out = []
for i, dat in enumerate(data):
if i % interval == 0:
out.append(dat)
return out
class displaydata:
"""
lass to encapsulate the meteo result to display
"""
def __init__(self):
self.temperature = "0"
self.temp_tendance = ""
self.temp_max = "0"
self.temp_min = "0"
self.temp_max_date = "0"
self.temp_min_date = "0"
self.temp_mean = "0"
self.humidity = "0"
self.hum_tendance = ""
self.hum_max = "0"
self.hum_min = "0"
self.hum_max_date = "0"
self.hum_min_date = "0"
self.hum_mean = "0"
def __tendance(self, dt, seuil):
if len(dt) < 3:
return "mdi-arrow-left-right-bold-outline tgreen"
if len(dt) > 20:
p1 = dt[-20]
p2 = dt[-10]
p3 = dt[-1]
else:
p1 = dt[0]
p2 = dt[len(dt)/2]
p3 = dt[-1]
if abs(p3 - p2) < seuil:
return "mdi-arrow-left-right-bold-outline tgreen"
elif (abs(p2 - p1) < seuil and p3 > p2) or (abs(p3 - p2) < seuil and p2 > p1):
return "mdi-arrow-top-right-bold-outline torange"
elif (abs(p2 - p1) < seuil and p3 < p2) or (abs(p3 - p2) < seuil and p2 < p1):
return "mdi-arrow-bottom-right-bold-outline tlightblue"
elif p1 > p2 > p3:
return "mdi-arrow-bottom-right-bold-outline tlightblue"
elif p1 < p2 < p3:
return "mdi-arrow-up-bold-outline tred"
else:
return "mdi-arrow-left-right-bold-outline tgreen"
def compute_from_data(self, dta, dha, date):
self.temp_max = -2000
self.temp_min = 2000
self.temp_mean = 0
for i, t in enumerate(dta):
self.temp_mean += t
if t > self.temp_max:
self.temp_max = t
self.temp_max_date = date[i]
if t < self.temp_min:
self.temp_min = t
self.temp_min_date = date[i]
if len(dta) > 0:
self.temp_mean = "{:.2f}".format(self.temp_mean / float(len(dta)))
self.temp_max = "{:.2f}".format(self.temp_max)
self.temp_min = "{:.2f}".format(self.temp_min)
self.temperature = "{:.2f}".format(dta[-1])
self.temp_tendance = self.__tendance(dta, 0.05)
self.hum_max = -2000
self.hum_min = 2000
self.hum_mean = 0
for i, t in enumerate(dha):
self.hum_mean += t
if t > self.hum_max:
self.hum_max = t
self.hum_max_date = date[i]
if t < self.hum_min:
self.hum_min = t
self.hum_min_date = date[i]
if len(dha) > 0:
self.hum_mean = "{:.2f}".format(self.hum_mean / float(len(dha)))
self.hum_max = "{:.2f}".format(self.hum_max)
self.hum_min = "{:.2f}".format(self.hum_min)
self.hum_tendance = self.__tendance(dha, 0.05)
self.humidity = "{:.2f}".format(dha[-1])
def getData(ll, smoo):
data = resample_data(get_data(ll), 1000)
if smoo > 0:
data = smooth_data(data, smoo)
print(len(data))
dates = []
temperatures = []
humidity = []
i = 0
for sset in data:
i += 1
dates.append(sset.date.strftime("%Y-%m-%d %H:%M:%S"))
temperatures.append(sset.server_room_temperature)
humidity.append(sset.server_room_humidity)
d = displaydata()
d.compute_from_data(temperatures, humidity, dates)
return dates, temperatures, humidity, d
def get_actual_data():
data = get_data("lastone")
return "{:.2f}".format(data[0].server_room_temperature), "{:.2f}".format(data[0].server_room_humidity)
|
5,781 | 720d37e35eb335cc68ff27763cfe5c52f76b98d2 | /home/sbm367/anaconda3/lib/python3.5/types.py |
5,782 | a0284eba1a0e6c498f240068c586e7f8b79cd86c | """Exercise 9c"""
import time
import numpy as np
import matplotlib.pyplot as plt
from plot_results import plot_2d
from run_simulation import run_simulation
from simulation_parameters import SimulationParameters
def exercise_9c(world, timestep, reset):
"""Exercise 9c"""
n_joints = 10
Rhead = 0.44
Rtail = 0.23
parameter_set = [
SimulationParameters(
simulation_duration=15,
drive=4.0,
amplitudes=None,
phase_lag=None,
turn=None,
amplitude_gradient=[Rhead, Rtail], #[4.0,2.0],
backward = None,
frequency = 1,
# ...
)
#for Rhead in np.linspace(0.2,0.5,10)
#for Rtail in np.linspace(0.5,0.2,10)
# for amplitudes in ...
# for ...
]
# Grid search
for simulation_i, parameters in enumerate(parameter_set):
reset.reset()
run_simulation(
world,
parameters,
timestep,
int(1000*parameters.simulation_duration/timestep),
logs="./logs/9c/simulation_{}.npz".format(simulation_i)
)
plot_9c(parameter_set)
def main():
n_joints = 10
#Rhead = 0.44
#Rtail = 0.27
parameter_set = [
SimulationParameters(
simulation_duration=15,
drive=4.0,
amplitudes=None,
phase_lag=None,
turn=None,
amplitude_gradient=[Rhead, Rtail], #[4.0,2.0],
backward = None,
frequency = 1,
# ...
)
for Rhead in np.linspace(0.2,0.5,10)
for Rtail in np.linspace(0.5,0.2,10)
# for amplitudes in ...
# for ...
]
plot_9c(parameter_set)
def plot_9c(parameter_set):
results_vel = np.zeros([len(parameter_set),3])
results_en = np.zeros([len(parameter_set),3])
ratio_vel_en = np.zeros([len(parameter_set),3])
sal_pos_t = []
sal_pos_t_bad = []
t = time.time()
#path = os.path.dirname(__file__)
path = 'D:/EPFL/MA2/Computational Motor Control/Local/Lab9/Webots/controllers/pythonController/'
print(path)
for i in range(len(parameter_set)):
with np.load(path+'/logs/9c/simulation_'+str(i)+'.npz',allow_pickle=True) as data:
#? initialisation for the computation
position = data["links"][:, 0, :]
n_steps = len(position)
timestep = float(data["timestep"])
results_vel[i][0] = data["amplitude_gradient"][0]
results_vel[i][1] = data["amplitude_gradient"][1]
results_en[i][:2] = results_vel[i][:2]
ratio_vel_en[i][:2] = results_vel[i][:2]
#! Velocity
begin_step = (int)(4/timestep)
vel = (position[n_steps-1,:] - position[begin_step,:])**2
results_vel[i][2] = np.sqrt(np.sum(vel))/((n_steps-begin_step)*timestep)
#! Energy
joint_vel = data["joints"][begin_step:,:,1]
joint_tor = data["joints"][begin_step:,:,3]
energy = joint_vel * joint_tor
results_en[i][2] = np.log10(np.mean(np.sum(energy,1)))
#! Ratio
ratio_vel_en[i][2] = results_vel[i][2]/results_en[i][2]
print ('Time elapsed for the velocity plot' + str(time.time()-t))
plt.figure("Velocity")
plot_2d(results_vel,['Amplitude Head [rad]', 'Amplitude Tail [rad]', 'Velocity [m/s]'])
plt.figure("Energy")
plot_2d(results_en,['Amplitude Head [rad]', 'Amplitude Tail [rad]', '$log_{10}(Energy)$[J]'])
plt.figure("Ratio")
plot_2d(ratio_vel_en,['Amplitude Head [rad]', 'Amplitude Tail [rad]', 'Ratio V/E $[s\cdot kg^{-1}\cdot m^{-1}]$'])
t = time.time()
plt.show()
if __name__ == '__main__':
main()
|
5,783 | 14b9927435536a4b29b0930791ab4525acd80bc9 | from flask import Flask, render_template, jsonify, request, make_response #BSD License
import requests #Apache 2.0
#StdLibs
import json
from os import path
import csv
###################################################
#Programmato da Alex Prosdocimo e Matteo Mirandola#
###################################################
application = Flask(__name__)
@application.route("/") # Index
def index():
return make_response(render_template("index.html"))
@application.route("/getGraph", methods=["POST", "GET"])
def getgraph():
#Metodo POST: responsabile di ottnere i dati in formato json dal server.
#Il server si aspetta un campo data che contenga il nome di un file esistente nel server nella cartella /static/json/
#Se non trova il file da un 404
#Se non trova il campo data da un 400
if request.method == "POST":
if('data' in request.form):
if(path.exists("static/jsons/" + request.form['data'] + ".json")):
with open("static/jsons/" + request.form['data'] + ".json", "r") as file:
jsonStr = file.read()
jsonStr = json.loads(jsonStr)
return jsonify(jsonStr)
else:
return "<h1>404 NOT FOUND"
else:
return "<h1>400 BAD REQUEST"
else:
#Metodo GET:
#si aspetta un campo graph che contenga uno dei nomi sotto presenti
#nel caso di mf e emig si aspetta anche un secondo campo che specifichi
#l'università o la provincia-
#Inoltre, iscrittiAtn e mf POSSONO (ma non devono necessariamente) avere
#un campo aggiuntivo che filtri i dati di uno specifico anno o per uno specifico sesso2
if 'graph' in request.args:
# HBar Graph per la paga oraria provinciale a seconda del livello di istruzione
if(request.args['graph'] == "pagaOra"):
return make_response(render_template("graphs/pagaOra.html"))
# Line Graph per gli iscritti alle università nel veneto per anno
elif(request.args['graph'] == "iscrittiAtn"):
if('sex' in request.args):
return make_response(render_template("graphs/iscrittiAtn.html", sex=int(request.args['sex'])))
else:
return make_response(render_template("graphs/iscrittiAtn.html", sex=0))
elif(request.args['graph'] == "disoccupati"):
return make_response(render_template("graphs/disoccupatiGraph.html"))
elif(request.args['graph'] == "iscrittiProv"):
return make_response(render_template("graphs/iscrittiProv.html"))
# Donut Graph per la distribuzione di m/f nelle università in veneto
elif(request.args['graph'] == "mf" and 'atn' in request.args):
dir = "graphs/mf/mf" + request.args['atn'] + ".html"
print(dir)
if(path.exists("templates/" + dir)):
if('year' in request.args):
return make_response(render_template(dir, year=int(request.args['year'])))
else:
return make_response(render_template(dir, year=0))
# Polar Area Graph per gli studenti emigrati in altre regioni
elif(request.args['graph'] == "emig" and "prov" in request.args):
dir = "graphs/emig/iscrittiEmig" + \
request.args['prov'] + ".html"
if(path.exists("templates/" + dir)):
return make_response(render_template(dir))
return "<h1>400 BAD REQUEST"
#Per aggiornare i dataset:
#A causa di un errore nella creazione del file riguardante gli iscritti per ogni ateneo da parte del MIUR il file
#riguardante gli iscritti per ateneo non sono scaricabili dinamicamente e va sostituito manualmente.
#Allo stesso modo, i dati ottenuti tramite l'istat non sono scaricabili dinamicamente tramite la api in quanto
#le sue prestazioni sono limitate (oltre a non permettere i filtri necessari per ottenere i file).
#Il dataset delle provincie viene aggiornato automaticamente ogni settimana. Gli altri vanno sostituiti manualmente.
#I dataset statici vanno inseriti nella cartella /static/notUpdating/
#Il dataset riguardante gli iscritti per ateneo va scaricato a questo link http://dati.ustat.miur.it/dataset/3dd9ca7f-9cc9-4a1a-915c-e569b181dbd5/resource/32d26e28-a0b5-45f3-9152-6072164f3e63/download/iscrittixateneo.csv
#e rinominato iscrittiAteneo.csv
#Il dataset riguardante gli iscritti emigrati dalla regione è stato creato manualmente a partire da altri dati e non può essere aggiornato
#I dataset riguardanti la percentuale di disoccupazione e la retribuzione oraria media sono reperibili a questo portale http://dati.istat.it/
#Sfortunatamente la funzione di ricerca del sito è molto lenta e limitata, comunque sia i due data set sono "Tasso di Disoccupazione - Dati Provinciali"
#e "Retribuzione oraria media per titolo di studio". In entrambi i casi, è necessario filtrare i risultati per le sole provincie del Veneto.
#I file vanno rinominati retribuzioneMedia.csv e taxDisocc.csv
#Fortunatamente, si aggiornano solo annualmente
@application.route("/doUpdate")
def updateData():
#File iscritti per ateneo
#I dati vengono inseriti in un dizionario come array, il formato è più sotto
with open('static/notUpdating/iscrittiAteneo.csv', newline='') as f: #Qui si può cambiare il nome del file se necessario, basta che sia in formato csv corretto
reader = csv.reader(f)
data = list(reader)[1:]
iscrittiAteneo = {
'Venezia CF': [],
'Verona': [],
'Venezia IUAV': [],
'Padova': []}
for row in data:
row = row[0].split(';')
if row[1] == 'Padova' or 'Venezia C' in row[1] or row[1] == 'Venezia Iuav' or row[1] == 'Verona':
tmp = row[1]
if 'Venezia C' in row[1]:
tmp = 'Venezia CF'
if tmp == 'Venezia Iuav':
tmp = 'Venezia IUAV'
iscrittiAteneo[tmp].append(
row[0] + ';' + row[3] + ';' + row[4])
iscrittiAteneoJson = json.dumps(iscrittiAteneo)
# Formato: {"nomeAteneo" : ["annoScolastico;numeroIscrittiMaschi;numeroIscrittiFemmine",...,...],...,...}
open('static/jsons/iscrittiAteneo.json',
"wb").write(iscrittiAteneoJson.encode())
# File iscritti emigrati in altre regioni
with open('static/notUpdating/iscrittiEmig.json', newline='') as f: #Qui si può cambiare il nome del file se necessario, basta che sia in formato csv corretto
reader = json.load(f)
iscrittiEmig = {
'vicenza': [],
'verona': [],
'venezia': [],
'padova': [],
'treviso': [],
'belluno': [],
'rovigo': []}
for row in reader['records']:
if row[4].lower() == 'padova' or row[4].lower() == 'vicenza' or row[4].lower() == 'venezia' or row[4].lower() == 'verona' or row[4].lower() == 'treviso' or row[4].lower() == 'belluno' or row[4].lower() == 'rovigo':
iscrittiEmig[row[4].lower()].append(
row[1] + ';' + row[4] + ';' + row[2] + ';' + str(row[6]))
lista = {
'vicenza': [],
'verona': [],
'venezia': [],
'padova': [],
'treviso': [],
'belluno': [],
'rovigo': []
}
count = 0
for key in iscrittiEmig.keys():
while len(iscrittiEmig[key]) > 2:
tmp = iscrittiEmig[key].pop(0).split(';')
if count == 0:
count = int(tmp[3])
tmp2 = iscrittiEmig[key][0].split(';')[2]
if tmp[2] == tmp2:
count += int(tmp[3])
else:
lista[tmp[1].lower()].append(
tmp[0] + ';' + tmp[2] + ';' + str(count))
count = 0
iscrittiEmigJson = json.dumps(lista)
# Formato: {"cittàInMinuscolo" : ["annoScolastico;CittàDiProvenienzaInMaiuscolo;RegioneDiEsodo;NumeroStudenti",...,...],...,...}
open('static/jsons/iscrittiEmig.json',
"wb").write(iscrittiEmigJson.encode())
# File paga media oraria per titolo di studio
with open('static/notUpdating/retribuzioneMedia.csv', newline='') as f:
reader = csv.reader(f)
data = list(reader)[1:]
retribuzione = {
'Vicenza': [],
'Verona': [],
'Venezia': [],
'Padova': [],
'Treviso': [],
'Belluno': [],
'Rovigo': []}
for row in data:
if (row[1] == 'Padova' or row[1] == 'Vicenza' or row[1] == 'Venezia' or row[1] == 'Verona' or row[1] == 'Treviso' or row[1] == 'Belluno' or row[1] == 'Rovigo') and (row[5] != 'totale') and 'media)' in row[3]:
# La lista è divisa in titolo di studio, reddito medio orario
tmp = row[5]
if 'nessun' in tmp:
tmp = 'nessuno'
retribuzione[row[1]].append(tmp + ';' + str(row[8]))
retribuzioneMediaJson = json.dumps(retribuzione)
# Formato: {"nomeCittà" : ["laurea;media", "diploma;media", "nulla;media"],...,...}
open('static/jsons/retribuzioneMedia.json',
"wb").write(retribuzioneMediaJson.encode())
# File %disoccupazione
with open('static/notUpdating/taxDisocc.csv', newline='') as f: #Qui si può cambiare il nome del file se necessario, basta che sia in formato csv corretto
reader = csv.reader(f)
data = list(reader)[1:]
lavoro = {
'Vicenza': [],
'Verona': [],
'Venezia': [],
'Padova': [],
'Treviso': [],
'Belluno': [],
'Rovigo': []}
for row in data:
if (row[7] == '15-24 anni') and row[5] != 'totale':
if row[5] == 'femmine':
lavoro[row[1]].append(str(row[10]))
else:
lavoro[row[1]].append(str(row[8]) + ';' + str(row[10]))
for key in lavoro.keys():
tmp = lavoro[key][0] + ';' + lavoro[key][2]
tmp2 = lavoro[key][1] + ';' + lavoro[key][3]
lavoro[key].clear()
lavoro[key].append(tmp)
lavoro[key].append(tmp2)
disoccupazioneJson = json.dumps(lavoro)
# Formato: {"nomeCittà" : ["anno;percMaschi;percFemmine","anno;percMaschi;percFemmine"x],...,...}
open('static/jsons/disoccupazione.json',
"wb").write(disoccupazioneJson.encode())
# File iscritti totali per provincia
iscritti = requests.get(
'http://dati.ustat.miur.it/dataset/3dd9ca7f-9cc9-4a1a-915c-e569b181dbd5/resource/eae4ee94-0797-41d2-b007-bc6dad3ef3e2/download/iscrittixresidenza.csv', allow_redirects=True)
open('static/iscrittiProvincia.csv', 'wb').write(iscritti.content) #Qui si può cambiare il nome del file se necessario, basta che sia in formato csv corretto
with open('static/iscrittiProvincia.csv', newline='') as f:
reader = csv.reader(f)
data = list(reader)[1:]
iscrittiProvincia = {
'vicenza': [],
'verona': [],
'venezia': [],
'padova': [],
'treviso': [],
'belluno': [],
'rovigo': []}
for row in data:
row = row[0].split(';')
if row[2].lower() == 'padova' or row[2].lower() == 'vicenza' or row[2].lower() == 'venezia' or row[2].lower() == 'verona' or row[2].lower() == 'treviso' or row[2].lower() == 'belluno' or row[2].lower() == 'rovigo':
iscrittiProvincia[row[2].lower()].append(
str(row[0]) + ';' + str(int(row[3])+int(row[4])))
iscrittiProvinciaJson = json.dumps(iscrittiProvincia)
# Formato: {"nomeCittà" : ["anno;numero"],...,...}
open('static/jsons/iscrittiProvincia.json',
"wb").write(iscrittiProvinciaJson.encode())
return "200"
#########
#Startup#
#########
#Ad ogni riavvio forzato dell'applicazione, i dati vengono aggiornati (ci impiega qualche secondo al maassimo)
updateData()
if __name__ == '__main__':
application.run(debug=True, port=80)
|
5,784 | 06e01dce7e2342be994569099ed51d1fe28eea1c | from django.db import models
# Create your models here.
class Task(models.Model):
level = models.PositiveSmallIntegerField()
topic = models.CharField(max_length=100)
content = models.TextField()
correct_answer = models.CharField(max_length=50)
class Answer(models.Model):
content = models.TextField()
user = models.CharField(max_length = 100, null = True)
task = models.ForeignKey(
'Task',
on_delete=models.CASCADE,
)
|
5,785 | 9340c9055a7e0d74d232d878b43d91a3e6cd32e5 | from tkinter import *
import tkinter.messagebox
import apikey
import tinify
class Setting_GUI(Toplevel):
def __init__(self,parent):
super().__init__()
self.parent = parent
key = "Input your key here"
self.keystringvar = StringVar()
self.wm_title("Settings - TingImage")
self.wm_attributes("-topmost", 1)
title = Label(self, text="Settings")
try:
key = apikey.loadkey()
statustext = "continue with this key"
except Exception as e:
statustext = e
statuslabel = Label(self, text=statustext)
self.keystringvar.set(key)
keytext = Entry(self, textvariable=self.keystringvar, width=40)
continuebutton = Button(self, text="Continue",command=self.loadkey, width=12)
title.grid(row=0, sticky=W + E + N + S)
statuslabel.grid(row=1, sticky=W + E + N + S)
keytext.grid(row=2, sticky=W + E + N + S)
continuebutton.grid(row=3,padx=5,pady=5)
def loadkey(self):
key = self.keystringvar.get()
try:
apikey.inputkey(key)
except Exception as e:
tkinter.messagebox.showerror("Error", e)
else:
tkinter.messagebox.showinfo("Success", "Update API-Key successful!")
self.parent.cont.set(str(tinify.compression_count))
self.destroy() |
5,786 | ea918bdf96572b38461dc1810bd0b8c16efd0f0d | # Generated by Django 3.0.7 on 2020-07-05 07:34
from django.db import migrations, models
import location_field.models.plain
class Migration(migrations.Migration):
dependencies = [
('driver', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='driver',
name='address',
),
migrations.AddField(
model_name='driver',
name='city',
field=models.CharField(default='', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='driver',
name='image',
field=models.ImageField(default='', upload_to='mechanic_img'),
preserve_default=False,
),
migrations.AddField(
model_name='driver',
name='location',
field=location_field.models.plain.PlainLocationField(default='', max_length=63),
preserve_default=False,
),
migrations.AlterField(
model_name='driver',
name='first_name',
field=models.CharField(max_length=150),
),
migrations.AlterField(
model_name='driver',
name='last_name',
field=models.CharField(max_length=150),
),
]
|
5,787 | 3acd592594ae4f12b9b694aed1aa0d48ebf485f5 | import glob
import json
import pickle
import gzip
import os
import hashlib
import re
import bs4, lxml
import concurrent.futures
URL = 'http://mangamura.org'
def _map(arg):
key, names = arg
size = len(names)
urls = set()
for index, name in enumerate(names):
html = gzip.decompress(open('htmls/' + name, 'rb').read()).decode()
soup = bs4.BeautifulSoup(html, 'lxml')
for a in soup.findAll('a', href=True):
url = a.get('href')
if len(url) >= 2 and url[0] == '/':
url = URL + url
if URL not in url:
continue
if re.search(r'kai_pc_viewer\?p=', url) is None:
continue
print(f'{key} {index}/{size} {url}')
urls.add(url)
return urls
args = {}
for index, name in enumerate([name.split('/').pop() for name in glob.glob('htmls/*')]):
key = index%12
if args.get(key) is None:
args[key] = []
args[key].append( name )
args = [(key,names) for key, names in args.items()]
urls = set()
with concurrent.futures.ProcessPoolExecutor(max_workers=12) as exe:
for _urls in exe.map(_map,args) :
[urls.add(url) for url in _urls]
open('pc_viewer_urls.pkl.gz', 'wb').write(gzip.compress(pickle.dumps(urls)))
|
5,788 | 41e981e2192b600cdf9c9b515fe9f397cd1b8826 | import re
def make_slug(string):
print(re.sub(^'\w','',string))
make_slug('#$gejcb#$evnk?.kjb')
|
5,789 | 17b0baef5e366d70ea393259df1965e75b7d12e1 | #!/usr/bin/env python
# made for comparing unfiltered and filtered scorefiles for Rosetta enzdes post analysis
import argparse
import collections
import re
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
def data_from_sc_file(axes, f, uf, true_max):
"""initializes two dictionaries and poplulates them based on -f and -u options"""
f_combo_dict = collections.defaultdict(list)
uf_combo_dict = collections.defaultdict(list)
max_x = -10000
max_y = -10000
min_x = 10000
min_y = 10000
for fileType in [uf, f]:
for i, item in enumerate(fileType):
with open(item) as f:
header = f.readline().split()
indices = [header.index(a) for a in axes]
for line in f:
line_list = line.split()
if (not line_list) or (line_list[0].startswith("#")) or (line_list[0][0].isalpha()):
continue
try:
desc_str = line_list[indices[-1]]
found_desc = re.search('A([0-9]+)_P([0-9]+)', desc_str).group()
except AttributeError:
continue
point_list = [line_list[i] for i in indices[:-1]]
point_tuple = tuple(map(float, point_list))
if point_tuple[0] > max_x:
max_x = point_tuple[0]
if point_tuple[0] < min_x:
min_x = point_tuple[0]
if point_tuple[1] > max_y:
max_y = point_tuple[1]
if point_tuple[1] < min_y:
min_y = point_tuple[1]
if not true_max:
if max_x > 0:
max_x = 0
if max_y > 0:
max_y = 0
if fileType == uf:
uf_combo_dict[found_desc].append(point_tuple)
else:
f_combo_dict[found_desc].append(point_tuple)
return uf_combo_dict, f_combo_dict, min_x, max_x, min_y, max_y
def gen_plots(uf_dict, f_dict, min_x, max_x, min_y, max_y, axes, name, histogram, total):
"""makes pdf of plots - one plot for each A[0-9]_P[0-9]"""
with PdfPages(name) as pdf:
total_xuf = []
total_yuf = []
total_xf = []
total_yf = []
for entry in uf_dict:
print 'Making plot for ' + entry
xuf, yuf = zip(*uf_dict[entry])
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.scatter(xuf, yuf, c='#ad4851', marker='o', label='initial structures')
try:
xf, yf = zip(*f_dict[entry])
ax1.scatter(xf, yf, c='orange', marker='x', label='selected structures')
except ValueError:
xf = []
yf = []
plt.legend(loc='upper right')
plt.title(entry, fontsize=30)
plt.xlim(min_x, max_x)
plt.ylim(min_y, max_y)
plt.xlabel(axes[0], fontsize=20)
plt.ylabel(axes[1], fontsize=20)
pdf.savefig(fig)
plt.close()
if total:
total_xuf.extend(xuf)
total_yuf.extend(yuf)
total_xf.extend(xf)
total_yf.extend(yf)
if histogram:
bins = np.linspace(min_y, max_y, num=10)
plt.hist(yuf, bins, alpha=0.5, color='b', label='initial structures')
try:
plt.hist(yf, bins, alpha=0.5, color='orange', label='selected structures')
except ValueError:
pass
plt.legend(loc='upper right')
plt.title(entry, fontsize=30)
plt.xlabel(axes[1], fontsize=20)
plt.ylabel('Frequency', fontsize=20)
pdf.savefig()
plt.close()
if total:
print 'Making composite plot'
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.scatter(total_xuf, total_yuf, c='#ad4851', marker='o', label='initial structures')
ax1.scatter(total_xf, total_yf, c='orange', marker='x', label='selected structures')
plt.legend(loc='upper right')
plt.title('Composite Plot', fontsize=30)
plt.xlim(min_x, max_x)
plt.ylim(min_y, max_y)
plt.xlabel(axes[0], fontsize=20)
plt.ylabel(axes[1], fontsize=20)
pdf.savefig(fig)
plt.close()
def main(x_axis, y_axis, filtered, unfiltered, name, histogram, total, true_max):
"""create axes variable and calls previous functions"""
axes = [x_axis, y_axis, 'description']
uf_dict, f_dict, min_x, max_x, min_y, max_y = data_from_sc_file(axes, filtered, unfiltered, true_max)
gen_plots(uf_dict, f_dict, min_x, max_x, min_y, max_y, axes, name, histogram, total)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Generates scatter plot of data from rosetta score files")
parser.add_argument("-x", "--xaxis",
help="criterion to be plotted on x-axis (default: total_score)",
default='total_score')
parser.add_argument("-y", "--yaxis",
help="criterion to be plotted on y-axis (default: SR_1_total_score)",
default='SR_1_total_score')
parser.add_argument("-n", "--name", default='postProcessPlot.pdf',
help='name of output pdf (default: postProcessPlot.pdf')
parser.add_argument("-b", "--histogram", action="store_true",
help="turn on histogram for y-axis parameter")
parser.add_argument("-c", "--composite", action="store_true",
help='make a composite plot that combines all subplots')
parser.add_argument("-t", "--true_max", action="store_true",
help='make plots with true maximum - will not cap max at 0')
requiredO = parser.add_argument_group('required arguments')
requiredO.add_argument("-s", "--selected", nargs='*', required=True,
help="one or more filtered score files from which data is pulled")
requiredO.add_argument("-i", "--initial", nargs='*', required=True,
help="one or more unfiltered score files from which data is pulled")
args = parser.parse_args()
main(args.xaxis, args.yaxis, args.selected, args.initial, args.name, args.histogram, args.composite, args.true_max)
|
5,790 | 96778a238d8ed8ae764d0cf8ec184618dc7cfe18 |
if __name__== '__main__':
with open('./input/day6', 'r') as f:
orbit_input = [l.strip().split(")") for l in f.readlines()]
planets = [planet[0] for planet in orbit_input]
planets1 = [planet[1] for planet in orbit_input]
planets = set(planets+planets1)
system = {}
print(orbit_input)
for inp in orbit_input:
system[inp[1]] = inp[0]
def compute_orbits(planet, system):
if planet == 'COM':
return 0
next_p = system[planet]
return 1 + compute_orbits(next_p, system)
num_orb = 0
for planet in planets:
num_orb = num_orb + compute_orbits(planet, system)
print(num_orb)
|
5,791 | 8fecfdf4b3772e5304f0b146317f94cdbd7fbd53 | from otree.api import Currency as c, currency_range
from . import models
from ._builtin import Page, WaitPage
from .models import Constants
class Introduction(Page):
timeout_seconds = 60
class Welcome(Page):
timeout_seconds = 60
class Priming(Page):
form_model = models.Player
form_fields = ['text']
class Eye1(Page):
form_model = models.Player
form_fields = ['option_1']
timeout_seconds = 10
class Eye2(Page):
form_model = models.Player
form_fields = ['option_2']
timeout_seconds = 10
class Eye3(Page):
form_model = models.Player
form_fields = ['option_3']
timeout_seconds = 10
class Eye4(Page):
form_model = models.Player
form_fields = ['option_4']
timeout_seconds = 10
class Eye5(Page):
form_model = models.Player
form_fields = ['option_5']
timeout_seconds = 10
class Eye6(Page):
form_model = models.Player
form_fields = ['option_6']
timeout_seconds = 10
class Eye7(Page):
form_model = models.Player
form_fields = ['option_7']
timeout_seconds = 10
class Eye8(Page):
form_model = models.Player
form_fields = ['option_8']
timeout_seconds = 10
class Eye9(Page):
form_model = models.Player
form_fields = ['option_9']
timeout_seconds = 10
class Eye10(Page):
form_model = models.Player
form_fields = ['option_10']
timeout_seconds = 10
class Eye11(Page):
form_model = models.Player
form_fields = ['option_11']
timeout_seconds = 10
class Eye12(Page):
form_model = models.Player
form_fields = ['option_12']
timeout_seconds = 10
class Eye13(Page):
form_model = models.Player
form_fields = ['option_13']
timeout_seconds = 10
class Eye14(Page):
form_model = models.Player
form_fields = ['option_14']
timeout_seconds = 10
class Eye15(Page):
form_model = models.Player
form_fields = ['option_15']
timeout_seconds = 10
class Eye16(Page):
form_model = models.Player
form_fields = ['option_16']
timeout_seconds = 10
class Eye17(Page):
form_model = models.Player
form_fields = ['option_17']
timeout_seconds = 10
class Eye18(Page):
form_model = models.Player
form_fields = ['option_18']
timeout_seconds = 10
class Eye19(Page):
form_model = models.Player
form_fields = ['option_19']
timeout_seconds = 10
class Eye20(Page):
form_model = models.Player
form_fields = ['option_20']
timeout_seconds = 10
class Eye21(Page):
form_model = models.Player
form_fields = ['option_21']
timeout_seconds = 10
class Eye22(Page):
form_model = models.Player
form_fields = ['option_22']
timeout_seconds = 10
class Eye23(Page):
form_model = models.Player
form_fields = ['option_23']
timeout_seconds = 10
class Eye24(Page):
form_model = models.Player
form_fields = ['option_24']
timeout_seconds = 10
class Eye25(Page):
form_model = models.Player
form_fields = ['option_25']
timeout_seconds = 10
class Eye26(Page):
form_model = models.Player
form_fields = ['option_26']
timeout_seconds = 10
class Eye27(Page):
form_model = models.Player
form_fields = ['option_27']
timeout_seconds = 10
class Eye28(Page):
form_model = models.Player
form_fields = ['option_28']
timeout_seconds = 10
class Eye29(Page):
form_model = models.Player
form_fields = ['option_29']
timeout_seconds = 10
class Eye30(Page):
form_model = models.Player
form_fields = ['option_30']
timeout_seconds = 10
class Eye31(Page):
form_model = models.Player
form_fields = ['option_31']
timeout_seconds = 10
class Eye32(Page):
form_model = models.Player
form_fields = ['option_32']
timeout_seconds = 10
class Eye33(Page):
form_model = models.Player
form_fields = ['option_33']
timeout_seconds = 10
class Eye34(Page):
form_model = models.Player
form_fields = ['option_34']
timeout_seconds = 10
class Eye35(Page):
form_model = models.Player
form_fields = ['option_35']
timeout_seconds = 10
class Eye36(Page):
form_model = models.Player
form_fields = ['option_36']
timeout_seconds = 10
class ResultsWaitPage(WaitPage):
def after_all_players_arrive(self):
self.group.set_payoffs()
def is_displayed(self):
return self.player.treatment != 4
class MyWaitPage(WaitPage):
group_by_arrival_time = True
players_per_group = 2
def after_all_players_arrive(self):
self.group.get_treatment()
class Player1(Page):
form_model = models.Player
form_fields = ['Message_12']
def is_displayed(self):
return self.player.id_in_group == 1 and self.player.treatment != 4
timeout_seconds = 120
timeout_submission = {'Message_12': 'Message 1'}
class Player2(Page):
form_model = models.Player
form_fields = ['option_AB']
def is_displayed(self):
return self.player.id_in_group == 2 and self.player.treatment != 4
timeout_seconds = 120
timeout_submission = {'option_AB': 'Option A'}
class treatment_4(Page):
form_model = models.Player
form_fields = ['option4_1', 'option4_2']
def before_next_page(self):
self.player.payoff = 0.10
self.player.total = 0.30
def is_displayed(self):
return self.player.treatment == 4
class Result_123(Page):
def vars_for_template(self):
return {'task2': self.player.payoff - 0.20}
class Demographic(Page):
form_model = models.Player
form_fields = ['gender', 'age', 'religion', 'service'] #'getcode_1', 'getcode_2']
class WaitforP1(WaitPage):
def is_displayed(self):
return self.player.treatment != 4
class Task3(Page):
def is_displayed(self):
return self.player.id_in_group == 2 and self.player.treatment != 4
page_sequence = [
MyWaitPage,
Welcome,
Priming,
Introduction,
Eye1,
Eye2,
Eye3,
Eye4,
Eye5,
Eye6,
Eye7,
Eye8,
Eye9,
Eye10,
Eye11,
Eye12,
Eye13,
Eye14,
Eye15,
Eye16,
Eye17,
Eye18,
Eye19,
Eye20,
Eye21,
Eye22,
Eye23,
Eye24,
Eye25,
Eye26,
Eye27,
Eye28,
Eye29,
Eye30,
Eye31,
Eye32,
Eye33,
Eye34,
Eye35,
Eye36,
Player1,
Task3,
WaitforP1,
Player2,
treatment_4,
Demographic,
ResultsWaitPage,
Result_123
]
|
5,792 | 59047a113d76c64be48858258441fae5da505790 | from tkinter import ttk
from chapter04a.validated_mixin import ValidatedMixin
class RequiredEntry(ValidatedMixin, ttk.Entry):
def _focusout_validate(self, event):
valid = True
if not self.get():
valid = False
self.error.set('A value is required')
return valid
|
5,793 | 2e60781da004fb86d3a33deae970c1faf2a5037d | class Rectangulo():
def __init__(self, base, altura):
self.base = base
self.altura = altura
def calcular_area(self):
return self.base * self.altura
base = float(input("Ingrese la base del rectangulo: \n"))
altura = float(input("Ingrese la altura del rectangulo: \n"))
#Primera instancia de rectangulo
rectangulo_1 = Rectangulo(base, altura)
area_rectangulo = rectangulo_1.calcular_area()
print(f"El area del rectangulo de {base} * {altura} = {area_rectangulo}")
|
5,794 | f0deb8ccaf50ea0abb9e1632eaa4354a4f21dece | # uploadops.py
# CS304-Final Project
# Created by: Megan Shum, Maxine Hood, Mina Hattori
#!/usr/local/bin/python2.7
# This file handles all the SQL calls for the upload page.
import sys
import MySQLdb
import dbconn2
def uploadPost(conn, username, description, location, time_stamp, pathname):
'''Inserts post in Posts table'''
curs = conn.cursor(MySQLdb.cursors.DictCursor) # results as Dictionaries
curs.execute('insert into posts(username, description, location, time_stamp, pic) values(%s, %s, %s, %s, %s)', [username, description, location, time_stamp, pathname])
# ================================================================
# This starts the ball rolling, *if* the script is run as a script,
# rather than just being imported.
if __name__ == '__main__':
if len(sys.argv) < 2:
print "Usage: {name} nm".format(name=sys.argv[0])
else:
DSN = dbconn2.read_cnf()
DSN['db'] = 'mmm_db' # the database we want to connect to
dbconn2.connect(DSN)
print lookupByNM(sys.argv[1])
|
5,795 | 6f331eedcdaceaded142c3ffe9400aaa817613c1 | # coding=utf-8
from lxml import etree
import frontik.handler
class Page(frontik.handler.PageHandler):
def get_page(self):
self.set_xsl(self.get_argument('template', 'simple.xsl'))
self.doc.put(etree.Element('ok'))
if self.get_argument('raise', 'false') == 'true':
raise frontik.handler.HTTPError(400, xml=etree.Element('not-ok'))
|
5,796 | 7dd5ac1110f38c40f2fddf9d7175a5ac40303d73 | # Definition for an interval.
# class Interval(object):
# def __init__(self, s=0, e=0):
# self.start = s
# self.end = e
class Solution(object):
def insert(self, intervals, newInterval):
"""
:type intervals: List[Interval]
:type newInterval: Interval
:rtype: List[Interval]
"""
if not intervals:
return [newInterval]
starts, ends = [], []
for intv in intervals:
starts.append(intv.start)
ends.append(intv.end)
left = self.search1(ends, newInterval.start)
right = self.search2(starts, newInterval.end)
print left, right
if left > len(intervals) - 1:
intervals.append(newInterval)
elif right < 0:
intervals.insert(0, newInterval)
else:
newInterval.start = min(newInterval.start, intervals[left].start)
newInterval.end = max(newInterval.end, intervals[right].end)
intervals = intervals[:left] + [newInterval] + intervals[right + 1:]
return intervals
def search1(self, nums, target):
left, right = 0, len(nums) - 1
while left + 1 < right:
mid = (left + right) / 2
if nums[mid] == target:
return mid
elif nums[mid] > target:
right = mid
else:
left = mid
if nums[right] < target:
return right + 1
elif nums[left] < target:
return right
else:
return left
def search2(self, nums, target):
left, right = 0, len(nums) - 1
while left + 1 < right:
mid = (left + right) / 2
if nums[mid] == target:
return mid
elif nums[mid] > target:
right = mid
else:
left = mid
if nums[left] > target:
return left - 1
elif nums[right] > target:
return left
else:
return right
|
5,797 | e51ca78ca6751f8238a39d3eae55d6cc6ab65128 | # -*- coding: utf-8 -*-
'''
:Title
Insert Date
:Planguage
Python
:Requires
VoodooPad 3.5+
:Description
Inserts Date
EOD
'''
VPScriptSuperMenuTitle = "GTD"
VPScriptMenuTitle = "Insert Date"
VPShortcutMask = "control"
VPShortcutKey = "J"
import AppKit
import time
def main(windowController, *args, **kwargs):
textView = windowController.textView()
document = windowController.document()
if textView != None:
dateFormat = time.strftime("%Y.%m.%d")
textView.insertText_(dateFormat)
|
5,798 | 96d7963faf720a3dc0d96b55ad65ee7ac83c1818 | # testa se uma aplicacao em modo de teste esta sendo construida
def test_config(app):
assert app.testing
|
5,799 | 1aacd04234d60e495888fc44abe3fbacf404e0ce | mapName =input('\nEnter map name(s) (omitting the mp_ prefix)\nSeparate map names with comma\n:').lower()
mapNameList =mapName.split(',')
def convertWPFile(mapName):
#Converts mapname_waypoints.gsc file (old style PEzBot format) to newer mapname.gsc file (new style Bot Warfare format)
fullMapName ='mp_'+mapName+'_waypoints.gsc'
waypoints = open(fullMapName,'r')
wpLines =waypoints.readlines()
waypoints.close()
wpLinesNew =[]
temp =0
for i,j in enumerate(wpLines):
if i >31:
if 'level.' in j:
#if ('waypoints['+str(temp+1)+']') in j and 'size' not in j:
#wpLinesNew.append('waypoints['+str(temp)+'].use = true;')
#temp +=1
wpLinesNew.append(' '+j[10:])
wpLinesNew.append('return waypoints;\n}')
newMapName =mapName.capitalize()+'.gsc'
newWPFile =open(newMapName,'w')
newWPFile.write(mapName.capitalize()+'()\n{\n waypoints = [];\n')
for i in wpLinesNew:
if 'waypointCount' not in i:
newWPFile.write(' '+i.strip()+'\n')
print('\n%s.gsc successfully converted' %mapName)
newWPFile.close()
for name in mapNameList:
convertWPFile(name.strip())
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.