index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
992,700 | 128cb740e0b4da1146c321ee8319f16b7047c2a7 | #!/usr/bin/env python3
# -----------------------------------------------------------------------
# amber_test_driver.py
# Authors: Hari Raval and Tyler Sorensen
# -----------------------------------------------------------------------
import argparse
import os
import sys
sys.path.insert(0,"../src/")
import re
import csv
import socket # to get hostname
import subprocess
from configuration import Configuration
import amber_test_generation
from tabulate import tabulate
from datetime import date
from datetime import datetime
import time
# Candidate locations for the amber executable; find_amber() probes these in
# order and returns the first one that `which` resolves.
AMBER_PATHS = ["amber",
               "/data/amber/amber/out/Debug/amber",
               "/localdisk/tsoren99/amber/amber/out/Debug/amber",
               "/home/tyler/Documents/amber/amber/out/Debug/amber"]

# Global log-file handle; opened in main() before any log_print() call.
LOG_FILE = None


# define a log print function
def log_print(s):
    """Write s (plus a newline) to the global log file and echo it to stdout.

    Assumes LOG_FILE has already been opened by main(); calling this earlier
    would raise AttributeError on None.
    """
    global LOG_FILE
    LOG_FILE.write(s + "\n")
    print(s)
# create amber tests with provided input directory and specified configuration object and path/build details
def run_amber_test(input_dir, output_dir, each_cfg_option, amber_build_path, amber_build_flags, num_iter, android):
    """Generate and run an amber test for every .txt file in input_dir.

    Each generated .amber test is run num_iter times under a 5-second timeout,
    either locally or on an Android device via adb, and pass/fail is scraped
    from amber's output accumulated in temp_results.txt.

    Returns a two-element list [simple_results, verbose_results]; each entry
    is a list of [test_name, status] pairs where status is "P", or "F"
    (verbose adds the failing fraction, e.g. "F (2/10)").
    """
    simple_test_results = []
    verbose_test_results = []
    all_test_results = []
    # prepare to name each file according to the level of saturation being done and subgroup status
    saturation_level = each_cfg_option.get_saturation_level()
    subgroup_setting = each_cfg_option.get_subgroup_setting()
    threads_per_wg = each_cfg_option.get_threads_per_workgroup()
    if saturation_level == 0:
        if subgroup_setting == 0:
            output_file_name_extension = "no_saturation_same_subgroup_" + str(threads_per_wg) + "_threads_per_wg"
        elif subgroup_setting == 1:
            output_file_name_extension = "no_saturation_diff_subgroup_" + str(threads_per_wg) + "_threads_per_wg"
        # NOTE(review): if saturation_level == 0 and subgroup_setting is neither
        # 0 nor 1, output_file_name_extension is left unbound and the loop below
        # raises NameError -- confirm subgroup_setting is always 0 or 1.
    elif saturation_level == 1:
        output_file_name_extension = "round_robin"
    elif saturation_level == 2:
        output_file_name_extension = "chunking"
    else:
        output_file_name_extension = "_" + str(saturation_level) + "level_saturation"
    # iterate over all files in the input directory, create a .amber file for each .txt file, and run the amber file
    for file_name in os.listdir(input_dir):
        if file_name.endswith('.txt'):
            # input file name shouldn't end with .amber as the amber_test_generation.py script will add the extension
            output_file_name = file_name[:-4] + "_txt_" + output_file_name_extension
            input_file_name = os.path.join(input_dir, file_name)
            log_print("generating amber test for: " + file_name + " in " + output_dir)
            # create the amber file associated with input_file_name
            amber_test_generation.generate_amber_test(input_file_name, output_file_name, each_cfg_option)
            output_file_name = output_file_name + ".amber"
            # generate command to run the amber test for a specified iteration count and append results to a temp file
            # (amber_build_flags is expected to carry its own surrounding spaces)
            run__test = "timeout -k 1 5 " + amber_build_path + output_file_name + amber_build_flags + ">> temp_results.txt"
            if android:
                # push test file on the device
                os.system("adb push " + output_file_name + " /data/local/tmp/")
                # prepare the specific run command to run amber on-device
                run__test = "timeout -k 1 5 adb shell 'cd /data/local/tmp ; ./amber_ndk " + amber_build_flags + " " + os.path.basename(
                    output_file_name) + "' >> temp_results.txt"
            # each iteration appends one more pass/fail line to temp_results.txt
            for i in range(int(num_iter)):
                log_print("running test: " + output_file_name)
                log_print(run__test)
                os.system(run__test)
            # analyze the results of the temporary file to determine whether the test passed (P) or failed (F)
            with open('temp_results.txt', 'r') as file:
                results = file.read().split("\n")
            test_iteration = file_name[:-4]
            failure_count = 0
            pass_count = 0
            # count the number of failures, if any, and update both simple and verbose results accordingly
            for current_line in results:
                if "1 fail" in current_line:
                    failure_count = failure_count + 1
                elif "1 pass" in current_line:
                    pass_count = pass_count + 1
            # if there were no failures, indicate a "P" in both sets of tables
            if failure_count == 0:
                log_print("P")
                temp_item = [test_iteration, "P"]
                simple_test_results.append(temp_item)
                verbose_test_results.append(temp_item)
            # if there is at least one failure, update simple table with "F" and verbose table with fraction of "F"
            else:
                log_print("F")
                fract = "F (" + str(failure_count) + "/" + str(num_iter) + ")"
                temp_item_verbose = [test_iteration, fract]
                temp_item_simple = [test_iteration, "F"]
                simple_test_results.append(temp_item_simple)
                verbose_test_results.append(temp_item_verbose)
            # remove the per-test scratch file so the next test starts fresh
            os.system("rm -f temp_results.txt")
            # create a directory of the amber test scripts generated at the specified output directory
            log_print("")
            move_amber_test_file = "mv " + output_file_name + " " + output_dir
            os.system(move_amber_test_file)
    # belt-and-braces cleanup in case no .txt files were processed
    os.system("rm -f temp_results.txt")
    all_test_results.append(simple_test_results)
    all_test_results.append(verbose_test_results)
    return all_test_results
# main driver function to create amber files with the specified list of configuration objects, provided
# input directory, and details of the build path/type
def amber_driver(all_config_variants, input_dir, output_dir, amber_build_path, amber_build_flags, num_iter, android):
    """Run every configuration over the test directory and aggregate results.

    Builds two result tables (simple P/F, and verbose with failure fractions)
    with one column per configuration plus a final "All Passed" column,
    appends a totals row, and hands both tables to format_output_results().
    """
    simple_results = []
    verbose_results = []
    # iterate over each configuration type and run directory of .txt files on each configuration using run_amber_test()
    for each_cfg_opt in all_config_variants:
        temp_results = run_amber_test(input_dir, output_dir, each_cfg_opt, amber_build_path, amber_build_flags,
                                      num_iter, android)
        if len(temp_results) != 2:
            print("An error occured during the generation of the amber tests in run_amber_test()", file=sys.stderr)
            exit(1)
        simple_results.append(temp_results[0])
        verbose_results.append(temp_results[1])
    # verify that the results based on each configuration are the same size to ensure all tests ran
    default_config_length = len(simple_results[0])
    for (result1, result2) in zip(simple_results, verbose_results):
        cur_simple_length = len(result1)
        cur_verbose_length = len(result2)
        if cur_simple_length != default_config_length or cur_verbose_length != default_config_length:
            print("The number of results from each of the configuration settings must be the same", file=sys.stderr)
            exit(1)
    final_simple_results = []
    final_verbose_results = []
    # create a row to contain the running sum of all of the columns for both types of output tables
    running_failure_sum_simple = [0] * (len(all_config_variants) + 1)
    running_failure_sum_simple[0] = "Total failures:"
    running_failure_sum_verbose = [0] * (len(all_config_variants) + 1)
    running_failure_sum_verbose[0] = "Total failures:"
    # iterate over both the simple results and the verbose results to process the number of failures per column
    for index, (simple_list, verbose_list) in enumerate(zip(simple_results, verbose_results)):
        counter_simple = 0
        counter_verbose = 0
        for simple_result in simple_list:
            current_status = simple_result[1]
            if current_status == "F":
                counter_simple = counter_simple + 1
        for verbose_result in verbose_list:
            current_status = verbose_result[1]
            # verbose failures look like "F (k/n)", hence the substring test
            if "F" in current_status:
                counter_verbose = counter_verbose + 1
        running_failure_sum_simple[index + 1] = counter_simple
        running_failure_sum_verbose[index + 1] = counter_verbose
    # counter variable to hold the number of failures in the last column ("any passed" column)
    any_passed_sum_simple = 0
    any_passed_sum_verbose = 0
    # group the test results for each input file together based off of the test numbers
    # NOTE(review): this assumes each test file name (minus .txt) parses as the
    # integer row index i -- confirm inputs are named 0.txt, 1.txt, ...
    for i in range(default_config_length):
        current_simple_list = [i]
        current_verbose_list = [i]
        for (each_simple_config_result, each_verbose_config_result) in zip(simple_results, verbose_results):
            for (cur_s_test, cur_v_test) in zip(each_simple_config_result, each_verbose_config_result):
                if int(cur_s_test[0]) == i:
                    current_simple_list.append(cur_s_test[1])
                if int(cur_v_test[0]) == i:
                    current_verbose_list.append(cur_v_test[1])
        # if any of the test results fail, then indicate a "F" for the final column, otherwise if all pass, indicate "P"
        if "F" in current_simple_list:
            current_simple_list.append("F")
        else:
            current_simple_list.append("P")
        for index, result in enumerate(current_verbose_list[1:]):
            if "F" in result:
                current_verbose_list.append("F")
                break
            # only append "P" once the whole (pre-append) row has been scanned
            if index == len(current_verbose_list[1:]) - 1:
                current_verbose_list.append("P")
        final_simple_results.append(current_simple_list)
        final_verbose_results.append(current_verbose_list)
        # if the last item contains an F, increment the counter for the number of failures in the final column
        if current_simple_list[-1] == "F":
            any_passed_sum_simple = any_passed_sum_simple + 1
        if "F" in str(current_verbose_list[-1]):
            any_passed_sum_verbose = any_passed_sum_verbose + 1
    # for both tables, append the total for the final column to the final row
    running_failure_sum_simple.append(any_passed_sum_simple)
    running_failure_sum_verbose.append(any_passed_sum_verbose)
    # for both tables, append the failure sum statistics row to the final results
    final_simple_results.append(running_failure_sum_simple)
    final_verbose_results.append(running_failure_sum_verbose)
    log_print("")
    log_print("Finished running tests!")
    log_print("")
    # call the formatter function to generate tables of data
    format_output_results(final_simple_results, final_verbose_results, all_config_variants, output_dir)
# prepare the headers and file name information for each type of file outputted
def format_output_results(final_simple_results, final_verbose_results, all_config_variants, output_dir):
    """Build column headers from the configuration list and emit every output
    format (ascii, csv, html) into output_dir, stamped with today's date."""
    td = date.today().strftime("%Y-%m-%d")

    def _column_title(cfg):
        # One human-readable header per configuration variant; returns None
        # for the unhandled (saturation 0, unknown subgroup) combination,
        # which mirrors the original "append nothing" behaviour.
        saturation = cfg.get_saturation_level()
        subgroup = cfg.get_subgroup_setting()
        wg_threads = cfg.get_threads_per_workgroup()
        if saturation == 1:
            return "Round Robin"
        if saturation == 2:
            return "Chunked"
        if saturation != 0:
            return str(saturation)
        if subgroup == 0:
            if wg_threads == 1:
                return "Plain"
            return "No saturation (same subgroup, " + str(wg_threads) + " threads per workgroup)"
        if subgroup == 1:
            return "No saturation (diff. subgroup, " + str(wg_threads) + " threads per workgroup)"
        return None

    headers = ["Test File Name"]
    for each_config in all_config_variants:
        title = _column_title(each_config)
        if title is not None:
            headers.append(title)
    headers.append("All Passed")
    format_ascii_table_output(final_simple_results, final_verbose_results, output_dir, headers, td)
    format_csv_table_output(final_simple_results, final_verbose_results, output_dir, headers, td)
    format_html_table_output(final_simple_results, output_dir, headers, td)
# output both the iterative and non-iterative results into ascii tables
def format_ascii_table_output(final_simple_results, final_verbose_results, output_dir, headers, td):
    """Write the simple and iteration-based result tables as fancy-grid ascii
    text files in output_dir (date-stamped file names)."""
    simple_path = output_dir + "/" + "simple_final_results-" + td + ".txt"
    verbose_path = output_dir + "/" + "iteration_based_final_results-" + td + ".txt"
    # simple P/F table
    log_print("writing simple ascii table to:")
    log_print(simple_path)
    log_print("")
    with open(simple_path, "w+") as out:
        out.write(tabulate(final_simple_results, headers=headers, tablefmt="fancy_grid"))
        out.write("\n")
    # verbose (per-iteration failure fraction) table
    log_print("writing iteration-based ascii table to:")
    log_print(verbose_path)
    log_print("")
    with open(verbose_path, "w+") as out:
        out.write(tabulate(final_verbose_results, headers=headers, tablefmt="fancy_grid"))
        out.write("\n")
# output both the iterative and non-iterative results into csv tables
def format_csv_table_output(final_simple_results, final_verbose_results, output_dir, headers, td):
    """Write the simple and iteration-based result tables as .csv files in
    output_dir (date-stamped file names)."""
    output_name_simple_csv = output_dir + "/" + "simple_final_results-" + td + ".csv"
    output_name_verbose_csv = output_dir + "/" + "iteration_based_final_results-" + td + ".csv"
    log_print("writing simple csv table to:")
    log_print(output_name_simple_csv)
    log_print("")
    # open and write simple results to a .csv file; the with-block closes it,
    # and newline='' is the documented way to open files for csv.writer
    with open(output_name_simple_csv, "w", newline='') as csv_file:
        writer = csv.writer(csv_file, delimiter=',')
        writer.writerow(headers)
        writer.writerows(final_simple_results)
    # BUG FIX: this used to announce the *simple* csv path a second time
    # instead of the verbose path actually being written below
    log_print("writing iteration-based csv table to:")
    log_print(output_name_verbose_csv)
    log_print("")
    # open and write verbose results to a .csv file
    with open(output_name_verbose_csv, "w", newline='') as csv_file:
        writer = csv.writer(csv_file, delimiter=',')
        writer.writerow(headers)
        writer.writerows(final_verbose_results)
# output the non-iterative results into a formatted html table
def format_html_table_output(final_simple_results, output_dir, headers, td):
    """Write the simple results as a colored html table (pass=green, fail=red).

    Relies on the module-level global `input_test_type` (set in main() from
    the digits found in the input directory name) to build test-explorer links.
    """
    output_name_html = output_dir + "/" + "html-colored-table" + td + ".html"
    # open and write results to the html .txt file
    output_file_txt = open(output_name_html, "w+")
    log_print("writing colored html table to:")
    log_print(output_name_html)
    log_print("")
    html = tabulate(final_simple_results, headers, tablefmt="html").split("\n")
    # extract the rows that contain pass/fail data
    data_rows = []
    for each_line in html:
        if "<tr><td>" in each_line:
            data_rows.append(each_line)
    colored_data_rows = []
    # color each cell of data according to the result status (pass = green, fail = red)
    for row, curr_line in enumerate(data_rows):
        curr_line_list = curr_line.split(" ")
        # drop the empty strings produced by runs of spaces in tabulate's output
        while "" in curr_line_list:
            curr_line_list.remove("")
        # every row except the trailing totals row gets a test-explorer link
        if row < (len(data_rows) - 1):
            href = "https://www.cs.princeton.edu/~ls24/testExplorer.html?threads=" + input_test_type[0] + "&instructions=" + input_test_type[1] + "&test=" + str(row)
            curr_line_list[0] = '<tr><td><a href="' + href + '">' + str(row) + '</a>'
        for index, item in enumerate(curr_line_list):
            # NOTE(review): substring match -- any cell text containing "P"/"F"
            # is recolored and its contents rewritten to a bare P/F
            if "P" in item:
                curr_line_list[index] = '<td bgcolor="#009900">P'
            if "F" in item:
                curr_line_list[index] = '<td bgcolor="#CC0000">F'
        colored_data_rows.append(curr_line_list)
    # merge the data back into proper html string format
    merged_data = [' '.join(row) for row in colored_data_rows]
    updated_data_as_str = "\n".join(merged_data)
    # create the string of html code to be written to the output file
    begin_html = html[:5]
    beg_str = "\n".join(begin_html)
    end_html = html[-2:]
    end_str = "\n".join(end_html)
    html_table = beg_str + updated_data_as_str + end_str
    # boiler plate code for html_table (Cite: https://www.w3schools.com/tags/tryit.asp?filename=tryhtml_td_bgcolor)
    start_boiler_plate = "<!DOCTYPE html> \n<html> \n<head> \n<style> \ntable, th, td {" \
                         "\n border: 1px solid black; \n} \n</style> \n</head> \n<body> \n" \
                         "<h1>Colored Amber Test Results</h1>\n"
    end_boiler_plate = "\n</body> \n</html> \n"
    output_file_txt.write(start_boiler_plate + html_table + end_boiler_plate)
    output_file_txt.write("\n")
    output_file_txt.close()
# automatically find the amber build path
def find_amber():
    """Return the first entry of AMBER_PATHS that `which` can resolve.

    Aborts via an assertion failure if no candidate path is executable.
    """
    for a in AMBER_PATHS:
        # `which` exits 0 when the name/path resolves to an executable
        cmd = "which " + a + " > /dev/null"
        ret = os.system(cmd)
        if ret == 0:
            log_print("found amber executable: ")
            log_print(a)
            log_print("")
            return a
    log_print("unable to find an amber executable")
    assert (0)
# create a directory to hold the output
def get_new_dir_name():
    """Pick the first unused results/output<N> directory name and report it.

    Uses plain print() rather than log_print() because the log file has not
    been opened yet when this runs.
    """
    label = 0
    while True:
        candidate = "results/output" + str(label)
        if not os.path.exists(candidate):
            print("writing results to:")
            print(candidate)
            print("")
            return candidate
        label += 1
def android_sanity_check():
    """Check that Android device is accessible, and amber is installed as /data/local/tmp/amber_ndk"""
    try:
        subprocess.run(["adb", "shell", "true"], timeout=5, check=True)
    except Exception:
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
        # propagate; this covers CalledProcessError, TimeoutExpired and a
        # missing adb binary (FileNotFoundError)
        print("Error: no Android device connected?")
        exit(1)
    try:
        subprocess.run(["adb", "shell", "test -f /data/local/tmp/amber_ndk"], timeout=5, check=True)
    except Exception:
        print(
            "Error: on Android device, /data/local/tmp/amber_ndk was not found. Please install Amber at this precise location.")
        exit(1)
def main():
    """Entry point: parse args, set up logging and the output dir, run the suite."""
    global LOG_FILE
    parser = argparse.ArgumentParser()
    parser.add_argument('input_dir', help='Path to input directory containing test in text format')
    parser.add_argument('num_iterations', type=int, help='Number of iteration to run each test')
    parser.add_argument('--android', action='store_true',
                        help='Run on Android device. Assumes a single Android device is connected, accessible with adb, and with amber already installed as /data/local/tmp/amber_ndk')
    args = parser.parse_args()
    if args.android:
        android_sanity_check()
    start = time.time()
    input_dir = args.input_dir
    # the html formatter reads this global to build test-explorer links; it
    # holds the two digit-groups embedded in the directory name (or X/X)
    global input_test_type
    input_test_type = re.findall(r'\d+', str(input_dir))
    if len(input_test_type) != 2:
        input_test_type = ['X', 'X']
    num_iterations = args.num_iterations
    # the user must input the location of the directory where the .amber files will reside
    output_dir_path = get_new_dir_name()
    # the user may change the flags used to build the amber tests with (include spaces before and after the flag(s))
    amber_build_flags = " -d -t spv1.3 "
    os.system("mkdir " + output_dir_path)
    log_file_name = output_dir_path + "/output_log.txt"
    LOG_FILE = open(log_file_name, 'w')
    log_print("Date and Time:")
    now = datetime.now()
    nowpp = now.strftime("%d/%m/%Y %H:%M:%S")
    log_print(nowpp)
    log_print("Computer:")
    log_print(socket.gethostname())
    log_print("")
    # Store Vulkan info
    vulkan_info = output_dir_path + "/vulkaninfo.txt"
    if args.android:
        log_print("No vulkaninfo on Android")
    else:
        log_print("storing vulkaninfo to: " + vulkan_info)
        log_print("")
        os.system("vulkaninfo > " + vulkan_info)
    if args.android:
        amber_build_path = ""  # ignored anyway
    else:
        amber_build_path = find_amber() + " "
    # the user must provide all the possible configuration objects they want to test with, placing them in the
    # all_config_variants list below
    default_cfg = Configuration(timeout=2000, workgroups=65532, threads_per_workgroup=1, saturation_level=0, subgroup=0)
    round_r_cfg = Configuration(timeout=2000, workgroups=65532, threads_per_workgroup=1, saturation_level=1, subgroup=0)
    chunk_cfg = Configuration(timeout=2000, workgroups=65532, threads_per_workgroup=1, saturation_level=2, subgroup=0)
    #diff_subgroup_cfg = Configuration(timeout=2000, workgroups=65532, threads_per_workgroup=256, saturation_level=0,
    #                                  subgroup=1)
    #diff_workgroup_cfg = Configuration(timeout=2000, workgroups=65532, threads_per_workgroup=4, saturation_level=0,
    #                                   subgroup=0)
    all_config_variants = [default_cfg, round_r_cfg, chunk_cfg]
    # call the main driver function
    amber_driver(all_config_variants, input_dir, output_dir_path, amber_build_path, amber_build_flags, num_iterations,
                 args.android)
    end = time.time()
    log_print("")
    log_print("Execution time (s):")
    log_print(str(end - start))
    LOG_FILE.close()


if __name__ == "__main__":
    main()
|
992,701 | 35017dafeb4835f3cad42c57568cf968a64969f3 | # #coding:utf-8
# #Author:Mr Zhi
# #4. 将列表['alex', 'eric', 'rain'] 中的每一个元素使用 "_" 连接为一个字符串
# l = ['alex', 'eric', 'rain']
# print('_'.join(l))
# """
#
# 20:46:06
# 17-吴 2017/6/13 20:46:06
# 考核形式是怎样的哦。。
# 20:47:06
# 14组导师-刘又源 2017/6/13 20:47:06
# 问答and写点代码
# 14组导师-刘又源 2017/6/13 20:47:12
# 所以你们准备好麦克
# 17-吴 2017/6/13 20:47:16
# 有点慌
# 17-吴 2017/6/13 20:47:19
# 嗯
# 14组导师-刘又源 2017/6/13 20:47:28
# 不用慌,很轻松
# 17-吴 2017/6/13 20:48:09
# 然后还要远程桌面吗
# 20:48:42
# 14组导师-刘又源 2017/6/13 20:48:42
# 不用
# 17-吴 2017/6/13 20:50:18
# 这样子
# 14组导师-刘又源 2017/6/13 20:52:14
# 都记得有这个考核吧
# 20:52:24
# 14组导师-刘又源 2017/6/13 20:52:24
# 一会儿九点正式开始
# 17-吴 2017/6/13 20:52:35
# 我就是看快到9点了一点动静都没。。。
# 14组导师-刘又源 2017/6/13 20:53:48
# 我已经习惯大家忘记了,所以每次都提前提醒大家别忘记。然并卵
# 8组-李明昌-limich 2017/6/13 20:54:07
#
# 20:54:31
# 8组-李明昌-limich 2017/6/13 20:54:31
# 新版的第三周作业 40分钟搞定
# 8组-李明昌-limich 2017/6/13 20:55:11
# 之前那个作业需求都有点读不懂。。。。我们组老师让我等这周再做,先报名考核。
# 21:01:20
# 14组导师-刘又源 2017/6/13 21:01:20
# 大家都在吧,在的扣个1
# 8组-李明昌-limich 2017/6/13 21:01:26
# 1
# 17-吴 2017/6/13 21:01:28
# 1
# 18组-李英-liyinglc 2017/6/13 21:01:32
# 1
#
# 14组导师-刘又源邀请你参与QQ电话。
#
# 漂 2017/6/13 21:01:47
# 1
# 18组-李英-liyinglc 2017/6/13 21:01:54
# 可以
# 21:03:40
# 14组导师-刘又源 2017/6/13 21:03:40
# 1. Python脚本的头部 #!/usr/bin/env python 作用?
#
# 漂 2017/6/13 21:04:09
# 1
# 14组导师-刘又源 2017/6/13 21:04:42
# 2. 简述ascii、unicode、utf-8、gbk的关系?
# 21:07:55
# 14组导师-刘又源 2017/6/13 21:07:55
# 3. 变量名命名规范
# 14组导师-刘又源 2017/6/13 21:09:24
# 4. 将列表['alex', 'eric', 'rain'] 中的每一个元素使用 "_" 连接为一个字符串
#
# 漂 2017/6/13 21:09:43
# 1
# 17-吴 2017/6/13 21:09:42
# 我还没开。。
# 18组-李英-liyinglc 2017/6/13 21:09:43
# 1
# 17-吴 2017/6/13 21:09:45
# 1
#
# 8组-李明昌-limich 2017/6/13 21:09:54
# 1
# 21:11:46
# 14组导师-刘又源 2017/6/13 21:11:46
# 5. 有如下变量,请实现要求的功能
# tu = ("alex", [11, 22, {"k1": 'v1', "k2": ["age", "name"], "k3": (11,22,33)}, 44])
# a. 讲述元祖的特性
# 元组也叫只读列表,跟列表差不多,它只有两个方法,一个是count,一个是index
# b. 请问tu变量中的第一个元素 “alex” 是否可被修改?
# c. 请问tu变量中的"k2"对应的值是什么类型?是否可以被修改?如果可以,请在其中添加一个元素 “Seven”
#
# d. 请问tu变量中的"k3"对应的值是什么类型?是否可以被修改?如果可以,请在其中添加一个元素 “Seven”
#
# """
# # 6. 字典key都可以是什么类型?cd
# # A.列表 B.字典 C.字符串 D.数字
# s = "你是风儿%%我是沙%s"
# n = s %('僧')
# print(n)
# def func(arg):
# arg.append(55)
# li = [11, 22, 33, 44]
# li = func(li)
# print(li)
# 9. Use a while loop to print every odd number in 1-100.
i = 0
while i < 100:
    i += 1
    if i % 2 == 1:
        print(i)
# 10. Use a while loop to compute 2 - 3 + 4 - 5 + 6 ... + 100 and print the sum.
# (keeps the original variable name `sum`, which shadows the builtin)
i = 1
sum = 0
while i < 100:
    i += 1
    if i % 2:
        sum -= i  # odd terms are subtracted
    else:
        sum += i  # even terms are added
print(sum)
# 4 写代码实现99乘法表(格式化输出)
#
# 5 写代码计算1*2+3*4+5*6+7*8...+99*100
|
992,702 | f1f99ee15e05947f114a6ea561dc53991fe5558d | # -*- coding: utf-8 -*-
import mock
from mock import call
import pytest
from h.streamer import nsq
from h.streamer import streamer
from h.streamer import websocket
def test_process_work_queue_sends_nsq_messages_to_nsq_handle_message(session):
    """An nsq.Message on the queue is dispatched to nsq.handle_message."""
    message = nsq.Message(topic='foo', payload='bar')
    queue = [message]
    streamer.process_work_queue({}, queue, session_factory=lambda: session)
    # handle_message is patched by the autouse nsq_handle_message fixture below
    nsq.handle_message.assert_called_once_with(message,
                                               topic_handlers=mock.ANY)
def test_process_work_queue_uses_appropriate_topic_handlers_for_nsq_messages(session):
    """Topic-handler keys are prefixed with the configured 'nsq.namespace'."""
    message = nsq.Message(topic='foo', payload='bar')
    queue = [message]
    streamer.process_work_queue({'nsq.namespace': 'wibble'},
                                queue,
                                session_factory=lambda: session)
    topic_handlers = {
        'wibble-annotations': nsq.handle_annotation_event,
        'wibble-user': nsq.handle_user_event,
    }
    nsq.handle_message.assert_called_once_with(mock.ANY,
                                               topic_handlers=topic_handlers)
def test_process_work_queue_sends_websocket_messages_to_websocket_handle_message(session):
    """A websocket.Message on the queue is dispatched to websocket.handle_message."""
    message = websocket.Message(socket=mock.sentinel.SOCKET, payload='bar')
    queue = [message]
    streamer.process_work_queue({}, queue, session_factory=lambda: session)
    websocket.handle_message.assert_called_once_with(message)
def test_process_work_queue_commits_after_each_message(session):
    """The DB session is committed once per successfully handled message."""
    message1 = websocket.Message(socket=mock.sentinel.SOCKET, payload='bar')
    message2 = nsq.Message(topic='foo', payload='bar')
    queue = [message1, message2]
    streamer.process_work_queue({}, queue, session_factory=lambda: session)
    assert session.commit.call_count == 2
def test_process_work_queue_rolls_back_on_handler_exception(session):
    """A handler exception triggers rollback (and never commit)."""
    message = nsq.Message(topic='foo', payload='bar')
    queue = [message]
    nsq.handle_message.side_effect = RuntimeError('explosion')
    streamer.process_work_queue({}, queue, session_factory=lambda: session)
    session.commit.assert_not_called()
    session.rollback.assert_called_once_with()
def test_process_work_queue_rolls_back_on_unknown_message_type(session):
    """Queue items that are not recognized messages roll the session back."""
    message = 'something that is not a message'
    queue = [message]
    streamer.process_work_queue({}, queue, session_factory=lambda: session)
    session.commit.assert_not_called()
    session.rollback.assert_called_once_with()
def test_process_work_queue_calls_close_after_commit(session):
    """On success, the session is closed only after the commit."""
    message = nsq.Message(topic='foo', payload='bar')
    queue = [message]
    streamer.process_work_queue({}, queue, session_factory=lambda: session)
    # method_calls preserves call order, so the last two must be commit, close
    assert session.method_calls[-2:] == [
        call.commit(),
        call.close()
    ]
def test_process_work_queue_calls_close_after_rollback(session):
    """On failure, the session is closed only after the rollback."""
    message = nsq.Message(topic='foo', payload='bar')
    queue = [message]
    nsq.handle_message.side_effect = RuntimeError('explosion')
    streamer.process_work_queue({}, queue, session_factory=lambda: session)
    # method_calls preserves call order, so the last two must be rollback, close
    assert session.method_calls[-2:] == [
        call.rollback(),
        call.close()
    ]
@pytest.fixture
def session():
    """A mock DB session restricted (spec_set) to the only methods the code
    under test may legitimately call."""
    return mock.Mock(spec_set=['close', 'commit', 'execute', 'rollback'])
@pytest.fixture(autouse=True)
def nsq_handle_message(request):
    """Automatically patch nsq.handle_message for every test in this module;
    the finalizer restores the real function afterwards."""
    patcher = mock.patch('h.streamer.nsq.handle_message')
    patcher.start()
    request.addfinalizer(patcher.stop)
@pytest.fixture(autouse=True)
def websocket_handle_message(request):
    """Automatically patch websocket.handle_message for every test in this
    module; the finalizer restores the real function afterwards."""
    patcher = mock.patch('h.streamer.websocket.handle_message')
    patcher.start()
    request.addfinalizer(patcher.stop)
|
992,703 | d51e931c5a937dc3ff0a29dedb2208d46519299a | import torch
import torch.functional as F
import torch.jit as jit
from torch import nn
from torch import Tensor
class LSTMCell(jit.ScriptModule):
    """A TorchScript LSTM cell.

    Weights use the standard (4*nh, fan_in) layout with gate order
    input, forget, cell, output (same as torch.nn.LSTMCell).
    """

    def __init__(self, ni, nh):
        super().__init__()
        self.ni = ni  # input feature size
        self.nh = nh  # hidden size
        self.Wih = nn.Parameter(torch.randn(4 * nh, ni))
        self.Whh = nn.Parameter(torch.randn(4 * nh, nh))
        self.bias_ih = nn.Parameter(torch.randn(4 * nh))
        self.bias_hh = nn.Parameter(torch.randn(4 * nh))

    @jit.script_method
    def forward(self, input: Tensor, state: Tuple[Tensor, Tensor]) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
        """One timestep: (batch, ni) input -> (output, (h, c))."""
        h, c = state
        # Weights are stored (4*nh, fan_in), so transpose for the matmul.
        # BUG FIX: the original omitted .t(), making the shapes incompatible.
        gates = (input @ self.Wih.t() + self.bias_ih + h @ self.Whh.t() + self.bias_hh)
        # BUG FIX: the original called the non-existent Tensor.chuck; it's chunk.
        inGate, forgetGate, cellActivate, outGate = gates.chunk(4, 1)
        inGate = torch.sigmoid(inGate)
        forgetGate = torch.sigmoid(forgetGate)
        cellActivate = torch.tanh(cellActivate)
        outGate = torch.sigmoid(outGate)
        cout = (forgetGate * c) + (inGate * cellActivate)
        hout = outGate * torch.tanh(cout)
        return hout, (hout, cout)
class LSTMLayer(jit.ScriptModule):
    """Wraps a single recurrent cell and unrolls it over the sequence dim (1)."""

    def __init__(self, cell, *cell_args):
        super().__init__()
        self.cell = cell(*cell_args)

    @jit.script_method
    def forward(self, input: Tensor, state: Tuple[Tensor, Tensor]) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
        # input is (batch, seq, features); unbind yields one (batch, features)
        # slice per timestep, threaded through the cell's recurrent state.
        # NOTE(review): Tuple must be imported from typing at module level for
        # these TorchScript annotations to resolve -- confirm the import exists.
        inputs = input.unbind(1)
        outputs = []
        for i in range(len(inputs)):
            out, state = self.cell(inputs[i], state)
            outputs.append(out)
        return torch.stack(outputs, dim=1), state
# We'll need all different kinds of dropouts.
# Dropout consists into replacing some coefficients by 0 with probability p.
# To ensure that the average of the weights remains constant,
# we apply a correction to the weights that aren't nullified of a factor 1/(1-p).
def dropoutMask(x, size, prob):
    """Return a `size`-shaped mask on x's dtype/device: each entry is 0 with
    probability `prob`, otherwise 1/(1-prob) so the expected value stays 1."""
    keep = 1 - prob
    return x.new(*size).bernoulli_(keep).div_(keep)
# A tensor x will have three dimensions: bs, seq_len, vocab_size.
# We consistently apply the dropout mask across the seq_len dimension
class RNNDropout(nn.Module):
    """Dropout whose mask is shared across the sequence dimension (dim 1) of a
    (batch, seq_len, features) tensor."""

    def __init__(self, prob=0.5):
        super().__init__()
        self.prob = prob

    def forward(self, x):
        # Identity when dropout is disabled or outside training.
        if self.prob == 0 or not self.training:
            return x
        # One mask value per (batch, feature) position, broadcast over seq_len.
        mask = dropoutMask(x.data, (x.size(0), 1, x.size(2)), self.prob)
        return x * mask
WEIGHT_HH = 'weight_hh_l0'


class WeightDropout(nn.Module):
    """Dropout applied to the weights of the inner LSTM hidden-to-hidden matrix.

    A `<name>_raw` parameter keeps the real weights; before every forward pass
    the wrapped module's weight is replaced by a dropped-out copy of the raw
    weights, which preserves the CuDNN fast path instead of reimplementing the
    cell from scratch.
    """

    def __init__(self, module, weight_prob=0., layer_names=(WEIGHT_HH,)):
        # BUG FIX: weight_prob defaulted to the *list* [0.], which F.dropout
        # rejects (p must be a float); layer_names is now a tuple to avoid a
        # shared mutable default.
        super().__init__()
        self.module = module
        self.weight_prob = weight_prob
        self.layer_names = layer_names
        for layer in self.layer_names:
            # Keep a copy of the selected layer's weights as `<layer>_raw`.
            w = getattr(self.module, layer)
            self.register_parameter(f'{layer}_raw', nn.Parameter(w.data))
            self.module._parameters[layer] = F.dropout(w, p=self.weight_prob, training=False)

    def _setWeights(self):
        """Refresh the wrapped module's weights from the raw copies, applying
        dropout only while training."""
        for layer in self.layer_names:
            raw_w = getattr(self, f'{layer}_raw')
            # BUG FIX: the original read the non-existent attr `weight_pro`.
            self.module._parameters[layer] = F.dropout(raw_w, p=self.weight_prob, training=self.training)

    def forward(self, *args):
        import warnings
        self._setWeights()
        with warnings.catch_warnings():
            # the replaced (non-flattened) weights make nn.LSTM warn about
            # non-contiguous parameters; silence that here
            warnings.simplefilter('ignore')
            return self.module.forward(*args)
class EmbeddingDropout(nn.Module):
    """Applies dropout to whole rows of the wrapped embedding's weight matrix,
    i.e. entire vocabulary entries are zeroed together."""

    def __init__(self, embedding, embed_prob):
        super().__init__()
        self.embedding = embedding
        self.embed_prob = embed_prob
        self.pad_idx = self.embedding.padding_idx
        if self.pad_idx is None:
            # F.embedding needs a concrete int here; -1 mirrors the fastai
            # convention when no padding index was configured
            self.pad_idx = -1

    def forward(self, words, scale=None):
        # BUG FIX: the original tested the non-existent attribute `embed_p`
        # (the constructor stores `embed_prob`), which raised AttributeError
        # in training mode.
        if self.training and self.embed_prob != 0:
            size = (self.embedding.weight.size(0), 1)
            mask = dropoutMask(self.embedding.weight.data, size, self.embed_prob)
            maskedEmbedding = self.embedding.weight * mask
        else:
            maskedEmbedding = self.embedding.weight
        if scale:
            maskedEmbedding.mul_(scale)
        # BUG FIX: nn.Embedding's attribute is norm_type, not normtype.
        return F.embedding(words, maskedEmbedding, self.pad_idx,
                           self.embedding.max_norm, self.embedding.norm_type,
                           self.embedding.scale_grad_by_freq, self.embedding.sparse)
def to_detach(h):
    """Detach a tensor from the autograd graph, or recursively detach every
    element of a (possibly nested) tuple of tensors.

    Deliberately compares exact type (not isinstance) so subclasses such as
    nn.Parameter take the tuple branch, matching the original behaviour.
    """
    if type(h) == torch.Tensor:
        return h.detach()
    return tuple(to_detach(v) for v in h)
class AWDLSTM(nn.Module):
    # scale for the uniform embedding-weight initialization
    initRange = 0.1
    "AWD-LSTM: https://arxiv.org/abs/1708.02182."

    def __init__(self, vocab_size, embed_size, n_hid, n_layers, pad_token,
                 hidden_p=0.2, input_p=0.6, embed_p=0.1, weight_p=0.5):
        super().__init__()
        # batch_size is lazily corrected on the first forward pass (see forward)
        self.batch_size = 1
        self.embed_size = embed_size
        self.n_hid = n_hid
        self.n_layers = n_layers
        self.embedding = nn.Embedding(vocab_size, embed_size, padding_idx=pad_token)
        self.embedding_dropout = EmbeddingDropout(self.embedding, embed_p)
        # middle layers are n_hid wide; the last projects back to embed_size
        self.rnns = [nn.LSTM(embed_size if l == 0 else n_hid,
                             (n_hid if l != n_layers - 1 else embed_size),
                             1, batch_first=True) for l in range(n_layers)]
        self.rnns = nn.ModuleList([WeightDropout(rnn, weight_p) for rnn in self.rnns])
        self.embedding.weight.data.uniform_(-self.initRange, self.initRange)
        self.input_dropout = RNNDropout(input_p)
        self.hidden_dropout = nn.ModuleList([RNNDropout(hidden_p) for l in range(n_layers)])

    def _init_hidden(self, l):
        "Return one hidden state."
        nh = self.n_hid if l != self.n_layers - 1 else self.embed_size
        return next(self.parameters()).new(1, self.batch_size, nh).zero_()

    def reset(self):
        "Reset the hidden states."
        self.hidden = [(self._init_hidden(l), self._init_hidden(l)) for l in range(self.n_layers)]

    def forward(self, input):
        # input: (batch, seq) token ids
        batch_size, seq_len = input.size()
        if batch_size != self.batch_size:
            # re-create the hidden states whenever the batch size changes
            self.batch_size = batch_size
            self.reset()
        raw_output = self.input_dropout(self.embedding_dropout(input))
        new_hidden, raw_outputs, outputs = [], [], []
        for l, (rnn, hidden_dropout) in enumerate(zip(self.rnns, self.hidden_dropout)):
            raw_output, new_h = rnn(raw_output, self.hidden[l])
            new_hidden.append(new_h)
            raw_outputs.append(raw_output)
            # no inter-layer dropout after the final layer's raw output
            if l != self.n_layers - 1:
                raw_output = hidden_dropout(raw_output)
            outputs.append(raw_output)
        # detach so truncated BPTT does not backprop through previous batches
        self.hidden = to_detach(new_hidden)
        return raw_outputs, outputs
|
992,704 | b6e7e19cd459ea0ebf5d4d7e4ce25a50538c5d66 | # python创建文件夹和TXT文件,删除TXT文件和删除文件夹
'''需求:
1、先在E盘创建一个TEST文件夹。
2、在TEST文件夹内创建一个TXT文本文件并写入内容“Hello world!”
3、删除TEST文件夹内的TXT文件
4、删除路径TEST文件目录'''
import os,stat,time
dirPath='E:\\study_dir\\'
filename='a.txt'
def createfile():
    """Create the target directory if it is missing, then write 'hello world'
    into the configured text file inside it (UTF-8)."""
    if os.path.exists(dirPath):
        print('目录'+dirPath+'已经存在')
    else:
        os.mkdir(dirPath)
        print('创建目录'+dirPath)
    file_path = dirPath + filename
    print("file_path: " + file_path)
    with open(file_path, mode='w+', encoding='utf-8') as f:
        f.write('hello world')
def deleteFile():
    """Delete the text file inside the directory (if present), printing the
    directory listing before and after."""
    print('移除前' + dirPath + '目录下有文件:%s' % os.listdir(dirPath))
    target = dirPath + filename
    # only remove the file when it actually exists
    if os.path.exists(target):
        os.remove(target)
        print('移除后' + dirPath + '目录下有文件:%s' % os.listdir(dirPath))
    else:
        print("要删除的文件不存在!")
def deletePath():
    """Remove the (by now empty) directory itself, if it exists."""
    if not os.path.exists(dirPath):
        print("要删除的路径不存在!")
        return
    os.rmdir(dirPath)  # rmdir only removes an empty directory
    print('移除' + dirPath + '目录')
# Driver sequence: create the file, then delete it, then delete the directory,
# pausing 8 seconds between the destructive steps.
print("开始执行----------------------------------------------------------------------------------------")
createfile()
time.sleep(8)  # delete the TXT file after 8 seconds
deleteFile()
time.sleep(8)  # delete the TEST directory after 8 seconds
deletePath()
print("结束执行----------------------------------------------------------------------------------------")
|
992,705 | 33de8edda7487e89b3133e3f9edbbbb4cc4a1bbf | from django.shortcuts import redirect
def login_required(view):
    """Decorator: let the view run only when the session carries a
    current_teacher_id; otherwise redirect the user to /login."""
    def _wrapped_view_func(request, *args, **kwargs):
        if request.session.get('current_teacher_id'):
            return view(request, *args, **kwargs)
        return redirect('/login')
    return _wrapped_view_func
992,706 | 07d6ce4099e46dec70c6f747f3b5236975a79c8c | #!/usr/bin/python3
"""Unittest for review file: class and methods"""
import pep8
import unittest
from models import review
from models.review import Review
class TestBaseModelpep8(unittest.TestCase):
    """Validate pep8 compliance of the review module and its test module"""

    def test_pep8(self):
        """test for base file and test_base file pep8"""
        checker = pep8.StyleGuide(quiet=True)
        targets = ["models/review.py", "tests/test_models/test_review.py"]
        report = checker.check_files(targets)
        self.assertEqual(report.total_errors, 0)
class TestDocsBaseModel(unittest.TestCase):
    """test docstrings for the review module, class and methods"""

    def test_module(self):
        """check module docstrings"""
        self.assertTrue(len(review.__doc__) > 0)

    def test_class(self):
        """check class docstrings"""
        self.assertTrue(len(Review.__doc__) > 0)

    def test_method(self):
        """check method docstrings

        Bug fix: the original iterated over dir(Review), which yields
        attribute *names* (strings), so `func.__doc__` inspected str's own
        docstring and the assertion could never fail. Resolve each name to
        the real attribute and check docstrings of the public callables.
        """
        for name in dir(Review):
            if name.startswith('_'):
                continue
            attr = getattr(Review, name)
            if callable(attr):
                self.assertTrue(attr.__doc__ is not None and
                                len(attr.__doc__) > 0)
if __name__ == "__main__":
    unittest.main()  # run this module's tests when executed directly
|
992,707 | 9a56d31b469813d6718f85e1abeabeaccca74f03 |
class Solution:
    """LeetCode 704-style binary search over a sorted list.

    Bug fix: the original annotated parameters with ``List[int]`` but never
    imported ``List`` from ``typing``, so merely importing the module raised
    NameError when the method definitions were evaluated. The builtin
    ``list`` is used instead.
    """

    def search_nums(self, nums: list, target, start, end) -> int:
        """Recursive helper: search `target` in nums[start..end] (inclusive).

        Returns the index of `target`, or -1 when the range is empty or the
        value is absent.
        """
        if start > end:
            return -1
        mid = (start + end) // 2
        if target == nums[mid]:
            return mid
        if target > nums[mid]:
            return self.search_nums(nums, target, mid + 1, end)
        return self.search_nums(nums, target, start, mid - 1)

    def search(self, nums: list, target: int) -> int:
        """Return the index of `target` in sorted `nums`, or -1 if absent."""
        return self.search_nums(nums, target, 0, len(nums) - 1)
|
992,708 | 6a1f4b8d32688708113393be75a71ab6745b61bf | #!/usr/bin/env python3
#
# A script for comparing two collection lists from familysearch.org
# The lists are JSON files, where the entry 'collections' points to
# an array of dictionaries, with keys like:
# 'collectionId' : The unique identifier for this collection
# 'title' : The Title for this collection (should be unique, but not required to be)
# 'lastUpdate' : the last date that this collection was updated
# 'lastUpdatedMillis' : the last update time of this collection (used to tell if the collection has changed)
#
# We build a pair of dictionaries from the two JSON structures,
# where each collection is an entry, and the collectionId is the key.
# Then we use those two dictionaries to find collections that have been added/removed/updated.
# Future refinement: See http://stackoverflow.com/questions/4527942/comparing-two-dictionaries-in-python
# especially "Daniel"'s answer
import sys, os
import json
import codecs
import locale
import datetime
def readJSON(filename):
    """Load and return the JSON document stored in `filename`.

    Fixes: the explicit close() was redundant (the `with` statement already
    closes the file), and the encoding is pinned to UTF-8 since JSON is
    specified as UTF-8 rather than relying on the platform default.
    """
    with open(filename, encoding='utf-8') as json_data:
        return json.load(json_data)
def toDict(collection):
    """Index a list of collection dicts by their 'collectionId' value."""
    return {entry[u'collectionId']: entry for entry in collection}
def imageString(idxCount, imgCount):
    """Human-readable summary of a collection's record and image counts.

    Uses locale-aware thousands grouping (the script sets en_US at startup).
    """
    idxStr = locale.format_string("%d", idxCount, grouping=True)
    imgStr = locale.format_string("%d", imgCount, grouping=True)
    if idxCount > 0 and imgCount > 0:
        return "%s indexed records with %s record images" % (idxStr, imgStr)
    if imgCount > 0:
        return "Browse %s Images only, no index" % (imgStr,)
    if idxCount > 0:
        return "Index only (%s records), no images" % (idxStr,)
    return "Index only, no images"
def imageStringWithDiffs(idxCount, imgCount, oldCount, oldImages):
    """Like imageString(), but appends the previous record/image counts."""
    base = imageString(idxCount, imgCount)
    prevRecords = locale.format_string("%d", oldCount, grouping=True)
    prevImages = locale.format_string("%d", oldImages, grouping=True)
    return base + " (was %s records with %s images)" % (prevRecords, prevImages)
def printCollection(aColl, action, withTimeStamp):
    """Print one collection as a single line; optionally append the
    collection's last-update date (from lastUpdatedMillis)."""
    counts = imageString(int(aColl[u'recordCount']), int(aColl[u'imageCount']))
    if withTimeStamp:
        when = datetime.datetime.fromtimestamp(int(aColl[u'lastUpdatedMillis'])//1000)
        print("%s\t(https://familysearch.org/search/collection/%s); %s, %s %s" % (aColl[u'title'], aColl[u'collectionId'], counts, action, when.strftime('%d-%b-%Y')))
    else:
        print("%s\t(https://familysearch.org/search/collection/%s); %s, %s" % (aColl[u'title'], aColl[u'collectionId'], counts, action))
def printCollectionWithDiffs(newColl, oldColl, action):
    """Print one collection line showing new counts together with the old
    counts, plus the new entry's last-update date."""
    when = datetime.datetime.fromtimestamp(int(newColl[u'lastUpdatedMillis'])//1000).strftime('%d-%b-%Y')
    counts = imageStringWithDiffs(int(newColl[u'recordCount']),
                                  int(newColl[u'imageCount']),
                                  int(oldColl[u'recordCount']),
                                  int(oldColl[u'imageCount']))
    print("%s\t(https://familysearch.org/search/collection/%s); %s, %s %s" % (newColl[u'title'], newColl[u'collectionId'], counts, action, when))
def printDict(d, label, action, withTimeStamp = True):
    """Print a section header followed by one line per collection in `d`."""
    print("%s" % label)
    for coll in d.values():
        printCollection(coll, action, withTimeStamp)
def printDictWithDiffs(d, label, action):
    """Print a section header plus an old-vs-new line for each entry in `d`.

    Bug fix: the original looked the new entry up in the module-level
    `newEntries` instead of in its own parameter `d`. Every call site passes
    dicts whose values come from `newEntries`, so using d[k] produces the
    same output while removing the needless global coupling. The previous
    counts still come from the module-level `oldEntries` map.
    """
    print("%s" % label)
    for k in d.keys():
        printCollectionWithDiffs(d[k], oldEntries[k], action)
# {"collections":[{"collectionId":"1974186","title":"Argentina, Jujuy, Catholic Church Recor
# ---------------------------------------------------------------------------
# Script body: compare the two collection dumps named on the command line
# (argv[1] = old dump, argv[2] = new dump) and report the differences.
# ---------------------------------------------------------------------------
locale.setlocale(locale.LC_ALL, 'en_US')  # enables thousands grouping in output
# From https://stackoverflow.com/questions/4545661/unicodedecodeerror-when-redirecting-to-file
# sys.stdout = codecs.getwriter(locale.getpreferredencoding())(sys.stdout)
# From https://stackoverflow.com/questions/4374455/how-to-set-sys-stdout-encoding-in-python-3
sys.stdout.reconfigure(encoding='utf-8')

oldJSON = readJSON(sys.argv[1])# [u"collections"]
newJSON = readJSON(sys.argv[2])# [u"collections"]

# Given the two collections, generate several new collections:
# unchanged, added, removed, updated, plus count-only changes.
# Key on collection ID for faster lookup.
oldEntries = toDict(oldJSON)
newEntries = toDict(newJSON)

added = {}
removed = {}
updated = {}
moreImages = {}
fewerImages = {}
moreRecords = {}
fewerRecords = {}
unchanged = {}

# Sanity checking: warn about entries missing the fields used below.
for coll in newJSON:
    id = coll [u'collectionId']
    if not u'lastUpdatedMillis' in coll:
        print("Collection %s has no update time" % id)
    if not u'imageCount' in coll:
        print("Collection %s has no image count" % id)
    if not u'recordCount' in coll:
        print("Collection %s has no record count" % id)

# Find added, updated, and unchanged collections. A newer timestamp wins;
# otherwise count changes classify the entry into the more/fewer buckets.
for coll in newJSON:
    id = coll [u'collectionId']
    if not id in oldEntries:
        added[id] = coll
    else:
        oldColl = oldEntries[id]
        newColl = newEntries[id]
        if newColl[u'lastUpdatedMillis'] < oldColl[u'lastUpdatedMillis']:
            print("## Collection ", id, " has regressed (datewise)!")
        if newColl[u'lastUpdatedMillis'] > oldColl[u'lastUpdatedMillis']:
            updated[id] = coll
        elif newColl[u'imageCount'] > oldColl[u'imageCount']:
            moreImages[id] = coll
        elif newColl[u'imageCount'] < oldColl[u'imageCount']:
            fewerImages[id] = coll
        elif newColl[u'recordCount'] > oldColl[u'recordCount']:
            moreRecords[id] = coll
        elif newColl[u'recordCount'] < oldColl[u'recordCount']:
            fewerRecords[id] = coll
        else:
            # assert(newColl[u'count'] == oldEntries[id][u'count'])
            unchanged[id] = coll

# Find removed collections (present in the old dump, gone from the new one).
for coll in oldJSON:
    id = coll [u'collectionId']
    if not id in newEntries:
        removed[id] = coll

printDict(removed, "--- Collections Deleted ---", "DELETED", False)
print()
printDict(added, "--- Collections Added ---", "ADDED")
print()
printDictWithDiffs(updated, "--- Collections Updated ---", "UPDATED")
print()
printDictWithDiffs(moreImages, "--- Collections with new images ---", "last updated")
print()
printDictWithDiffs(fewerImages, "--- Collections with images removed ---", "last updated")
print()
printDictWithDiffs(moreRecords, "--- Collections with new records ---", "last updated")
print()
printDictWithDiffs(fewerRecords, "--- Collections with records removed ---", "last updated")
print()
|
992,709 | b29296399dbc2cdfe43b333b7eb9e07988af529d | import os, urllib.request, time
import xml.etree.ElementTree as ET
# OAI-PMH harvester: repeatedly fetches ListRecords pages from arXiv and
# saves each page as <n>.xml until no resumptionToken is returned.
ARXIV_URL = 'http://export.arxiv.org/oai2?verb=ListRecords'
METADATA_FORMAT = 'arXivRaw' # possible values: oai_dc, arXiv, arXivRaw
OUTPUT_FOLDER = os.path.join('..', '..', 'raw_data', METADATA_FORMAT)

if not os.path.exists(OUTPUT_FOLDER):
    os.mkdir(OUTPUT_FOLDER)

# XML namespace map for locating the resumption token in the response.
namespaces = {'oai': 'http://www.openarchives.org/OAI/2.0/'}

file_count = 0

# while there are still records to fetch
while True:
    if file_count == 0:
        # First request: no token yet, ask for the chosen metadata format.
        URL = ARXIV_URL + '&metadataPrefix=' + METADATA_FORMAT
    retry = True
    while retry:
        try:
            url_output = urllib.request.urlopen(URL, timeout=10000000)
            # means the current set of records were collected successfully
            retry = False
        except Exception as e:
            print(e)
            # retry after 1 hour
            # NOTE(review): this retries forever on a permanently bad URL.
            print('Retry: ' + URL)
            time.sleep(3600)
    data = url_output.read().decode()
    # save fetched records to file
    file_count += 1
    file_name = os.path.join(OUTPUT_FOLDER, str(file_count) + '.xml')
    with open(file_name, 'w') as o:
        o.write(data)
    # get token for next set of records; absence of a token means the
    # server has delivered the final page.
    root = ET.parse(file_name)
    resumption_token = root.find('oai:ListRecords/oai:resumptionToken', namespaces)
    if resumption_token is not None:
        resumption_token = resumption_token.text
    else:
        break
    URL = ARXIV_URL + '&resumptionToken=' + resumption_token
    # request data every 30 seconds (be polite to the OAI endpoint)
    time.sleep(30)

print('{} files saved.'.format(file_count))
992,710 | 9a57dec2f3ece67c436eddcf70c20e98286c126a | # Handlers for Transaction Sets
import voluptuous
from rest_core.resources import Resource
from rest_core.resources import RestField
from rest_core.resources import DatetimeField
from rest_core.resources import ResourceUrlField
from rest_core.resources import ResourceIdField
from rest_core.exc import DoesNotExistException
from auth_core.decorators import authentication_required
from handlers.rest import BaseRestHandler
from services import transaction_service
from models import TransactionModel
# URL template for a single transaction resource.
resource_url = '/api/rest/v1.0/transactions/%s'

# Field rules describing how a TransactionModel is rendered over REST.
# All fields are output-only; id and url are emitted only in verbose mode.
TRANSACTION_FIELDS = [
    ResourceIdField(output_only=True, verbose_only=True),
    ResourceUrlField(resource_url, output_only=True, verbose_only=True),
    RestField(TransactionModel.rule_item_ids, output_only=True),
    RestField(TransactionModel.total_preferences, output_only=True),
    RestField(TransactionModel.total_dislikes, output_only=True),
    RestField(TransactionModel.total_likes, output_only=True),
    RestField(TransactionModel.user_id, output_only=True),
    DatetimeField(TransactionModel.created_timestamp, output_only=True),
    DatetimeField(TransactionModel.latest_timestamp, output_only=True),
]
class TransactionBaseHandler(BaseRestHandler):
    """
    Base handler shared by the Transaction REST endpoints.
    """

    def get_rules(self):
        """Return the field rules used to (de)serialize transactions."""
        return TRANSACTION_FIELDS

    def get_model_by_id_or_error(self, resource_id):
        """
        Fetch a TransactionModel by the given id OR implicitly raise a 404.

        :raises DoesNotExistException: when no transaction matches resource_id
        """
        m = transaction_service.get_by_id(resource_id)
        if not m:
            # Bug fix: the message previously said "Preference" (copy-paste
            # from the preferences handler), which made 404s misleading.
            err = 'Transaction with resource_id \'%s\' not found'
            raise DoesNotExistException(err % resource_id)
        return m

    def model_to_rest_resource(self, model, verbose=False):
        """Convert a TransactionModel to a Rest Resource (dict)."""
        return Resource(model, TRANSACTION_FIELDS).to_dict(verbose)
class TransactionDetailHandler(TransactionBaseHandler):
    """
    Handler for a single Transaction resource (GET by id).
    """
    # TODO: Update this to operate off of user_id
    @authentication_required
    def get(self, resource_id):
        # Raises DoesNotExistException (-> 404) when the id is unknown.
        pref_model = self.get_model_by_id_or_error(resource_id)
        result = self.model_to_rest_resource(pref_model,
                                             self.cleaned_params.get('verbose'))
        self.serve_success(result)
class TransactionCollectionHandler(TransactionBaseHandler):
    """
    Handler for a paged collection of Transactions.
    """

    def get_param_schema(self):
        # Optional query-string paging controls, coerced to their types.
        return {
            u'limit': voluptuous.Coerce(int),
            u'cursor': voluptuous.Coerce(str),
        }

    @authentication_required
    def get(self):
        kwargs = {
            'limit': self.cleaned_params.get('limit', None),
            'cursor': self.cleaned_params.get('cursor', None)
        }
        is_verbose = self.cleaned_params.get('verbose')
        models, next_cursor, more = transaction_service.query(**kwargs)
        return_resources = []
        for pref_model in models:
            return_resources.append(self.model_to_rest_resource(pref_model, is_verbose))
        # cursor/more let the client request the next page.
        self.serve_success(return_resources, {'cursor': next_cursor, 'more': more})
|
992,711 | 173b03c71ed7957ab9bb1cf4e7911da11a8b81c4 | from typing import Any
from typing import Dict
from switchmng.typing import JsonDict
class BaseResource():
    """
    Represents the base for all REST resources.

    This class only provides the skeleton for other resources
    and cannot be instantiated.
    Every implementing class should overwrite
    `_Attributes` and `ResourceIdentifier`.
    """

    ResourceIdentifier = 'name'
    """*Name* of the attribute that is this resource's identifier"""

    # Schema: attribute name -> descriptor dict. Keys used by this class:
    # 'optional', 'null' (default for optional attrs), 'private' (backing
    # attribute name), 'type', 'list', and optionally 'checks',
    # 'post_hooks', 'jsonify'.
    _Attributes: Dict[str, Dict[str, Any]] = {}

    def __init__(self, **kwargs):
        cls = type(self)
        # Check if all required arguments are available
        for key in cls._Attributes:
            # Set optional arguments to default value
            if cls._Attributes[key]['optional']:
                setattr(self, key, cls._Attributes[key]['null'])
                continue
            # Check if all required arguments are given
            if key not in kwargs:
                raise TypeError("Missing attribute '{}' for resource {}".format(
                    key,
                    cls))
        # Check if all given arguments are valid (all at once, so a bad
        # argument cannot leave the instance half-initialized)
        self.check_params(**kwargs)
        for key, val in kwargs.items():
            setattr(self, key, val)

    def __setattr__(self, name, val) -> None:
        # If attribute is not a resource attribute set it normally
        if name not in type(self)._Attributes:
            super().__setattr__(name, val)
            return
        # Check resource attribute before setting
        self.check_param(name, val)
        # Store under the schema's 'private' name; __getattribute__ below
        # redirects reads of the public name to this backing attribute.
        attr = type(self)._Attributes[name]
        super().__setattr__(attr['private'], val)
        # Run post hooks after setting resource attribute
        if 'post_hooks' in attr:
            for hook in attr['post_hooks']:
                hook(self)

    def __getattribute__(self, name):
        # If attribute is not a resource attribute get it normally.
        # (Looking _Attributes up on the class, not the instance, avoids
        # recursing into this method.)
        attrs = type(self)._Attributes
        if name not in attrs:
            return super().__getattribute__(name)
        # Resource attributes are read from their private backing name.
        return getattr(self, attrs[name]['private'])

    def __str__(self) -> str:
        # Represent this resource as a string of resource identifier
        return str(getattr(self, type(self).ResourceIdentifier))

    def __repr__(self) -> str:
        return self.__str__()

    def jsonify(self) -> JsonDict:
        """
        Represent this resource as a json-ready dict.

        That is a dict which completely consists of json-compatible structures
        like:

        * dict
        * list
        * string
        * int
        * bool
        * None / null
        """
        Attributes = type(self)._Attributes

        # Create json dictionary
        json_dict: JsonDict = {}

        # Add every resource attribute to dictionary
        for key in Attributes:
            # If necessary jsonify attribute before adding it
            var = getattr(self, key)
            if 'jsonify' in Attributes[key]:
                var = Attributes[key]['jsonify'](var)
            json_dict[key] = var

        return json_dict

    @classmethod
    def check_param(cls, key, val) -> None:
        """
        Check given parameter.

        Check if given parameter has the correct type and is a valid
        attribute for this resource.
        These checks get executed when trying to assign a value to a
        resource attribute but can be called when needing to check parameters
        before making changes.

        :raises TypeError: When type of given parameter does not match
            expectation
        :raises ValueError: When value of given parameter does not match
            expectation
        """
        # Check if attribute is valid for this resource at all
        if key not in cls._Attributes:
            raise TypeError("Unexpected attribute '{}' for resource '{}'".format(
                key,
                cls))
        Attribute = cls._Attributes[key]

        # Check if attribute is null and is allowed to be null
        if Attribute['optional'] and val is None:
            return

        # Check if attribute has correct type (element-wise for lists)
        if Attribute['list']:
            msg = "Attribute '{}' of resource {} has to be of type list of '{}'".format(
                key,
                cls,
                Attribute['type'])
            if not isinstance(val, list):
                raise TypeError(msg)
            for item in val:
                if not isinstance(item, Attribute['type']):
                    raise TypeError(msg)
        else:
            msg = "Attribute '{}' of resource {} has to be of type '{}'".format(
                key,
                cls,
                Attribute['type'])
            if not isinstance(val, Attribute['type']):
                raise TypeError(msg)

        # Run the schema's custom value checks, if any
        if 'checks' in Attribute:
            msg = "Illegal value '{}' for attribute '{}' of resource {}".format(
                val,
                key,
                cls)
            for value_check in Attribute['checks']:
                if value_check(cls, val) is False:
                    raise ValueError(msg)

    @classmethod
    def check_params(cls, **kwargs) -> None:
        """
        Check all given parameter.

        Check if given parameters have the correct type and are valid
        attributes for this resource.
        These checks get executed when trying to assign a value to a
        resource attribute but can be called when needing to check multiple
        parameters at once in order to prevent inconistent states.

        :raises TypeError: When type of given parameter does not match
            expectation
        :raises ValueError: When value of given parameter does not match
            expectation
        """
        for key, val in kwargs.items():
            cls.check_param(key, val)
|
992,712 | 938089f96f013a189e294291002138840db27fa9 | tab = {'0':'Zero','1' : 'Un','2': 'Deux','3':'Trois','4':'Quatre','5':'Cinq','6':'Six','7':'Sept','8':'Huit','9':'Neuf'}
mot = '123'
# Print the French name of every digit in the string, in order.
for chiffre in mot:
    print(tab[chiffre])
992,713 | cb3a508f344779c717f3027d297fb1cc4877bebc | __author__ = "rolandh"
# REFEDS "Research and Scholarship" entity category URI.
RESEARCH_AND_SCHOLARSHIP = "http://refeds.org/category/research-and-scholarship"

# Attribute release policy: which user attributes may be released to a
# service provider, keyed by entity category ("" = default for any SP).
RELEASE = {
    "": ["eduPersonTargetedID"],
    RESEARCH_AND_SCHOLARSHIP: [
        "eduPersonPrincipalName",
        "eduPersonScopedAffiliation",
        "mail",
        "givenName",
        "sn",
        "displayName",
    ],
}
|
992,714 | 3029e2c99aa1e3f083e5db92872f2e9a96f2c26d | import torch
import torch.nn as nn
from torchvision.models import resnet18
"""
This module contains the following classes:
* FeatureExtractor -- defines the feature extractor (ResNet-18 without the FC layer).
  (Translated author note: "Edoardo, you said you would do this one, so I am leaving it to you.")
* MainTask -- performs the main task, a multi-class classification.
* PreText -- predicts the relative rotation between the depth and RGB modalities.
"""
class PreText(nn.Module):
    """
    Head for the rotation pretext task.

    Consumes the concatenated RGB+depth feature map of shape
    (N, featureMaps*2, 7, 7) and emits `num_classes` rotation logits.
    """
    def __init__(self, num_classes=4, featureMaps=512, **kwargs):
        super(PreText, self).__init__()
        layers = [
            # 1x1 conv squeezes the channel dimension down to 100.
            nn.Conv2d(featureMaps * 2, 100, kernel_size=1, stride=1),
            nn.BatchNorm2d(100),
            nn.ReLU(inplace=True),
            # 3x3 stride-2 conv: 7x7 spatial map -> 3x3.
            nn.Conv2d(100, 100, kernel_size=3, stride=2),
            nn.BatchNorm2d(100),
            nn.ReLU(inplace=True),
            nn.Flatten(),
            nn.Linear(100 * 3 * 3, 100),
            nn.BatchNorm1d(100),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(100, num_classes),
        ]
        self.layer = nn.Sequential(*layers)

    def forward(self, h):
        """Return rotation logits for the combined feature map `h`."""
        return self.layer(h)
class MainTask(nn.Module):
    """
    Head for the main task: multi-class classification over the combined
    RGB+depth feature map (featureMaps*2 input channels, any spatial size).
    """
    def __init__(self, num_classes=47, featureMaps=512, **kwargs):
        super(MainTask, self).__init__()
        layers = [
            # Global average pooling collapses the spatial dimensions to 1x1.
            nn.AdaptiveAvgPool2d((1, 1)),
            nn.Flatten(),
            nn.Linear(featureMaps * 2 * 1 * 1, 1000),
            nn.BatchNorm1d(1000),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(1000, num_classes),
        ]
        self.layer = nn.Sequential(*layers)

    def forward(self, h):
        """Return class logits for the combined feature map `h`."""
        return self.layer(h)
class PreText_variation(nn.Module):
    """
    Head for the color-permutation pretext variation: same trunk as PreText
    but a single output unit by default.
    """
    def __init__(self, num_classes=1, featureMaps=512, **kwargs):
        super(PreText_variation, self).__init__()
        layers = [
            nn.Conv2d(featureMaps * 2, 100, kernel_size=1, stride=1),
            nn.BatchNorm2d(100),
            nn.ReLU(inplace=True),
            # 7x7 -> 3x3 spatial reduction.
            nn.Conv2d(100, 100, kernel_size=3, stride=2),
            nn.BatchNorm2d(100),
            nn.ReLU(inplace=True),
            nn.Flatten(),
            nn.Linear(100 * 3 * 3, 100),
            nn.BatchNorm1d(100),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(100, num_classes),
        ]
        self.layer = nn.Sequential(*layers)

    def forward(self, h):
        """Return the prediction for the combined feature map `h`."""
        return self.layer(h)
class Branch(nn.Module):
    """
    One modality branch of the feature extractor: the ResNet-18 trunk
    without its avgpool/fc head.
    Source code for resnet18: https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
    Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
    """
    def __init__(self, pretrained=True):
        super(Branch, self).__init__()
        backbone = resnet18(pretrained=pretrained, progress=True)
        # Reuse the stem and the four residual stages of ResNet-18.
        self.conv1 = backbone.conv1
        self.bn1 = backbone.bn1
        self.relu = backbone.relu
        self.maxpool = backbone.maxpool
        self.conv2 = backbone.layer1
        self.conv3 = backbone.layer2
        self.conv4 = backbone.layer3
        self.conv5 = backbone.layer4

    def forward(self, x):
        """
        x: 4-d tensor of images (batch, 3, 224, 224).
        Returns the (len(x), 512, 7, 7) feature map of the last stage.
        """
        # Stem: conv + BN + ReLU + max-pool.
        out = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        # Four residual stages; the skip connections are implemented inside
        # torchvision's BasicBlock modules.
        for stage in (self.conv2, self.conv3, self.conv4, self.conv5):
            out = stage(out)
        return out
class FeatureExtractor(nn.Module):
    """Two-branch (RGB + depth) feature extractor whose branch outputs are
    concatenated along the channel dimension."""
    def __init__(self, pretrained=True):
        super(FeatureExtractor, self).__init__()
        self.rgb_branch = Branch(pretrained=pretrained)
        self.depth_branch = Branch(pretrained=pretrained)

    def _combine_features(self, rgb_out, depth_out):
        """Concatenate the two equally-sized feature maps channel-wise,
        e.g. two (N, 512, 7, 7) maps -> one (N, 1024, 7, 7) map."""
        return torch.cat([rgb_out, depth_out], dim=1)

    def forward(self, rgb_batch, depth_batch):
        """Run both branches and return the channel-wise concatenation of
        their feature maps."""
        return self._combine_features(self.rgb_branch(rgb_batch),
                                      self.depth_branch(depth_batch))
|
992,715 | 74c22984072bcd99f33abb07fb85fb5842793209 | import os
from todos.lambda_responses import HttpResponseServerError, HttpOkJSONResponse
from todos.todo_model import TodoModel
from utils.constants import ENV_VAR_ENVIRONMENT, ENV_VAR_DYNAMODB_TABLE, ENV_VAR_DYNAMODB_REGION
def handle(event, context):
    """Lambda entry point: list every todo item in the DynamoDB table.

    Returns an HTTP 500 payload when the required environment variables
    are missing, otherwise an HTTP 200 payload with all items.
    """
    try:
        table_name = os.environ[ENV_VAR_DYNAMODB_TABLE]
        region = os.environ[ENV_VAR_DYNAMODB_REGION]
    except KeyError as missing:
        message = '{0} is missing from environment variables'.format(str(missing))
        return HttpResponseServerError(error_code='ENV_VAR_NOT_SET',
                                       error_message=message).__dict__()

    # The last argument flags a non-deployed (local/test) environment.
    TodoModel.setup_model(TodoModel, region, table_name, ENV_VAR_ENVIRONMENT not in os.environ)

    # Fetch all todos from the database and serialize them for the response.
    todos = TodoModel.scan()
    payload = {'items': [dict(todo) for todo in todos]}
    return HttpOkJSONResponse(body=payload).__dict__()
|
992,716 | a1ff0ef75372d9e3b6c416a2c3783bd86b51f46e | from rest_framework import serializers
from .models import Task, Description
class DescriptionSerializer(serializers.ModelSerializer):
    """Serializer for Description; the owning task cannot be changed on update."""

    class Meta:
        model = Description
        fields = ('id', 'task', 'text')

    def update(self, instance, validated_data):
        """Drop any submitted 'task' so an update can never re-parent the
        description.

        Bug fix: pop() previously had no default, so a partial update
        (PATCH) without a 'task' key raised KeyError.
        """
        validated_data.pop('task', None)
        return super().update(instance, validated_data)
class TaskSerializer(serializers.ModelSerializer):
    """Serializer for Task; author is read-only and descriptions are nested."""
    author = serializers.ReadOnlyField(source='author.id')
    description = DescriptionSerializer(many=True, read_only=True)

    class Meta:
        model = Task
        fields = ('id', 'name', 'project', 'status', 'performer',
                  'author', 'description', 'comment')

    # NOTE(review): perform_create is a *view* hook -- serializers have no
    # self.request, so DRF never calls this here; it likely belongs on the
    # ViewSet. Confirm with the view code before moving/removing it.
    def perform_create(self, serializer):
        serializer.save(author=self.request.user)
class ProfileSerializer(serializers.ModelSerializer):
    """Serializer for a user profile.

    NOTE(review): Meta is missing the required ``model`` attribute, so
    instantiating this serializer will fail. The intended model (a
    Profile/User class) is not visible in this file -- needs fixing by the
    owner.
    """
    user = serializers.ReadOnlyField(source='user.id')

    class Meta:
        fields = ('id', 'username', 'userstatus', 'token')
992,717 | 76ea3860ecbdce60be7fcaf8d2e495338798e46e | import requests
from lxml import etree
import json
lb = []      # accumulates each tab's @data-category value list, in page order
zidian = {}  # maps str(tab-title-list) -> @data-category list for that tab
def qaqu_syan(url,headers):
    """Fetch the xueqiu landing page and dump the raw HTML to disk."""
    response = requests.get(url, headers=headers)
    print(response.status_code)
    with open('雪球信息.txt', 'w', encoding='utf-8') as f:
        f.write(response.text)
def yiqu_soyan():
    """Parse the saved landing page and collect the category tabs into the
    module-level `zidian` and `lb` containers."""
    with open('雪球信息.txt', 'r', encoding='utf-8') as f:
        q = f.read()
    html = etree.HTML(q)
    # Category tabs in the timeline header.
    tiquyiji = html.xpath('//div[@ class="home__timeline__tabs tabs"]/router-link[@ class="tab__item"]')
    for i in tiquyiji:
        tioatiao = i.xpath('text()')  # tab title (a one-element list)
        to = i.xpath('@to')           # link target (unused below)
        data = i.xpath('@data-category')
        # Key is str() of the title *list*, e.g. "['今日话题']"; later code
        # slices k[2:4] to recover the two-character label from it.
        zidian[str(tioatiao)] = data
        lb.append(data)
def huoqujs(headers):
    """For every category collected in `zidian`, request its JSON timeline
    and save the raw response to a per-category file."""
    url = 'https://xueqiu.com/v4/statuses/public_timeline_by_category.json?since_id=-1&max_id=-1&count=10&category='
    for label, category in zidian.items():
        full_url = url + str(category[0])
        # label is str() of a one-element list; [2:4] is the 2-char name.
        print(label[2:4])
        print('正在请求', full_url)
        response = requests.get(full_url, headers=headers)
        print(response.status_code)
        print('正在爬取')
        with open('雪球分类信息' + label[2:4] + '.txt', 'w', encoding='utf-8') as f:
            f.write(response.text)
def tiquflxx():
    """Parse each saved per-category JSON file and print post details.

    Fixes: the original computed ``s = q.replace(r"\\", '')`` but then fed
    ``q`` (not ``s``) to json.loads, so the dead assignment is removed. The
    bare ``except`` is narrowed to KeyError, which is what fires for posts
    lacking 'id'/'title'/'description'/'user' fields; those fall back to
    the short text-only format.
    """
    for kv in zidian.items():
        k = kv[0]
        with open('雪球分类信息' + k[2:4] + '.txt', 'r', encoding='utf-8') as f:
            q = f.read()
        w = json.loads(q)
        pinlss = w["list"]
        for i in pinlss:
            try:
                data = json.loads(i['data'])
                print('文章的id:' + str(data['id']))
                print('标题:' + str(data['title']))
                print('描述:' + str(data['description']))
                print('用户名:' + str(data['user']['screen_name']))
                print('地区:' + str(i['column']))
                print('用户头像:' + str(data['user']['profile_image_url']))
                print('详情的链接详情的链接:' + str(data['target']))
                print('*' * 100)
            except KeyError:
                # Short posts have no title/user; print the plain text form.
                data = json.loads(i['data'])
                print('描述:' + str(data['text']))
                print('地区:' + str(i['column']))
                print('详情的链接详情的链接:' + str(data['target']))
                print('*' * 30)
def shakanzidian():
    """Debug helper: dump the collected category dict and list to stdout."""
    print(zidian)
    print(lb)
# Request headers: a browser User-Agent plus a captured session cookie.
# NOTE(review): the cookie is a hard-coded snapshot and will expire; requests
# that require a valid session will fail until it is refreshed.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36",
    'Cookie': 'aliyungf_tc=AQAAAFuOTB0E7AYADUF5ahoG/e8JSARM; xq_a_token=019174f18bf425d22c8e965e48243d9fcfbd2cc0; xq_a_token.sig=_pB0kKy3fV9fvtvkOzxduQTrp7E; xq_r_token=2d465aa5d312fbe8d88b4e7de81e1e915de7989a; xq_r_token.sig=lOCElS5ycgbih9P-Ny3cohQ-FSA; Hm_lvt_1db88642e346389874251b5a1eded6e3=1528716823; u=521528716822937; device_id=dfa2a3a1b381ea40ecb96f71dd70d167; _ga=GA1.2.1321525895.1528716824; _gid=GA1.2.430573630.1528716824; Hm_lpvt_1db88642e346389874251b5a1eded6e3=1528717023'
}
url = 'https://xueqiu.com/today#/'
if __name__ == '__main__':
    # Only the initial page fetch is enabled; the later pipeline stages are
    # left commented out by the original author.
    qaqu_syan(url,headers)
    #yiqu_soyan()
    #shakanzidian()
    #huoqujs(headers)
    #tiquflxx()
def solution(cacheSize, cities):
    """LRU-cache simulation: return the total execution time for the city
    lookups, where a cache hit costs 1 and a miss costs 5. Comparison is
    case-insensitive."""
    # With no cache every lookup is a miss.
    if cacheSize == 0:
        return len(cities) * 5
    total = 0
    cache = []
    for name in cities:
        key = name.lower()
        if key in cache:
            # Hit: refresh recency by moving the entry to the back.
            cache.remove(key)
            cache.append(key)
            total += 1
        else:
            # Miss: evict the least recently used entry when full.
            if len(cache) >= cacheSize:
                del cache[0]
            cache.append(key)
            total += 5
    return total
import argparse
import ast
import csv
import json
import os
import platform
import re
import xml.etree.ElementTree

from .ixia_headers import IXIA_RESULT_HEADERS
class ParserException:
    """Namespace grouping the parser-specific exception types."""

    class All(Exception):
        """Base class for every parser error."""

    class TestTypeNotFound(All):
        """Raised when no known result-file pattern matches the directory."""
NAS_DRIVE_ADDRESS = '96.37.189.6'
def determine_result_files(path, result_type):
    """Return (filename, pattern) pairs for every file in `path` whose name
    matches one of the result-file patterns registered for this test type."""
    matches = []
    patterns = IXIA_RESULT_HEADERS[result_type[0]].keys()
    for entry in os.listdir(path):
        for pattern in patterns:
            if re.match(pattern, entry):
                matches.append((entry, pattern))
    return matches
def convert(data):
    """Best-effort conversion of a CSV cell to int, float, or list.

    Returns the value unchanged when no conversion applies.

    Security fix: the original used ``eval`` on file-sourced strings that
    looked like lists; ``ast.literal_eval`` only evaluates Python literals
    and cannot execute code. The bare ``except`` clauses are narrowed to
    the conversion errors that actually occur, and a malformed "[...]"
    string now falls back to the raw string instead of raising.
    """
    try:
        return int(data)
    except (TypeError, ValueError):
        pass
    try:
        return float(data)
    except (TypeError, ValueError):
        pass
    if data and isinstance(data, str) and data[0] == '[' and data[-1] == ']':
        try:
            return ast.literal_eval(data)
        except (ValueError, SyntaxError):
            return data
    return data
def normalize(list_or_dict):
    """Apply convert() to every value of a dict or every item of a list;
    any other input is returned unchanged."""
    if isinstance(list_or_dict, dict):
        return {key: convert(value) for key, value in list_or_dict.items()}
    if isinstance(list_or_dict, list):
        return [convert(value) for value in list_or_dict]
    return list_or_dict
def parse_recursive(results, lines, lineno, pattern_obj):
    """Recursively parse CSV `lines` into `results` per `pattern_obj`.

    pattern_obj maps 'key@@regex' strings either to nested pattern dicts
    (section headers that open a sub-scope) or to non-dict markers (table
    header rows whose columns key the following data rows). Returns the
    line number at which the caller should resume scanning.
    """
    if lineno >= len(lines):
        return
    cols = []          # column names of the most recent table header
    cols_pattern = ""  # result key under which table rows are collected
    while lineno < len(lines):
        line = lines[lineno]
        line_split = []
        ready_to_insert = False
        for pattern, obj in pattern_obj.items():
            pattern_split = pattern.split('@@')
            pattern_key = pattern_split[0]
            pattern_regex = pattern_split[1]
            pattern_result = re.search(pattern_regex, line)
            line_split = [l.strip() for l in line.split(',')]
            if pattern_result:
                # Line matches pattern
                if isinstance(obj, dict):
                    # This is another dictionary to define parsing rules
                    if pattern_key not in results:
                        results[pattern_key] = []
                    ret = {}
                    arrinfo = pattern_result.groups()  # (unused)
                    keyinfo = pattern_result.groupdict()
                    if keyinfo:
                        # Insert named regex groups into the sub-result
                        keyinfo = normalize(keyinfo)
                        ret.update(keyinfo)
                    lineno = parse_recursive(ret, lines, lineno, obj)
                    results[pattern_key].append(ret)
                    ready_to_insert = False
                    break
                else:
                    # These are table rows. Save keys for later use
                    cols = line_split
                    cols_pattern = pattern_key
                    ready_to_insert = False
                    break
            elif cols:
                # Pattern did not match, but we may have more patterns to check
                # Signal that we are ready to insert entry
                ready_to_insert = True
        # Outside of for-loop
        if ready_to_insert:
            # Signals result row (no patterns match)
            if len(cols) == len(line_split):
                # Line has same column width as saved columns
                # Normalize cell values and add the row entry
                line_split = normalize(line_split)
                new_entry = dict(zip(cols, line_split))
                if cols_pattern not in results:
                    results[cols_pattern] = []
                # Save the entry
                results[cols_pattern].append(new_entry)
            elif line:
                # Width mismatch on a non-empty line: this sub-scope is
                # done; hand the line back to the caller.
                return lineno - 1
        # Increment the line number; we'll be iterating again
        lineno += 1
    return lineno
def parse_html_log(html_log):
    """Extract error messages from an IXIA HTML log.

    Collects the text wrapped in ``<span class=MSG_ERROR> ... </span>``
    pairs (which may span multiple lines). The initial search/replace
    normalizes an older log format that used ``<font color=red>`` markers.
    Returns the list of error strings.
    """
    errors = []
    DELIMITERS = ['<span class=MSG_ERROR>', '</span>']
    delimiter = 0  # index into DELIMITERS: 0 = seeking start tag, 1 = end tag
    with open(html_log) as html:
        error = ''
        for line in html:
            # The search and replace has to be done because there are two different
            # versions of the error log file
            line = line. \
                strip(). \
                replace('<font ', '</span><font '). \
                replace('<font color=red>', '<span class=MSG_ERROR>')
            index = 0
            while True:
                look_for = DELIMITERS[delimiter]
                index = line.find(look_for)
                if index >= 0:
                    if delimiter == 0:
                        # This is the starting tag. Slice out the tag from rest of line
                        line = line[(index + len(look_for)):]
                    else:
                        # This is the ending tag
                        error += line[:index]
                        line = line[(index + len(look_for)):]
                        # We'll add the error to the errors list
                        errors.append(error)
                        error = ''
                    # Switch the delimiter
                    delimiter ^= 1
                    # Continue the while loop. We may have more tags in the line
                    continue
                elif delimiter == 1:
                    # We're scanning for a closing tag, so add the line
                    error += line
                # Exit the while loop. Nothing else to scan in the current line
                break
    return errors
def parse_errors(path, file):
    """Locate the HTML log matching CSV result `file` and return its errors.

    Some tests emit several HTML logs with inconsistent naming, so when more
    than one candidate exists the one sharing the most '_'-delimited name
    pieces with the CSV file name wins.

    Bug fixes: the original never updated `highest` inside the comparison
    loop, so the "most matches" selection actually picked the last log with
    any match at all; and an empty/zero-match candidate set crashed on
    `html_files[0]` or `os.path.join(path, None)`.
    """
    entries = os.listdir(path)
    html_files = [name for name in entries if name[-5:] == '.html']
    if not html_files:
        # No HTML log at all -> nothing to report.
        return []
    if len(html_files) > 1:
        # Score each candidate by shared '_'-delimited pieces with the
        # CSV name (extension stripped) and keep the best scorer.
        html_log = None
        file_delimited = file[:-4].split('_')
        highest = 0
        for html_file in html_files:
            counter = 0
            html_delimited = html_file[:-5].split('_')
            for element in file_delimited:
                if element in html_delimited:
                    counter += 1
            if counter > highest:
                highest = counter  # was missing: made the scoring a no-op
                html_log = html_file
        if html_log is None:
            # No name overlap anywhere: fall back to the first log.
            html_log = html_files[0]
    else:
        # Since we only have 1 HTML file, we'll assume this is the log
        html_log = html_files[0]
    html_log = os.path.join(path, html_log)
    return parse_html_log(html_log)
def parse_result_by_file(path, file, result_type):
    """Parse one CSV result file into a nested dict.

    file: a (filename, pattern) pair from determine_result_files().
    When the parse yields nothing, the matching HTML log is scanned for
    error messages instead (stored under the 'errors' key).
    """
    ret = {}
    lines = []
    filename = os.path.join(path, file[0])
    with open(filename) as csv_file:
        for line in csv_file:
            lines.append(line.strip())
    # Parsing rules registered for this (test type, file pattern) pair.
    pattern_obj = IXIA_RESULT_HEADERS[result_type[0]][file[1]]
    parse_recursive(ret, lines, 0, pattern_obj)
    if ret:
        # No error
        ret['errors'] = None
    else:
        # There was probably an error... Let's find out!
        ret['errors'] = parse_errors(path, file[0])
    return ret
def parse_result_by_type(path, result_type):
    """Parse every result file of `result_type` found in `path`.
    Keys of the returned dict are file names without the '.csv' suffix."""
    results = {}
    for entry in determine_result_files(path, result_type):
        results[entry[0][:-4]] = parse_result_by_file(path, entry, result_type)
    return results
def determine_test_type(path):
    """Identify the test type from the files in `path`.

    Returns (pattern, filename) for the first file matching a known result
    header pattern, or None when nothing matches."""
    for entry in os.listdir(path):
        for file_pattern in IXIA_RESULT_HEADERS.keys():
            if re.search(file_pattern, entry):
                return (file_pattern, entry)
    return None
def parse_ixia_wml_recursion(wml):
    """Recursively convert an ElementTree node into nested dicts.

    Leaf nodes become their convert()-ed text. Repeated child tags are
    promoted to lists; non-empty text on a branch child is preserved under
    the 'TextNode' key of its dict.
    """
    if len(wml) == 0:
        return convert(wml.text)
    result = {}
    for child in wml:
        parsed = parse_ixia_wml_recursion(child)
        if isinstance(parsed, dict) and child.text:
            stripped = child.text.strip()
            if stripped:
                parsed['TextNode'] = stripped
        tag = child.tag
        if tag in result:
            # Second occurrence of a tag: promote the entry to a list.
            if not isinstance(result[tag], list):
                result[tag] = [result[tag]]
            result[tag].append(parsed)
        else:
            result[tag] = parsed
    return result
def xml_to_json(path, wml_file=None):
    """Parse the test's WML (XML) file into a nested dict keyed by the file
    name without its extension.

    NOTE(review): the wml_file=None branch is an unimplemented stub -- it
    prints a placeholder and then os.path.join(path, None) raises TypeError.
    The only caller (ixia_csv_to_json) always passes wml_file explicitly.
    """
    ret = {}
    if not wml_file:
        print('I have to find the WML file by path...')
    filename = os.path.join(path, wml_file)
    wml = xml.etree.ElementTree.parse(filename).getroot()
    ret[wml.tag] = parse_ixia_wml_recursion(wml)
    return {wml_file[:-4]: ret}
def ixia_csv_to_json(path, csv=True, wml=True):
    """Collect CSV and/or WML results under *path* into one JSON-ready dict.

    :param path: result directory (POSIX NAS path; rewritten on Windows)
    :param csv: include parsed CSV results under key 'csv'
    :param wml: include parsed WML results under key 'wml'
    :raises ParserException.TestTypeNotFound: when no known result file type
        is present in the directory
    """
    # On Windows the NAS mount is exposed as a UNC share instead of /mnt.
    if platform.system() == 'Windows':
        path = path.replace('/mnt/', '\\\\{}\\'.format(NAS_DRIVE_ADDRESS)).replace('/', '\\')
    result_type = determine_test_type(path)
    if not result_type:
        raise ParserException.TestTypeNotFound('Test Type was not found')
    result = {}
    if csv:
        result['csv'] = parse_result_by_type(path, result_type)
    if wml:
        result['wml'] = xml_to_json(path, result_type[1])
    return result
def main(args):
    """CLI entry point: parse the results at args.path and print them as JSON."""
    include_csv = True
    include_wml = True
    # When either filter flag is given, honour the flags exactly as passed
    # (so --csv alone excludes WML, and vice versa).
    if args.csv or args.wml:
        include_csv = bool(args.csv)
        include_wml = bool(args.wml)
    try:
        parsed = ixia_csv_to_json(args.path, include_csv, include_wml)
        print(json.dumps(parsed, indent=2 if args.indent else None))
    except ParserException.All as e:
        print('PARSER|ERROR|{}'.format(str(e)))
if __name__ == '__main__':
    # Build the command-line interface and dispatch to main().
    arg_parser = argparse.ArgumentParser(
        description='Convert CSV and WML into JSON'
    )
    arg_parser.add_argument(
        'path', metavar='/path/to/results', type=str,
        help='Result directory'
    )
    # The three boolean switches share the same shape; declare them in bulk.
    for flag, description in (('--indent', 'Indent the JSON output'),
                              ('--wml', 'Include the WML only'),
                              ('--csv', 'Include the CSV only')):
        arg_parser.add_argument(flag, help=description, action='store_true')
    parsed_args = arg_parser.parse_args()
    if parsed_args:
        main(parsed_args)
|
992,720 | 189a7f6838532a1eb0fe2c953b2685d56ff3ea37 | import cv2
import numpy as np
import pytesseract
from PIL import Image
# License-plate extraction pipeline: edge detection -> character-sized
# candidate boxes -> densest near-horizontal run of boxes -> crop -> OCR.
image = cv2.imread('elect1.jpg')
image2 = image.copy()
height, width, channel = image.shape

# Pre-processing: grayscale, slight blur, then Canny edges.
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imwrite('gray.jpg', gray)
bulr = cv2.GaussianBlur(gray, (3, 3), 0)
cv2.imwrite('bulr.jpg', bulr)
canny = cv2.Canny(bulr, 100, 200)
cv2.imwrite('canny.jpg', canny)

contours, hierarchy = cv2.findContours(canny, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

box1=[]
f_count=0
select=0
plate_width=0

# Keep contours whose bounding box is roughly character-shaped
# (taller than wide, modest area).
for i in range(len(contours)):
    cnt=contours[i]
    area = cv2.contourArea(cnt)
    x, y, w, h = cv2.boundingRect(cnt)
    rect_area = w * h  # area size
    aspect_ratio = float(w) / h  # ratio = width / height
    if(aspect_ratio >= 0.2) and (aspect_ratio <= 1.0) and (rect_area >= 100) and (rect_area <= 700) :
        cv2.rectangle(image2, (x, y), (x + w, y + h), (0, 255, 0), 1)
        box1.append(cv2.boundingRect(cnt))
cv2.imwrite('image.jpg', image2)

# Sort candidate boxes left-to-right by x (hand-rolled bubble sort).
for i in range(len(box1)):
    for j in range(len(box1) - (i + 1)):
        if box1[j][0] > box1[j + 1][0]:
            temp = box1[j]
            box1[j] = box1[j + 1]
            box1[j + 1] = temp

# Pick the box that anchors the longest near-horizontal run of neighbours:
# neighbours within 150 px horizontally whose slope to the anchor is < 0.25.
for m in range(len(box1)):
    count = 0
    for n in range(m + 1, (len(box1) - 1)):
        delta_x = abs(box1[n + 1][0] - box1[m][0])
        if delta_x > 150:
            break
        delta_y = abs(box1[n + 1][1] - box1[m][1])
        # Avoid division by zero below.
        if delta_x == 0:
            delta_x = 1
        if delta_y == 0:
            delta_y = 1
        gradient = float(delta_y) / float(delta_x)
        if gradient < 0.25:
            count = count + 1
    if count > f_count:
        select = m
        f_count = count;
        plate_width = delta_x
cv2.imwrite('image1.jpg', image)
cv2.imwrite('image2.jpg', image2)

# Crop a fixed-margin region around the selected box as the plate.
# NOTE(review): box1[select][3] is the box *height* (boundingRect is x,y,w,h)
# and 140 is a hard-coded crop width — presumably tuned for elect1.jpg; verify.
number_plate = image[box1[select][1] - 10 : box1[select][3] + box1[select][1] + 20, box1[select][0] - 10 : 140 + box1[select][0]]
cv2.imwrite('number_plate.jpg', number_plate)

# Upscale, binarise, and erode the crop before OCR.
resize_plate = cv2.resize(number_plate, None, fx = 1.8, fy = 1.8, interpolation = cv2.INTER_CUBIC + cv2.INTER_LINEAR)
cv2.imwrite('resize_plate.jpg', resize_plate)
plate_gray = cv2.cvtColor(resize_plate, cv2.COLOR_BGR2GRAY)
cv2.imwrite('plate_gray.jpg', plate_gray)
ret, th_plate = cv2.threshold(plate_gray, 150, 255, cv2.THRESH_BINARY)
cv2.imwrite('plate_th.jpg', th_plate)
kernel = np.ones((3, 3), np.uint8)
er_plate = cv2.erode(th_plate, kernel, iterations = 1)
er_invplate = er_plate
cv2.imwrite('er_plate.jpg', er_invplate)

# OCR the processed plate image with Tesseract's Korean language model.
result = pytesseract.image_to_string(Image.open('er_plate.jpg'), lang='kor')
#return(result.replace(" ", ""))
|
992,721 | bcec6419c00fa11ab4a9ccf81b764b732ff34a8b | # coding: utf8
import logging
from datetime import datetime
from functools import wraps
from ruamel import yaml
def read_yaml(yaml_file: str) -> dict:
    """Load a YAML document and return it as a dict.

    Args:
        yaml_file: path of the .yaml/.yml file to read

    Returns:
        dict parsed from the file
    """
    with open(yaml_file, 'r', encoding="utf8") as handle:
        parsed = yaml.safe_load(handle)
    logging.info(f"Yaml file {yaml_file} parsed!")
    return parsed
def log_time(f):
    """Decorator that prints wall-clock START/END lines around each call of *f*."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        started = datetime.now()
        print("{} - {} START".format(started, f.__name__), flush=True)
        result = f(*args, **kwargs)
        print("{} - {} END duration: {}".format(datetime.now(), f.__name__, datetime.now() - started), flush=True)
        return result
    return wrapper
|
992,722 | 3b9cf102018333cf25b7d9b7aa71db139eead8a6 | import unittest
from cogent.sip.sipsim import CubicSpline
class TestCubic(unittest.TestCase):
    """Unit test for CubicSpline's computed polynomial coefficients."""

    def testC(self):
        # The spline through these control points should reduce to the
        # cubic x^3 - 4x^2 + 5x + 0.
        spline = CubicSpline(0, 1, 2, 0, 5)
        expected = [1, -4, 5, 0]
        for i, wanted in enumerate(expected):
            # Fix: assertAlmostEquals was a deprecated alias that is removed
            # in Python 3.12; use the canonical assertAlmostEqual.
            self.assertAlmostEqual(spline.poly[i], wanted)
if __name__ == "__main__":
    # Run the test suite when executed directly.
    unittest.main()
|
992,723 | 27feb025c2caf88652aad7d691b1afae5628da47 | with open("input.txt") as f:
lines = f.readlines()
print(sum(map(int, lines)))
|
992,724 | 03e86edd54beea36768f499c8f7dd383e2b92f77 | # Generated by Django 3.1.7 on 2021-05-22 10:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds an optional float "cost"
    # field to the Sprint model.

    dependencies = [
        ('work', '0018_auto_20210521_1613'),
    ]

    operations = [
        migrations.AddField(
            model_name='sprint',
            name='cost',
            field=models.FloatField(null=True),
        ),
    ]
|
992,725 | ae40bd8ce540c1d923189fea02d2ab90be2edf58 | print("OK")
assert False |
992,726 | 3e93a82807e7b818803ae18da0a15a998ef53cf6 | import re
def solution(word, pages):
    """Return the index of the page with the highest matching score.

    Each page's base score is the number of standalone alphabetic tokens
    equal to *word* (case-insensitive). A page then donates
    base_score / #distinct-outgoing-links to every page it links to, and a
    page's total score is its base score plus everything it received.
    Ties are broken by the lowest page index.

    :param word: search word (alphabetic)
    :param pages: list of HTML documents as strings
    :return: index into *pages* of the best-scoring page
    """
    word = word.lower()
    # url -> [page index, base score, outbound links, received link score]
    page_info = {}
    for idx, page in enumerate(pages):
        # Canonical URL of this page, taken from its og:url meta property.
        meta_tag = re.findall(r"<meta property=\"og:url\" content=\"https://\S*\"", page, re.I | re.S)[0]
        cur = re.findall(r"https://\S*\w", meta_tag, re.I)[0]
        if cur not in page_info:
            page_info[cur] = [idx, 0, [], 0]
        # Collect every outbound link from <a href="..."> anchors.
        for anchor in re.findall(r"<a href=\"\S*?\"", page, re.I | re.S):
            page_info[cur][2].append(re.findall(r"https://\S*\w", anchor, re.I)[0])
        # Base score: count whole alphabetic tokens matching the word.
        for token in re.findall(r"[a-zA-Z]+", page):
            if token.lower() == word:
                page_info[cur][1] += 1
    # Distribute each page's link score among its distinct outbound links.
    # (Fix: removed a leftover debug print of the link score.)
    for info in page_info.values():
        if not info[2]:
            continue
        targets = set(info[2])
        link_score = info[1] / len(targets)
        for link in targets:
            if link in page_info:
                page_info[link][3] += link_score
    # Highest total score wins; strict ">" keeps the earliest index on ties.
    best_index, best_score = 0, -1
    for info in page_info.values():
        total = info[1] + info[3]
        if total > best_score:
            best_index, best_score = info[0], total
    return best_index
|
992,727 | d12b1332589bf0c9d87da8285a6abd0ca0b35f7b | # Generated by Django 3.0.8 on 2020-10-17 03:42
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: normalises the max_length of several
    # character fields on the "movies" model.

    dependencies = [
        ('FirstApp', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='movies',
            name='director',
            field=models.CharField(max_length=50),
        ),
        migrations.AlterField(
            model_name='movies',
            name='heroinname',
            field=models.CharField(max_length=50),
        ),
        migrations.AlterField(
            model_name='movies',
            name='heroname',
            field=models.CharField(max_length=50),
        ),
        migrations.AlterField(
            model_name='movies',
            name='moviename',
            field=models.CharField(max_length=100),
        ),
    ]
|
992,728 | 1a8fa80109df5d4dca5c95e1607c8d6fa63e8780 | import numpy as np
# Using the scikit-learn library
# https://pypi.org/project/scikit-learn/
from sklearn.feature_selection import SelectKBest
from sklearn.metrics import accuracy_score
from sklearn.model_selection import KFold
from sklearn.svm import SVC
# Config — grid-search bounds for SVC hyper-parameters and feature selection.
# NOTE(review): this script uses Python 2 print statements; it targets Python 2.
c_max = 5
c_step = 0.1
gamma_max = 0.1
gamma_step = 0.01
folds = 50
cache_size = 4000
selector_range = range(7, 13)

# Load training data (20 feature columns x1..x20 plus target y).
csv = np.recfromcsv("train.csv")
x_train = np.column_stack((csv.x1, csv.x2, csv.x3, csv.x4, csv.x5, csv.x6, csv.x7, csv.x8, csv.x9, csv.x10, csv.x11,
                           csv.x12, csv.x13, csv.x14, csv.x15, csv.x16, csv.x17, csv.x18, csv.x19, csv.x20))
y_train = np.column_stack((csv.y,))

# Load testing data
csv = np.recfromcsv("test.csv")
x_test = np.column_stack((csv.x1, csv.x2, csv.x3, csv.x4, csv.x5, csv.x6, csv.x7, csv.x8, csv.x9, csv.x10, csv.x11,
                          csv.x12, csv.x13, csv.x14, csv.x15, csv.x16, csv.x17, csv.x18, csv.x19, csv.x20))

# Scale training and testing X
# x_train = scale(x_train)
# x_test = scale(x_test)

# Do k-fold grid search: for every fold, try each (C, gamma, k-best) combo
# and remember the configuration with the best held-out accuracy.
best_score = None
best_svc = None
best_c = None
best_gamma = None
best_selector = None
best_selector_k = None
fold = KFold(folds)
i = 0
for train_index, test_index in fold.split(x_train):
    x_train_train = x_train[train_index]
    y_train_train = y_train[train_index]
    x_train_test = x_train[test_index]
    y_train_test = y_train[test_index]
    for c_iter in np.arange(c_step, c_max, c_step):
        for gamma_iter in np.arange(gamma_step, gamma_max, gamma_step):
            for sel in selector_range:
                # Selector: keep the `sel` most informative features.
                selector = SelectKBest(k=sel)
                selector.fit(x_train_train, y_train_train)
                x_transformed_train = selector.transform(x_train_train)
                x_transformed_test = selector.transform(x_train_test)
                # SVC trained on the reduced feature set.
                svc = SVC(C=c_iter, cache_size=cache_size, probability=False, gamma=gamma_iter,
                          decision_function_shape="ovo")
                svc.fit(x_transformed_train, y_train_train)
                y_train_predict = svc.predict(x_transformed_test)
                score = accuracy_score(y_train_test, y_train_predict)
                if best_score is None or score > best_score:
                    best_score = score
                    best_svc = svc
                    best_c = c_iter
                    best_gamma = gamma_iter
                    best_selector = selector
                    best_selector_k = sel
                    print "New best score: {}".format(best_score)
                print "\tFinished fold={}, c={}, gamma={}, selector={}".format(i, c_iter, gamma_iter, sel)
                # Early exit from all four loops once a perfect score is hit.
                if best_score == 1.0:
                    break
            if best_score == 1.0:
                break
        if best_score == 1.0:
            break
    if best_score == 1.0:
        break
    i += 1

# Predict data with best selector and best SVC
x_test_transformed = best_selector.transform(x_test)
y_predict = best_svc.predict(x_test_transformed)

# Save to file; ids start at 2000 to follow the training rows.
with open("result.csv", "w") as f:
    f.write("Id,y\n")
    _id = 2000
    for y in y_predict:
        f.write("{},{}\n".format(_id, y))
        _id += 1
print "Finished with best_score={}, best_c={}, best_gamma={}, best_selector_k={}".format(best_score, best_c, best_gamma,
                                                                                         best_selector_k)
|
992,729 | dd37256e65fb1e539901667020f88a0b69263057 | import numpy as np
from skatingAI.utils.utils import BodyParts
class HumanDistanceMap(object):
    """Weighted distance matrix between body parts of the skeleton graph."""

    def __init__(self):
        # Undirected adjacency list of the body-part skeleton.
        self.graph = {
            BodyParts.Head.name: [BodyParts.torso.name],
            BodyParts.RUpArm.name: [BodyParts.torso.name, BodyParts.RForeArm.name],
            BodyParts.RForeArm.name: [BodyParts.RUpArm.name, BodyParts.RHand.name],
            BodyParts.RHand.name: [BodyParts.RForeArm.name],
            BodyParts.torso.name: [BodyParts.Head.name, BodyParts.RUpArm.name,
                                   BodyParts.RThigh.name],
            BodyParts.RThigh.name: [BodyParts.RLowLeg.name, BodyParts.torso.name],
            BodyParts.RLowLeg.name: [BodyParts.RFoot.name, BodyParts.RThigh.name],
            BodyParts.RFoot.name: [BodyParts.RThigh.name],
        }
        self.weighted_distances = self._build_matrix()

    def _find_all_paths(self, start_vertex: str, end_vertex: str, path=None):
        """Find all simple paths from start_vertex to end_vertex in the graph.

        Fix: the original declared a mutable default argument (path=[]);
        although it was only ever rebound, a shared mutable default is a
        classic latent hazard, replaced here with the None-sentinel idiom.
        """
        path = (path or []) + [start_vertex]
        if start_vertex == end_vertex:
            return [path]
        paths = []
        for vertex in self.graph[start_vertex]:
            if vertex not in path:
                # Extend along every neighbour not yet on the current path.
                paths.extend(self._find_all_paths(vertex, end_vertex, path))
        return paths

    def _build_matrix(self):
        """Build the pairwise hop-count matrix over all graph nodes."""
        distance_map = []
        for a in self.graph:
            # Hop count = edges on the first path found from a to b.
            distance_map.append([len(self._find_all_paths(a, b)[0]) - 1
                                 for b in self.graph])
        return self._matrix_formatations(distance_map)

    def _matrix_formatations(self, distance_map):
        """Convert hop counts to float16 weights and add a background row/col."""
        distance_map = np.array(distance_map)
        # Normalise: nearer parts get weights closer to 1; the diagonal
        # (hop count 0 -> weight exactly 1) is zeroed out below.
        distance_map = (1 - distance_map / (distance_map.shape[0] + distance_map.shape[0] / 2)).astype(np.float16)
        distance_map[distance_map == 1] = 0
        # Prepend a background class weighted 1 against every body part.
        distance_map = np.insert(distance_map, (0), 1, axis=0)
        distance_map = np.insert(distance_map, (0), 1, axis=1)
        distance_map[0, 0] = 0
        return distance_map
|
992,730 | 02447512f8b8ef5d6b3b9f6e22346a93fb3f8377 | def main():
    """ Takes a one line input from STDIN and outputs 'yes' if it is a proper
    propositional statement, no otherwise. NO WHITESPACE
    and = '&'
    or = '|'
    equivalence = '='
    negation = '~'
    implication = '>'
    S = sentence
    P = proposition
    T/F = Truth Symbols
    C = Connective
    Author 1: Evan Srock (esrock)
    Author 2: Patrick Chadbourne
    Author 3: Katie Phillips
    """
    # NOTE(review): Python 2 print statements — this module targets Python 2.
    # Parser() reads one line from stdin; sentence() drives the recognition.
    if sentence(Parser()):
        print 'yes'
    else:
        print 'no'
# **************************************************************
def sentence(p):
    """Recognise one Sentence from the parser stream.

    Grammar handled here:
        S -> ~S | symbol | symbol C S
    where a symbol is a single proposition/truth letter.

    :param p: Parser instance positioned at the start of the sentence
    :return: True when a complete sentence is recognised, False otherwise
    """
    if p.get_next() == '~':
        return negation_sentence(p)
    if not is_symbol(p.get_next()):
        return False
    p.match('S')
    # End of input means the sentence is complete; otherwise a connective
    # (followed by another sentence) must come next.
    return True if p.get_next() is None else connective_sentence(p)
# **************************************************************
def negation_sentence(p):
    """Consume a leading '~' and recognise the negated sentence after it.

    :param p: Parser instance
    :return: boolean result of parsing the remainder as a sentence
    """
    # The '~' must be present; match() reports failure and exits otherwise.
    p.match('~')
    return sentence(p)
# **************************************************************
def connective_sentence(p):
    """Consume one binary connective and recognise the sentence after it.

    Accepted connectives: '=' (iff), '|' (or), '>' (implies), '&' (and).
    Any other token is forced through match('&'), which reports the error
    and exits — preserving the original failure behaviour.

    :param p: Parser instance
    :return: boolean result of parsing the right-hand sentence
    """
    lookahead = p.get_next()
    expected = lookahead if lookahead in ('=', '|', '>') else '&'
    p.match(expected)
    return sentence(p)
# **************************************************************
def is_symbol(p):
    """Return True when *p* is a single proposition/truth letter (A-Z, a-z).

    Robustness fix: the grammar functions feed this p.get_next(), which is
    None once the input is exhausted; the original then crashed in
    len(None) with a TypeError. Treat None (or any non-string) as "not a
    symbol" so the grammar check fails cleanly instead.

    :param p: candidate token (a string of length 1, or None at end of input)
    :return: boolean
    """
    return isinstance(p, str) and len(p) == 1 and p.isalpha()
# **************************************************************
class Parser:
    """
    A class to handle matching tokens and holding the input
    Initializer gets input
    match matches a passed in token with the recorded input
    get_next gets the next token in the input
    """
    # Cursor into the_input; the class attribute supplies the start value.
    index = 0

    def __init__(self):
        """
        sets up the parser by getting the input
        removes whitespace from the input
        """
        # NOTE(review): raw_input() — this module targets Python 2.
        self.the_input = raw_input().strip().replace(' ', '')
        if self.the_input == '':
            print ('No input detected')
            exit(1)

    def match(self, token):
        """
        Matches the token to the current part of the input. Increments
        index if successful
        :param token: a string of length 1 that is to be matched.
        'S' denotes symbol
        :return: boolean if match successful
        """
        try:
            # 'S' matches any single letter; any other token must match exactly.
            if token == 'S' and is_symbol(self.the_input[self.index]) \
                    or self.the_input[self.index] == token:
                self.index += 1
                return True
        except IndexError:
            # Ran out of input while a token was still expected.
            print 'Error on checking \'' + token + \
                '\': the next token is empty'
            exit(1)
        # Reaching here means the grammar was violated: report and stop.
        print 'No' # there is improper grammar
        exit(1)

    def get_next(self):
        """
        fetches the next token in the input
        :return: string of length 1 that is to be matched
        """
        try:
            return self.the_input[self.index]
        except IndexError:
            # Input exhausted.
            return None
if __name__ == '__main__':
    # Script entry point.
    main()
|
992,731 | 8767eeed406fc4b7beaeff1a5211ad9e7a5ba477 | from random import randrange, getrandbits
from tkinter import *
# Main window setup: fixed-size, non-resizable Tk root.
root = Tk()
root.geometry('1050x640')
root.resizable(0, 0)
root.title("Criptografia RSA - APS segundo semestre v2")

# Layout frames: controls are packed into the four edges of the window.
topFrame = Frame(root)
topFrame.pack(side=TOP)
bottomFrame = Frame(root)
bottomFrame.pack(side=BOTTOM)
leftFrame = Frame(root)
leftFrame.pack(side=LEFT)
rightFrame = Frame(root)
rightFrame.pack(side=RIGHT)

lblTitulo = Label(topFrame, text="CRIPTOGRAFIA RSA", foreground="black") # form header
lblTitulo.pack()
lblTitulo.configure(relief="ridge", font="Arial 24", border = 0)
class GeradorNumeroPrimo:
    """Random prime-number generator based on the Miller-Rabin test."""

    def __init__(self):
        # Generate and store a tested prime as soon as the object is built.
        self.numero_primo = self.gerar_numero_primo()

    def teste_miller_rabin(self, n, k=128):
        """Probabilistic primality test (Miller-Rabin).

        :param n: number to test
        :param k: number of random rounds (error probability <= 4**-k)
        :return: True if n is (very probably) prime

        Fix: the original answered the n < 6 fast path with a table lookup
        without rejecting n < 0 first, so e.g. n = -1 indexed the table from
        the end and was wrongly reported prime.
        """
        if n < 2:
            return False
        if n < 6:
            # Small cases answered exactly: 2, 3 and 5 are prime.
            return [False, False, True, True, False, True][n]
        if n % 2 == 0:
            return False
        # Write n - 1 as 2**s * r with r odd.
        s = 0
        r = n - 1
        while r & 1 == 0:
            s += 1
            r //= 2
        # Run k rounds with random witnesses.
        for _ in range(k):
            a = randrange(2, n - 1)
            x = pow(a, r, n)
            if x != 1 and x != n - 1:
                j = 1
                while j < s and x != n - 1:
                    x = pow(x, 2, n)
                    if x == 1:
                        return False
                    j += 1
                if x != n - 1:
                    return False
        return True

    def tentativa_de_numero(self, length):
        """Draw a random odd candidate with exactly *length* bits.

        :param length: bit length of the candidate
        :return: the candidate (also stored on self.numero_primo)
        """
        self.numero_primo = getrandbits(length)
        # Force the most- and least-significant bits to 1 so the candidate
        # has full bit length and is odd.
        self.numero_primo |= (1 << length - 1) | 1
        return self.numero_primo

    def gerar_numero_primo(self, length=5):
        """Generate a Miller-Rabin-tested prime of *length* bits.

        :param length: bit length of the prime
        :return: the prime (also stored on self.numero_primo)
        """
        self.numero_primo = 4  # guaranteed-composite seed value
        # Keep drawing candidates until one passes the primality test.
        while not self.teste_miller_rabin(self.numero_primo, 128):
            self.numero_primo = self.tentativa_de_numero(length)
        return self.numero_primo
# Instantiate the generator twice to obtain the two primes P and Q.
p = GeradorNumeroPrimo()
numero_p = p.numero_primo
q = GeradorNumeroPrimo()
numero_q = q.numero_primo

# Display the public primes at the top of the window.
lbl = Label(topFrame,text="-------------------------------------------------------------------", foreground="black") # separator
lbl.pack()
lbl.configure(relief="ridge", font="Arial 16 ", border= 0)
lblPrimoP = Label(topFrame,text="Número primo (P): " + str(numero_p ) , foreground="black") # shows prime P
lblPrimoP.pack()
lblPrimoP.configure(relief="ridge", font="Arial 16 ", border= 0)
lblPrimoQ = Label(topFrame,text="Número primo (Q): " + str(numero_q ) , foreground="black")
lblPrimoQ.pack()
lblPrimoQ.configure(relief="ridge", font="Arial 16 ", border= 0)
lbl2 = Label(topFrame,text="-------------------------------------------------------------------", foreground="black") # separator
lbl2.pack()
lbl2.configure(relief="ridge", font="Arial 16 ", border= 0)

# Inputs for the chosen public exponent and the message to encrypt/decode.
lblChavePublicaR = Label(leftFrame, text= "Escolha sua chave pública:" , foreground="black")
lblChavePublicaR.pack()
lblChavePublicaR.configure(relief="ridge", font="Arial 16 ", border= 0)
global txtChavePublicaR
txtChavePublicaR = Entry(leftFrame,font="Arial 16")
txtChavePublicaR.pack()
lblMsg = Label(leftFrame, text="Insira a mensagem: ", foreground="black")
lblMsg.pack()
lblMsg.configure(relief="ridge", font="Arial 16 ", border= 0)
global txtMsg
txtMsg = Entry(leftFrame,font="Arial 16")
txtMsg.pack()
lblInter = Label(rightFrame, text="Insira a mensagem a ser interpretada: ", foreground="black")
lblInter.pack()
lblInter.configure(relief="ridge", font="Arial 16 ", border= 0)
global txtInter
txtInter = Entry(rightFrame,font="Arial 16")
txtInter.pack()

# Public-key input fields (e, n).
lblChavePublicaE = Label(leftFrame, text= "Insira a primeira chave pública (E):" , foreground="black")
lblChavePublicaE.pack()
lblChavePublicaE.configure(relief="ridge", font="Arial 16 ", border= 0)
global txtChavePublicaE
txtChavePublicaE = Entry(leftFrame,font="Arial 16")
txtChavePublicaE.pack()
lblChavePublicaN = Label(leftFrame, text="Insira a segunda chave pública (N): ", foreground="black")
lblChavePublicaN.pack()
lblChavePublicaN.configure(relief="ridge", font="Arial 16 ", border= 0)
global txtChavePublicaN
txtChavePublicaN = Entry(leftFrame,font="Arial 16")
txtChavePublicaN.pack()

# Private-key input fields (d, n).
lblChavePrivadaD = Label(rightFrame, text= "Insira a primeira chave privada (D):" , foreground="black")
lblChavePrivadaD.pack()
lblChavePrivadaD.configure(relief="ridge", font="Arial 16 ", border= 0)
txtChavePrivadaD = Entry(rightFrame,font="Arial 16")
txtChavePrivadaD.pack()
lblChavePrivadaN = Label(rightFrame, text= "Insira a segunda chave privada (N): ", foreground="black")
lblChavePrivadaN.pack()
lblChavePrivadaN.configure(relief="ridge", font="Arial 16 ", border= 0)
txtChavePrivadaN = Entry(rightFrame,font="Arial 16")
txtChavePrivadaN.pack()
class Criptografia(object):
    """RSA encryption/decryption plus the GUI handlers that use it."""

    def criptografia(self, m, e, n):
        """Encrypt integer m with public key (e, n): c = m^e mod n.

        Fix: uses the three-argument pow() for modular exponentiation, which
        avoids building the enormous intermediate m**e and gives identical
        results to (m ** e) % n.
        """
        return pow(m, e, n)

    def descriptografia(self, c, d, n):
        """Decrypt integer c with private key (d, n): m = c^d mod n."""
        return pow(c, d, n)

    def encripta_mensagem(self):
        """Encrypt the text typed in txtMsg with the public key from the GUI."""
        s = txtMsg.get()
        # Only proceed when both key fields contain plain integers.
        if(str(txtChavePublicaE.get()).isnumeric() and str(txtChavePublicaN.get()).isnumeric()):
            e = int(txtChavePublicaE.get())
            n = int(txtChavePublicaN.get())
            # Encrypt character by character via the code points.
            enc = ''.join(chr(self.criptografia(ord(x), e, n)) for x in s)
            print(enc)
            lblEnc["text"] = enc
        else:
            lblEnc["text"] = "Insira apenas números inteiros!!"
        return lblEnc

    def decripta_mensagem(self):
        """Decrypt the text typed in txtInter with the private key from the GUI."""
        s = txtInter.get()
        # Only proceed when both key fields contain plain integers.
        if(str(txtChavePrivadaD.get()).isnumeric() and str(txtChavePrivadaN.get()).isnumeric()):
            d = int(txtChavePrivadaD.get())
            n = int(txtChavePrivadaN.get())
            dec = ''.join(chr(self.descriptografia(ord(x), d, n)) for x in s)
            lblDec["text"] = dec
        else:
            lblDec["text"] = "Insira apenas números inteiros!!"
        return lblDec
# Labels that display the encryption / decryption results.
lblEncCabecalho = Label(leftFrame,text="O texto criptografado é: ", foreground="black") # result header (encrypt side)
lblEncCabecalho.pack()
lblEncCabecalho.configure(relief="ridge", font="Arial 16 ", border= 0)
lblEnc = Label(leftFrame,text="-------------", foreground="black") # filled in by encripta_mensagem
lblEnc.pack()
lblEnc.configure(relief="ridge", font="Arial 16 ", border= 0)
lblEncCabecalho2 = Label(rightFrame,text="O texto descriptografado é: ", foreground="black") # result header (decrypt side)
lblEncCabecalho2.pack()
lblEncCabecalho2.configure(relief="ridge", font="Arial 16 ", border= 0)
lblDec = Label(rightFrame,text="--------------", foreground="black") # filled in by decripta_mensagem
lblDec.pack()
lblDec.configure(relief="ridge", font="Arial 16 ", border= 0)
class Chaves(Criptografia):
    """Derives the RSA key pair from primes p and q and feeds the GUI labels."""

    def __init__(self, p, q):
        self.p = p
        self.q = q
        n = self.p * self.q
        phi = (self.p - 1) * (self.q - 1)  # Euler's totient of n
        # Offer the user every valid public exponent: gcd(phi, e) == 1.
        lblEscolherChave["text"] = str(self.coprimos(phi)) + "\n"

    def gerar_chaves(self):
        """Compute and display the key pair for the exponent chosen in the GUI."""
        n = self.p * self.q
        phi = (self.p - 1) * (self.q - 1)  # Euler's totient of n
        # Only proceed when the chosen exponent field holds a plain integer.
        if(str(txtChavePublicaR.get()).isnumeric()):
            e = int(txtChavePublicaR.get())
            d = self.inverso_modular(e, phi)  # private exponent: d*e ≡ 1 (mod phi)
            lblChave["text"] = "\nChaves públicas (e=" + str(e) + ", n=" + str(n) + ")" + "\nChaves privadas (d="+ str(d) + ", n=" + str(n) + ")\n"
        else:
            lblChave["text"] = "Insira apenas números inteiros!"
        return lblChave

    def mdc(self, a, b):
        """Greatest common divisor of a and b (Euclid's algorithm)."""
        while a != 0:
            a, b = b % a, a
        return b

    def inverso_modular(self, a, m):
        """Modular inverse of a mod m by exhaustive search, or None."""
        for x in range(1, m):
            if (a * x) % m == 1:
                return x
        return None

    def coprimos(self, a):
        """List usable public exponents: invertible mod *a*, coprime with it,
        and not self-inverse (e must differ from its own d).

        Fix: the original removed self-inverse candidates from the list while
        iterating that same list, which skips the element after every removal
        and could leave self-inverse exponents in the result.
        """
        candidatos = []
        for x in range(2, a):
            if self.mdc(a, x) == 1 and self.inverso_modular(x, a) != None:  # gcd(phi, e) = 1
                candidatos.append(x)
        # Filter self-inverses with a fresh list instead of in-place removal.
        return [x for x in candidatos if x != self.inverso_modular(x, a)]
# Key display: these labels show the keys available for selection and the
# pair that will be used for the current encryption run.
lblEscolherChave = Label(topFrame,text="escolherchave" ,foreground="black")
lblEscolherChave.pack()
lblEscolherChave.configure(relief="ridge", font="Arial 16 ", border= 0)
lbl3 = Label(topFrame,text="-------------------------------------------------------------------", foreground="black") # separator
lbl3.pack()
lbl3.configure(relief="ridge", font="Arial 16 ", border= 0)
lblChave = Label(topFrame,text="chave que serão usadas" ,foreground="black")
lblChave.pack()
lblChave.configure(relief="ridge", font="Arial 16 ", border= 0)

# Buttons: each click runs the callback passed as "command".
criptografia = Criptografia()
chaves = Chaves(numero_p, numero_q)
btnCripto = Button(leftFrame, text="Gerar Chaves", fg="green", command = chaves.gerar_chaves)
btnCripto.pack() # generate the keys used to encrypt the typed text
btnCripto = Button(leftFrame, text="Criptografar", fg="green", command = criptografia.encripta_mensagem)
btnCripto.pack() # encrypt the typed text
btnDescripto = Button(rightFrame, text="Descriptografar", fg="green", command = chaves.decripta_mensagem)
btnDescripto.pack() # decrypt the typed text
btnQuit = Button(rightFrame, text="Sair", fg="red",font=" arial",command=quit)
btnQuit.pack() # close the program
root.mainloop() # run the Tk event loop until the window closes
|
992,732 | 82a58b77fba4fc7c9c102027593160a6e12cbaa2 | import rospy
import time
import numpy as np
from ackermann_msgs.msg import AckermannDriveStamped
from sensor_msgs.msg import LaserScan
from ar_track_alvar_msgs.msg import AlvarMarkers
class finalProgram():
    """ROS node: steers toward open space in laser scans while tracking AR markers."""

    def __init__(self):
        # Wire up ROS pub/sub; the scan callback drives the steering output.
        rospy.Subscriber("ackermann_cmd_mux/output", AckermannDriveStamped, self.ackermann_cmd_input_callback)
        rospy.Subscriber("/scan", LaserScan, self.findSpace)
        rospy.Subscriber('ar_pose_marker', AlvarMarkers, self.ar_callback, queue_size = 1)
        self.cmd_pub = rospy.Publisher('/ackermann_cmd_mux/input/default', AckermannDriveStamped, queue_size = 10)
        self.maxSpeed = 1    # constant forward speed command
        self.output = 0      # current steering command
        self.flag = None     # set to 1 once an AR marker has been selected
        self.last_ar = None  # id of the most recent valid (< 50) marker
        self.minimum = None  # id selected from the latest marker message
        self.refDist = 0.7
        self.error = 0
        self.kp = 0.6

    def ar_callback(self, ar_markers):
        """Track the id of the most relevant AR marker currently in view."""
        if len(ar_markers.markers) > 1:
            for i in range(1, len(ar_markers.markers)):
                self.minimum = ar_markers.markers[0].id
                # NOTE(review): this compares a pose y-coordinate against a
                # marker *id* and re-seeds minimum every pass — looks suspect,
                # but the original comparison is preserved here.
                if ar_markers.markers[i].pose.pose.position.y > self.minimum:
                    self.minimum = ar_markers.markers[i].id
                    self.flag = 1
        elif len(ar_markers.markers) == 1:
            self.minimum = ar_markers.markers[0].id
            self.flag = 1
        # No markers seen yet at all -> remember 0; otherwise keep the id of
        # the last marker whose id is in the valid range (< 50).
        if self.flag == None:
            self.last_ar = 0
        elif self.minimum < 50:
            self.last_ar = self.minimum

    def findSpace(self, msg):
        """Steer toward the middle of the widest open region of the scan.

        Fix: this method is registered as the /scan subscriber callback but
        was declared without the message parameter, so every invocation
        failed (and the body referenced the undefined name ``msg``). It now
        receives the LaserScan message properly.
        """
        ranges = msg.ranges
        space = []
        distances = []
        # Indices with more than 1 m of clearance count as open space.
        for i in range(len(ranges)):
            if(ranges[i] > 1):
                space.append(i)
            else:
                distances.append(ranges[i])
        ### steering
        # Aim at the midpoint of the open indices, normalised to [-0.5, 0.5].
        ini = space[0]
        last = space[len(space) - 1]
        steering = ((last + ini) / 2)/len(ranges) - 0.5
        # Clamp to the mechanical steering limits.
        if(steering > 0.34):
            steering = 0.34
        if(steering < -0.34):
            steering = -0.34
        self.output = steering
        self.ackermann_cmd_input_callback(AckermannDriveStamped())

    def ackermann_cmd_input_callback(self, msg):
        """Publish a drive command carrying the current speed and steering."""
        msg.drive.speed = self.maxSpeed
        msg.drive.steering_angle = self.output
        msg.drive.steering_angle_velocity = 1
        self.cmd_pub.publish(msg)
if __name__ == "__main__":
    # Initialise the ROS node and hand control to the ROS event loop.
    rospy.init_node("finalProgram")
    node = finalProgram()
    rospy.spin()
|
992,733 | a7ad99745700f4b45542629bbe8ed5d4a6cae98f | #!/usr/bin/env python3
import sys
import os
import configparser
myFolder = os.path.split(os.path.realpath(__file__))[0]
sys.path.append(os.path.join(myFolder, os.path.pardir))
from windows.window_setting import WindowOption
import time
__Author__ = 'Zhao Zeming'
__Version__ = 1.0
class WindowOptionLogic(WindowOption):
def __init__(self, parent=None):
self.parent = parent
self.cache_notch_filter = {50: 0, 60: 1}
self.cache_bandpass_high = {1: 0, 5: 1, 10: 2, 20: 3}
self.cache_bandpass_low = {50: 0, 100: 1, 200: 2, 450: 3}
self.cache_sampling_freq = {250: 0, 500: 1, 1000: 2, 2000: 3}
self.cache_channel_num = {64: 0, 128: 1, 192: 2}
self.myFile = os.path.split(os.path.realpath(__file__))[0]
super(WindowOptionLogic, self).__init__()
    def initUI(self):
        """Build the widgets (base class), load settings, and connect signals."""
        super(WindowOptionLogic, self).initUI()
        self.config_ini_read()
        # Page 0 buttons.
        self.pushbutton_ok_page0.clicked.connect(self.action_pushbutton_ok_page0)
        self.pushbutton_re_page0.clicked.connect(self.action_pushbutton_re_page0)
        self.pushbutton_de_page0.clicked.connect(self.action_pushbutton_de_page0)
        # Page 1 buttons.
        self.pushbutton_ok_page1.clicked.connect(self.action_pushbutton_ok_page1)
        self.pushbutton_re_page1.clicked.connect(self.action_pushbutton_re_page1)
        self.pushbutton_de_page1.clicked.connect(self.action_pushbutton_de_page1)
        # Per-section reset buttons.
        self.pushbutton_reset_filter.clicked.connect(self.action_pushbutton_reset_filter)
        self.pushbutton_reset_data.clicked.connect(self.action_pushbutton_reset_data)
        self.pushbutton_reset_tcpip.clicked.connect(self.action_pushbutton_reset_tcpip)
        # Toggle dependent widgets when their master controls change.
        self.checkbox_notch_filter.stateChanged.connect(self.action_notch_filter_change)
        self.checkbox_bandpass_filter.stateChanged.connect(self.action_bandpass_filter_change)
        self.radiobutton_restart_auto.toggled.connect(self.action_auto_press_change)
        self.radiobutton_users_gender_secret_page1.setChecked(True)
        self.display_initial()
def config_ini_read(self):
config_ini = configparser.ConfigParser()
config_res = configparser.ConfigParser()
file_config_ini = os.path.join(self.myFile, os.path.pardir, '.temp', '.config.ini')
file_config_res = os.path.join(self.myFile, os.path.pardir, 'config', 'config.ini')
config_ini.read(file_config_ini)
config_filter_data_ini = config_ini['Filter']
config_filter_name_ini = config_ini.options('Filter')
config_data_data_ini = config_ini['Data']
config_data_name_ini = config_ini.options('Data')
config_socket_data_ini = config_ini['Socket']
config_socket_name_ini = config_ini.options('Socket')
config_res.read(file_config_res)
config_filter_data_res = config_res['Filter']
config_filter_name_res = config_res.options('Filter')
config_data_data_res = config_res['Data']
config_data_name_res = config_res.options('Data')
config_socket_data_res = config_res['Socket']
config_socket_name_res = config_res.options('Socket')
self.dict_config_filter_res = {}
self.dict_config_data_res = {}
self.dict_config_socket_res = {}
self.dict_config_filter_ini = {}
self.dict_config_data_ini = {}
self.dict_config_socket_ini = {}
for i in config_filter_name_ini:
self.dict_config_filter_ini[i] = int(config_filter_data_ini[i])
self.dict_config_filter_res[i] = int(config_filter_data_res[i])
for i in config_data_name_ini:
self.dict_config_data_ini[i] = int(config_data_data_ini[i])
self.dict_config_data_res[i] = int(config_data_data_res[i])
for i in config_socket_name_ini:
self.dict_config_socket_ini[i] = config_socket_data_ini[i]
self.dict_config_socket_res[i] = config_socket_data_res[i]
    def display_initial(self):
        """Push every loaded working setting into its corresponding widget."""
        # Filter page: checkboxes and value combos (via index lookup tables).
        self.checkbox_notch_filter.setChecked(self.dict_config_filter_ini['filter_notch_able'])
        self.checkbox_bandpass_filter.setChecked(self.dict_config_filter_ini['filter_band_able'])
        self.combobox_notch_filter.setCurrentIndex(self.cache_notch_filter[self.dict_config_filter_ini['filter_notch']])
        self.combobox_bandpass_high.setCurrentIndex(self.cache_bandpass_high[self.dict_config_filter_ini['filter_band_high']])
        self.combobox_bandpass_low.setCurrentIndex(self.cache_bandpass_low[self.dict_config_filter_ini['filter_band_low']])
        self.combobox_sampling_freq.setCurrentIndex(self.cache_sampling_freq[self.dict_config_filter_ini['sampling_freq']])
        # Data page.
        self.combobox_channel_num.setCurrentIndex(self.cache_channel_num[self.dict_config_data_ini['channel_num']])
        self.spinbox_set_num.setValue(self.dict_config_data_ini['set_number'])
        self.spinbox_set_time.setValue(self.dict_config_data_ini['set_time'])
        self.spinbox_restart_auto.setValue(self.dict_config_data_ini['auto_res_time'])
        self.radiobutton_restart_auto.setChecked(self.dict_config_data_ini['auto_res_able'])
        self.radiobutton_restart_press.setChecked(not self.dict_config_data_ini['auto_res_able'])
        self.combobox_filetype_save.setCurrentIndex(self.dict_config_data_ini['filetype_save'])
        # Socket page.
        self.lineedit_tcp_address.setText(self.dict_config_socket_ini['tcp_address'])
        self.lineedit_tcp_port.setText(self.dict_config_socket_ini['tcp_port'])
        # Dependent widgets start enabled/disabled to match their masters.
        self.combobox_notch_filter.setEnabled(self.checkbox_notch_filter.isChecked())
        self.combobox_bandpass_high.setEnabled(self.checkbox_bandpass_filter.isChecked())
        self.combobox_bandpass_low.setEnabled(self.checkbox_bandpass_filter.isChecked())
        self.spinbox_restart_auto.setEnabled(self.radiobutton_restart_auto.isChecked())
    def action_pushbutton_ok_page0(self):
        # OK on page 0: nothing to persist here, just dismiss the dialog.
        self.close()
    def action_pushbutton_de_page0(self):
        # Cancel on page 0: discard and dismiss the dialog.
        self.close()
def action_pushbutton_ok_page1(self):
self.dict_config_filter_ini['filter_notch_able'] = str(int(self.checkbox_notch_filter.isChecked()))
self.dict_config_filter_ini['filter_band_able'] = int(self.checkbox_bandpass_filter.isChecked())
self.dict_config_filter_ini['filter_notch'] = int(self.combobox_notch_filter.currentText())
self.dict_config_filter_ini['filter_band_high'] = int(self.combobox_bandpass_high.currentText())
self.dict_config_filter_ini['filter_band_low'] = int(self.combobox_bandpass_low.currentText())
self.dict_config_filter_ini['sampling_freq'] = int(self.combobox_sampling_freq.currentText())
self.dict_config_data_ini['channel_num'] = int(self.combobox_channel_num.currentText())
self.dict_config_data_ini['set_number'] = int(self.spinbox_set_num.value())
self.dict_config_data_ini['set_time'] = int(self.spinbox_set_time.value())
self.dict_config_data_ini['auto_res_able'] = int(self.radiobutton_restart_auto.isChecked())
self.dict_config_data_ini['auto_res_time'] = int(self.spinbox_restart_auto.value())
self.dict_config_data_ini['filetype_save'] = int(self.combobox_filetype_save.currentIndex())
self.dict_config_socket_ini['tcp_address'] = self.lineedit_tcp_address.text()
self.dict_config_socket_ini['tcp_port'] = self.lineedit_tcp_port.text()
config_ini = configparser.ConfigParser()
config_info = configparser.ConfigParser()
file_config_ini = os.path.join(self.myFile, os.path.pardir, '.temp', '.config.ini')
file_user_info = os.path.join(self.myFile, os.path.pardir, '.temp', '.info.ini')
config_ini.read(file_config_ini)
config_info.read(file_user_info)
for i in self.dict_config_filter_ini.keys():
config_ini.set('Filter', i, str(self.dict_config_filter_ini[i]))
for i in self.dict_config_data_ini.keys():
config_ini.set('Data', i, str(self.dict_config_data_ini[i]))
for i in self.dict_config_socket_ini.keys():
config_ini.set('Socket', i, str(self.dict_config_socket_ini[i]))
config_ini.write(open(file_config_ini, 'w'))
self.close()
    def action_pushbutton_de_page1(self):
        # Cancel on page 1: close without writing the INI file.
        self.close()
    def action_pushbutton_re_page0(self):
        # NOTE(review): reset on page 0 is a no-op — confirm this is
        # intentional (page 0 may have no resettable state).
        pass
def action_pushbutton_re_page1(self):
self.dict_config_filter_ini = self.dict_config_filter_res.copy()
self.dict_config_data_ini = self.dict_config_data_res.copy()
self.dict_config_socket_ini = self.dict_config_socket_res.copy()
self.display_initial()
def action_pushbutton_reset_filter(self):
self.dict_config_filter_ini = self.dict_config_filter_res.copy()
self.display_initial()
def action_pushbutton_reset_data(self):
self.dict_config_data_ini = self.dict_config_data_res.copy()
self.display_initial()
def action_pushbutton_reset_tcpip(self):
self.dict_config_socket_ini = self.dict_config_socket_res.copy()
self.display_initial()
def action_notch_filter_change(self):
self.combobox_notch_filter.setEnabled(self.checkbox_notch_filter.isChecked())
def action_bandpass_filter_change(self):
self.combobox_bandpass_high.setEnabled(self.checkbox_bandpass_filter.isChecked())
self.combobox_bandpass_low.setEnabled(self.checkbox_bandpass_filter.isChecked())
def action_auto_press_change(self):
self.spinbox_restart_auto.setEnabled(self.radiobutton_restart_auto.isChecked())
if __name__ == '__main__':
    # Manual smoke test: launch the options dialog stand-alone.
    import sys
    from PyQt5.QtWidgets import QApplication
    app = QApplication(sys.argv)
    win = WindowOptionLogic()
    win.show()
    sys.exit(app.exec_())
|
992,734 | 68c5092cbbebf128c984951da57f24c9a9625217 | #Python problem 9
#This program shows how 2 separate functions calculate square roots, one is using the math library in python and the other is using Newtons square root method
#Created by Robert Kiliszewski
#04/10/2017
import math

# Compare Python's math.sqrt with a hand-rolled Newton iteration.
x = 100.0
print("Using Math.sqrt() = ", math.sqrt(x))


def z_next(z):
    """One Newton step for f(z) = z*z - x."""
    return z - (z * z - x) / (2 * z)


# Iterate from 1.0 until the value stops changing (a float fixed point).
repeat = 1.0
while True:
    step = z_next(repeat)
    if step == repeat:
        break
    repeat = step
print("Using newtons method for square roots", repeat)
|
992,735 | 22c5698ebeb6b247f841b62ddcfb018d2b5e663a | import sys
import os
import stat
import getopt
import random
import commands
import shutil
from sys import path
path.append(sys.path[0]+"/lib/")
from FuncbioOTU import getAttriValueFromSeqName,fasta2dict
def selectRefBySampeSize(minsampleSize,tempDir,copysequence,reference):
    """Split *copysequence* by per-sequence sampleSize.

    Sequences whose sampleSize attribute is >= *minsampleSize* are written
    to both the chimera-detection reference (.ref) and the nonchimera
    output; the rest become the re-query set (.requry).  All sequences
    from *reference* are appended to the .ref file as well.

    Returns a tuple ``(ref_path, requry_path, nonchimera_path)``.

    Fix: the three output handles were opened and never closed, so the
    files could be left unflushed when mothur/uchime reads them; they are
    now managed with ``with`` blocks.  ``tempDir`` is kept (unused) for
    backward compatibility with existing callers.
    """
    totalRef = os.path.splitext(copysequence)[0] + ".ref"
    print(totalRef)
    requry = os.path.splitext(copysequence)[0] + ".requry"
    nonchimera = os.path.splitext(copysequence)[0] + ".nonchimera"
    requrySeqDict = fasta2dict(copysequence)
    with open(totalRef, 'w') as fref, open(requry, 'w') as fqury, open(nonchimera, 'w') as fnonch:
        for name in requrySeqDict:
            if int(getAttriValueFromSeqName(name, "sampleSize")) >= minsampleSize:
                # High-coverage sequences double as reference and nonchimera output.
                fref.write(">%s\n%s\n" % (name, requrySeqDict[name]))
                fnonch.write(">%s\n%s\n" % (name, requrySeqDict[name]))
            else:
                fqury.write(">%s\n%s\n" % (name, requrySeqDict[name]))
        # The external reference database always goes into the .ref file.
        refdict = fasta2dict(reference)
        for name in refdict:
            fref.write(">%s\n%s\n" % (name, refdict[name]))
    return totalRef, requry, nonchimera
if __name__=="__main__":
    # Command-line driver: runs mothur/uchime chimera detection on the
    # pending sequences against a reference set, then splits the input
    # into .nonchimera / .chimera FASTA files in the original work dir.
    usage="""usage:
    --best_reference/-b (required) input file containing all reference sequences for chimera detection ("taxonomy_guided_OTU.fa")
    --pengding_sequence/-s (required) input file containing all candidate sequences subject to chimera detection ("pengding_sequences_multiple.fa")
    --sample_size/-r (optional) to specify minimum value of sample size for adding these pengding sequences into reference database, default:1
    --processors/-p (optional) processors, default:4
    """
    oldWorkDir=os.getcwd()
    useParaList=[]
    size='a'
    minsamplesize=False
    processors=4
    opts,arg=getopt.getopt(sys.argv[1:],"b:s:r:p:h",['best_reference=','pengding_sequence=','sample_size=','processors=','help'],)
    parameters=[a[0] for a in opts]
    # Validate required options before touching the filesystem.
    if '-h' in parameters or '--help' in parameters:
        print usage
        sys.exit(1)
    if len(parameters)==0:
        print usage
        sys.exit(1)
    if '-b' not in parameters and '--best_reference' not in parameters:
        print "***Error, --best_reference/-b is requred.***\n"
        print usage
        sys.exit(1)
    if '-s' not in parameters and '--pengding_sequence' not in parameters:
        print "***Error, --pengding_sequence/-s is requred.***\n"
        print usage
        sys.exit(1)
    for i,a in opts:
        if i in ("--best_reference","-b"):
            if not os.path.isfile(a):
                print "%s is not found."%(a)
                sys.exit(1)
            reference=os.path.abspath(a)
        if i in ("--pengding_sequence","-s"):
            if not os.path.isfile(a):
                print "%s is not found."%(a)
                sys.exit(1)
            sequence=os.path.abspath(a)
        if i in ("--sample_size","-r"):
            try:
                minsamplesize=int(a)
            except:
                print "***Error, sample size (--sample_size/-r) must be integer.***\n"
                print usage
                sys.exit(1)
        if i in ("--processors","-p"):
            try:
                processors=int(a)
            except:
                print "***Error, the processors (--processors/-p) must be integer.***\n"
                print usage
                sys.exit(1)
    # Output files live in the directory the user launched from.
    nonchimera=oldWorkDir+"/pengding_sequences_multiple.nonchimera"
    chimera=oldWorkDir+"/pengding_sequences_multiple.chimera"
    # Work inside a random temp dir next to this script.
    script_loc=os.path.split(os.path.realpath(sys.argv[0]))[0]
    tempDir=script_loc+"/temp"+str(random.randint(10000,99999))
    os.mkdir(tempDir)
    os.chdir(tempDir) #change work directory
    copysequence=tempDir+"/"+os.path.basename(sequence)
    shutil.copyfile(sequence,copysequence)
    # Make sure the bundled mothur/uchime binaries are executable.
    # NOTE(review): the assignments below rebind the name ``commands`` (the
    # imported module) to a string; if either except branch runs, the later
    # commands.getoutput(...) calls would raise AttributeError — confirm.
    uchimeraPath=script_loc+"/lib/Mothur.cen_64/mothur/uchime"
    mothurPath=script_loc+"/lib/Mothur.cen_64/mothur/mothur"
    try:
        os.chmod(uchimeraPath,stat.S_IRWXU)
    except:
        commands="chmod a+x %s"%(uchimeraPath)
        print "Please give executable permission to %s by this terminal commands:\n%s"%(uchimeraPath,commands)
    try:
        os.chmod(mothurPath,stat.S_IRWXU)
    except:
        commands="chmod a+x %s"%(mothurPath)
        print "Please give executable permission to %s by this terminal commands:\n%s"%(mothurPath,commands)
        sys.exit(1)
    # When a minimum sample size is given, high-coverage sequences are
    # promoted into the reference set before chimera detection.
    if minsamplesize:
        totalRef,requry,subnonchimera=selectRefBySampeSize(minsamplesize,tempDir,copysequence,reference)
        useParaList.append("fasta=%s"%(requry))
        useParaList.append("reference=%s"%(totalRef))
    else:
        useParaList.append("fasta=%s"%(copysequence))
        useParaList.append("reference=%s"%(reference))
    useParaList.append("processors=%s"%(processors))
    muthurParaStr="\"#chimera.uchime("+",".join(useParaList)+")\""
    muthurPath=script_loc+"/lib/Mothur.cen_64/mothur/mothur"
    commands.getoutput(muthurPath+" "+muthurParaStr)
    #put chimera sequence names into list.
    accons=os.path.splitext(copysequence)[0]+".uchime.accnos"
    acconsList=[]
    for line in open(accons,"r"):
        acconsList.append(line.strip())
    # Partition the query sequences by membership in the accnos list.
    faDict=fasta2dict(copysequence)
    fout1=open(nonchimera,"w")
    fout2=open(chimera,"w")
    for line in faDict:
        if line in acconsList:
            fout2.write(">%s\n%s\n"%(line,faDict[line]))
        else:
            fout1.write(">%s\n%s\n"%(line,faDict[line]))
    # Sequences promoted to the reference earlier are nonchimera by definition.
    if minsamplesize:
        subnonDict=fasta2dict(subnonchimera)
        for name in subnonDict:
            fout1.write(">%s\n%s\n"%(name,subnonDict[name]))
    remove_intermediate_file=r"rm -rf "+script_loc+r"/temp* "
    commands.getoutput(remove_intermediate_file)
|
992,736 | 76ac1dca6871453bd58cc9aafcb8973b301efc2e | import random
import numpy as np
from keras.layers import Dense, GRU, Masking, LeakyReLU
from keras.models import Sequential
from keras.optimizers import Adam
from agents.drqn.replay_buffer import ReplayBuffer
class LinearSchedule(object):
def __init__(self, schedule_timesteps, final_p, initial_p=1.0):
"""Linear interpolation between initial_p and final_p over
schedule_timesteps. After this many timesteps pass final_p is
returned.
Parameters
----------
schedule_timesteps: int
Number of timesteps for which to linearly anneal initial_p
to final_p
initial_p: float
initial output value
final_p: float
final output value
"""
self.schedule_timesteps = schedule_timesteps
self.final_p = final_p
self.initial_p = initial_p
def value(self, t):
"""See Schedule.value"""
fraction = min(float(t) / self.schedule_timesteps, 1.0)
return self.initial_p + fraction * (self.final_p - self.initial_p)
class DRQNAgent(object):
    """Deep Recurrent Q-Network agent.

    Uses a GRU over the last ``lookback`` observations, an experience
    replay buffer, a periodically-synced target network, and (optionally)
    double-DQN target computation.
    """
    def __init__(self,
                 ob_dim,
                 ac_dim,
                 lookback,
                 batch_size,
                 initial_exploration_steps,
                 exploration=LinearSchedule(1000, 0.1),
                 double='True'):
        # ob_dim / ac_dim: observation and (discrete) action space sizes.
        # lookback: number of past observations fed to the GRU per step.
        # NOTE(review): ``double`` is a *string* flag ('True'/'False'), not a
        # bool — train_model compares against those exact strings; confirm.
        # NOTE(review): the ``exploration`` default is a single shared
        # LinearSchedule instance across all agents built with the default.
        self.batch_size = batch_size
        self.replay_buffer = ReplayBuffer(buffer_size=int(1e4), replay_batch_size=batch_size, seed=0)
        self.ob_dim = ob_dim
        self.ac_dim = ac_dim
        self.lookback = lookback
        self.exploration = exploration
        self.double = double
        # These are hyper parameters
        self.discount_factor = 0.9
        self.critic_lr = 1e-4
        self.initial_exploration_steps = initial_exploration_steps
        # create model for Q network
        self.model = self.initialize_model('Model')
        self.target_model = self.initialize_model('Target Model')
        self.update_target_model()
        # Number of finished episodes
        self.t = 0
    def initialize_model(self, name=None):
        """
        Approximate Q function using Neural Network:
        obs_seq is input and Q Value of each action is output the of network
        """
        model = Sequential()
        # Masking lets zero-padded (short) observation sequences be ignored.
        model.add(Masking(mask_value=0., input_shape=(self.lookback, self.ob_dim)))
        model.add(GRU(64, input_dim=(self.lookback, self.ob_dim), kernel_initializer='zeros'))
        model.add(LeakyReLU(alpha=0.01))
        model.add(Dense(64, kernel_initializer='he_uniform'))
        model.add(LeakyReLU(alpha=0.01))
        # Linear head: one Q-value per discrete action.
        model.add(Dense(self.ac_dim, activation='linear', kernel_initializer='he_uniform'))
        model.compile(loss='mse', optimizer=Adam(lr=self.critic_lr))
        return model
    def update_target_model(self):
        """
        After some time interval update the target model to be same with model
        """
        self.target_model.set_weights(self.model.get_weights())
    def get_action(self, obs_seq):
        """
        Args:
            obs_seq: observation sequence
        Returns:
            ac: integer from 0 to (ac_dim - 1)
        """
        # Before enough experience exists, act uniformly at random.
        if len(self.replay_buffer) < self.initial_exploration_steps:
            return np.random.choice(self.ac_dim, 1)[0]
        elif random.random() < self.exploration.value(self.t):
            # Exploration branch: sample an action from a softmax over Q-values
            # (not uniform random), so better actions stay more likely.
            obs_seq = obs_seq.reshape((1, self.lookback, self.ob_dim))
            q_value = self.model.predict(obs_seq, batch_size=1).flatten()
            s = np.exp(q_value)
            probability_list = s / np.sum(s)
            ac = np.random.choice(self.ac_dim, 1, p=probability_list)[0]
        else:
            # Greedy branch: pick the argmax Q-value action.
            obs_seq = obs_seq.reshape((1, self.lookback, self.ob_dim))
            q_value = self.model.predict(obs_seq, batch_size=1).flatten()
            ac = np.argmax(q_value)
        return ac
    def sample_trajectory(self, env):
        """
        Sample a trajectory by running the experiment, store all the transition pairs in the replay buffer
        """
        obs_s = []
        init_obs = env.reset()
        obs_s.append(init_obs)
        total_re = 0
        def padding(seq):
            # Left-pad short sequences with zero observations so every
            # network input is exactly ``lookback`` steps long.
            if len(seq) < self.lookback:
                len_to_pad = self.lookback - len(seq)
                pad = [np.zeros_like(init_obs)] * len_to_pad
                seq = pad + seq
            return seq
        while True:
            obs_seq = np.asarray(padding(obs_s[-self.lookback:]))
            ac = self.get_action(obs_seq)
            new_obs, re, done, _ = env.step(ac)
            obs_s.append(new_obs)
            new_obs_seq = np.asarray(padding(obs_s[-self.lookback:]))
            total_re += re
            self.replay_buffer.add(obs_seq, ac, re, new_obs_seq, done)
            if done:
                break
        # One more finished episode drives the exploration schedule forward.
        self.t += 1
        return total_re
    def train_model(self):
        """
        Train the model.
        """
        # Skip training until the initial exploration phase filled the buffer.
        if len(self.replay_buffer) < self.initial_exploration_steps:
            return
        batch_size = min(self.batch_size, len(self.replay_buffer))
        mini_batch = [self.replay_buffer.sample() for _ in range(batch_size)]
        update_input = np.zeros((batch_size, self.lookback, self.ob_dim))
        update_target = np.zeros((batch_size, self.lookback, self.ob_dim))
        acs, res, dones = [], [], []
        # Unpack (obs_seq, action, reward, next_obs_seq, done) tuples.
        for i in range(batch_size):
            update_input[i] = mini_batch[i][0]
            acs.append(mini_batch[i][1])
            res.append(mini_batch[i][2])
            update_target[i] = mini_batch[i][3]
            dones.append(mini_batch[i][4])
        target = self.model.predict(update_input)
        target_val = self.target_model.predict(update_target)
        for i in range(batch_size):
            # Q Learning: get maximum Q value at s' from target model
            if dones[i]:
                target[i][acs[i]] = res[i]
            else:
                if self.double == 'False':
                    # Vanilla DQN target: max over target-network Q-values.
                    target[i][acs[i]] = res[i] + self.discount_factor * (np.amax(target_val[i]))
                elif self.double == 'True':
                    # Double DQN: online net picks the action, target net scores it.
                    target[i][acs[i]] = res[i] + self.discount_factor * target_val[i][np.argmax(target[i])]
                else:
                    raise Exception('Unknown Double!')
        self.model.fit(update_input, target, batch_size=self.batch_size,
                       epochs=1, verbose=0)
992,737 | 92dbf16d9a63660631454138268b123db34ebdc9 | from django.contrib import admin
from models import SELIC
from models import IGPM
from models import IPCA
# Expose the three financial-index models in the Django admin.
admin.site.register(SELIC)
admin.site.register(IGPM)
admin.site.register(IPCA)
|
992,738 | 0a9635623688fc6533916012d810950b8702adc9 | import logging
from itertools import izip_longest
from time import time, sleep
from decorator import decorator
# Warning: The simplecache doesn't support the expected lock interface
# so make sure to use a lockable cache.
# See https://pypi.org/project/redis for an example.
from .cache import CACHE, cachekey_static
# Pull optional settings from Django when available; fall back to safe
# defaults when Django is not installed.
try:
    from django.conf import settings
    THROTTLE_ENABLED = getattr(settings, 'THROTTLE_ENABLED', True)
    THROTTLE_LOGGING = getattr(settings, 'THROTTLE_LOGGING', True)
    # Bug fix: the fallback default was ``True``; LOGGER_PREFIX is
    # concatenated with __name__ below, so a project without the setting
    # raised ``TypeError`` on getLogger.  Default to the empty string.
    LOGGER_PREFIX = getattr(settings, 'LOGGER_PREFIX', '')
except ImportError:
    THROTTLE_ENABLED = True
    THROTTLE_LOGGING = False
    LOGGER_PREFIX = ''
# Define real loggers only when throttle logging is enabled; otherwise
# install no-op stand-ins so call sites stay unconditional.
if THROTTLE_LOGGING:
    logger = logging.getLogger(LOGGER_PREFIX + __name__)
    def log_message(label, text, seconds):
        # Tuple/list labels come from multi-limit mode; show just the keys.
        if isinstance(label, (tuple, list)):
            label = str([i[1] for i in label])
        return '"%s" throttle %s %s seconds' % (label, text, seconds)
    def log_info(label, text, seconds):
        logger.info(log_message(label, text, seconds))
    def log_warning(*args, **kwargs):
        logger.warning(*args, **kwargs)
else:
    # NOTE(review): log_message is not defined on this branch, yet _now()
    # in throttle() calls it when a timeout fires — confirm that timeouts
    # are only expected while THROTTLE_LOGGING is enabled.
    def log_info(*args, **kwargs):
        pass
    log_warning = log_info
class ThrottleTimeout(Exception):
    """Raised when a throttled call cannot obtain a slot before timeout."""
def throttle(limit, key=cachekey_static, cache=CACHE,
             retry=True, timeout=None, marker=None, lockargs=None):
    """
    A decorator to ensure function calls are rate-limited. Calls exceeding
    limit are dropped or retried until limit condition is satisfied.
    Multiple rate limits can be set by passing in a tuple or list as values
    for `limit` and `key`. The limits are tested in order so it's usually
    best to list the most likely limits first... assuming all other criteria
    is roughly equal, this will likely mean from the smallest to largest
    limit value (or from slowest to fastest rate).
    `limit`
        Maximum rate in calls/second
    `key`
        Cache key function to rate-limit calls into distinct buckets
        (default: static_cachekey)
    `cache`
        Cache object with django cache style `set` and `get` interface
        (default: django_cache )
    `retry`
        If True, retry until rate-limit condition is satisfied or until timeout
        (default: True)
    `timeout`
        Maximum time limit before next retry attempt should just raise an error
        (default: the greater of 10 seconds or 10/limit)
    `marker`
        Object returned when call is rate-limited and `retry` is False
        (default: None)
    `lockargs`
        A dictionary to override the default kwargs for `cache.lock` function.
        (default: None)
    """
    # When throttling is globally disabled, decorate with the identity.
    if not THROTTLE_ENABLED:
        return lambda func: func
    _timeout = timeout or 10
    multi = isinstance(limit, (tuple, list))
    if multi:
        # Normalise keys to a list and pair each rate with its key function;
        # missing key functions fall back to the static key.
        if not isinstance(key, (tuple, list)):
            key = [key] if key else []
        assert len(limit) >= len(key)
        # Convert rates (calls/second) into minimum spacing (seconds/call).
        minimum = [1.0 / float(l) for l in limit]
        maximum = max(minimum)
        expire = [max(10 * m, _timeout) for m in minimum]
        limit = list(izip_longest(
            minimum, key, expire, fillvalue=cachekey_static))
    else:
        minimum = maximum = 1.0 / float(limit)
        expire = max(10 * minimum, _timeout)
    timeout = timeout or max(10, maximum * 10)
    lockargs = lockargs or dict(timeout=1, blocking_timeout=timeout)
    # (Removed an unused local `_message` helper — it duplicated the
    # module-level log_message, which is what _now() actually calls.)
    def _now(label, start):
        # Return the current time, raising ThrottleTimeout once the total
        # wait since `start` exceeds the configured timeout.
        now = time()
        if now - start > timeout:
            message = log_message(label, 'timeout after', now - start)
            log_warning(message)
            raise ThrottleTimeout(message)
        return now
    @decorator
    def single_limit(func, *args, **kwargs):
        # Fast path used when a single rate limit is configured.
        _key = key(func, args, kwargs)
        if _key:
            start = time()
            done = False
            while not done:
                delay = 0
                done = True
                # The cache lock serialises the read-check-set sequence
                # across processes sharing the same cache.
                with cache.lock('throttle.lock', **lockargs):
                    now = _now(_key, start)
                    delay = max(cache.get(_key, 0) + minimum - now, 0)
                    if not delay:
                        cache.set(_key, now, expire)
                if delay:
                    if not retry:
                        return marker
                    log_info(_key, 'retry in', delay)
                    sleep(delay)
                    done = False
        return func(*args, **kwargs)
    @decorator
    def multi_limit(func, *args, **kwargs):
        # Evaluate every configured (rate, key, expire) triple; all limits
        # must be satisfied before the call proceeds.
        _limits = [
            (minimum, key(func, args, kwargs), expire)
            for minimum, key, expire in limit]
        _limits = [
            (minimum, key, expire)
            for minimum, key, expire in _limits if key]
        if _limits:
            start = time()
            done = False
            while not done:
                delay = 0
                done = True
                with cache.lock('throttle.lock', **lockargs):
                    now = _now(_limits, start)
                    seen = set()
                    for minimum, key, expire in _limits:
                        # A key may appear under several limits; only the
                        # first (slowest-listed) occurrence is enforced.
                        if key in seen:
                            continue
                        seen.add(key)
                        delay = max(cache.get(key, 0) + minimum - now, 0)
                        if delay:
                            break
                        cache.set(key, now, expire)
                if delay:
                    if not retry:
                        return marker
                    log_info(_limits, 'retry in', delay)
                    sleep(delay)
                    done = False
        return func(*args, **kwargs)
    return multi_limit if multi else single_limit
|
992,739 | 9dfc25bad470d324c84ada63613dec6e1b4d3b31 |
if __name__ == '__main__':
    # Read n (array size), m (group size) and k (group count), then the
    # n array values, all from stdin.
    n,m,k = [int(i) for i in input().strip().split()]
    nums = [int(i) for i in input().strip().split()]
    # The m*k largest values are the ones that must be selected.
    beauty_num = m*k
    num_sort = sorted(nums,reverse= True)[:beauty_num]
    # Multiset of still-needed values: value -> remaining count.
    num_dict = dict()
    for i in num_sort:
        if i not in num_dict:
            num_dict[i] = 1
        else:
            num_dict[i] += 1
    counter = 0
    ans = []
    # Walk the array in original order; each time m selected elements have
    # been consumed, record a split position (1-based index).
    for i in range(n):
        if nums[i] in num_dict and num_dict[nums[i]] > 0:
            counter += 1
            num_dict[nums[i]] -= 1
            if counter >= m:
                ans.append(i + 1)
                counter = 0
    print(sum(num_sort))
    # Only k-1 split points are printed; the last group implicitly ends at n.
    print(' '.join(str(i) for i in ans[:-1]))
992,740 | 6f14bb52844a26f7aef17e628a164f8ceaef01c9 | from PIL import Image
import numpy as np
import os
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
sign_labels_map = {'a': 0, 'c': 2, 'b': 1, 'e': 4, 'd': 3, 'f': 5, 'i': 7, 'h': 6, 'k': 8, 'm': 10, 'l': 9, 'o': 12, 'n': 11, 'q': 14, 'p': 13, 'r': 15, 'u': 17, 't': 16, 'w': 19, 'v': 18, 'y': 21, 'x': 20}
# Predict a single image
def predict_img(img, img_width, img_height, model, weights_name):
    """Run *model* on a single image and return the argmax class index.

    ``img_width``/``img_height``/``weights_name`` are accepted for
    interface compatibility but are not used here.
    """
    arr = img_to_array(img) * (1. / 255)
    batch = arr.reshape((1,) + arr.shape)
    scores = model.predict(batch)
    return np.argmax(scores)
# Read and predict all images from a folder
def predict_from_folder(folder, img_width, img_height, model, sign_labels, weights_name):
    """Predict every image under *folder* (one subdir per class) and print
    per-angle accuracy.

    The camera angle (front/top/bottom/left/right) is inferred from the
    filename; the expected class label is the filename's first character.
    """
    images = []
    total_imgs = 0
    correct_predictions = 0
    correct = False
    # Count correct predictions for each angle
    front_correct = 0
    top_correct = 0
    bottom_correct = 0
    left_correct = 0
    right_correct = 0
    # Count total images for each angle
    front_total = 0
    top_total = 0
    bottom_total = 0
    left_total = 0
    right_total = 0
    for dir in os.listdir(folder):
        for filename in os.listdir(folder + "/" + dir):
            correct = False
            img = Image.open(os.path.join(folder + "/" + dir,filename))
            img = np.asarray(img)
            # Tally per-angle totals from the filename.
            if filename.find('front') != -1:
                front_total += 1
            elif filename.find('top') != -1:
                top_total += 1
            elif filename.find('bottom') != -1:
                bottom_total += 1
            elif filename.find('right') != -1:
                right_total += 1
            elif filename.find('left') != -1:
                left_total += 1
            pred = predict_img(img, img_width, img_height, model, weights_name)
            # Check if prediction is correct
            if str(sign_labels[pred]) == str(filename[0]):
                correct = True
                correct_predictions += 1
                if filename.find('front') != -1:
                    front_correct += 1
                elif filename.find('top') != -1:
                    top_correct += 1
                elif filename.find('bottom') != -1:
                    bottom_correct += 1
                elif filename.find('right') != -1:
                    right_correct += 1
                elif filename.find('left') != -1:
                    left_correct += 1
            total_imgs += 1
    # Integer-percentage report (Python 2 print statements).
    print "Total correct predictions: " + '\n' + "    -" + str( (correct_predictions * 100) / total_imgs ) + "% " + " out of " + str( total_imgs ) + " total images"
    print "Front correct predictions: " + '\n' + "    -" + str((front_correct * 100)/ front_total ) + "%" + " out of " + str(front_total) + " total images"
    if top_total != 0:
        print "Top correct predictions: " + '\n' + "    -" + str((top_correct * 100)/ top_total ) + "%" + " out of " + str( top_total) + " total images"
    if bottom_total != 0:
        print "Bottom correct predictions:" + '\n' + "    -" + str((bottom_correct * 100)/ bottom_total ) + "%" + " out of " + str( bottom_total) + " total images"
    if left_total != 0:
        print "Left correct predictions:" + '\n' + "    -" + str((left_correct * 100)/ left_total ) + "%" + " out of " + str( left_total) + " total images"
    if right_total != 0:
        print "Right correct predictions:" + '\n' + "    -" + str((right_correct * 100)/ right_total ) + "%" + " out of " + str( right_total) + " total images"
# Function for pretty printing of the confusion matrix
def print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False, hide_threshold=None):
    """pretty print for confusion matrixes

    cm is indexed as cm[true_row, predicted_col]; hide_* flags blank out
    zero cells, the diagonal, or cells at/below a threshold.
    """
    columnwidth = max([len(x) for x in labels]+[5]) # 5 is value length
    empty_cell = " " * columnwidth
    # Print header
    print "    " + empty_cell,
    for label in labels:
        print "%{0}s".format(columnwidth) % label,
    print
    # Print rows
    for i, label1 in enumerate(labels):
        print "    %{0}s".format(columnwidth) % label1,
        for j in range(len(labels)):
            cell = "%{0}d".format(columnwidth) % cm[i, j]
            # Optionally blank out cells depending on the hide_* flags.
            if hide_zeroes:
                cell = cell if float(cm[i, j]) != 0 else empty_cell
            if hide_diagonal:
                cell = cell if i != j else empty_cell
            if hide_threshold:
                cell = cell if cm[i, j] > hide_threshold else empty_cell
            print cell,
        print
def print_cm_plot(cm, fileName):
    """Render the confusion matrix *cm* as a row-normalised heatmap with
    per-cell counts and save it to *fileName*.

    NOTE(review): ``plt`` (matplotlib.pyplot) is not imported anywhere in
    this module as shown — confirm it is imported elsewhere. ``xrange``
    makes this function Python 2 only.
    """
    # PLOTTING CONFUSION MATRIX
    # Normalise each row so colours reflect per-class proportions.
    norm_conf = []
    for i in cm:
        a = 0
        tmp_arr = []
        a = sum(i, 0)
        for j in i:
            tmp_arr.append(float(j)/float(a))
        norm_conf.append(tmp_arr)
    fig = plt.figure()
    plt.clf()
    ax = fig.add_subplot(111)
    ax.set_aspect(1)
    res = ax.imshow(np.array(norm_conf), cmap=plt.cm.jet,
                    interpolation='nearest')
    width, height = cm.shape
    # Overlay the raw count on every cell.
    for x in xrange(width):
        for y in xrange(height):
            ax.annotate(str(cm[x][y]), xy=(y, x),
                        horizontalalignment='center',
                        verticalalignment='center')
    cb = fig.colorbar(res)
    alphabet = 'ABCDEFHIKLMNOPQRSTUVWXY'
    plt.xticks(range(width), alphabet[:width])
    plt.yticks(range(height), alphabet[:height])
    plt.savefig(fileName)
992,741 | 976fdcce1a148cb12c13e0e5b04fb287c879c090 | # From SimGeneral/MixingModule/python/mix_2015_25ns_FallMC_matchData_PoissonOOTPU_cfi.py
# Expected pileup probability per number of interactions (bins 0..49),
# taken from the 76X Fall15 MC mixing configuration.
probValue_76X= [
0.000108643,
0.000388957,
0.000332882,
0.00038397,
0.000549167,
0.00105412,
0.00459007,
0.0210314,
0.0573688,
0.103986,
0.142369,
0.157729,
0.147685,
0.121027,
0.08855,
0.0582866,
0.0348526,
0.019457,
0.0107907,
0.00654313,
0.00463195,
0.00370927,
0.0031137,
0.00261141,
0.00215499,
0.00174491,
0.00138268,
0.00106731,
0.000798828,
0.00057785,
0.00040336,
0.00027161,
0.000176535,
0.00011092,
6.75502e-05,
4.00323e-05,
2.32123e-05,
1.32585e-05,
7.51611e-06,
4.25902e-06,
2.42513e-06,
1.39077e-06,
8.02452e-07,
4.64159e-07,
2.67845e-07,
1.5344e-07,
8.68966e-08,
4.84931e-08,
2.6606e-08,
1.433e-08,
]
# From SimGeneral/MixingModule/python/mix_2015_25ns_Startup_PoissonOOTPU_cfi.py
# 74X Startup pileup distribution (bins 0..51); kept for comparison — only
# the 76X list is actually histogrammed below.
probValue_74X = [ 4.8551E-07,
                  1.74806E-06,
                  3.30868E-06,
                  1.62972E-05,
                  4.95667E-05,
                  0.000606966,
                  0.003307249,
                  0.010340741,
                  0.022852296,
                  0.041948781,
                  0.058609363,
                  0.067475755,
                  0.072817826,
                  0.075931405,
                  0.076782504,
                  0.076202319,
                  0.074502547,
                  0.072355135,
                  0.069642102,
                  0.064920999,
                  0.05725576,
                  0.047289348,
                  0.036528446,
                  0.026376131,
                  0.017806872,
                  0.011249422,
                  0.006643385,
                  0.003662904,
                  0.001899681,
                  0.00095614,
                  0.00050028,
                  0.000297353,
                  0.000208717,
                  0.000165856,
                  0.000139974,
                  0.000120481,
                  0.000103826,
                  8.88868E-05,
                  7.53323E-05,
                  6.30863E-05,
                  5.21356E-05,
                  4.24754E-05,
                  3.40876E-05,
                  2.69282E-05,
                  2.09267E-05,
                  1.5989E-05,
                  4.8551E-06,
                  2.42755E-06,
                  4.8551E-07,
                  2.42755E-07,
                  1.21378E-07,
                  4.8551E-08]
import os, sys, ROOT
# Bin centres 0..52; bin i of the histogram gets probValue_76X[i].
probFunctionVariable = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52]
hmc = ROOT.TH1D("pileup","MC pileup",50,0.,50)
# Fill the ROOT histogram and write it to the output file (Python 2 prints).
for ii in range(len(probValue_76X)):
    print ii, " ", probFunctionVariable[ii]+1, " ", probValue_76X[ii]
    hmc.SetBinContent(probFunctionVariable[ii]+1, probValue_76X[ii])
#fout = ROOT.TFile("PUDistMC_2015_25ns_Startup_PoissonOOTPU.root","RECREATE")
fout = ROOT.TFile("PUDistMC_2015_25ns_FallMC_matchData_PoissonOOTPU.root","RECREATE")
fout.cd()
hmc.Write()
fout.Close()
|
992,742 | 4a1d2b2b2fe75d05a80ac3323854382b0068c06e | import smtplib
# Build an RFC-822 style message (headers + body) and send it via Gmail.
from_addr = 'alexthegreatwilliams@gmail.com'
to_addr = 'alexthegreatwilliams@gmail.com'
from_name = 'Alex'
to_name = 'Alex'
subject = 'Wazzup!'
msg = 'Hows it going bruh'
message = """From: {from_name} <{from_addr}>
To: {to_name} <{to_addr}>
Subject: {subject}
{msg}
"""
# Bug fix: the template uses *named* placeholders, but format() was called
# with positional arguments, which raises KeyError('from_name') at runtime.
# Named placeholders must be filled with keyword arguments.
message_to_send = message.format(from_name=from_name, from_addr=from_addr,
                                 to_name=to_name, to_addr=to_addr,
                                 subject=subject, msg=msg)
# Credentials (if needed)
username = 'alexthegreatwilliams@gmail.com'
# SECURITY NOTE(review): a hard-coded password should live in an
# environment variable or secret store, not in source control.
password = '{fvkjyklaojqewpgn}'
# The actual mail send
server = smtplib.SMTP('smtp.gmail.com:587')
server.starttls()
server.login(username, password)
server.sendmail(from_addr, to_addr, message_to_send)
server.quit()
992,743 | a458332d022280ad718edf5bea52f513ab8bfbbc | """update db
Revision ID: 25ac49dfe877
Revises:
Create Date: 2019-03-16 15:24:25.564355
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '25ac49dfe877'   # this migration's id
down_revision = None        # None: first migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: add the non-nullable OCR.character column."""
    character_column = sa.Column('character', sa.String(length=100), nullable=False)
    op.add_column('OCR', character_column)
def downgrade():
    """Revert the migration: drop the OCR.character column again."""
    op.drop_column('OCR', 'character')
|
992,744 | 62027211196641742d1a7290285c4fbf84ab6918 | import requests
from http.server import BaseHTTPRequestHandler,HTTPServer, SimpleHTTPRequestHandler
# POST a sample text to the local /analyze endpoint and echo the reply.
BASE = "http://localhost:8080/analyze"
payload = {"text": "Deep pressure or deep touch pressure is a form of tactile sensory input. This input is most often delivered through firm holding, cuddling, hugging, firm stroking, and squeezing.\n\nHowever, before we get into too much detail about deep touch pressure, we need to understand our body’s sensory system and why deep touch pressure emerged in the first place.\n\nNeurologically, sensory processing is how we feel. Through processing sensory input, we make sense of the world around us. In everything we do, we are receiving sensory messages from both our bodies and the surrounding world."}
response = requests.post(BASE, data=payload)
print(response.text)
|
992,745 | a9a2df6f490be00a97ce33e01d0906aaebd3d37a | import sys
# https://practice.geeksforgeeks.org/problems/sort-an-array-of-0s-1s-and-2s/0
# Given an array A of size N containing 0s, 1s, and 2s; you need to sort the array
# in ascending order.
# returns the number of zeros, ones and twos (results can be constructed using these counts)
# O(n) since we only iterate the array once
def sort_it(arr):
    """Count the 0s, 1s and 2s in *arr* in a single O(n) pass.

    Returns a ``(count0, count1, count2)`` tuple; the sorted array can be
    reconstructed from these counts.  Values other than 0/1/2 are ignored.
    """
    tallies = [0, 0, 0]
    for value in arr:
        if value in (0, 1, 2):
            tallies[value] += 1
    return tuple(tallies)
if __name__ == "__main__":
    # (input, expected-output) pairs for a quick manual check.
    test_inputs = []
    test_inputs.append( ("0 2 1 2 0", "0 0 1 2 2") )
    test_inputs.append( ("0 1 0", "0 0 1") )
    """ Run process on sample inputs
    """
    for inputs, results in test_inputs:
        arr = [int(s) for s in inputs.split()]
        print(arr)
        # Rebuild the sorted string from the three counts and compare by eye.
        n0, n1, n2 = sort_it(arr)
        s = '0 ' * n0 + '1 ' * n1 + '2 ' * n2
        print(s, " expected:", results)
|
992,746 | 73e3f8e147a73a23242d2000e03be6431467d444 | from exchanges.hbg.utils import *
from market.account import *
from market.exchange import *
class HBGSpotAccountMgr(AccountManager):
    """Huobi (HBG) spot-account manager: moves assets between trading
    fields via the exchange's REST API."""
    def __init__(self, account_secret):
        super().__init__(account_secret)
    async def transfer_asset(self, currency, amount, from_field, from_pair, to_field, to_pair):
        """Transfer *amount* of *currency* between fields.

        Only spot<->swap transfers are implemented; any other combination
        raises NotImplementedError.  from_pair/to_pair are accepted but
        unused by the spot<->swap path.
        """
        if from_field == ExchangeField.SWAP or to_field == ExchangeField.SWAP:
            await self.__transfer_between_spot_swap(currency, amount, from_field, to_field)
        else:
            raise NotImplementedError()
    async def __transfer_between_spot_swap(self, currency, amount, from_field, to_field):
        # Signed POST to Huobi's v2 account-transfer endpoint; field names
        # are lower-cased enum values as the API expects.
        uri = 'https://api.huobi.pro/v2/account/transfer'
        kwargs = {
            'from': from_field.value.lower(), 'to': to_field.value.lower(),
            'currency': currency.value, 'amount': amount
        }
        self.logger.info(f'kwargs: {kwargs}')
        response = await hbg_contracts_post(self.account_secret.api_key, self.account_secret.api_secret, uri, kwargs)
        self.logger.info(f'response: {response}')
|
992,747 | 86f46378392e5c1c2cd027114974a00996a23df8 | class Solution:
def isPowerOfTwo(self, n: int) -> bool:
if n<0:
return False
count = 0
for _ in range(32):
if n&1:
count += 1
n = n>>1
return True if count == 1 else False |
992,748 | 29d14dfc7e3ef17096f9305102c6b9e63fd0380f | """
adminrestrict tests
"""
__author__ = "Robert Romano"
__copyright__ = "Copyright 2021 Robert C. Romano"
import logging
import sys
from unittest import skipUnless
from django import VERSION as DJANGO_VERSION
from django.test import TestCase
from django.contrib.auth.models import User
from django.core.management import call_command
# ``reverse`` moved from django.core.urlresolvers to django.urls in
# Django 2.0; support both import locations.
try:
    from django.core.urlresolvers import reverse
except ImportError as e:
    from django.urls import reverse
from adminrestrict.models import AllowedIP
class BasicTests(TestCase):
    def setUp(self):
        # Silence expected error logging and create the login test user
        # shared by all the admin-access tests below.
        logging.disable(logging.ERROR)
        self.user = User.objects.create_user(username="foo", password="bar")
def test_disallow_get(self):
a = AllowedIP.objects.create(ip_address="10.10.0.1")
with self.settings(ADMINRESTRICT_BLOCK_GET=True):
resp = self.client.get("/admin/")
self.assertEqual(resp.status_code, 403)
a.delete()
def test_allow_get_initial_page(self):
a = AllowedIP.objects.create(ip_address="10.10.0.1")
resp = self.client.get("/admin/")
self.assertIn(resp.status_code, [200, 302])
a.delete()
def test_get_redirected(self):
admin_url = reverse('admin:index')
a = AllowedIP.objects.create(ip_address="10.10.0.1")
resp = self.client.get(admin_url)
if DJANGO_VERSION < (1, 7, 0):
self.assertEqual(resp.status_code, 200)
else:
self.assertEqual(resp.status_code, 302)
a.delete()
def test_allow_all_if_empty(self):
resp = self.client.post(
"/admin/", data={'username': "foo", 'password': "bar"})
self.assertIn(resp.status_code, [200, 302])
def test_allowed_ip(self):
a = AllowedIP.objects.create(ip_address="4.4.4.4")
resp = self.client.post("/admin/", data={'username': "foo", 'password': "bar"},
follow=True, REMOTE_ADDR="4.4.4.4")
self.assertEqual(resp.status_code, 200)
a.delete()
def test_allowed_wildcard(self):
a = AllowedIP.objects.create(ip_address="127.0*")
resp = self.client.post(
"/admin/", data={'username': "foo", 'password': "bar"}, follow=True)
self.assertEqual(resp.status_code, 200)
a.delete()
def test_blocked_no_wildcard_match(self):
a = AllowedIP.objects.create(ip_address="16*")
resp = self.client.post(
"/admin/", data={'username': "foo", 'password': "bar"}, follow=True)
self.assertEqual(resp.status_code, 403)
a.delete()
def test_default_denied_msg(self):
DENIED_MSG = b"Access to admin is denied."
a = AllowedIP.objects.create(ip_address="16*")
resp = self.client.post(
"/admin/", data={'username': "foo", 'password': "bar"}, follow=True)
self.assertEqual(resp.status_code, 403)
self.assertEqual(resp.content, DENIED_MSG)
a.delete()
def test_custom_denied_msg(self):
DENIED_MSG = b"denied!"
a = AllowedIP.objects.create(ip_address="16*")
with self.settings(ADMINRESTRICT_DENIED_MSG=DENIED_MSG):
resp = self.client.post(
"/admin/", data={'username': "foo", 'password': "bar"}, follow=True)
self.assertEqual(resp.status_code, 403)
self.assertEqual(resp.content, DENIED_MSG)
a.delete()
def test_allow_all(self):
a = AllowedIP.objects.create(ip_address="*")
resp = self.client.post(
"/admin/", data={'username': "foo", 'password': "bar"}, follow=True)
self.assertEqual(resp.status_code, 200)
a.delete()
@skipUnless(sys.version_info > (3, 0), "Python3 only")
def test_allowed_cidr_range(self):
a = AllowedIP.objects.create(ip_address="127.0.0.0/24")
resp = self.client.post(
"/admin/", data={'username': "foo", 'password': "bar"}, follow=True)
self.assertEqual(resp.status_code, 200)
a.delete()
@skipUnless(sys.version_info > (3, 0), "Python3 only")
def test_bad_cidr_range(self):
a = AllowedIP.objects.create(ip_address="127.0.0.0/9100")
resp = self.client.post(
"/admin/", data={'username': "foo", 'password': "bar"}, follow=True)
self.assertEqual(resp.status_code, 403)
a.delete()
def test_allow_private_ip(self):
a = AllowedIP.objects.create(ip_address="8.8.8.8")
with self.settings(ADMINRESTRICT_ALLOW_PRIVATE_IP=True):
resp = self.client.post("/admin/", data={'username': "foo", 'password': "bar"},
follow=True, REMOTE_ADDR="192.168.1.1")
self.assertEqual(resp.status_code, 200)
a.delete()
def test_disallow_custom_private_ip(self):
a = AllowedIP.objects.create(ip_address="8.8.8.8")
with self.settings(ADMINRESTRICT_PRIVATE_IP_PREFIXES=('11.', '172.', '192.', '127.')):
resp = self.client.post("/admin/", data={'username': "foo", 'password': "bar"},
follow=True, REMOTE_ADDR="11.0.0.1")
self.assertEqual(resp.status_code, 403)
a.delete()
def test_allow_custom_private_ip(self):
a = AllowedIP.objects.create(ip_address="10.10.0.1")
with self.settings(ADMINRESTRICT_PRIVATE_IP_PREFIXES=('11.', '172.', '192.', '127.')):
with self.settings(ADMINRESTRICT_ALLOW_PRIVATE_IP=True):
resp = self.client.post("/admin/", data={'username': "foo", 'password': "bar"},
follow=True, HTTP_X_FORWARDED_FOR="11.0.0.1,10.10.1.1")
self.assertEqual(resp.status_code, 200)
a.delete()
def test_allow_domain_lookup(self):
a = AllowedIP.objects.create(ip_address="ns4.zdns.google.")
resp = self.client.post("/admin/", data={'username': "foo", 'password': "bar"},
follow=True, REMOTE_ADDR="216.239.38.114")
self.assertEqual(resp.status_code, 200)
a.delete()
def test_allow_deny_ip_using_cache(self):
with self.settings(ADMINRESTRICT_ENABLE_CACHE=True):
a = AllowedIP.objects.create(ip_address="8.8.8.8")
resp = self.client.post(
"/admin/", data={'username': "foo", 'password': "bar"}, follow=True)
self.assertEqual(resp.status_code, 403)
a.delete()
a = AllowedIP.objects.create(ip_address="*")
resp = self.client.post(
"/admin/", data={'username': "foo", 'password': "bar"}, follow=True)
self.assertEqual(resp.status_code, 200)
a.delete()
a = AllowedIP.objects.create(ip_address="127*")
resp = self.client.post(
"/admin/", data={'username': "foo", 'password': "bar"}, follow=True)
self.assertEqual(resp.status_code, 200)
a.delete()
a = AllowedIP.objects.create(ip_address="8.*")
resp = self.client.post(
"/admin/", data={'username': "foo", 'password': "bar"}, follow=True)
self.assertEqual(resp.status_code, 403)
def test_add_first_restriction(self):
resp = self.client.post(
"/admin/", data={'username': "foo", 'password': "bar"}, follow=True)
self.assertEqual(resp.status_code, 200)
AllowedIP.objects.create(ip_address="8.8.8.8")
resp = self.client.post(
"/admin/", data={'username': "foo", 'password': "bar"}, follow=True)
self.assertEqual(resp.status_code, 403)
def test_combined(self):
AllowedIP.objects.create(ip_address="4.4.4.4")
AllowedIP.objects.create(ip_address="a*")
AllowedIP.objects.create(ip_address="168*")
AllowedIP.objects.create(ip_address="ns4.zdns.google.")
resp = self.client.post("/admin/", data={'username': "foo", 'password': "bar"},
follow=True, REMOTE_ADDR="4.4.4.4")
self.assertEqual(resp.status_code, 200)
resp = self.client.post("/admin/", data={'username': "foo", 'password': "bar"},
follow=True, REMOTE_ADDR="168.0.0.1")
self.assertEqual(resp.status_code, 200)
resp = self.client.post("/admin/", data={'username': "foo", 'password': "bar"},
follow=True, REMOTE_ADDR="8.8.8.8")
self.assertEqual(resp.status_code, 403)
resp = self.client.post("/admin/", data={'username': "foo", 'password': "bar"},
follow=True, REMOTE_ADDR="216.239.38.114")
self.assertEqual(resp.status_code, 200)
AllowedIP.objects.all().delete()
def test_ip6(self):
AllowedIP.objects.create(ip_address="::1")
AllowedIP.objects.create(
ip_address="2001:0db8:85a3:0000:0000:8a2e:0370:7334")
resp = self.client.post("/admin/", data={'username': "foo", 'password': "bar"},
follow=True, REMOTE_ADDR="::1")
self.assertEqual(resp.status_code, 200)
resp = self.client.post("/admin/", data={'username': "foo", 'password': "bar"},
follow=True, REMOTE_ADDR="2001:0db8:85a3:0000:0000:8a2e:0370:7334")
self.assertEqual(resp.status_code, 200)
resp = self.client.post("/admin/", data={'username': "foo", 'password': "bar"},
follow=True, REMOTE_ADDR="2001:0db8:85a4:0000:0000:8a2e:0370:7334")
self.assertEqual(resp.status_code, 403)
def test_ip6_cidr(self):
AllowedIP.objects.create(ip_address="2001:0db8:85a3:0000::/64")
resp = self.client.post("/admin/", data={'username': "foo", 'password': "bar"},
follow=True, REMOTE_ADDR="2001:0db8:85a3:0000:0000:8a2e:0370:7334")
self.assertEqual(resp.status_code, 200)
resp = self.client.post("/admin/", data={'username': "foo", 'password': "bar"},
follow=True, REMOTE_ADDR="2001:0db8:85a4:0000:0000:8a2e:0370:7334")
self.assertEqual(resp.status_code, 403)
async def test_async_middleware(self):
resp = await self.async_client.get("/admin/")
self.assertIn(resp.status_code, [200, 302])
class ManagementTests(TestCase):
    """Tests for the addadminip / removeadminip management commands."""
    def setUp(self):
        # Silence expected "access denied" log output during the tests.
        logging.disable(logging.ERROR)
    def test_allow_command(self):
        # addadminip must create the row; the test client's 127.0.0.1 is
        # then no longer whitelisted, hence the 403.
        self.assertFalse(AllowedIP.objects.filter(
            ip_address='10.10.10.1').exists())
        call_command('addadminip', '10.10.10.1')
        self.assertTrue(AllowedIP.objects.filter(
            ip_address='10.10.10.1').exists())
        resp = self.client.post("/admin/")
        self.assertEqual(resp.status_code, 403)
    def test_remove_command(self):
        # removeadminip deletes only the named row; the remaining 4.4.4.4
        # entry keeps blocking the test client.
        AllowedIP.objects.create(ip_address="4.4.4.4")
        AllowedIP.objects.create(ip_address="10.10.10.1")
        self.assertTrue(AllowedIP.objects.filter(
            ip_address='10.10.10.1').exists())
        call_command('removeadminip', '10.10.10.1')
        self.assertFalse(AllowedIP.objects.filter(
            ip_address='10.10.10.1').exists())
        resp = self.client.post("/admin/")
        self.assertEqual(resp.status_code, 403)
|
992,749 | 6230ffd9d76da617fb361f7f4d4e577eb07491e6 | # area calculation
import math
print('==============================================================')
def calculateCircleArea():
    """Prompt for an integer circle radius and print the resulting area."""
    raw = input('Please enter radious of the circle to calculate area: ')
    if not raw.isdigit():
        print('Invalid radious value')
        return
    radious = int(raw)
    area = math.pi * radious ** 2
    print(f'The area of the circle having radious {radious} unit is {area} unit^2')
def calculateRectangleArea():
    """Prompt for rectangle side lengths (integers) and print the area."""
    x = input('Enter width of X side of the rectangle: ')
    y = input('Enter width of Y side of the rectangle: ')
    # Bug fix: the original tested ``y.isdigit`` (the bound method, which is
    # always truthy) instead of calling it, so a non-numeric Y crashed on
    # ``int(y)`` instead of printing the error message.
    if x.isdigit() and y.isdigit():
        area = int(x) * int(y)
        print(f'Area of the rectangle having height {x} unit and width {y} unit is {area} unit^2')
    else:
        print('Invalid data entered')
def calculateTraingleArea():
    """Prompt for a triangle's height and base (integers) and print the area."""
    h = input('Enter height of traiangle: ')
    y = input('Enter base of traiangle: ')
    # Bug fix: ``y.isdigit`` was never called (a bound method is always
    # truthy), so non-numeric input crashed on ``int(y)``.
    if h.isdigit() and y.isdigit():
        area = (int(h) * int(y)) / 2
        print(f'Area of the traingle having height {h} unit and base {y} unit is {area} unit^2')
    else:
        print('Invalid data entered')
def calculateArea(choosenFigure=''):
    """Dispatch to the area routine matching *choosenFigure* (case-insensitive)."""
    handlers = {
        'circle': calculateCircleArea,
        'rectangle': calculateRectangleArea,
        'traiangle': calculateTraingleArea,
    }
    handler = handlers.get(choosenFigure.lower())
    if handler is None:
        print('Wrong input')
    else:
        handler()
def main():
    """Print the menu, read the user's choice from stdin and dispatch."""
    print("""
    Choose what you want to do
    1. type 1+Enter to calculate area of a circle
    2. type 2+Enter to calculate area of a traiangle
    3. type 3+Enter to calculate area of a rectangle
    """)
    choice = input()
    figures = {'1': 'circle', '2': 'traiangle', '3': 'rectangle'}
    if choice.isdigit() and choice in figures:
        calculateArea(figures[choice])
    else:
        # Covers both non-digit input and digits outside 1-3.
        print('Invalid input')
main()
print('==============================================================')
|
992,750 | 4899d8ee22767c94a7795213ce45b9092d532e54 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
_ver = sys.version_info
py3_or_upper = (_ver[0] > 2)
#---------------------------------import---------------------------------------
if py3_or_upper:
from urllib.parse import urlencode, quote_plus, parse_qsl, urlsplit, urlparse
from urllib.request import Request, urlopen
str = str
basestring = (str, bytes)
numeric_types = (int, float)
else:
from urllib import urlencode, quote_plus
from urlparse import parse_qsl, urlsplit, urlparse
from urllib2 import Request, urlopen
str = unicode
basestring = basestring
numeric_types = (int, long, float)
import re
import os
from io import BytesIO
from settings import *
#------------------------------------------------------------------------------
def postBingImageToWeibo(prefix, userUrl):
    # Convenience wrapper: any truthy third argument selects the Weibo path
    # in _postBingImageToWebsite (the string 'Weibo' is used only as a flag).
    _postBingImageToWebsite(prefix, userUrl, 'Weibo')
def _postBingImageToWebsite(prefix, userUrl, weibo):
    """Scrape the Bing page at *userUrl* and post its wallpaper image.

    prefix  - text appended after the scraped caption (hashtags etc.)
    userUrl - Bing homepage URL to scrape
    weibo   - truthy: post via weibo_tiny; falsy: post via tweetpost
    """
    respHtml = getWebContents(userUrl)
    respHtml = str(respHtml, "utf-8")
    #print respHtml
    # Try the modern markup first, then the legacy JSON "copyright" field.
    title = extractTitle(respHtml)
    if title == None:
        title = extractTitle2(respHtml)
    #print(u"title : %s" % title)
    imageUrl = extractImageUrl(respHtml)
    #print(u"imageUrl : %s" % imageUrl)
    imgData = None
    if imageUrl:
        parseResult = urlparse(imageUrl)
        if len(parseResult.netloc) == 0:
            # Site-relative URL: resolve it against the scraped page's host.
            parseResult = urlparse(userUrl)
            slash = u'/'
            if imageUrl[0] == u'/':
                slash = u''
            imageUrl = parseResult.scheme + '://' + parseResult.netloc + slash + imageUrl
        imgData = downloadImage(imageUrl)
    if imgData and title:
        # Local save path for the downloaded image (disabled):
        #save_path = '/path/path'
        #fileName = save_path + "/ddd.jpg"
        #writeDataToFile(imgData, fileName)
        picFile = BytesIO(imgData)
        info = title + "\n" + prefix
        if weibo:
            from weibo_tiny import Client
            client = Client(APP_KEY, APP_SECRET, REDIRECT_URL, username=USERNAME, password=PASSWORD)
            client.post('statuses/upload', status=info, pic=picFile)
        else:
            # Twitter caps the status length; truncate before posting.
            maxTweetLen = 110
            if len(info) > maxTweetLen:
                info = info[0 : maxTweetLen]
            from tweetpost import postTweet
            postTweet(info, picFile)
def postBingImageToTwitter(prefix, userUrl):
_postBingImageToWebsite(prefix, userUrl, None)
def getWebContents(targetUrl):
    """Fetch *targetUrl* and return the raw response body as bytes."""
    response = urlopen(Request(targetUrl))
    return response.read()
# id="sh_cp" class="sc_light" title="卡尔斯岛上的Kallur灯塔,法罗群岛 (© Janne Kahila/500px)"
def extractTitle(respHtml):
    """Extract the wallpaper caption from the Bing page markup.

    Tries the modern "headline" element first, then the legacy sh_cp
    anchor's title attribute.  Returns the caption text, or None when
    neither pattern matches.
    """
    prefix = 'class="text" id="headline">'
    match = re.search(r'class\="text"\s+id\="headline">[^<]+<', respHtml)
    if match is None:
        prefix = '<a id="sh_cp" class="sc_light" title="'
        match = re.search(r'<a\s+id\="sh_cp"\s+class\="sc_light"\s+title\="[^"]+"', respHtml)
    if not match:
        return match
    # Strip the fixed prefix and the single trailing delimiter character.
    text = match.group()
    return text[len(prefix):-1]
# "copyright":"Kuha Karuhas pavilion in Phraya Nakhon Cave, Thailand (© Bule Sky Studio/Shutterstock)"
def extractTitle2(respHtml):
    """Extract the caption from the JSON-style "copyright" field, or None."""
    match = re.search(r'"copyright":"[^\"]+"', respHtml)
    if not match:
        return match
    text = match.group()
    # Drop the fixed key prefix and the closing quote.
    return text[len('"copyright":"'):-1]
def extractImageUrl(respHtml):
    """Extract the wallpaper image URL from the Bing homepage markup.

    Tries the inline ``background-image: url(...)`` style first, then the
    ``id="bgLink" rel="preload" href="..."`` element.  Returns the URL
    (possibly site-relative), '' when the div matched but held no URL, or
    None when nothing matched at all.
    """
    imageRE = r'<div\s+class\="img_cont"\s+style\="background\-image\:\s+url\([^\)]+\)'
    result = re.search(imageRE, respHtml)
    if result:
        result = result.group()
        begin = result.find(r'(')
        if begin >= 0:
            end = result.find(r')', begin + 1)
            result = result[(begin + 1) : end]
        else:
            result = ''
    else:
        imageRE = r'id\="bgLink"\s+rel\="preload"\s+href\="[^"]+"'
        result = re.search(imageRE, respHtml)
        if result:
            result = result.group()
            pat = 'id="bgLink" rel="preload" href="'
            # Bug fix: the URL starts right after the opening quote, i.e. at
            # index len(pat).  The original sliced from begin+1 and silently
            # dropped the first character of every bgLink URL.
            begin = len(pat)
            end = result.find(r'"', begin)
            result = result[begin:end]
    return result
def downloadImage(imageUrl):
    """Download *imageUrl* and return its bytes, or None on any failure."""
    try:
        return urlopen(imageUrl).read()
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; any fetch/parse error still yields None.
        return None
def writeDataToFile(binData, localFile):
    """Write *binData* (bytes) to *localFile*, truncating existing content.

    Fixes a resource leak: the original left the handle unclosed if
    ``write`` raised, and its ``if f:`` guard was dead code because
    ``open`` raises on failure instead of returning a falsy object.
    The '+' (read) flag was also dropped since the file is only written.
    """
    with open(localFile, 'wb') as f:
        f.write(binData)
def getBingImage(userUrl):
    """Fetch the Bing page at *userUrl* and return the wallpaper image bytes.

    Returns None when no image URL is found in the page or the download
    fails.  Mirrors the URL-resolution logic of _postBingImageToWebsite.
    """
    respHtml = getWebContents(userUrl)
    respHtml = str(respHtml, "utf-8")
    imageUrl = extractImageUrl(respHtml)
    imgData = None
    if imageUrl:
        parseResult = urlparse(imageUrl)
        if len(parseResult.netloc) == 0:
            # Site-relative URL: resolve it against the page's own host.
            parseResult = urlparse(userUrl)
            slash = u'/'
            if imageUrl[0] == u'/':
                slash = u''
            imageUrl = parseResult.scheme + '://' + parseResult.netloc + slash + imageUrl
        imgData = downloadImage(imageUrl)
    return imgData
###############################################################################
if __name__=="__main__":
    # Post today's Bing wallpaper to Twitter: first the global edition,
    # then (after a short pause to avoid rate issues) the Chinese edition.
    prefix = u"Today's pretty #wallpaper # #photo # on #Bing #."
    userUrl = BING_GLOBAL
    postBingImageToTwitter(prefix, userUrl)
    import time
    time.sleep(5)
    prefix = u'每日 #必应美图 # #壁纸 #。'
    userUrl = BING_CHINA
    postBingImageToTwitter(prefix, userUrl)
|
992,751 | b506a8554199526897df902a72487f9860afe9e9 | import unittest
import json
from praat import app
class TestSoundOps(unittest.TestCase):
    """Integration tests for the praat Flask app's sound endpoints.

    Fix: replaced the deprecated ``assertEquals`` alias (removed in
    Python 3.12) with ``assertEqual``.
    """
    def setUp(self):
        app.config['TESTING'] = True
        app.config['CSRF_ENABLED'] = False
        self.app = app.test_client()
    def test_drawSound(self):
        # Rendering a sound region should produce a PNG image.
        result = self.app.get("/draw-sound/Part1.mp3/0/4/?pitch&pulse&formants&spectrogram&pulses")
        self.assertEqual(result.content_type, "image/png")
    def test_getBounds(self):
        result = self.app.get("/get-bounds/Part1.mp3")
        # Load json string as a dictionary
        bounds = json.loads(result.data)
        self.assertEqual(bounds["start"], 0.0)
        self.assertEqual(bounds["end"], 25.037256235827666)
    def test_getEnergy(self):
        result = self.app.get("/get-energy/Part1.mp3")
        assert "0.07201807012373347 Pa2 sec" in result.data
    def test_playSound(self):
        result = self.app.get("/play/sp1.wav")
        # Check if file being downloaded is a wav audio
        self.assertEqual(result.content_type, "audio/wav")
        result = self.app.get("/play/Part1.mp3")
        # Check if file being downloaded is an mp3 audio
        self.assertEqual(result.content_type, "audio/mp3")
    def test_drawElan(self):
        result = self.app.get("/draw-elan/sp1.wav/1/4/")
        self.assertEqual(result.content_type, "image/png")
    def test_annotationTimeSelection(self):
        result = self.app.get("/annotation/time-selection/sp1.wav/TimeSubdivision-lt/1/4/tetst/test2/test3")
        data = str.strip(result.data)
        self.assertEqual(data, "sp1")
|
992,752 | de32caa9819f9fbe47e3437207b7b3e718b2ef54 | from pycoin.encoding import hash160_sec_to_bitcoin_address
from services.chroma import ChromaBlockchainState
from coloredcoinlib import (BlockchainState, ColorDataBuilderManager,
AidedColorDataBuilder,
FullScanColorDataBuilder, DataStoreConnection,
ColorDataStore, ColorMetaStore, ColorMap,
ThickColorData, ThinColorData)
from services.electrum import EnhancedBlockchainState
class ColoredCoinContext(object):
    """Interface to the Colored Coin Library's various offerings.
    Specifically, this object provides access to a storage mechanism
    (store_conn, cdstore, metastore), the color mapping (colormap)
    and color data (Thick Color Data)
    """
    def __init__(self, config):
        """Creates a Colored Coin Context given a config <config>

        Recognised config['ccc'] options:
          thin           - use thin (aided) color data; default True
          use_bitcoind   - force a local bitcoind even in thin mode
          chromanode_url - override the default chromanode endpoint
          colordb_path   - sqlite path for the color database
        """
        params = config.get('ccc', {})
        thin = params.get('thin', True)
        self.testnet = config.get('testnet', False)
        # Thin wallets use aided (on-demand) color data; thick wallets
        # build color data from a full blockchain scan.
        if thin:
            color_data_class = ThinColorData
            color_data_builder = AidedColorDataBuilder
        else:
            color_data_class = ThickColorData
            color_data_builder = FullScanColorDataBuilder
        if thin and not params.get('use_bitcoind', False):
            chromanode_url = params.get('chromanode_url', None)
            if not chromanode_url:
                # Default public chromanode endpoints (testnet / mainnet).
                if self.testnet:
                    chromanode_url = "http://chromanode-tn.bitcontracts.org"
                else:
                    chromanode_url = "http://chromanode.bitcontracts.org"
            self.blockchain_state = ChromaBlockchainState(
                chromanode_url,
                self.testnet)
        else:
            self.blockchain_state = BlockchainState.from_url(
                None, self.testnet)
            if not thin and not self.testnet:
                try:
                    # try fetching transaction from the second block of
                    # the bitcoin blockchain to see whether txindex works
                    self.blockchain_state.bitcoind.getrawtransaction(
                        "9b0fc92260312ce44e74ef369f5c66bbb85848f2eddd5"
                        "a7a1cde251e54ccfdd5")
                except Exception as e:
                    # use Electrum to request transactions
                    self.blockchain_state = EnhancedBlockchainState(
                        "electrum.cafebitcoin.com", 50001)
        # Shared sqlite connection backing both the color-data and the
        # color-metadata stores.
        self.store_conn = DataStoreConnection(
            params.get("colordb_path", "color.db"))
        self.cdstore = ColorDataStore(self.store_conn.conn)
        self.metastore = ColorMetaStore(self.store_conn.conn)
        self.colormap = ColorMap(self.metastore)
        cdbuilder = ColorDataBuilderManager(
            self.colormap, self.blockchain_state, self.cdstore,
            self.metastore, color_data_builder)
        self.colordata = color_data_class(
            cdbuilder, self.blockchain_state, self.cdstore, self.colormap)
    def raw_to_address(self, raw_address):
        # Convert a raw hash160 into a (testnet-aware) base58 address.
        return hash160_sec_to_bitcoin_address(raw_address,
                                              is_test=self.testnet)
|
992,753 | 6660738359d7c620e158a92fb0130303e1fe83ee | # Key Bindings
# 8 - 56
# 6 - 54
# 4 - 52
# 2 - 50
# + - 43
# - - 45
# * - 42
# / - 47
from onvif import ONVIFCamera
from msvcrt import getch
# Create class for moving PTZ Cam in ContiniousMode using NumPad bindings
class my_ptz():
    """ONVIF PTZ camera helper: continuous pan/tilt/zoom commands.

    NOTE(review): the camera services and request templates live in
    module-level globals (ptz, req_move, req_stop, req_goto_home), so only
    one camera can be driven per process.
    """
    def initialize(self, ip, port, username, password):
        # Connect to the camera and build the Media and PTZ services.
        global ptz
        print 'IP Camera initialization...'
        mycam = ONVIFCamera(ip, port, username, password)
        print 'Connected to ONVIF Camera ' + ip
        print 'Creating Media service...'
        media = mycam.create_media_service()
        print 'Creating Media service... Done.'
        # Use the first media profile's token for all PTZ requests.
        token = media.GetProfiles()[0]._token
        print 'Creating PTZ service...'
        ptz = mycam.create_ptz_service()
        print 'Creating PTZ service... Done.'
        self.define_requests(token)
    # Define necessary requests
    def define_requests(self, token):
        # Pre-build the request objects reused by every movement command.
        print 'Defining Requests types...'
        global req_move, req_stop, req_goto_home
        req_move = ptz.create_type('ContinuousMove')
        req_move.ProfileToken = token
        req_stop = ptz.create_type('Stop')
        req_stop.ProfileToken = token
        req_goto_home = ptz.create_type('GotoHomePosition')
        req_goto_home.ProfileToken = token
        print 'Defining Requests types... Done.'
        print 'IP Camera initialization... Done.'
    def stop(self):
        # Halt any continuous movement in progress.
        ptz.Stop(req_stop)
    def move_left(self, speed=0.5):
        # Each move_* issues a ContinuousMove and stops immediately,
        # producing one short step per keypress - presumably intentional.
        req_move.Velocity.Zoom._x = 0.0
        req_move.Velocity.PanTilt._x = -speed
        req_move.Velocity.PanTilt._y = 0.0
        ptz.ContinuousMove(req_move)
        self.stop()
    def move_right(self, speed=0.5):
        req_move.Velocity.Zoom._x = 0.0
        req_move.Velocity.PanTilt._x = speed
        req_move.Velocity.PanTilt._y = 0.0
        ptz.ContinuousMove(req_move)
        self.stop()
    def move_down(self, speed=0.5):
        req_move.Velocity.Zoom._x = 0.0
        req_move.Velocity.PanTilt._x = 0.0
        req_move.Velocity.PanTilt._y = -speed
        ptz.ContinuousMove(req_move)
        self.stop()
    def move_up(self, speed=0.5):
        req_move.Velocity.Zoom._x = 0.0
        req_move.Velocity.PanTilt._x = 0.0
        req_move.Velocity.PanTilt._y = speed
        ptz.ContinuousMove(req_move)
        self.stop()
    def move_right_up(self, speed=0.5):
        req_move.Velocity.Zoom._x = 0.0
        req_move.Velocity.PanTilt._x = speed
        req_move.Velocity.PanTilt._y = speed
        ptz.ContinuousMove(req_move)
        self.stop()
    def move_left_up(self, speed=0.5):
        req_move.Velocity.Zoom._x = 0.0
        req_move.Velocity.PanTilt._x = -speed
        req_move.Velocity.PanTilt._y = speed
        ptz.ContinuousMove(req_move)
        self.stop()
    def move_right_down(self, speed=0.5):
        req_move.Velocity.Zoom._x = 0.0
        req_move.Velocity.PanTilt._x = speed
        req_move.Velocity.PanTilt._y = -speed
        ptz.ContinuousMove(req_move)
        self.stop()
    def move_left_down(self, speed=0.5):
        req_move.Velocity.Zoom._x = 0.0
        req_move.Velocity.PanTilt._x = -speed
        req_move.Velocity.PanTilt._y = -speed
        ptz.ContinuousMove(req_move)
        self.stop()
    def move_home(self):
        # Return to the camera's configured home preset.
        ptz.GotoHomePosition(req_goto_home)
    def zoom_in(self, speed=0.5):
        # Unlike the move_* commands, zooming stops first and then keeps
        # zooming until another command stops it.
        self.stop()
        req_move.Velocity.PanTilt._x = 0.0
        req_move.Velocity.PanTilt._y = 0.0
        req_move.Velocity.Zoom._x = speed
        ptz.ContinuousMove(req_move)
    def zoom_out(self, speed=0.5):
        self.stop()
        req_move.Velocity.PanTilt._x = 0.0
        req_move.Velocity.PanTilt._y = 0.0
        req_move.Velocity.Zoom._x = -speed
        ptz.ContinuousMove(req_move)
# Initialize cam, and create an infinite loop to manage Cam actions
cam = my_ptz()
cam.initialize('192.168.11.23', 80, 'admin', 'Supervisor')
# Numpad-driven control loop (bindings listed in the header comments):
# one keypress = one action; ESC (27) exits.
while True:
    key = ord(getch())
    speed = 1
    if key == 27: #ESC
        break
    elif key == 56: #Up
        print "Move Up..."
        cam.move_up(speed)
    elif key == 50: #Down
        print "Move Down..."
        cam.move_down(speed)
    elif key == 54: #Right
        print "Move Right..."
        cam.move_right(speed)
    elif key == 52: #Left
        print "Move Left..."
        cam.move_left(speed)
    elif key == 53: #Home
        print "Go to Home Position..."
        cam.move_home()
    elif key == 57: #Right Up
        print "Move Right Up..."
        cam.move_right_up(speed)
    elif key == 55: #Left Up
        print "Move Left Up..."
        cam.move_left_up(speed)
    elif key == 49: #Left Down
        print "Move Left Down..."
        cam.move_left_down(speed)
    elif key == 51: #Right Down
        print "Move Right Down..."
        cam.move_right_down(speed)
    elif key == 43: #Plus(+)
        print "Zoom in..."
        cam.zoom_in(speed)
    elif key == 45: #Minus(-)
        print "Zoom out..."
        cam.zoom_out(speed)
|
992,754 | b2bbd2f4caf39a915c1a2223b13cf94257e128e0 | import sys
from PyQt5.QtWidgets import *
import win32com.client
import ctypes
import pandas as pd
import os
################################################
# PLUS 공통 OBJECT
g_objCodeMgr = win32com.client.Dispatch('CpUtil.CpCodeMgr')
g_objCpStatus = win32com.client.Dispatch('CpUtil.CpCybos')
g_objCpTrade = win32com.client.Dispatch('CpTrade.CpTdUtil')
################################################
# PLUS 실행 기본 체크 함수
def InitPlusCheck():
    """Sanity-check the CREON PLUS environment; return True when usable."""
    # PLUS requires the process to run with administrator rights.
    if ctypes.windll.shell32.IsUserAnAdmin():
        print('정상: 관리자권한으로 실행된 프로세스입니다.')
    else:
        print('오류: 일반권한으로 실행됨. 관리자 권한으로 실행해 주세요')
        return False
    # Verify the connection to the PLUS server.
    if (g_objCpStatus.IsConnect == 0):
        print("PLUS가 정상적으로 연결되지 않음. ")
        return False
    # Trading initialisation - only needed when account-related code is used:
    # if (g_objCpTrade.TradeInit(0) != 0):
    #     print("주문 초기화 실패")
    #     return False
    return True
class CpMarketEye:
    """Wraps the CpSysDib.MarketEye COM object: fetches snapshot fields
    for up to 200 stock codes per request and fills a caller dict."""
    def __init__(self):
        self.objRq = win32com.client.Dispatch("CpSysDib.MarketEye")
        self.RpFiledIndex = 0
    def Request(self, codes, dataInfo):
        """Fetch the snapshot for *codes* and store results in *dataInfo*.

        dataInfo[code] = (cur, listedStock, marketCap, foreign_rate,
                          trading_volume, per, ratio_gain) - indices 0..6.
        Returns False on a communication error, True otherwise.
        """
        # Requested fields: 0 code, 4 current price, 20 listed shares,
        # 21 foreign ownership ratio, 10 trading volume, 67 PER,
        # 92 operating margin.
        rqField = [0, 4, 20, 21, 10, 67, 92]  # requested field list
        self.objRq.SetInputValue(0, rqField)  # field selection
        self.objRq.SetInputValue(1, codes)  # single code or list of codes
        self.objRq.BlockRequest()
        # Check the communication status and bail out on errors.
        rqStatus = self.objRq.GetDibStatus()
        print("통신상태", rqStatus, self.objRq.GetDibMsg1())
        if rqStatus != 0:
            return False
        cnt = self.objRq.GetHeaderValue(2)
        for i in range(cnt):
            code = self.objRq.GetDataValue(0, i)  # stock code
            cur = self.objRq.GetDataValue(1, i)  # current price
            listedStock = self.objRq.GetDataValue(2, i)  # listed share count
            foreign_rate = self.objRq.GetDataValue(3,i)  # foreign ownership ratio
            trading_volume = self.objRq.GetDataValue(4,i)  # trading volume
            per = self.objRq.GetDataValue(5,i)  # PER
            ratio_gain = self.objRq.GetDataValue(6,i)  # operating margin
            maketAmt = listedStock * cur
            if g_objCodeMgr.IsBigListingStock(code):
                # Presumably "big listing" stocks report the share count in
                # units of 1000 - TODO confirm against the CREON docs.
                maketAmt *= 1000
            # print(code, maketAmt)
            # Tuple layout (indices 0..6):
            # (cur, listedStock, marketCap, foreign_rate, trading_volume,
            #  per, ratio_gain).  NOTE: the original comment here omitted
            # ``cur`` at index 0, which made the indices look shifted.
            dataInfo[code] = (cur,listedStock, maketAmt,foreign_rate,trading_volume,per,ratio_gain)
        return True
class top_10_calculate():
    """Collects a market-wide snapshot and produces top-10 rankings.

    ``self.dataInfo`` maps stock code -> tuple
    (cur, listedStock, marketCap, foreign_rate, trading_volume, per,
    ratio_gain), i.e. indices 0..6 - filled by CpMarketEye.Request.
    """
    def __init__(self):
        self.dataInfo = {}
    def GetAllMarketTotal(self):
        """Request snapshot fields for every KOSPI/KOSDAQ stock, 200 per call."""
        codeList = g_objCodeMgr.GetStockListByMarket(1)  # KOSPI
        codeList2 = g_objCodeMgr.GetStockListByMarket(2)  # KOSDAQ
        allcodelist = codeList + codeList2
        print('전 종목 코드 %d, 거래소 %d, 코스닥 %d' % (len(allcodelist), len(codeList), len(codeList2)))
        objMarket = CpMarketEye()
        rqCodeList = []
        for code in allcodelist:
            rqCodeList.append(code)
            # MarketEye accepts at most 200 codes per request.
            if len(rqCodeList) == 200:
                objMarket.Request(rqCodeList, self.dataInfo)
                rqCodeList = []
        if rqCodeList:
            objMarket.Request(rqCodeList, self.dataInfo)
    def Market_TOP10(self):
        """Return the 10 largest stocks by market capitalisation (index 2)."""
        ranked = sorted(self.dataInfo.items(), key=lambda x: x[1][2], reverse=True)
        top_10_data = []
        for code, info in ranked[:10]:
            top_10_data.append({
                '이름': g_objCodeMgr.CodeToName(code),
                '현재가': info[0],
                '상장주식수': info[1],
                '시가총액': info[2],
            })
        return top_10_data
    def Foreign_TOP10(self):
        """Return the 10 stocks with the highest foreign ownership ratio.

        Bug fix: the foreign ownership ratio lives at tuple index 3; the
        original sorted and reported index 4, which is the trading volume,
        so both the ranking and the reported value were wrong.
        """
        ranked = sorted(self.dataInfo.items(), key=lambda x: x[1][3], reverse=True)
        top_10_data = []
        for code, info in ranked[:10]:
            top_10_data.append({
                '이름': g_objCodeMgr.CodeToName(code),
                '현재가': info[0],
                '외국인보유비율': info[3],
            })
        return top_10_data
    def Gain_TOP10(self):
        """Return the 10 stocks with the highest operating margin (index 6)."""
        ranked = sorted(self.dataInfo.items(), key=lambda x: x[1][6], reverse=True)
        top_10_data = []
        for code, info in ranked[:10]:
            top_10_data.append({
                '이름': g_objCodeMgr.CodeToName(code),
                '현재가': info[0],
                '영업이익률': info[6],
            })
        return top_10_data
if __name__ == "__main__":
# 시가총액 top 10
objMarketTotal = top_10_calculate()
objMarketTotal.GetAllMarketTotal()
top_10 = objMarketTotal.Market_TOP10() #시가총액 상위 10개의 기업 리스트 출력 (각 리스트에는 이름, 상장주식수, 시가총액)
print(top_10)
|
992,755 | aa45d79dc544531f8612aee2eea03ecc97547583 | #!/bin/python3
import sys
input = sys.stdin.readline
def main():
    """Read one token from stdin and print 'Yes' when characters at even
    positions are lowercase and at odd positions uppercase, else 'No'.

    Exits the process on the first violating character (note: ``input`` is
    rebound to sys.stdin.readline at module level)."""
    token = input().strip().split()[0]
    for i, ch in enumerate(token):
        # Even index must not be uppercase-equal; odd must not be lowercase-equal.
        bad = (ch == ch.upper()) if i % 2 == 0 else (ch == ch.lower())
        if bad:
            print('No')
            sys.exit()
    print('Yes')
main()
|
992,756 | 7e3b58c0cfdda4cab1846489e3611e15f15fe60d | from scipy.optimize import leastsq, root
import numpy as np
from scipy import linalg as lin
from scipy import integrate
from matplotlib import pylab as plt
from scipy.sparse import dia_matrix
import timeit
class LotkiVolterra:
    """Lotka-Volterra predator-prey model with logistic prey limitation.

    dx/dt = a*x - b*x*y - e*x**2   (prey, "rabbits")
    dy/dt = -c*y + d*b*x*y         (predators, "foxes")
    """
    def __init__(self, a, b, c, d, e):
        self.a, self.b, self.c, self.d, self.e = a, b, c, d, e
    def dX_dt(self, X, t=0):
        """ Return the growth rate of fox and rabbit populations. """
        return np.array([self.a * X[0] - self.b * X[0] * X[1] - self.e * X[0] ** 2,
                         -self.c * X[1] + self.d * self.b * X[0] * X[1]])
    def d2X_dt2(self, X, t=0):
        """ Return the Jacobian matrix evaluated in X. """
        return np.array([[self.a - self.b * X[1] - 2 * self.e * X[0], -self.b * X[0]],
                         [self.b * self.d * X[1], -self.c + self.b * self.d * X[0]]])
    def analysis(self, t, X0):
        """Integrate the system from X0 over times t; return the trajectory
        array, or None when the fixed-point guard below fires.

        NOTE(review): the early return triggers when BOTH candidate points
        are exact equilibria (the classical e == 0 case), which looks like
        an inverted condition - confirm the intended guard.
        """
        X_f0 = np.array([0., 0.])
        # Nontrivial equilibrium of the classical (e = 0) system.
        X_f1 = np.array([self.c / (self.d * self.b), self.a / self.b])
        if all(self.dX_dt(X_f0) == np.zeros(2)) and all(self.dX_dt(X_f1) == np.zeros(2)):
            return None
        A_f0 = self.d2X_dt2(X_f0)
        A_f1 = self.d2X_dt2(X_f1)
        lambda1, lambda2 = np.linalg.eigvals(A_f1)  # >>> (1.22474j, -1.22474j)
        # They are imaginary numbers. The fox and rabbit populations are periodic as follows from further
        # analysis. Their period is given by:
        print(lambda1, lambda2)
        T_f1 = 2 * np.pi / abs(lambda1)  # >>> 5.130199
        X, infodict = integrate.odeint(self.dX_dt, X0, t, full_output=True)
        print(infodict['message'])
        return X
    def plot_x_t(self, name='rabbits_and_foxes_1.png'):
        """Integrate from (10 rabbits, 5 foxes) over t in [0, 15], plot both
        populations versus time and save the figure to *name*."""
        t = np.linspace(0, 15, 1000)  # time
        X0 = np.array([10, 5])  # initials conditions: 10 rabbits and 5 foxes
        res = self.analysis(t, X0)
        if res is None:
            return None
        rabbits, foxes = res.T
        f1 = plt.figure()
        plt.plot(t, rabbits, 'r-', label='Rabbits')
        plt.plot(t, foxes, 'b-', label='Foxes')
        plt.grid()
        plt.legend(loc='best')
        plt.xlabel('time')
        plt.ylabel('population')
        plt.title('Evolution of fox and rabbit populations')
        f1.savefig(name)
        plt.show()
def odu2_solveband(koeffs, func, L, bcl, bcr, N):
    """Solve u" + a u' + b u = f(x) on [0, L] with scipy's banded solver.

    Boundary conditions at each end: gamma * u' + beta * u = alpha.
    koeffs = (a, b)
    func = f(x), in np.fromfunction form (called with index arrays)
    L - length of the x interval (from 0 to L)
    bcl = lalpha, lbeta, lgamma (left end)
    bcr = ralpha, rbeta, rgamma (right end)
    N - number of subintervals
    Returns the grid solution as an (N+1, 1) array.
    """
    a, b = koeffs
    lalpha, lbeta, lgamma = bcl
    ralpha, rbeta, rgamma = bcr
    h = L / N
    ''' u -> y[i], *b
    u' -> 1 / 2h * (y[i+1] - y[i-1]) *a
    u'' -> 1 / h**2 *(y[i+1] - 2y[i] + y[i-1]) *1
    '''
    ''' обозначения
    A0 - главная диагональ
    Аu1 - диагональ выше главной на 1
    Аd1 - диагональ ниже главной на 1'''
    # A0: main diagonal, Au1: first superdiagonal, Ad1: first subdiagonal.
    A0 = np.ones(N + 1)  # y[i]
    Au1 = np.zeros(N + 1)  # y[i+1]
    Ad1 = np.zeros(N + 1)  # y[i-1]
    Au1[:] = a / (2 * h) + 1 / h ** 2
    A0[:] = b - 2 / h ** 2
    Ad1[:] = -a / (2 * h) + 1 / h ** 2
    F = np.fromfunction(func, (N + 1, 1))
    # Left boundary row: one-sided difference for gamma*u' + beta*u = alpha.
    A0[0] = lbeta - lgamma / h
    Ad1[0] = 0
    Au1[0] = lgamma / h
    F[0] = lalpha
    # Right boundary row.
    A0[N] = rbeta + rgamma / h
    Au1[N] = 0
    Ad1[N] = -rgamma / h
    F[N] = ralpha
    # solve_banded expects "matrix diagonal ordered form": shift the
    # off-diagonals into column alignment before stacking the three rows.
    Au1 = np.roll(Au1, 1)
    Ad1 = np.roll(Ad1, -1)
    A_band = np.concatenate((Au1, A0, Ad1)).reshape(3, N + 1)
    res = lin.solve_banded((1, 1), A_band, F)
    # print(res)
    return res
def odu2_solve(koeffs, func, L, bcl, bcr, N):
    """Dense variant of odu2_solveband: assemble the full tridiagonal
    matrix via dia_matrix and solve with scipy.linalg.solve.

    Same discretisation and boundary handling as the banded version;
    returns the grid solution as an (N+1, 1) array.
    """
    a, b = koeffs
    lalpha, lbeta, lgamma = bcl
    ralpha, rbeta, rgamma = bcr
    h = L / N
    size = N + 1
    # Interior finite-difference stencil coefficients.
    upper = np.full(size, a / (2 * h) + 1 / h ** 2)   # y[i+1]
    main = np.full(size, b - 2 / h ** 2)              # y[i]
    lower = np.full(size, -a / (2 * h) + 1 / h ** 2)  # y[i-1]
    rhs = np.fromfunction(func, (size, 1))
    # Left boundary row: gamma*u' + beta*u = alpha (one-sided difference).
    main[0] = lbeta - lgamma / h
    lower[0] = 0
    upper[0] = lgamma / h
    rhs[0] = lalpha
    # Right boundary row.
    main[N] = rbeta + rgamma / h
    upper[N] = 0
    lower[N] = -rgamma / h
    rhs[N] = ralpha
    data = np.array([np.roll(lower, -1), main, np.roll(upper, 1)])
    offsets = np.array([-1, 0, 1])
    M = dia_matrix((data, offsets), shape=(size, size)).toarray()
    return lin.solve(M, rhs)
def analyse_ode2():
    """Benchmark solve_banded vs dense solve on u'' + u = -x and plot both
    numerical solutions together with coarse reference points."""
    L = np.pi
    N = 100
    t1 = timeit.default_timer()
    y1 = odu2_solveband([0, 1], lambda x, y: -x * L / N, L, (0, 1, 0), (1, 0, 1), N)
    t1_final = timeit.default_timer() - t1
    t2 = timeit.default_timer()
    y2 = odu2_solve([0, 1], lambda x, y: -x * L / N, L, (0, 1, 0), (1, 0, 1), N)
    t2_final = timeit.default_timer() - t2
    f1 = plt.figure()
    plt.plot(np.linspace(0, L, N + 1), y1, 'r-', label='solve_banded')
    plt.plot(np.linspace(0, L, N + 1), y2, 'g--', label='solve')
    Ne = 10
    # Coarse reference curve -2*sin(x) - x; presumably the analytic
    # solution of the test problem - TODO confirm.
    plt.plot(np.arange(0, L + L / Ne, L / Ne),
             np.fromfunction(lambda x, y: -2 * np.sin(x * L / Ne) - x * L / Ne,
                             (Ne + 1, 1)), '.')
    plt.legend(loc='lower left')
    plt.show()
    print("Время solve_banded[odu2]: t = ", t1_final)
    print("Время solve[odu2] : t = ", t2_final)
def odu4_solveband(koeffs, func, L, phi, psi, N):
    """Solve a*u(IV) + b*u(III) + c*u" + d*u' + e*u = f(x) on [0, L]
    with scipy's banded solver (pentadiagonal system).

    Boundary conditions: u(0) = phi[0], u(L) = phi[1],
                         u'(0) = psi[0], u'(L) = psi[1].
    koeffs = (a, b, c, d, e)
    func = f(x), in np.fromfunction form
    L - length of the x interval (from 0 to L)
    N - number of subintervals
    Returns the grid solution as an (N+1, 1) array.
    """
    a, b, c, d, e = koeffs
    h = L / N
    ''' обозначения
    A0 - главная диагональ
    Аu1 - диагональ выше главной на 1
    Аu2 - диагональ выше главной на 2
    Аd1 - диагональ ниже главной на 1
    Аd2 - диагональ ниже главной на 2'''
    # A0: main diagonal; Au1/Au2: 1st/2nd superdiagonal; Ad1/Ad2: 1st/2nd
    # subdiagonal of the pentadiagonal system.
    Au2 = np.zeros(N + 1)  # y[i+2]
    Au1 = np.zeros(N + 1)  # y[i+1]
    A0 = np.ones(N + 1)  # y[i]
    Ad1 = np.zeros(N + 1)  # y[i-1]
    Ad2 = np.zeros(N + 1)  # y[i-2]
    ''' u -> y[i], *e
    u' -> 1 / 2h * (y[i+1] - y[i-1]) *d
    u'' -> 1 / h**2 *(y[i+1] - 2y[i] + y[i-1]) *c
    u(III) -> 1/ 2h**3 *(-y[i-2]+2y[i-1]-2y[i+i]+y[i+2]) * b
    u(IV) -> 1 / h**4 *(y[i-2]-4y[i-1] +6y[i] -4y[i+1] + y[i+2]) * a
    '''
    F = np.fromfunction(func, (N + 1, 1))
    # Interior stencil coefficients (central differences, see above).
    Au2[:] = a / (h ** 4) + b / (2 * (h ** 3))
    Au1[:] = d / (2 * h) + c / (h ** 2) - b / (h ** 3) - 4 * a / (h ** 4)
    A0[:] = e - 2 * c / (h ** 2) + 6 * a / (h ** 4)
    Ad1[:] = c / (h ** 2) - d / (2 * h) + b / (h ** 3) - 4 * a / (h ** 4)
    Ad2[:] = a / (h ** 4) - b / (2 * (h ** 3))
    # Row 0: Dirichlet condition u(0) = phi[0].
    Au2[0] = 0
    Au1[0] = 0
    A0[0] = 1
    Ad1[0] = 0
    Ad2[0] = 0
    F[0] = phi[0]
    # Row 1: one-sided difference for u'(0) = psi[0].
    Au2[1] = 0
    Au1[1] = 0
    A0[1] = 1 / h
    Ad1[1] = - 1 / h
    Ad2[1] = 0
    F[1] = psi[0]
    # Row N-1: one-sided difference for u'(L) = psi[1].
    Au2[N - 1] = 0
    Au1[N - 1] = 1 / h
    A0[N - 1] = -1 / h
    Ad1[N - 1] = 0
    Ad2[N - 1] = 0
    F[N - 1] = psi[1]
    # Row N: Dirichlet condition u(L) = phi[1].
    Au2[N] = 0
    Au1[N] = 0
    A0[N] = 1
    Ad1[N] = 0
    Ad2[N] = 0
    F[N] = phi[1]
    # Shift the off-diagonals into solve_banded's diagonal ordered form.
    Au2 = np.roll(Au2, 2)
    Au1 = np.roll(Au1, 1)
    Ad1 = np.roll(Ad1, -1)
    Ad2 = np.roll(Ad2, -2)
    A_band = np.concatenate((Au2, Au1, A0, Ad1, Ad2)).reshape(5, N + 1)
    res = lin.solve_banded((2, 2), A_band, F)
    return res
def odu4_solve(koeffs, func, L, phi, psi, N):
    """Solve  a*u'''' + b*u''' + c*u'' + d*u' + e*u = f(x)  on [0, L].

    Identical discretisation and boundary handling as odu4_solveband:
        u(0) = phi[0],   u(L) = phi[1]
        u'(0) = psi[0],  u'(L) = psi[1]
    but the pentadiagonal system is expanded to a dense matrix (via
    scipy's dia_matrix) and solved with a general dense solver — useful
    as a timing/correctness baseline for the banded version.
    """
    a, b, c, d, e = koeffs
    h = L / N
    ''' naming:
    A0  - main diagonal
    Au1 - diagonal one above the main
    Au2 - diagonal two above the main
    Ad1 - diagonal one below the main
    Ad2 - diagonal two below the main'''
    Au2 = np.zeros(N + 1)  # y[i+2]
    Au1 = np.zeros(N + 1)  # y[i+1]
    A0 = np.ones(N + 1)  # y[i]
    Ad1 = np.zeros(N + 1)  # y[i-1]
    Ad2 = np.zeros(N + 1)  # y[i-2]
    ''' finite-difference stencils:
    u      -> y[i]                                                   * e
    u'     -> 1/2h    * (y[i+1] - y[i-1])                            * d
    u''    -> 1/h**2  * (y[i+1] - 2y[i] + y[i-1])                    * c
    u(III) -> 1/2h**3 * (-y[i-2] + 2y[i-1] - 2y[i+1] + y[i+2])       * b
    u(IV)  -> 1/h**4  * (y[i-2] - 4y[i-1] + 6y[i] - 4y[i+1] + y[i+2]) * a
    '''
    F = np.fromfunction(func, (N + 1, 1))
    # Interior rows: sum of all five stencils above.
    Au2[:] = a / (h ** 4) + b / (2 * (h ** 3))
    Au1[:] = d / (2 * h) + c / (h ** 2) - b / (h ** 3) - 4 * a / (h ** 4)
    A0[:] = e - 2 * c / (h ** 2) + 6 * a / (h ** 4)
    Ad1[:] = c / (h ** 2) - d / (2 * h) + b / (h ** 3) - 4 * a / (h ** 4)
    Ad2[:] = a / (h ** 4) - b / (2 * (h ** 3))
    # Row 0: Dirichlet value at x = 0, y[0] = phi[0].
    Au2[0] = 0
    Au1[0] = 0
    A0[0] = 1
    Ad1[0] = 0
    Ad2[0] = 0
    F[0] = phi[0]
    # Row 1: forward difference (y[1] - y[0]) / h = psi[0] for u'(0).
    Au2[1] = 0
    Au1[1] = 0
    A0[1] = 1 / h
    Ad1[1] = - 1 / h
    Ad2[1] = 0
    F[1] = psi[0]
    # Row N-1: forward difference (y[N] - y[N-1]) / h = psi[1] for u'(L).
    Au2[N - 1] = 0
    Au1[N - 1] = 1 / h
    A0[N - 1] = -1 / h
    Ad1[N - 1] = 0
    Ad2[N - 1] = 0
    F[N - 1] = psi[1]
    # Row N: Dirichlet value at x = L, y[N] = phi[1].
    Au2[N] = 0
    Au1[N] = 0
    A0[N] = 1
    Ad1[N] = 0
    Ad2[N] = 0
    F[N] = phi[1]
    # Assemble the five diagonals into a dense matrix and solve.
    data = np.array([np.roll(Ad2, -2), np.roll(Ad1, -1), A0, np.roll(Au1, 1), np.roll(Au2, 2)])
    offsets = np.array([-2, -1, 0, 1, 2])
    M = dia_matrix((data, offsets), shape=(N + 1, N + 1)).toarray()
    res = lin.solve(M, F)
    return res
def analyse_ode4():
    """Compare the banded and dense solvers on the 4th-order BVP.

    Solves the same problem on [0, 1] with value/derivative boundary
    conditions using both odu4_solveband and odu4_solve, plots both
    numerical solutions and prints each solver's wall-clock time.
    """
    L = 1
    N = 100
    # Time the banded (pentadiagonal) solver.
    t1 = timeit.default_timer()
    y1 = odu4_solveband([1, 0, 0, 0, 1], lambda x, y: -x * L / N, L, (0, 0), (1, 1), N)
    t1_final = timeit.default_timer() - t1
    # Time the dense solver on the identical problem.
    t2 = timeit.default_timer()
    y2 = odu4_solve([1, 0, 0, 0, 1], lambda x, y: -x * L / N, L, (0, 0), (1, 1), N)
    t2_final = timeit.default_timer() - t2
    f1 = plt.figure()
    plt.plot(np.linspace(0, L, N + 1), y2, 'g-', label='solve')
    plt.plot(np.linspace(0, L, N + 1), y1, 'r--', label='solve_banded')
    plt.legend(loc='lower left')
    plt.show()
    # Messages are Russian for "Time of solve_banded/solve".
    print("Время solve_banded[odu4]: t = ", t1_final)
    print("Время solve[odu4] : t = ", t2_final)
def find_root(func):
    """Find a scalar root of func near x = 0 and visualise it.

    Uses scipy.optimize.root with initial guess 0, prints the root (the
    message is Russian for "Root of the equation") and plots func on
    [-5, 5] together with the zero line and the found root.
    """
    z = root(func, 0)
    print('Корень уравнения:', *z.x)
    f1 = plt.figure()
    x = np.linspace(-5, 5, 101)
    y = func(x)
    # Root as a blue dot over the green curve.
    plt.plot(z.x, 0, 'bo', x, y, '-g')
    # Dashed red y = 0 axis for reference.
    plt.plot(x, np.linspace(0, 0, 101), '--r')
    plt.show()
def f(x):
    """Residuals of the 2x2 nonlinear system

        sin(x0) + 2*x1 = 2
        cos(x1 - 1) + x0 = 0.7

    in the "return the residual vector" form expected by scipy.optimize.root.
    """
    first_eq = np.sin(x[0]) + 2 * x[1] - 2
    second_eq = np.cos(x[1] - 1) + x[0] - 0.7
    return [first_eq, second_eq]
def find_root_sys():
    """Solve the 2x2 system defined by f and plot the root.

    Uses scipy.optimize.root with the Levenberg-Marquardt method; prints
    the root (the message is Russian for "Root of the system").
    NOTE(review): res looks like two isolines meant to intersect at the
    root, but res[0] = 2 - sin(x) and res[1] = 1 + arccos(x - 0.7) do not
    obviously rearrange the equations in f -- verify the plotted curves.
    """
    z = root(f, [0, 0], method='lm')
    print('Корень системы:', z.x)
    x = np.linspace(0, 1, 100)
    res = [2 - np.sin(x), 1 + np.arccos(x - 0.7)]
    f1 = plt.figure()
    plt.plot(z.x[0], z.x[1], 'bo', x, res[1], '-g', x, res[0], '-r')
    plt.show()
def least_sq(x, y):
    """Fit y ≈ p0*x**2 + p1*x + p2 by least squares and plot the fit.

    Prints the total quadratic residual (the message is Russian for
    "Total quadratic residual") and the model's prediction at x = 21,
    then shows the data points with the fitted parabola sampled at 50
    evenly spaced abscissae.
    """
    func = lambda params, x: params[0] * x ** 2 + params[1] * x + params[2]
    error = lambda params, x, y: func(params, x) - y
    pr_initial = (0.0, 0.0, 0.0)
    pr_final, success = leastsq(error, pr_initial[:], args=(x, y))
    # Bug fix: the residual must be evaluated at the data points themselves,
    # not at a hard-coded 5-point linspace (which only coincides with x when
    # x happens to be exactly 5 evenly spaced values).
    print('Полная квадратичная невязка:', sum((y - func(pr_final, x)) ** 2) ** 0.5)
    print('X = 21, Y = ', func(pr_final, 21))
    x1 = np.linspace(x.min(), x.max(), 50)
    y1 = func(pr_final, x1)
    plt.plot(x, y, 'bo', x1, y1, 'g-')
    plt.show()
def leastsq_radioact(T, N):
    """Fit radioactive decay counts N(t) ≈ N0 * exp(-lam * t).

    Prints (in Russian) the decay constant lam, the half-life ln(2)/lam
    and the total quadratic residual, then plots the fitted curve over
    the measured data.
    """
    func = lambda params, t: params[0] * np.exp(-params[1] * t)
    error = lambda params, x, y: func(params, x) - y
    # NOTE(review): an all-zero start is a degenerate seed for an exponential
    # fit; leastsq still converges on this data, but (N.max(), 1.0) would be
    # more robust. Left unchanged to preserve the fitted output.
    pr_initial = (0, 0)
    pr_final, success = leastsq(error, pr_initial[:], args=(T, N))
    x = np.linspace(T.min(), T.max(), 50)
    y = func(pr_final, x)
    print('Константа распада:', pr_final[1])
    print('Период полураспада:', np.log(2) / pr_final[1])
    # Bug fix: evaluate the residual at the measurement times T instead of a
    # hard-coded 6-point linspace that only matches T by coincidence.
    print('Полная квадратичная невязка:', sum((N - func(pr_final, T)) ** 2) ** 0.5)
    f = plt.figure()
    plt.plot(x, y, 'g-', T, N, 'bo')
    plt.show()
def SIR(sir, t, beta, gamma, N):
    """Right-hand side of the SIR epidemic ODE system, for scipy's odeint.

    sir   -- current state (S, I, R): susceptible, infected, recovered
    t     -- time (unused, required by the odeint callback signature)
    beta  -- infection rate, gamma -- recovery rate, N -- population size
    Returns [dS/dt, dI/dt, dR/dt].
    """
    susceptible, infected, _recovered = sir
    new_infections = (beta * susceptible * infected) / N
    recoveries = gamma * infected
    return [-new_infections, new_infections - recoveries, recoveries]
if __name__ == "__main__":
    # Task 1: banded vs dense solvers for the 2nd- and 4th-order BVPs.
    print('Задание 1')
    analyse_ode2()
    analyse_ode4()
    # Task 2.1: scalar root of ln(e) + (x+1)^3 and a 2x2 nonlinear system.
    print('\nЗадание 2.1')
    find_root(lambda x: np.log(np.e) + (x + 1) * (x + 1) * (x + 1))
    find_root_sys()
    # Task 2.2.1: quadratic least-squares fit of tabulated data.
    print('\nЗадание 2.2.1')
    X = np.array([20, 22, 24, 26, 28])
    Y = np.array([40, 29.0, 21.1, 15.3, 11.1])
    least_sq(X, Y)
    # Task 2.2.2: exponential decay fit (decay constant and half-life).
    print('\nЗадание 2.2.2')
    T = np.array([0, 0.249024, 0.498048, 0.747072, 0.996096, 1.24512])
    N = np.array([1000872, 942719, 891818, 840464, 795312, 748568])
    leastsq_radioact(T, N)
    # Task 3.2: Lotka-Volterra predator-prey model (the LotkiVolterra class
    # is presumably defined earlier in this file -- not visible here) and a
    # SIR epidemic model integrated with odeint.
    print('\nЗадание 3.2')
    a = 1.
    b = 0.1
    c = 1.5
    d = 0.75
    e = 0.1
    system = LotkiVolterra(a, b, c, d, e)
    system.plot_x_t('test.png')
    # SIR: population of 1000 with a single initial infection.
    N = 1000
    S = N - 1
    I = 1
    R = 0
    beta = 0.3
    gamma = 0.2
    sir0 = (S, I, R)
    # time points
    t = np.linspace(0, 100)
    sir = integrate.odeint(SIR, sir0, t, args=(beta, gamma, N))
    plt.plot(t, sir[:, 0], label='S(t)')
    plt.plot(t, sir[:, 1], label='I(t)')
    plt.plot(t, sir[:, 2], label='R(t)')
    plt.legend()
    plt.xlabel('T')
    plt.ylabel('N')
    # use scientific notation
    plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
    plt.show()
|
992,757 | dcd435844a57fa153cc38afa30af21d5305006d8 | from django.db import models
class Pizza(models.Model):
    """A pizza on the menu: name, description, size, price, creation date."""
    # Database values stored in the single-character ``rozmiar`` field.
    LARGE = 'L'
    MEDIUM = 'M'
    SMALL = 'S'
    # (db value, human-readable label); labels are Polish:
    # "duza"/"srednia"/"mala" = large/medium/small.
    ROZMIARY = (
        (LARGE, 'duza'),
        (MEDIUM, 'srednia'),
        (SMALL, 'mala'),
    )
    # "nazwa" = name of the pizza.
    nazwa = models.CharField(
        verbose_name='pizza',
        max_length=30,
        help_text='Nazwa pizzy')
    # "opis" = description; optional.
    opis = models.TextField(
        blank=True,
        help_text='Opis pizzy')
    # "rozmiar" = size; defaults to large.
    rozmiar = models.CharField(max_length=1,
                               choices=ROZMIARY,
                               default=LARGE)
    # "cena" = price, up to 999.99.
    cena = models.DecimalField(max_digits=5, decimal_places=2)
    # "data" = date added; set automatically on creation ("dodano" = added).
    data = models.DateField('dodano', auto_now_add=True)
class Skladnik(models.Model):
    """A topping/ingredient ("skladnik") that can appear on many pizzas."""
    # Many-to-many: one ingredient on many pizzas; reverse accessor
    # ``skladniki`` on Pizza.
    pizza = models.ManyToManyField(Pizza, related_name='skladniki')
    # Ingredient name.
    nazwa = models.CharField('skladnik', max_length=30)
    # "jarski" = vegetarian flag; help text (Polish): "Check if the
    # ingredient is suitable for vegetarians".
    jarski = models.BooleanField(
        'jarski?',
        help_text='Zaznacz, jeżeli składnik jest odpowiedni dla wegetarian',
        default=False
    )
    # Ingredient price, up to 9.99.
    cena = models.DecimalField(max_digits=3, decimal_places=2)
|
992,758 | 4eba3c2c3655eff8ef84f11a42e45cb990534682 | # Generated by Django 3.1.7 on 2021-04-20 07:38
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: re-declare PostRewiews.text metadata.

    Alters the ``text`` field of ``postrewiews`` (model name keeps its
    original "rewiews" spelling) to a 1000-character TextField with
    Russian verbose_name/help_text ("Message" / "Maximum 1000 characters").
    """

    dependencies = [
        ('blog', '0004_postrewiews_postid'),
    ]

    operations = [
        migrations.AlterField(
            model_name='postrewiews',
            name='text',
            field=models.TextField(help_text='Максимум 1000 символов', max_length=1000, verbose_name='Сообщение'),
        ),
    ]
|
992,759 | 03586dba3010bbb6616be9e757d9151e27736381 | import redis
import pymongo
from pymongo import MongoClient
from _include.dbClasses import mongodb as _mongodb

# Local Redis with default settings; the decode_responses variant is kept
# commented out for reference.
r = redis.StrictRedis()
#r = redis.StrictRedis(host='localhost', port=6379, db=0, charset="utf-8", decode_responses=True)

# Subscribe to every pub/sub channel pattern and relay each announced hash
# from Redis into MongoDB, deleting it from Redis afterwards.
pubsub = r.pubsub()
pubsub.psubscribe("*")
print("sdhfhjsgfjgsjdfgsjgdjf")
for msg in pubsub.listen():
    s = msg["data"]
    #print(s)
    # Subscription confirmations carry the integer subscriber count (1 here);
    # real messages carry the payload as bytes.
    if(s != 1):
        key_1 = s.decode("utf-8")
        #print(key_1)
        # Strip the 's_' marker to obtain the hash key.
        # NOTE(review): str.replace removes EVERY 's_' occurrence, not just a
        # prefix -- removeprefix('s_') would be stricter if only a prefix is
        # intended.
        key_ = key_1.replace('s_', '')
        #print(key_)
        print(r.hgetall(key_))
        _mongodb.insertSpy_(MongoClient, pymongo, data=r.hgetall(key_))
        # Remove the hash once it has been persisted to MongoDB.
        r.delete(key_)
        print(r.hgetall(key_))
        print("deleted----------------------------")
        #print(key_)
|
992,760 | 4ea0265384a9b17dc280472903b3f4fafcb06c29 | import feedparser
from bs4 import BeautifulSoup
import boto3
import os

# AWS credentials and region are read from environment variables.
myaccess = os.environ['access']
mysecret = os.environ['secret']
myregion = os.environ['region']

# Fetch the Towards Data Science RSS feed.
url = "https://towardsdatascience.com/feed"
feed = feedparser.parse(url)

dynamodb = boto3.resource('dynamodb', aws_access_key_id=myaccess, aws_secret_access_key=mysecret, region_name=myregion)
table = dynamodb.Table("Movies")

# Store every http(s) link found in each post's HTML description.
# NOTE(review): each link URL is stored under the attribute name "title",
# into a table named "Movies" -- confirm these names are intentional.
for post in feed.entries:
    soup = BeautifulSoup(post.description, features="html.parser")
    for link in soup.findAll("a"):
        if "http" in link.get("href"):
            response = table.put_item(Item={"title": link.get("href")})

"""
python test.py 'access_key_xxx' 'secret_key_xxx' 'us-east-1'
python test1.py --access abcd --secret ddd --region aaa
"""
|
992,761 | 1979d81bbc503a7a01c810ad1a387d95e34d129f | #! /usr/bin/env python3
import numpy as np
import os
from threading import Lock
import logging
# ROS
import rospy
import sensor_msgs.msg
import geometry_msgs.msg
import tf2_ros
import tf
import razer_hydra.msg
import meshcat
import meshcat.geometry as meshcat_geom
import meshcat.transformations as meshcat_tf
import pydrake
from pydrake.all import (
AbstractValue,
AngleAxis,
BasicVector,
BodyIndex,
ExternallyAppliedSpatialForce,
LeafSystem,
List,
Quaternion,
QueryObject,
RollPitchYaw,
RotationMatrix,
RigidTransform,
SpatialForce,
Value
)
def ros_tf_to_rigid_transform(msg):
    """Convert a ROS geometry_msgs Transform into a Drake RigidTransform.

    Note the quaternion field reordering: ROS stores (x, y, z, w) while
    Drake's Quaternion constructor takes (w, x, y, z).
    """
    return RigidTransform(
        p=[msg.translation.x, msg.translation.y, msg.translation.z],
        R=RotationMatrix(Quaternion(msg.rotation.w, msg.rotation.x,
                                    msg.rotation.y, msg.rotation.z)))
class HydraInteractionLeafSystem(LeafSystem):
    ''' Handles comms with the Hydra, and uses QueryObject inputs from the SceneGraph
    to pick closest points when required. Passes this information to the HydraInteractionForceElement
    at every update tick.

    Construct by supplying the MBPlant and SceneGraph under sim + a list of the body IDs
    that are manipulable. Given ZMQ information, visualizes hand pose with a hand model.
    TODO: Hand geometry may want to be handed to SceneGraph to visualize; would need to
    investigate piping for manually specifying poses of objects not in MBP. '''

    # NOTE(review): the mutable default ``all_manipulable_body_ids=[]`` is
    # shared across instances; pass an explicit list from every call site.
    def __init__(self, mbp, sg, all_manipulable_body_ids=[], zmq_url="default"):
        LeafSystem.__init__(self)
        self.all_manipulable_body_ids = all_manipulable_body_ids
        self.set_name('HydraInteractionLeafSystem')

        # Ports: geometry query + full MBP state in; externally applied
        # spatial forces out (consumed by the simulated plant).
        self._geometry_query_input_port = self.DeclareAbstractInputPort(
            "geometry_query", AbstractValue.Make(QueryObject()))
        self.robot_state_input_port = self.DeclareVectorInputPort(
            "robot_state", BasicVector(mbp.num_positions() + mbp.num_velocities()))
        forces_cls = Value[List[ExternallyAppliedSpatialForce]]
        self.spatial_forces_output_port = self.DeclareAbstractOutputPort(
            "spatial_forces_vector",
            lambda: forces_cls(),
            self.DoCalcAbstractOutput)
        # 100 Hz publish rate for DoPublish.
        self.DeclarePeriodicPublish(0.01, 0.0)

        # Meshcat visualization of the hand model at the hydra pose.
        if zmq_url == "default":
            zmq_url = "tcp://127.0.0.1:6000"
        if zmq_url is not None:
            logging.info("Connecting to meshcat-server at zmq_url=" + zmq_url + "...")
        self.vis = meshcat.Visualizer(zmq_url=zmq_url)
        fwd_pt_in_hydra_frame = RigidTransform(p=[0.0, 0.0, 0.0])
        self.vis["hydra_origin"]["hand"].set_object(
            meshcat_geom.ObjMeshGeometry.from_file(
                os.path.join(os.path.dirname(os.path.abspath(__file__)), "hand-regularfinal-scaled-1.obj"))
        )
        self.vis["hydra_origin"]["hand"].set_transform(meshcat_tf.compose_matrix(scale=[0.001, 0.001, 0.001], angles=[np.pi/2, 0., np.pi/2], translate=[-0.25, 0., 0.]))
        #self.vis["hydra_origin"]["center"].set_object(meshcat_geom.Sphere(0.02))
        #self.vis["hydra_origin"]["center"].set_transform(meshcat_tf.translation_matrix([-0.025, 0., 0.]))
        #self.vis["hydra_origin"]["mid"].set_object(meshcat_geom.Sphere(0.015))
        #self.vis["hydra_origin"]["mid"].set_transform(meshcat_tf.translation_matrix([0.0, 0., 0.]))
        #self.vis["hydra_origin"]["fwd"].set_object(meshcat_geom.Sphere(0.01))
        #self.vis["hydra_origin"]["fwd"].set_transform(fwd_pt_in_hydra_frame.matrix())
        #self.vis["hydra_grab"].set_object(meshcat_geom.Sphere(0.01),
        #                                  meshcat_geom.MeshLambertMaterial(
        #                                     color=0xff22dd,
        #                                     alphaMap=0.1))
        self.vis["hydra_grab"]["grab_point"].set_object(meshcat_geom.Sphere(0.01),
                                                        meshcat_geom.MeshLambertMaterial(
                                                            color=0xff22dd,
                                                            alphaMap=0.1))
        # Hide it sketchily
        self.vis["hydra_grab"].set_transform(meshcat_tf.translation_matrix([0., 0., -1000.]))

        # State for selecting objects
        self.grab_needs_update = False
        self.grab_in_progress = False
        self.grab_update_hydra_pose = None
        self.selected_body = None
        self.selected_pose_in_body_frame = None
        self.desired_pose_in_world_frame = None
        self.stop = False
        self.freeze_rotation = False
        self.previously_freezing_rotation = False

        # Set up subscription to Razer Hydra
        self.mbp = mbp
        self.mbp_context = mbp.CreateDefaultContext()
        self.sg = sg
        self.hasNewMessage = False
        self.lastMsg = None
        self.hydra_origin = RigidTransform(p=[1.0, 0., -0.1],
                                           rpy=RollPitchYaw([0., 0., 0.]))
        self.hydra_prescale = 3.0

        # Guards all mutable interaction state shared between the ROS
        # callback thread and the simulation thread.
        self.callback_lock = Lock()
        self.hydraSubscriber = rospy.Subscriber("/hydra_calib", razer_hydra.msg.Hydra, self.callback, queue_size=1)
        logging.info("Waiting for hydra startup...")
        # Block until the first hydra message arrives (or ROS shuts down).
        while not self.hasNewMessage and not rospy.is_shutdown():
            rospy.sleep(0.01)
        logging.info("Got hydra.")

    def DoCalcAbstractOutput(self, context, y_data):
        """Output-port calc: PD force pulling the grabbed body to the hydra pose.

        Emits one ExternallyAppliedSpatialForce on the selected body while a
        grab is in progress, otherwise an empty list.
        """
        # NOTE(review): the lock is not released if this body raises; a
        # try/finally (or ``with self.callback_lock:``) would be safer.
        self.callback_lock.acquire()

        if self.selected_body and self.grab_in_progress:
            # Simple inverse dynamics PD rule to drive object to desired pose.
            body = self.selected_body

            # Load in robot state
            x_in = self.EvalVectorInput(context, 1).get_value()
            self.mbp.SetPositionsAndVelocities(self.mbp_context, x_in)
            TF_object = self.mbp.EvalBodyPoseInWorld(self.mbp_context, body)
            xyz = TF_object.translation()
            R = TF_object.rotation().matrix()
            TFd_object = self.mbp.EvalBodySpatialVelocityInWorld(self.mbp_context, body)
            xyzd = TFd_object.translational()
            Rd = TFd_object.rotational()

            # Match the object position directly to the hydra position.
            xyz_desired = self.desired_pose_in_world_frame.translation()

            # Regress xyz back to just the hydra pose in the attraction case.
            # Re-baseline the reference poses whenever the freeze-rotation
            # trigger state flips.
            if self.previously_freezing_rotation != self.freeze_rotation:
                self.selected_body_init_offset = TF_object
                self.grab_update_hydra_pose = RigidTransform(self.desired_pose_in_world_frame)
            self.previously_freezing_rotation = self.freeze_rotation

            if self.freeze_rotation:
                # Hold the orientation the body had when freezing started.
                R_desired = self.selected_body_init_offset.rotation().matrix()
            else:
                # Figure out the relative rotation of the hydra from its initial posture
                to_init_hydra_tf = self.grab_update_hydra_pose.inverse()
                desired_delta_rotation = to_init_hydra_tf.multiply(self.desired_pose_in_world_frame).GetAsMatrix4()[:3, :3]
                # Transform the current object rotation into the init hydra frame, apply that relative tf, and
                # then transform back
                to_init_hydra_tf_rot = to_init_hydra_tf.GetAsMatrix4()[:3, :3]
                R_desired = to_init_hydra_tf_rot.T.dot(
                    desired_delta_rotation.dot(to_init_hydra_tf_rot.dot(
                        self.selected_body_init_offset.rotation().matrix())))
            # Could also pull the rotation back, but it's kind of nice to be able to recenter the object
            # without messing up a randomized rotation.
            #R_desired = (self.desired_pose_in_world_frame.rotation().matrix()*self.attract_factor +
            #             R_desired*(1.-self.attract_factor))

            # Apply PD in cartesian space
            xyz_e = xyz_desired - xyz
            xyzd_e = -xyzd
            f = 100.*xyz_e + 10.*xyzd_e

            # Orientation error as an axis-angle in world frame, with
            # velocity damping on the angular term.
            R_err_in_body_frame = np.linalg.inv(R).dot(R_desired)
            aa = AngleAxis(R_err_in_body_frame)
            tau_p = R.dot(aa.axis()*aa.angle())
            tau_d = -Rd
            tau = tau_p + 0.1*tau_d

            exerted_force = SpatialForce(tau=tau, f=f)
            out = ExternallyAppliedSpatialForce()
            out.F_Bq_W = exerted_force
            out.body_index = self.selected_body.index()
            y_data.set_value([out])
        else:
            y_data.set_value([])
        self.callback_lock.release()

    def DoPublish(self, context, event):
        """Periodic publish: service stop requests and resolve new grabs."""
        # TODO(russt): Copied from meshcat_visualizer.py.
        # Change this to declare a periodic event with a
        # callback instead of overriding DoPublish, pending #9992.
        LeafSystem.DoPublish(self, context, event)
        self.callback_lock.acquire()

        if self.stop:
            self.stop = False
            if context.get_time() > 0.5:
                self.callback_lock.release()
                # NOTE(review): StopIteration is raised to abort the outer
                # simulation loop -- confirm the caller expects this.
                raise StopIteration

        #query_object = self.EvalAbstractInput(context, 0).get_value()
        #pose_bundle = self.EvalAbstractInput(context, 0).get_value()
        x_in = self.EvalVectorInput(context, 1).get_value()
        self.mbp.SetPositionsAndVelocities(self.mbp_context, x_in)

        if self.grab_needs_update:
            hydra_tf = self.grab_update_hydra_pose
            self.grab_needs_update = False
            # If grab point is colliding...
            #print [x.distance for x in query_object.ComputeSignedDistanceToPoint(hydra_tf.matrix()[:3, 3])]
            # Find closest body to current pose
            grab_center = hydra_tf.GetAsMatrix4()[:3, 3]
            closest_distance = np.inf
            # Fallback body if the manipulable list is empty (BodyIndex(2)
            # is arbitrary from this view -- presumably a known free body).
            closest_body = self.mbp.get_body(BodyIndex(2))
            for body_id in self.all_manipulable_body_ids:
                body = self.mbp.get_body(body_id)
                offset = self.mbp.EvalBodyPoseInWorld(self.mbp_context, body)
                dist = np.linalg.norm(grab_center - offset.translation())
                if dist < closest_distance:
                    closest_distance = dist
                    closest_body = body
            self.selected_body = closest_body
            self.selected_body_init_offset = self.mbp.EvalBodyPoseInWorld(
                self.mbp_context, self.selected_body)
        self.callback_lock.release()

    def callback(self, msg):
        ''' ROS subscriber callback for /hydra_calib.

        Control mapping:
            Buttons: [Digital trigger, 1, 2, 3, 4, start, joy click]
            Digital trigger: buttons[0]
            Analog trigger: trigger
            Joy: +x is right, +y is fwd
        '''
        self.callback_lock.acquire()

        self.lastMsg = msg
        self.hasNewMessage = True

        # Scale the raw paddle pose and re-express it in the calibrated
        # hydra origin frame; this becomes the desired world pose.
        pad_info = msg.paddles[0]
        hydra_tf_uncalib = ros_tf_to_rigid_transform(pad_info.transform)
        hydra_tf_uncalib.set_translation(hydra_tf_uncalib.translation()*self.hydra_prescale)
        hydra_tf = self.hydra_origin.multiply(hydra_tf_uncalib)
        self.desired_pose_in_world_frame = hydra_tf
        self.vis["hydra_origin"].set_transform(hydra_tf.GetAsMatrix4())

        # Interpret various buttons for changing to scaling
        # Digital trigger edge-triggers grab start/stop.
        if pad_info.buttons[0] and not self.grab_in_progress:
            logging.info("Grabbing.")
            self.grab_update_hydra_pose = hydra_tf
            self.grab_needs_update = True
            self.grab_in_progress = True
        elif self.grab_in_progress and not pad_info.buttons[0]:
            self.grab_in_progress = False
            self.selected_body = None

        # Analog trigger past a small dead zone freezes the grabbed body's
        # rotation; start button requests a stop (handled in DoPublish).
        self.freeze_rotation = pad_info.trigger > 0.15

        if pad_info.buttons[5]:
            self.stop = True
        # Optional: use buttons to adjust hydra-reality scaling.
        # Disabling for easier onboarding of new users...
        #if pad_info.buttons[1]:
        #    # Scale down
        #    self.hydra_prescale = max(0.01, self.hydra_prescale * 0.98)
        #    print("Updated scaling to ", self.hydra_prescale)
        #if pad_info.buttons[3]:
        #    # Scale up
        #    self.hydra_prescale = min(10.0, self.hydra_prescale * 1.02)
        #    print("Updated scaling to ", self.hydra_prescale)
        #if pad_info.buttons[2]:
        #    # Translate down
        #    translation = self.hydra_origin.translation().copy()
        #    translation[2] -= 0.01
        #    print("Updated translation to ", translation)
        #    self.hydra_origin.set_translation(translation)
        #if pad_info.buttons[4]:
        #    # Translate up
        #    translation = self.hydra_origin.translation().copy()
        #    translation[2] += 0.01
        #    print("Updated translation to ", translation)
        #    self.hydra_origin.set_translation(translation)
        #if abs(pad_info.joy[0]) > 0.01 or abs(pad_info.joy[1]) > 0.01:
        #    # Translate up
        #    translation = self.hydra_origin.translation().copy()
        #    translation[1] -= pad_info.joy[0]*0.01
        #    translation[0] += pad_info.joy[1]*0.01
        #    print("Updated translation to ", translation)
        #self.hydra_origin.set_translation(translation)
        self.callback_lock.release()
992,762 | 9243f4a9b5786f21fe52159dc41b07444829d5ca | """
本函数将字典或嵌套字典中的key从下划线命名法装换为大驼峰命名法,
以便MatCloud MongoDB 的数据查询。
"""
import ast
import re
class Lower2Upper:
    """Convert dict keys from snake_case to UpperCamelCase.

    Used to rewrite MongoDB query documents (nested dicts and lists) so
    their keys match MatCloud's CamelCase field names. Dotted paths keep
    their dots: 'complete_formula' -> 'CompleteFormula', 'a_b.c' -> 'AB.C'.
    Keys not starting with a letter (e.g. '$and', '$eq') are left alone.
    """

    def __init__(self):
        # Every key seen anywhere in the query, filled by get_all_keys().
        self.key_list = []

    def get_all_keys(self, query_dict):
        """Recursively record every dict key found in a nested dict/list."""
        if isinstance(query_dict, dict):
            for key, value in query_dict.items():
                self.key_list.append(key)
                self.get_all_keys(value)
        elif isinstance(query_dict, list):
            # Only dict elements of a list are inspected (lists of lists
            # were not handled by the original either).
            for element in query_dict:
                if isinstance(element, dict):
                    for key, value in element.items():
                        self.key_list.append(key)
                        self.get_all_keys(value)

    def lower2upper(self, query):
        """Return *query* with every recorded key converted to CamelCase.

        Works by stringifying the query, rewriting the single-quoted tokens
        that were recorded as keys, and parsing the string back.
        """
        self.get_all_keys(query)
        query_list = str(query).split('\'')
        for i, token in enumerate(query_list):
            if token in self.key_list and token[0].isalpha():
                # Capitalize each piece between '.'/'_' separators, then drop
                # the underscores; dots are preserved.
                # (The original guarded the join with "if i != '.' or i != '_'",
                # which is always true because ``i`` is the enumerate index --
                # the dead guard has been removed.)
                pieces = re.split('([._])', token)
                query_list[i] = ''.join(p.capitalize() for p in pieces).replace('_', '')
        # ast.literal_eval instead of eval: the stringified query contains
        # only Python literals, and literal_eval cannot run arbitrary code.
        return ast.literal_eval('\''.join(query_list))
if __name__ == '__main__':
    # Smoke test: print a sample MongoDB query before and after the
    # snake_case -> CamelCase key rewrite.
    q = {'$and': [{'centering': {'$eq': 'FaceCentered'}},
                  {'$or': [{'formula': {'$eq': 'Cu4'}}, {'complete_formula': {'$eq': 'Cu4'}}]}]}
    lu = Lower2Upper()
    print(q)
    print(lu.lower2upper(q))
|
992,763 | 40619f6dfcc4a191df1acf3f5d4a589d89ba67dd | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the ``info`` column to ``csp_device``."""

    def forwards(self, orm):
        # Adding field 'Device.info'
        # (default='sdfa' only backfills existing rows; keep_default=False
        # means the column keeps no database default afterwards).
        db.add_column('csp_device', 'info', self.gf('django.db.models.fields.CharField')(default='sdfa', max_length=1000), keep_default=False)

    def backwards(self, orm):
        # Deleting field 'Device.info'
        db.delete_column('csp_device', 'info')

    # Frozen ORM snapshot used by South to reconstruct the model state.
    models = {
        'csp.device': {
            'Meta': {'object_name': 'Device'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'external_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
            'group': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'info': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
            'intranet_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
            'location': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'os': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'owner': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'remark': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'safe': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'sn': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        }
    }

    complete_apps = ['csp']
|
992,764 | 404465f6769490f6ad55b73432302ea94b6ba793 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-05-10 19:31
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration for the cricket_series app.

    Creates Match, Player_Matchs_History and State models, links a match
    to two competing teams plus a winner, attaches per-player match
    statistics, and adds logo/points/state fields to Team.
    """

    dependencies = [
        ('cricket_series', '0004_player_history_team'),
    ]

    operations = [
        migrations.CreateModel(
            name='Match',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        migrations.CreateModel(
            name='Player_Matchs_History',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('matches', models.IntegerField(blank=True)),
                ('run', models.IntegerField(blank=True)),
                ('highest', models.IntegerField(blank=True)),
                ('scores', models.IntegerField(blank=True)),
                ('fifties', models.IntegerField(blank=True)),
                ('hundreds', models.IntegerField(blank=True)),
            ],
        ),
        migrations.CreateModel(
            name='State',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('state_name', models.CharField(max_length=100)),
            ],
        ),
        migrations.AddField(
            model_name='team',
            name='logoUri',
            field=models.URLField(default=None, max_length=500),
        ),
        migrations.AddField(
            model_name='team',
            name='points',
            field=models.IntegerField(blank=True, default=None),
        ),
        migrations.AlterField(
            model_name='player_history',
            name='jersey_number',
            field=models.IntegerField(blank=True),
        ),
        migrations.AddField(
            model_name='player_matchs_history',
            name='player',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cricket_series.Player_History'),
        ),
        # Three FKs from Match to Team need distinct related_names.
        migrations.AddField(
            model_name='match',
            name='team_one',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='team_one', to='cricket_series.Team'),
        ),
        migrations.AddField(
            model_name='match',
            name='team_two',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='team_two', to='cricket_series.Team'),
        ),
        migrations.AddField(
            model_name='match',
            name='winner_team',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='winner_team', to='cricket_series.Team'),
        ),
        migrations.AddField(
            model_name='team',
            name='club_state',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='cricket_series.State'),
        ),
    ]
|
992,765 | 60e918b99da8593b4d951fc992d77d0e1540c23b | #####################################
# File Name : PiDrone.py
# Author : NayLA
# Date : 27/03/2017
#####################################
import sys
import argparse , math
import time
import datetime
from TimeStampLib import *
from dronekit import connect , Command , LocationGlobal , VehicleMode
from pymavlink import mavutil
from UltrasonicSensor import *
import Adafruit_DHT
class DroneData:
    """Telemetry snapshot for the Pi drone.

    Holds the latest climate readings (AM2302 humidity/temperature sensor),
    GPS fields and four ultrasonic obstacle distances.  The Pixhawk-backed
    getters (altitude/latitude/longitude) are placeholders that still return
    their initial values.

    NOTE(review): the original declared ``global temperature`` (etc.) at
    class scope; those statements had no effect, since nothing in the class
    body assigned the names, and have been removed.
    """

    def __init__(self):
        # NOTE(review): the result is discarded; kept only in case
        # TimeStamp() has side effects -- confirm and remove if not.
        timestamp = TimeStamp()
        #HudTempSensor= Adafruit_DHT.AM2302
        self.temperature = 0
        self.humidity = 0
        self.altitude = 0
        self.latitude = 0
        self.longitude = 0
        self.sonar1_objdis = 0
        self.sonar2_objdis = 0
        self.sonar3_objdis = 0
        self.sonar4_objdis = 0
        self.HudTempSensor = Adafruit_DHT.AM2302

    def _read_climate(self):
        # Shared by getTemperature/getHumidity (the original duplicated this
        # call).  read_retry retries up to 15 times, waiting 2 seconds
        # between attempts; 25 is the GPIO pin the AM2302 is wired to.
        self.humidity, self.temperature = Adafruit_DHT.read_retry(self.HudTempSensor, 25)

    def getTemperature(self):
        """Read the sensor and return the latest temperature."""
        self._read_climate()
        return self.temperature

    def getHumidity(self):
        """Read the sensor and return the latest relative humidity."""
        self._read_climate()
        return self.humidity

    def getAltitude(self):
        ##Add code here for getting altitude data from Pixhawk
        return self.altitude

    def getLatitude(self):
        ##Add code here for getting latitude data from Pixhawk
        return self.latitude

    def getLongitude(self):
        ##Add code here for getting longitude data from Pixhawk
        return self.longitude

    def getLocation(self):
        """Return the position as a single-element list [(lat, lon)]."""
        return [(self.latitude, self.longitude)]

    # Ultrasonic obstacle distances, one accessor pair per sensor.
    def getSonar1_ObsDistance(self):
        return self.sonar1_objdis

    def setSonar1_ObsDistance(self, objdis):
        self.sonar1_objdis = objdis

    def getSonar2_ObsDistance(self):
        return self.sonar2_objdis

    def setSonar2_ObsDistance(self, objdis):
        self.sonar2_objdis = objdis

    def getSonar3_ObsDistance(self):
        return self.sonar3_objdis

    def setSonar3_ObsDistance(self, objdis):
        self.sonar3_objdis = objdis

    def getSonar4_ObsDistance(self):
        return self.sonar4_objdis

    def setSonar4_ObsDistance(self, objdis):
        self.sonar4_objdis = objdis
|
992,766 | 36831b60b14f26dff41d01e0c98628beabbf0d8f | class Solution:
def evalRPN(self, tokens: List[str]) -> int:
stack = []
for i in tokens:
if not stack:
stack.append(i)
continue
if i.lstrip("-").isdigit():
stack.append(i)
else:
b = int(stack.pop())
a = int(stack.pop())
if i == "+":
stack.append(str(a+b))
if i == "-":
stack.append(str(a-b))
if i == "*":
stack.append(str(a*b))
if i == "/":
stack.append(str(int(float(a)/b)))
return int(stack.pop()) |
992,767 | 67aa999e1e3ff0559384d817b6df91bf08ed9a09 | # you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
def solution(A):
    """Return the longest common prefix of the strings in A ('' if A is empty).

    Bug fix: the original fell off the end of its loop (implicitly
    returning None) whenever every string shared a prefix as long as the
    longest string -- e.g. solution(['ab', 'ab']) returned None instead of
    'ab'.  The prefix is now always returned as a string.
    """
    if not A:
        return ''
    # The common prefix can be at most the shortest string.
    shortest = min(A, key=len)
    for idx, ch in enumerate(shortest):
        if any(s[idx] != ch for s in A):
            return shortest[:idx]
    return shortest
|
992,768 | a9ffb70688c05929b5c087c0174d5f4f8c991955 | #!/usr/miniconda/bin python3.8
# -*- encoding: utf-8 -*-
'''
@File : BT_system.py
@Time : 2021/10/14 23:18:32
@Author : Jinxing
@Version : 1.0
'''
from typing import Dict
import pandas as pd
import numpy as np
import torch as t
from torch import nn
from TechfinTorchAPI.dataloader.pandas_dataloader import PandasDataset
import sys,copy
sys.path.append('/root/persistence_data')
from logger import get_logger
class BtBase:
    """Base class for back-test evaluation.

    Holds a dataset and a registry of metric callables ("matrics" -- the
    original, interface-level spelling of "metrics" is kept since callers
    and subclasses use it), and applies every metric to the DataFrame
    produced by ``_test``.
    """
    # Mapping of metric name -> callable taking the _test() result DataFrame.
    matrics: Dict

    def __init__(self,
                 dataset: PandasDataset) -> None:
        '''
        Customizable.
        '''
        self.dataset = dataset

    def register(self,
                 matrics: Dict):
        """Register the metric callables evaluated in __call__."""
        self.matrics = matrics

    def __call__(self,
                 model: nn.Module) -> Dict:
        """Run the model through _test and evaluate every registered metric.

        A metric that raises is skipped (and logged) so that one broken
        metric does not abort the whole evaluation.
        """
        logger = get_logger(__name__)
        result_data = self._test(model)
        res = {}
        for key, func in self.matrics.items():
            try:
                res[key] = func(result_data)
            except Exception:
                # Bug fix: was a bare ``except:`` that also swallowed
                # KeyboardInterrupt/SystemExit and discarded the traceback;
                # now only Exception is caught and the traceback is logged.
                logger.exception('An error happens in the %s evaluation', key)
        return res

    def _test(self,
              model: nn.Module) -> pd.DataFrame:
        '''
        Data loading, prediction and assembly.
        Expected to return a DataFrame for consumption by
        TechfinTorchAPI.matrics.back_testing; subclasses must override
        (this base implementation returns None).
        '''
        pass
class BTSystem(BtBase):
    # For cross-sectional models (anything that does not need a rolling window).
    def __init__(self,
                 dataset: PandasDataset) -> None:
        # NOTE(review): identical to BtBase.__init__; this override could be
        # removed without changing behaviour.
        self.dataset = dataset

    def _test(self,
              model: nn.Module) -> pd.DataFrame:
        """Run *model* over every batch of the dataset and join predictions.

        Iterates self.dataset manually.  For each batch only the first
        yielded element is taken as x (avoids errors for multi-task
        datasets), the model is evaluated with gradients disabled, and the
        predictions are written into the rows addressed by
        self.dataset.item / self.dataset.index -- assumed to describe the
        batch just yielded; TODO confirm against PandasDataset.
        Returns a deep copy of the underlying DataFrame with a
        'prediction' column joined on.
        """
        data = copy.deepcopy(self.dataset.data)
        prediction = pd.DataFrame(np.zeros((len(data), 1)), columns=['prediction'], index=data.index)
        iterator = iter(self.dataset)
        while True:
            try:
                # The first element is always x by convention; taking only x
                # avoids errors in the multi-task case.
                x = next(iterator)[0]
                with t.no_grad():
                    pred = model(x).reshape(-1, 1).cpu().numpy()
                # Recover the row index of this batch.
                item = self.dataset.item
                index = self.dataset.index[item]
                prediction.loc[index] = pred
            except StopIteration:
                break
        return data.join(prediction)
|
992,769 | e1a120e6743b5d9d42853f40b09aae4615e758c2 | T = input()
# Code Jam-style driver (Python 2: print statement, raw_input).
# T test cases were read on the preceding line (T = input()).
for t in range(T):
    N = input()
    # Each input line (d, h, m) expands into h entries: offset d with rates
    # m, m+1, ..., m+h-1.
    # NOTE(review): the exact problem semantics are not recoverable from
    # this excerpt; the 360/720 constants suggest angles over two turns.
    D = []
    M = []
    for i in range(N):
        d,h,m = map(int, raw_input().split())
        for j in range(h):
            D += [d]
            M += [m+j]
    if len(D)==1:
        # A single item: trivially feasible.
        ans = 0
    elif len(D)==2:
        # Time for each item to cover the remaining 360 - D degrees at
        # rate M (integer arithmetic under Python 2 semantics).
        t1 = M[0]*(360-D[0])
        t2 = M[1]*(360-D[1])
        if t1<t2:
            if t2/M[0] + D[0]>=720:
                ans = 1
            else:
                ans = 0
        else:
            if t1/M[1] + D[1]>=720:
                ans = 1
            else:
                ans = 0
    else:
        # Three or more items: the original always answers 0 here.
        ans = 0
    print "Case #%s: %s"%(t+1, ans)
|
992,770 | d390f8572e87cc569d8f3c9387d629c36f317969 | import sys
# Fast line reader (shadows the builtin input deliberately).
input = sys.stdin.readline

# Read a tree on N vertices (1-based edges, converted to 0-based) and N
# costs; greedily assign costs to vertices and report the sum over all
# non-root vertices plus the assignment.
# NOTE(review): the exact objective of the underlying task is not
# recoverable from this excerpt.
N = int(input())
graph = [[] for _ in range(N)]
for _ in range(N - 1):
    a, b = map(int, input().split())
    a -= 1
    b -= 1
    graph[a].append(b)
    graph[b].append(a)
c = list(map(int, input().split()))
# Ascending sort, so c.pop() always yields the largest remaining cost.
c.sort(reverse = False)
# Pick a vertex of maximum degree as the root.
tmp = len(graph[0])
start = 0
for index, x in enumerate(graph):
    if len(x) > tmp:
        tmp = len(x)
        start = index
ans = 0
ans_lst = [-1] * N  # -1 marks "not yet assigned"
# The root takes the largest cost (not counted in ans); the remaining
# costs are handed out in DFS order, accumulating their sum.
ans_lst[start] = c.pop()
stack = [start]
while stack:
    x = stack.pop()
    for next_ in graph[x]:
        if ans_lst[next_] == -1:
            tmp = c.pop()
            ans_lst[next_] = tmp
            stack.append(next_)
            ans += tmp
print (ans)
print (*ans_lst, sep = ' ')
|
992,771 | 0c31228fbec7cc62d605ef227c5f0f17cc5e6568 | import gQuery
import numpy as np
from MCUtils import * # FIXME: dangerous import
from astropy import wcs as pywcs
from astropy.io import fits as pyfits
import scipy.misc
import scipy.ndimage
from FileUtils import flat_filename
import gnomonic
import dbasetools as dbt
import galextools as gxt
def define_wcs(skypos, skyrange, width=False, height=False, verbose=0,
               pixsz=0.000416666666666667):
    """Construct a two-axis gnomonic (TAN) world coordinate system centered
    on *skypos*, sized from *skyrange*, at *pixsz* degrees per pixel.

    Note: width and height are accepted but currently unused.
    """
    if verbose:
        print_inline('Defining World Coordinate System (WCS).')
    image_size = gxt.deg2pix(skypos, skyrange)
    coord_sys = pywcs.WCS(naxis=2)  # NAXIS = 2
    coord_sys.wcs.ctype = ['RA---TAN', 'DEC--TAN']
    coord_sys.wcs.cdelt = np.array([-pixsz, pixsz])
    coord_sys.wcs.crpix = [(image_size[1] / 2.) + 0.5, (image_size[0] / 2.) + 0.5]
    coord_sys.wcs.crval = skypos
    return coord_sys
def movie_tbl(band, tranges, verbose=0, framesz=0, retries=20):
    """Initialize a FITS table (tstart, tstop, exptime) describing movie frames.

    :param band: band identifier forwarded to dbt.compute_exptime.
    :param tranges: list of [t0, t1] time ranges.
    :param framesz: frame length in seconds; 0 means one frame per trange.
    :param retries: query retry count forwarded to dbt.compute_exptime.
    :returns: a pyfits binary table HDU.
    """
    if verbose:
        print_inline('Populating exposure time table.')
    tstarts, tstops, exptimes = [], [], []
    for trange in tranges:
        stepsz = framesz if framesz else trange[1] - trange[0]
        for t0 in np.arange(trange[0], trange[1], stepsz):
            # BUGFIX: the final frame must be clamped to the end of the time
            # range. The old test (i == steps) could never be true because
            # enumerate() stops at steps - 1, so the last frame's tstop
            # could overrun trange[1].
            t1 = min(t0 + stepsz, trange[1])
            tstarts.append(t0)
            tstops.append(t1)
            exptimes.append(dbt.compute_exptime(band, [t0, t1],
                                                verbose=verbose,
                                                retries=retries))
    col1 = pyfits.Column(name='tstart', format='E', array=np.array(tstarts))
    col2 = pyfits.Column(name='tstop', format='E', array=np.array(tstops))
    col3 = pyfits.Column(name='exptime', format='E', array=np.array(exptimes))
    cols = pyfits.ColDefs([col1, col2, col3])
    tbl = pyfits.new_table(cols)
    return tbl
def fits_header(band,skypos,tranges,skyrange,width=False,height=False,
                verbose=0,tscale=1000.,hdu=False,retries=20):
    """Populate a FITS header with WCS keywords, the band, and exposure times.

    If *hdu* is falsy a fresh PrimaryHDU is created; otherwise the supplied
    HDU's header is populated in place. Returns the HDU.
    Note: width/height are forwarded to define_wcs, which ignores them.
    """
    if verbose:
        print_inline('Populating FITS header.')
    hdu = hdu if hdu else pyfits.PrimaryHDU()
    wcs = define_wcs(skypos,skyrange,width=width,height=height)
    hdu.header['CDELT1'],hdu.header['CDELT2'] = wcs.wcs.cdelt
    hdu.header['CTYPE1'],hdu.header['CTYPE2'] = wcs.wcs.ctype
    hdu.header['CRPIX1'],hdu.header['CRPIX2'] = wcs.wcs.crpix
    hdu.header['CRVAL1'],hdu.header['CRVAL2'] = wcs.wcs.crval
    #hdu.header['RA_CENT'],hdu.header['DEC_CENT'] = wcs.wcs.crval # Dupe.
    hdu.header['EQUINOX'],hdu.header['EPOCH'] = 2000., 2000.
    hdu.header['BAND'] = 1 if band=='NUV' else 2
    # Do we want to set the following?
    #hdu.header['OW'] = 1
    #hdu.header['DIRECT'] = 1
    #hdu.header['GRISM'] = 0
    #hdu.header['OPAQUE'] = 0
    # Put the total exposure time into the primary header
    hdu.header['EXPTIME'] = 0.
    for trange in tranges:
        hdu.header['EXPTIME'] += dbt.compute_exptime(band,trange,
                                                     verbose=verbose,retries=retries)
    if len(tranges)==1:
        # Put the time range into the primary header for a single frame image
        hdu.header['EXPSTART'],hdu.header['EXPEND'] = tranges[0]
        # These are the proper keywords for this:
        hdu.header['TIME-OBS'],hdu.header['TIME-END'] = tranges[0]
    return hdu
def countmap(band,skypos,tranges,skyrange,width=False,height=False,
             verbose=0,tscale=1000.,memlight=False,hdu=False,retries=20):
    """Create a count (cnt) map: query photon events per time chunk, project
    them through the WCS, and accumulate a 2D histogram image.

    Note: width, height, and hdu are accepted but unused here.
    """
    imsz = gxt.deg2pix(skypos,skyrange)
    count = np.zeros(imsz)
    for trange in tranges:
        # If memlight is requested, break the integration into
        # smaller chunks.
        step = memlight if memlight else trange[1]-trange[0]
        for i in np.arange(trange[0],trange[1],step):
            t0,t1=i,i+step
            if verbose:
                print_inline('Coadding '+str(t0)+' to '+str(t1))
            events = gQuery.getArray(gQuery.rect(band,skypos[0],skypos[1],t0,t1,
                                                 skyrange[0],skyrange[1]),
                                     verbose=verbose,retries=retries)
            # Check that there is actually data here.
            if not events:
                if verbose>1:
                    print "No data in "+str([t0,t1])
                continue
            # Column 0 is the (tscale-scaled) event time; remaining columns
            # are sky coordinates.
            times = np.array(events,dtype='float64')[:,0 ]/tscale
            coo = np.array(events,dtype='float64')[:,1:]
            # If there's no data, return a blank image.
            if len(coo)==0:
                if verbose:
                    print 'No data in this frame: '+str([t0,t1])
                continue
            # Define World Coordinate System (WCS)
            wcs = define_wcs(skypos,skyrange,width=False,height=False)
            # Map the sky coordinates onto the focal plane
            foc = wcs.sip_pix2foc(wcs.wcs_world2pix(coo,1),1)
            # Bin the events into actual image pixels
            H,xedges,yedges=np.histogram2d(foc[:,1]-0.5,foc[:,0]-0.5,
                                           bins=imsz,range=([ [0,imsz[0]],[0,imsz[1]] ]))
            count += H
    return count
def write_jpeg(filename, band, skypos, tranges, skyrange, width=False,
               height=False, stepsz=1., clobber=False, verbose=0,
               tscale=1000., retries=20):
    """Write a 'preview' JPEG of the integrated count map to *filename*.

    Note: stepsz and clobber are accepted for interface compatibility but
    are not used by this function.
    """
    img = countmap(band, skypos, tranges, skyrange, width=width,
                   height=height, verbose=verbose, tscale=tscale,
                   retries=retries)
    scipy.misc.imsave(filename, img)
    return
def rrhr(band,skypos,tranges,skyrange,width=False,height=False,stepsz=1.,
         verbose=0,calpath='../cal/',tscale=1000.,response=True,hdu=False,
         retries=20):
    """Generate a high resolution relative response (rrhr) map.

    Upsamples the flat field 4x, then for each second of aspect data stamps
    a rotated/shifted cutout of it into the output image, weighted by the
    exposure time and flat scale factor.
    Note: width, height, stepsz, response, and hdu are accepted but unused.
    """
    imsz = gxt.deg2pix(skypos,skyrange)
    # TODO the if width / height
    flat = get_fits_data(flat_filename(band,calpath),verbose=verbose)
    flatinfo = get_fits_header(flat_filename(band,calpath))
    npixx,npixy = flat.shape
    fltsz = flat.shape
    pixsz = flatinfo['CDELT2']
    detsize = 1.25
    # Rotate the flat into the correct orientation to start.
    flat = np.flipud(np.rot90(flat))
    # NOTE: This upsample interpolation is done _last_ in the canonical
    # pipeline as part of the poissonbg.c routine.
    # The interpolation function is "congrid" in the same file.
    # TODO: Should this be first order interpolation? (i.e. bilinear)
    hrflat = scipy.ndimage.interpolation.zoom(flat,4.,order=0,prefilter=False)
    # Central cutout of (a zero array shaped like) the upsampled flat,
    # matching the requested output image size.
    img = np.zeros(hrflat.shape)[
        hrflat.shape[0]/2.-imsz[0]/2.:hrflat.shape[0]/2.+imsz[0]/2.,
        hrflat.shape[1]/2.-imsz[1]/2.:hrflat.shape[1]/2+imsz[1]/2.]
    for trange in tranges:
        t0,t1=trange
        entries = gQuery.getArray(gQuery.aspect(t0,t1),retries=retries)
        n = len(entries)
        asptime = np.float64(np.array(entries)[:,2])/tscale
        aspra = np.float32(np.array(entries)[:,3])
        aspdec = np.float32(np.array(entries)[:,4])
        asptwist= np.float32(np.array(entries)[:,5])
        aspflags= np.float32(np.array(entries)[:,6])
        # NOTE(review): asptwist is immediately overwritten here with
        # column 9, discarding the column 5 value read above — confirm
        # which column is the intended twist angle.
        asptwist= np.float32(np.array(entries)[:,9])
        aspra0 = np.zeros(n)+skypos[0]
        aspdec0 = np.zeros(n)+skypos[1]
        xi_vec, eta_vec = gnomonic.gnomfwd_simple(
            aspra,aspdec,aspra0,aspdec0,-asptwist,1.0/36000.,0.)
        # Convert gnomonic projection offsets into (4x upsampled) flat
        # pixel coordinates.
        col = 4.*( ((( xi_vec/36000.)/(detsize/2.)*(detsize/(fltsz[0]*pixsz)) + 1.)/2. * fltsz[0]) - (fltsz[0]/2.) )
        row = 4.*( (((eta_vec/36000.)/(detsize/2.)*(detsize/(fltsz[1]*pixsz)) + 1.)/2. * fltsz[1]) - (fltsz[1]/2.) )
        vectors = rotvec(np.array([col,row]),-asptwist)
        for i in range(n):
            if verbose>1:
                print_inline('Stamping '+str(asptime[i]))
            # FIXME: Clean this mess up a little just for clarity.
            img += scipy.ndimage.interpolation.shift(scipy.ndimage.interpolation.rotate(hrflat,-asptwist[i],reshape=False,order=0,prefilter=False),[vectors[1,i],vectors[0,i]],order=0,prefilter=False)[hrflat.shape[0]/2.-imsz[0]/2.:hrflat.shape[0]/2.+imsz[0]/2.,hrflat.shape[1]/2.-imsz[1]/2.:hrflat.shape[1]/2+imsz[1]/2.]*dbt.compute_exptime(band,[asptime[i],asptime[i]+1],verbose=verbose,retries=retries)*gxt.compute_flat_scale(asptime[i]+0.5,band,verbose=0)
    return img
# TODO: tranges?
# TODO: Consolidate duplicate "reference array" code from aperture_response
# FIXME: DEPRECATED!!*!
#def backgroundmap(band,skypos,trange,skyrange,width=False,height=False,
# tscale=1000,memlight=False,verbose=0,hdu=False,NoData=-999,
# detsize=1.25,pixsz=0.000416666666666667,
# maglimit=23.,retries=20):
# """Generate a background (bg) map by masking out MCAT sources."""
# imsz = gxt.deg2pix(skypos,skyrange)
#
# if verbose:
# print 'Integrating count map.'
# img = countmap(band,skypos,[trange],skyrange,width=width,height=height,verbose=verbose,memlight=memlight,retries=retries)
#
# # Build a reference array
# xind = np.array([range(int(imsz[1]))]*int(imsz[0]))-(imsz[0]/2.)+0.5
# yind = np.rot90(np.array([range(int(imsz[0]))]*int(imsz[1]))-(imsz[1]/2.))+0.5
# # This returns too many sources so
# # TODO: add some kind of crossmatch to filter duplicate sources
# # or just use GCAT
# sources = gQuery.getArray(gQuery.mcat_sources(band,skypos[0],skypos[1],skrange[0]/2. if skyrange[0]>skyrange[1] else skyrange[1]/2.,maglimit=maglimit),retries=retries)
#
# if verbose:
# print 'Masking '+str(len(sources))+' sources. '
#
# source_ra = np.float32(np.array(sources)[:,0])
# source_dec = np.float32(np.array(sources)[:,1])
# source_fwhm = np.float32(np.array(sources)[:,7:9])
# ra0 = np.zeros(len(sources))+skypos[0]
# dec0 = np.zeros(len(sources))+skypos[1]
#
# xi_vec, eta_vec = gnomfwd_simple(ra0,dec0,source_ra,source_dec,np.zeros(len(sources)),1.0/36000.,0.)
# col = (((( xi_vec/36000.)/(detsize/2.)*(detsize/(3840.*pixsz))+1.)/2.*3840.)-(3840./2.)+0.5)
# row = ((((eta_vec/36000.)/(detsize/2.)*(detsize/(3840.*pixsz))+1.)/2.*3840.)-(3840./2.)+0.5)
#
# vectors = rotvec(np.array([col,row]),np.zeros(len(sources)))
#
# for i in range(len(sources)):
# distarray = np.sqrt(((-vectors[0,i]-xind)**2.)+((vectors[1,i]-yind)**2.))
# ix = np.where(distarray<=(source_fwhm[i,0] if source_fwhm[i,0]>source_fwhm[i,1] else source_fwhm[i,1])/pixsz)
# img[ix] = NoData
#
# return img
def movie(band,skypos,tranges,skyrange,framesz=0,width=False,height=False,
verbose=0,tscale=1000.,memlight=False,coadd=False,
response=False,calpath='../cal/',hdu=False,retries=20):
"""Generate a movie (mov)."""
# Not defining stepsz effectively creates a count map.
mv = []
rr = []
if coadd:
if verbose>2:
print 'Coadding across '+str(tranges)
mv.append(countmap(band,skypos,tranges,skyrange,width=width,
height=height,verbose=verbose,tscale=tscale,memlight=memlight,
hdu=hdu,retries=retries))
rr.append(rrhr(band,skypos,tranges,skyrange,response=response,width=width,height=height,stepsz=1.,verbose=verbose,calpath=calpath,tscale=tscale,hdu=hdu,retries=retries)) if response else rr.append(np.ones(np.shape(mv)[1:]))
else:
for trange in tranges:
stepsz = framesz if framesz else trange[1]-trange[0]
steps = np.ceil((trange[1]-trange[0])/stepsz)
for i,t0 in enumerate(np.arange(trange[0],trange[1],stepsz)):
if verbose>1:
print_inline('Movie frame '+str(i+1)+' of '+str(int(steps)))
t1 = trange[1] if i==steps else t0+stepsz
mv.append(countmap(band,skypos,[[t0,t1]],skyrange,width=width,height=height,verbose=verbose,tscale=tscale,memlight=memlight,hdu=hdu,retries=retries))
# FIXME: This should not create an rr unless it's requested...
rr.append(rrhr(band,skypos,[[t0,t1]],skyrange,response=response,width=width,height=height,stepsz=1.,verbose=verbose,calpath=calpath,tscale=tscale,retries=retries)) if response else rr.append(np.ones(np.shape(mv)[1:]))
return np.array(mv),np.array(rr)
def create_images(band, skypos, tranges, skyrange, framesz=0, width=False,
                  height=False, verbose=0, tscale=1000., memlight=False,
                  coadd=False, response=False, calpath='../cal/', hdu=False,
                  retries=20):
    """Build count, response, and intensity image stacks.

    Intensity is count / response per frame, with non-finite pixels
    (e.g. division by zero) replaced by zero.
    """
    count, rr = movie(band, skypos, tranges, skyrange, framesz=framesz,
                      width=width, height=height, verbose=verbose,
                      tscale=tscale, memlight=memlight, coadd=coadd,
                      response=response, calpath=calpath, hdu=hdu,
                      retries=retries)
    intensity = []
    for cnt_frame, rr_frame in zip(count, rr):
        ratio = cnt_frame / rr_frame
        finite = np.where(np.isfinite(ratio))
        cleaned = np.zeros(ratio.shape)
        cleaned[finite] = ratio[finite]
        intensity.append(cleaned.tolist())
    return np.array(count), np.array(rr), np.array(intensity)
def write_images(band,skypos,tranges,skyrange,write_cnt=False,write_int=False,write_rr=False,framesz=0,width=False,height=False,verbose=0,tscale=1000.,memlight=False,coadd=False,response=False,calpath='../cal/',clobber=False,retries=20):
"""Generate a write various maps to files."""
# No files were requested, so don't bother doing anything.
if not (write_cnt or write_int or write_rr):
return
count,rr,intensity=create_images(band,skypos,tranges,skyrange,framesz=framesz,width=width,height=height,verbose=verbose,tscale=tscale,memlight=memlight,coadd=coadd,response=response,calpath=calpath,retries=retries)
# Add a conditional so that this is only created for multi-frame images
tbl = movie_tbl(band,tranges,framesz=framesz,verbose=verbose,retries=retries)
if write_cnt:
hdu = pyfits.PrimaryHDU(count)
hdu = fits_header(band,skypos,tranges,skyrange,width=width,height=height,verbose=verbose,tscale=tscale,hdu=hdu,retries=retries)
hdulist = pyfits.HDUList([hdu,tbl])
if verbose:
print 'Writing count image to '+str(write_cnt)
hdulist.writeto(write_cnt,clobber=clobber)
if write_rr:
hdu = pyfits.PrimaryHDU(rr)
hdu = fits_header(band,skypos,tranges,skyrange,width=width,height=height,verbose=verbose,tscale=tscale,hdu=hdu,retries=retries)
hdulist = pyfits.HDUList([hdu,tbl])
if verbose:
print 'Writing response image to '+str(write_rr)
hdulist.writeto(write_rr,clobber=clobber)
if write_int:
hdu = pyfits.PrimaryHDU(intensity)
hdu = fits_header(band,skypos,tranges,skyrange,width=width,height=height,verbose=verbose,tscale=tscale,hdu=hdu,retries=retries)
hdulist = pyfits.HDUList([hdu,tbl])
if verbose:
print 'Writing intensity image to '+str(write_int)
hdulist.writeto(write_int,clobber=clobber)
return
|
992,772 | e5a6a0d7de48a4f17d4499b0393aa4283ef21c06 | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 10 20:51:08 2020
@author: SivaniDwarampudi
"""
from random import randint
import time
"""
This is use for create 30 file one by one in each 5 seconds interval.
These files will store content dynamically from 'lorem.txt' using below code
"""
def main():
    """Create 30 files, one every 5 seconds, each containing a random tail
    of the lines read from 'data.txt'.

    Raises IOError if 'data.txt' does not exist.
    """
    a = 1
    with open('data.txt', 'r') as file:  # source text to sample from
        lines = file.readlines()
    while a <= 30:
        totalline = len(lines)
        # Pick a random starting line, leaving at least 10 lines to write.
        linenumber = randint(0, totalline - 10)
        # BUGFIX: the old name 'destination.txt'.format(a) had no '{}'
        # placeholder, so every iteration overwrote the same file while
        # the status message advertised log{}.txt. Use the advertised name.
        with open('log{}.txt'.format(a), 'w') as writefile:
            writefile.write(' '.join(line for line in lines[linenumber:totalline]))
        print('creating file log{}.txt'.format(a))
        a += 1
        time.sleep(5)
# Run only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
|
992,773 | c66c60fc441e3b5c1eff893360ba405f5ce1c348 | from django.conf import LazySettings, global_settings
from django.contrib.admin.helpers import Fieldline, AdminField, mark_safe
from django.contrib.admin.views.main import ChangeList
from django.contrib.auth import models as auth_models
from django.core.urlresolvers import RegexURLResolver, NoReverseMatch
from django.db.models.fields import AutoField
from django.db.models.query import QuerySet
from django.forms import BaseForm
from django.forms.models import BaseModelForm, InlineForeignKeyField, \
construct_instance, NON_FIELD_ERRORS
from django.template.response import TemplateResponse
from django.test.client import ClientHandler, RequestFactory, MULTIPART_CONTENT, \
urlparse, FakePayload
from django.test.utils import ContextList
from monkeypatch import before, after, patch
from pprint import PrettyPrinter
import django.template.loader
# import os
# os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
@patch(ClientHandler, 'get_response')
def get_response_with_exception_passthru(original_function, self, request):
    """
    Returns an HttpResponse object for the given HttpRequest. Unlike
    the original get_response, this does not catch exceptions, which
    allows you to see the full stack trace in your tests instead of
    a 500 error page.
    """
    # print("get_response(%s)" % request)
    from django.core import exceptions, urlresolvers
    from django.conf import settings
    # Setup default url resolver for this thread, this code is outside
    # the try/except so we don't get a spurious "unbound local
    # variable" exception in the event an exception is raised before
    # resolver is set
    urlconf = settings.ROOT_URLCONF
    urlresolvers.set_urlconf(urlconf)
    resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
    response = None
    # Apply request middleware
    for middleware_method in self._request_middleware:
        response = middleware_method(request)
        if response:
            break
    if response is None:
        if hasattr(request, "urlconf"):
            # Reset url resolver with a custom urlconf.
            urlconf = request.urlconf
            urlresolvers.set_urlconf(urlconf)
            resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
        callback, callback_args, callback_kwargs = resolver.resolve(
            request.path_info)
        # Apply view middleware
        for middleware_method in self._view_middleware:
            response = middleware_method(request, callback, callback_args, callback_kwargs)
            if response:
                break
    if response is None:
        try:
            response = callback(request, *callback_args, **callback_kwargs)
        except Exception, e:  # Python 2 except syntax
            # If the view raised an exception, run it through exception
            # middleware, and if the exception middleware returns a
            # response, use that. Otherwise, reraise the exception.
            for middleware_method in self._exception_middleware:
                response = middleware_method(request, e)
                if response:
                    break
            # Unlike stock Django, re-raise so the test shows the real
            # stack trace — this is the whole point of the patch.
            if response is None:
                raise
    # Complain if the view returned None (a common error).
    if response is None:
        try:
            view_name = callback.func_name # If it's a function
        except AttributeError:
            view_name = callback.__class__.__name__ + '.__call__' # If it's a class
        raise ValueError("The view %s.%s didn't return an HttpResponse object." % (callback.__module__, view_name))
    # If the response supports deferred rendering, apply template
    # response middleware and the render the response
    if hasattr(response, 'render') and callable(response.render):
        for middleware_method in self._template_response_middleware:
            response = middleware_method(request, response)
        """
        try:
            response.render()
        except Exception as e:
            if isinstance(response, TemplateResponse):
                raise Exception("Failed to render template: %s: %s" %
                    (response.template_name, e))
        """
    # Reset URLconf for this thread on the way out for complete
    # isolation of request.urlconf
    urlresolvers.set_urlconf(None)
    # Apply response middleware, regardless of the response
    for middleware_method in self._response_middleware:
        response = middleware_method(request, response)
    response = self.apply_response_fixes(request, response)
    return response
def dont_apply_response_fixes(original_function, self, request, response):
    """
    It doesn't make any sense to rewrite location headers in tests,
    because the test client doesn't know or care what hostname is
    used in a request, so it could change in future without breaking
    most people's tests, EXCEPT tests for redirect URLs!
    """
    # Deliberately skip the original fix-ups and hand the response back
    # untouched.
    return response
# patch(ClientHandler, 'apply_response_fixes', dont_apply_response_fixes)
@patch(QuerySet, 'get')
def queryset_get_with_exception_detail(original_function, self, *args, **kwargs):
    """
    Performs the query and returns a single object matching the given
    keyword arguments. This version provides extra details about the query
    if it fails to find any results.
    """
    try:
        return original_function(self, *args, **kwargs)
    except self.model.DoesNotExist as e:
        # Re-raise with the lookup arguments appended so the failing query
        # is visible in test output.
        raise self.model.DoesNotExist("%s (query was: %s, %s)" %
            (e, args, kwargs))
@patch(RequestFactory, 'post')
def post_with_string_data_support(original_function, self, path, data={},
        content_type=MULTIPART_CONTENT, **extra):
    """If the data doesn't have an items() method, then it's probably already
    been converted to a string (encoded), and if we try again we'll call
    the nonexistent items() method and fail, so just don't encode it at
    all."""
    # NOTE(review): data={} is a mutable default; it is never mutated here,
    # but a None sentinel would be safer if this function is ever edited.
    if content_type == MULTIPART_CONTENT and getattr(data, 'items', None) is None:
        parsed = urlparse(path)
        # Build the WSGI environ by hand, passing the raw body straight
        # through as the payload.
        r = {
            'CONTENT_LENGTH': len(data),
            'CONTENT_TYPE': content_type,
            'PATH_INFO': self._get_path(parsed),
            'QUERY_STRING': parsed[4],
            'REQUEST_METHOD': 'POST',
            'wsgi.input': FakePayload(data),
        }
        r.update(extra)
        return self.request(**r)
    else:
        return original_function(self, path, data, content_type, **extra)
@patch(BaseModelForm, '_post_clean')
def post_clean_with_simpler_validation(original_function, self):
    """
    Until https://code.djangoproject.com/ticket/16423#comment:3 is implemented,
    patch it in ourselves: do the same validation on objects when called
    from the form, as the object would do on itself.
    """
    opts = self._meta
    # Update the model instance with self.cleaned_data.
    # print "construct_instance with password = %s" % self.cleaned_data.get('password')
    self.instance = construct_instance(self, self.instance, opts.fields, opts.exclude)
    # print "constructed instance with password = %s" % self.instance.password
    exclude = self._get_validation_exclusions()
    # Foreign Keys being used to represent inline relationships
    # are excluded from basic field value validation. This is for two
    # reasons: firstly, the value may not be supplied (#12507; the
    # case of providing new values to the admin); secondly the
    # object being referred to may not yet fully exist (#12749).
    # However, these fields *must* be included in uniqueness checks,
    # so this can't be part of _get_validation_exclusions().
    for f_name, field in self.fields.items():
        if isinstance(field, InlineForeignKeyField):
            exclude.append(f_name)
    from django.core.exceptions import ValidationError
    # Clean the model instance's fields.
    try:
        self.instance.full_clean(exclude)
    except ValidationError, e:  # Python 2 except syntax
        # Map instance-level validation errors back onto the form fields.
        self._update_errors(e.update_error_dict(None))
@patch(BaseForm, '_clean_form')
def clean_form_with_field_errors(original_function, self):
    """
    Allow BaseForm._clean_form to report errors on individual fields,
    instead of the whole form, like this:
    raise ValidationError({'password': 'Incorrect password'})
    The standard version only works on the whole form:
    https://code.djangoproject.com/ticket/16423
    """
    from django.core.exceptions import ValidationError
    try:
        self.cleaned_data = self.clean()
    except ValidationError, e:  # Python 2 except syntax
        if hasattr(e, 'message_dict'):
            # Per-field errors: attach each message list to its field.
            for field, error_strings in e.message_dict.items():
                self._errors[field] = self.error_class(error_strings)
        else:
            # Whole-form errors fall back to NON_FIELD_ERRORS as before.
            self._errors[NON_FIELD_ERRORS] = self.error_class(e.messages)
# Shared pretty-printer used when dumping the URL reverse map in errors.
pp = PrettyPrinter()
@patch(RegexURLResolver, 'reverse')
def reverse_with_debugging(original_function, self, lookup_view, *args, **kwargs):
    """
    Show all the patterns in the reverse_dict if a reverse lookup fails,
    to help figure out why.
    """
    try:
        return original_function(self, lookup_view, *args, **kwargs)
    except NoReverseMatch as e:
        # if the function is a callable, it might be a wrapper
        # function which isn't identical (comparable) to another
        # wrapping of the same function
        # import pdb; pdb.set_trace()
        if lookup_view in self.reverse_dict:
            raise NoReverseMatch(str(e) + (" Possible match: %s" %
                (self.reverse_dict[lookup_view],)))
        else:
            if callable(lookup_view):
                # Show the callable keys first, then the full map.
                raise NoReverseMatch(str(e) + "\n" +
                    ("No such key %s in %s\n\n" % (lookup_view,
                    [k for k in self.reverse_dict.keys() if callable(k)])) +
                    ("Complete reverse map: %s\n" % pp.pformat(self.reverse_dict)))
            else:
                # Show the non-callable (named) keys first, then the full map.
                raise NoReverseMatch(str(e) + "\n" +
                    ("No such key %s in %s\n" % (lookup_view,
                    [k for k in self.reverse_dict.keys() if not callable(k)])) +
                    ("Complete reverse map: %s\n" % pp.pformat(self.reverse_dict)))
# Django 1.4 moved the reverse() logic into _reverse_with_prefix; apply the
# same debugging wrapper there when that attribute exists.
if '_reverse_with_prefix' in dir(RegexURLResolver):
    # support for Django 1.4:
    patch(RegexURLResolver, '_reverse_with_prefix', reverse_with_debugging)
@after(RegexURLResolver, '_populate')
def populate_reverse_dict_with_module_function_names(self):
    """Post-hook for _populate: also index each non-resolver URL pattern
    under its view's dotted 'module.name' string so it can be reversed by
    that name."""
    from django.utils.translation import get_language
    language_code = get_language()
    reverse_dict = self._reverse_dict[language_code]
    for pattern in reversed(self.url_patterns):
        if not isinstance(pattern, RegexURLResolver):
            # import pdb; pdb.set_trace()
            for reverse_item in reverse_dict.getlist(pattern.callback):
                function_name = "%s.%s" % (pattern.callback.__module__,
                    pattern.callback.__name__)
                reverse_dict.appendlist(function_name, reverse_item)
class FieldlineWithCustomReadOnlyField(object):
    """
    Drop-in replacement for django.contrib.admin.helpers.Fieldline that
    lets read-only fields in the Admin render their own custom view.
    """
    def __init__(self, form, field, readonly_fields=None, model_admin=None):
        # A django.forms.Form instance
        self.form = form
        # Normalise a single non-iterable field into a one-element list.
        self.fields = field if hasattr(field, "__iter__") else [field]
        self.model_admin = model_admin
        self.readonly_fields = () if readonly_fields is None else readonly_fields
    def __iter__(self):
        for position, name in enumerate(self.fields):
            first = (position == 0)
            if name in self.readonly_fields:
                from admin import CustomAdminReadOnlyField
                yield CustomAdminReadOnlyField(self.form, name, is_first=first,
                    model_admin=self.model_admin)
            else:
                yield AdminField(self.form, name, is_first=first)
    def errors(self):
        # Collect error lists only for the editable fields, exactly as the
        # stock Fieldline does.
        error_lists = [self.form[name].errors.as_ul()
                       for name in self.fields
                       if name not in self.readonly_fields]
        return mark_safe(u'\n'.join(error_lists).strip('\n'))
# Install the replacement Fieldline globally in the admin helpers module.
django.contrib.admin.helpers.Fieldline = FieldlineWithCustomReadOnlyField
from django.db.backends.creation import BaseDatabaseCreation
# @patch(BaseDatabaseCreation, 'destroy_test_db')
def destroy_test_db_disabled(original_function, self, test_database_name,
        verbosity):
    """
    Temporarily disable the deletion of a test database, for post-mortem
    examination.
    """
    # The test_database_name argument is deliberately ignored and re-read
    # from the connection settings.
    test_database_name = self.connection.settings_dict['NAME']
    if verbosity >= 1:
        print("Not destroying test database for alias '%s' (%s)..." % (
            self.connection.alias, test_database_name))
if not hasattr(auth_models.Group, 'natural_key'):
    """
    Allow group lookups by name in fixtures, until
    https://code.djangoproject.com/ticket/13914 lands.
    """
    from django.db import models as db_models
    class GroupManagerWithNaturalKey(db_models.Manager):
        # Manager that lets fixtures reference groups by name.
        def get_by_natural_key(self, name):
            return self.get(name=name)
    # print "auth_models.Group.objects = %s" % auth_models.Group.objects
    # Swap Group's default manager for the natural-key-aware one.
    del auth_models.Group._default_manager
    GroupManagerWithNaturalKey().contribute_to_class(auth_models.Group, 'objects')
    def group_natural_key(self):
        return (self.name,)
    auth_models.Group.natural_key = group_natural_key
def Deserializer_with_debugging(original_function, object_list, **options):
    """Verbose wrapper for the python serializer's Deserializer: traces every
    object and m2m natural-key conversion, then delegates to the original.

    NOTE(review): db_models is only bound at module level inside the Group
    natural_key fallback block above — confirm it is in scope when this
    wrapper is enabled.
    """
    from django.core.serializers.python import _get_model
    from django.db import DEFAULT_DB_ALIAS
    from django.utils.encoding import smart_unicode
    from django.conf import settings
    print "loading all: %s" % object_list
    db = options.pop('using', DEFAULT_DB_ALIAS)
    db_models.get_apps()
    for d in object_list:
        print "loading %s" % d
        # Look up the model and starting build a dict of data for it.
        Model = _get_model(d["model"])
        data = {Model._meta.pk.attname : Model._meta.pk.to_python(d["pk"])}
        m2m_data = {}
        # Handle each field
        for (field_name, field_value) in d["fields"].iteritems():
            if isinstance(field_value, str):
                field_value = smart_unicode(field_value, options.get("encoding", settings.DEFAULT_CHARSET), strings_only=True)
            field = Model._meta.get_field(field_name)
            # Handle M2M relations
            if field.rel and isinstance(field.rel, db_models.ManyToManyRel):
                print " field = %s" % field
                print " field.rel = %s" % field.rel
                print " field.rel.to = %s" % field.rel.to
                print " field.rel.to._default_manager = %s" % (
                    field.rel.to._default_manager)
                print " field.rel.to.objects = %s" % (
                    field.rel.to.objects)
                if hasattr(field.rel.to._default_manager, 'get_by_natural_key'):
                    # Natural-key aware: iterable values are natural keys,
                    # scalars are primary keys.
                    def m2m_convert(value):
                        if hasattr(value, '__iter__'):
                            return field.rel.to._default_manager.db_manager(db).get_by_natural_key(*value).pk
                        else:
                            return smart_unicode(field.rel.to._meta.pk.to_python(value))
                else:
                    m2m_convert = lambda v: smart_unicode(field.rel.to._meta.pk.to_python(v))
                m2m_data[field.name] = [m2m_convert(pk) for pk in field_value]
                for i, pk in enumerate(field_value):
                    print " %s: converted %s to %s" % (field.name,
                        pk, m2m_data[field.name][i])
    result = original_function(object_list, **options)
    print " result = %s" % result
    import traceback
    traceback.print_stack()
    return result
# patch(django.core.serializers.python, 'Deserializer',
# Deserializer_with_debugging)
def save_with_debugging(original_function, self, save_m2m=True, using=None):
    """Trace DeserializedObject.save calls before delegating (Python 2 print)."""
    print "%s.save(save_m2m=%s, using=%s)" % (self, save_m2m, using)
    original_function(self, save_m2m, using)
# patch(django.core.serializers.base.DeserializedObject, 'save',
# save_with_debugging)
def ContextList_keys(self):
    """Return the union of all keys in every dict of every subcontext."""
    return set().union(*(set(layer.keys())
                         for subcontext in self
                         for layer in subcontext))
# Attach the helper as a method so tests can list available context keys.
ContextList.keys = ContextList_keys
def configure_with_debugging(original_function, self,
        default_settings=global_settings, **options):
    """Log (with a stack trace) every call to LazySettings.configure."""
    print "LazySettings configured: %s, %s" % (default_settings, options)
    import traceback
    traceback.print_stack()
    return original_function(self, default_settings, **options)
# patch(LazySettings, 'configure', configure_with_debugging)
def setup_with_debugging(original_function, self):
    """Log (with a stack trace) every call to LazySettings._setup."""
    print "LazySettings setup:"
    import traceback
    traceback.print_stack()
    return original_function(self)
# patch(LazySettings, '_setup', setup_with_debugging)
# before(ChangeList, 'get_results')(breakpoint)
# @before(ChangeList, 'get_results')
"""
def get_results_with_debugging(self, request):
print "get_results query = %s" % object.__str__(self.query_set.query)
"""
# from django.forms.forms import BoundField
# before(BoundField, 'value')(breakpoint)
# Until a patch for 6707 lands: https://code.djangoproject.com/ticket/6707
"""
from django.db.models.fields.related import ReverseManyRelatedObjectsDescriptor
def related_objects_set_without_clear(original_function, self, instance,
new_values):
if instance is None:
raise AttributeError("Manager must be accessed via instance")
if not self.field.rel.through._meta.auto_created:
opts = self.field.rel.through._meta
raise AttributeError("Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name))
manager = self.__get__(instance)
old_values = manager.all()
values_to_remove = [v for v in old_values
if v not in new_values]
manager.remove(*values_to_remove)
patch(ReverseManyRelatedObjectsDescriptor, '__set__',
related_objects_set_without_clear)
"""
def AutoField_to_python_with_improved_debugging(original_function, self, value):
    """Delegate to AutoField.to_python; on failure raise a ValidationError
    that names the model, field, and offending value."""
    try:
        return original_function(self, value)
    except (TypeError, ValueError):
        from django.core.exceptions import ValidationError
        detail = ": %s.%s is not allowed to have value '%s'" % (
            self.model, self.name, value)
        raise ValidationError(self.error_messages['invalid'] + detail)
# print "before patch: IntranetUser.id.to_python = %s" % IntranetUser.id.to_python
# Install the debugging wrapper on all AutoFields.
patch(AutoField, 'to_python', AutoField_to_python_with_improved_debugging)
# print "after patch: IntranetUser.id.to_python = %s" % IntranetUser.id.to_python
# Show the filename that contained the template error
"""
@patch(django.template.loader, 'render_to_string')
def template_loader_render_to_string_with_debugging(original_function,
template_name, dictionary=None, context_instance=None):
try:
return original_function(template_name, dictionary, context_instance)
except Exception as e:
import sys
raise Exception, "Failed to render template: %s: %s" % \
(template_name, e), sys.exc_info()[2]
"""
# Show the filename that contained the template error
@patch(django.template.base.Template, 'render')
def template_render_with_debugging(original_function, self, context):
    """Wrap Template.render so failures report the template's name."""
    try:
        return original_function(self, context)
    except Exception as e:
        import sys
        # Python 2 three-argument raise: keep the original traceback.
        raise Exception, "Failed to render template: %s: %s" % \
            (self.name, e), sys.exc_info()[2]
@patch(django.template.defaulttags.URLNode, 'render')
def urlnode_render_with_debugging(original_function, self, context):
    """Wrap URLNode.render with clearer errors for unresolvable view names."""
    # Fail early if the view name does not resolve to a truthy value in
    # this context (commonly an unquoted view name).
    resolved_name = self.view_name.resolve(context)
    if not resolved_name:
        raise Exception(("Failed to resolve %s in context: did you " +
            "forget to enclose view name in quotes? Context is: %s") %
            (self.view_name, context))
    try:
        return original_function(self, context)
    except NoReverseMatch as e:
        raise Exception(("Failed to reverse %s in context %s (did you " +
            "forget to enclose view name in quotes?): the exception was: %s") %
            (self.view_name, context, e))
from django.db.models.fields import DateTimeField
@before(DateTimeField, 'get_prep_value')
def DateTimeField_get_prep_value_check_for_naive_datetime(self, value):
    """Pre-hook: fail fast with the model and field name when a naive
    datetime reaches a DateTimeField while USE_TZ is active."""
    value = self.to_python(value)
    from django.conf import settings
    from django.utils import timezone
    if value is not None and settings.USE_TZ and timezone.is_naive(value):
        raise ValueError(("DateTimeField %s.%s received a " +
            "naive datetime (%s) while time zone support is " +
            "active.") % (self.model.__name__, self.name, value))
from django.template.base import Variable
@patch(Variable, '__init__')
def Variable_init_with_underscores_allowed(original_function, self, var):
    """Replacement for django.template.base.Variable.__init__ that, in DEBUG
    mode only, skips the stock prohibition on variable/attribute names that
    begin with underscores (the prohibited-check is the commented-out block
    near the end). Otherwise mirrors the original parsing logic."""
    from django.conf import settings
    # for security reasons, production deployments are not allowed to
    # render variable names containing underscores anyway.
    if not settings.DEBUG:
        return original_function(self, var)
    self.var = var
    self.literal = None
    self.lookups = None
    self.translate = False
    self.message_context = None
    try:
        # First try to treat this variable as a number.
        #
        # Note that this could cause an OverflowError here that we're not
        # catching. Since this should only happen at compile time, that's
        # probably OK.
        self.literal = float(var)
        # So it's a float... is it an int? If the original value contained a
        # dot or an "e" then it was a float, not an int.
        if '.' not in var and 'e' not in var.lower():
            self.literal = int(self.literal)
        # "2." is invalid
        if var.endswith('.'):
            raise ValueError
    except ValueError:
        # A ValueError means that the variable isn't a number.
        if var.startswith('_(') and var.endswith(')'):
            # The result of the lookup should be translated at rendering
            # time.
            self.translate = True
            var = var[2:-1]
        # If it's wrapped with quotes (single or double), then
        # we're also dealing with a literal.
        try:
            from django.utils.text import unescape_string_literal
            self.literal = mark_safe(unescape_string_literal(var))
        except ValueError:
            # Otherwise we'll set self.lookups so that resolve() knows we're
            # dealing with a bonafide variable
            """
            if var.find(VARIABLE_ATTRIBUTE_SEPARATOR + '_') > -1 or var[0] == '_':
                raise TemplateSyntaxError("Variables and attributes may "
                    "not begin with underscores: '%s'" %
                    var)
            """
            from django.template.base import VARIABLE_ATTRIBUTE_SEPARATOR
            self.lookups = tuple(var.split(VARIABLE_ATTRIBUTE_SEPARATOR))
# temporary patch for https://code.djangoproject.com/ticket/16955
from django.db.models.sql.query import Query

# Stash the filter value before add_filter runs so the setup_joins hook
# below (invoked inside add_filter) can type-check it against the target
# model. NOTE: this file uses Python 2 syntax (see the raise below).
@before(Query, 'add_filter')
def add_filter_add_value_capture(self, filter_expr, *args, **kwargs):
    arg, value = filter_expr
    self._captured_value_for_monkeypatch = value

@after(Query, 'add_filter')
def add_filter_remove_value_capture(self, value, *args, **kwargs):
    # Remove the temporary attribute once add_filter has completed.
    delattr(self, '_captured_value_for_monkeypatch')

@patch(Query, 'setup_joins')
def setup_joins_with_value_type_check(original_function, self, *args, **kwargs):
    results = original_function(self, *args, **kwargs)
    # Value captured by the @before hook above; None if absent.
    value = getattr(self, '_captured_value_for_monkeypatch', None)
    from users.models import Price
    # if results[0].model == Price:
    #     import pdb; pdb.set_trace()
    if value:
        field = results[0]
        target = results[1]
        from django.db.models.fields.related import RelatedField
        from django.db.models import Model
        # Reject filtering a relation with a model instance of the wrong type.
        if (isinstance(field, RelatedField) and isinstance(value, Model) and
                not isinstance(value, target.model)):
            raise TypeError, "'%s' instance expected" % target.model._meta.object_name
    return results
from django.contrib.auth.forms import ReadOnlyPasswordHashField

# Backport of the fix for Django ticket #19611: always return the initial
# value, because this widget never submits data of its own.
@patch(ReadOnlyPasswordHashField, 'bound_data')
def bound_data_with_bug_19611_patch(original_function, self, data, initial):
    """
    This widget has no fields, so data will always be None, so return
    the initial value always.
    """
    return initial
|
992,774 | f4a2046cbd9a099dc93a80667befa2b0591fcd70 |
# csdaiwei@foxmail.com
# naive bayes classifier (multinomial, bernoulli)
import pdb
import numpy as np
from time import time
from math import log
from math import factorial as ftl
class NaiveBayes:
    """Naive Bayes classifier with multinomial and Bernoulli event models.

    Fixed: the Python-2-only ``xrange`` calls were replaced with ``range``,
    which behaves identically here and also works on Python 3.
    """

    def __init__(self, model='multinomial'):
        self.labels = []      # unique class labels
        self.classprob = {}   # prior probabilities, classprob[yi] indicates p(yi)
        self.condprob = {}    # conditional probabilities, condprob[yi][xi] indicates p(xi|yi)
        self.model = model
        assert model in ['multinomial', 'bernoulli']

    def fit(self, X, Y):
        """Estimate priors and conditionals from count matrix X and labels Y."""
        # class labels
        self.labels = list(set(Y))
        # class prior probabilities (relative label frequencies)
        for l in self.labels:
            self.classprob[l] = (Y == l).sum() / float(len(Y))
        # conditional probabilities with add-one (Laplace) smoothing
        if self.model == 'multinomial':
            for l in self.labels:
                self.condprob[l] = (X[Y == l].sum(axis=0) + 1) / float(X[Y == l].sum() + len(X[0]))
        if self.model == 'bernoulli':
            for l in self.labels:
                # numerator: documents of class l in which each feature occurs
                self.condprob[l] = np.array([(((X[Y == l])[:, i] > 0).sum() + 1) for i in range(0, (X[Y == l]).shape[1])])
                # denominator, add-one smoothing
                self.condprob[l] = self.condprob[l] / float((Y == l).sum() + X.shape[1])

    def predict(self, X):
        """Return the maximum-a-posteriori label for each row of X."""
        p = []
        for x in X:
            px = np.array([self.__logposterior(x, y) for y in self.labels])
            p.append(self.labels[px.argmax()])
        return np.array(p)

    def __logposterior(self, x, y):
        # log(p(x|y) * p(y)); the common denominator p(x) is omitted.
        p = log(self.classprob[y])
        if self.model == 'multinomial':
            # p += log(float(ftl(x.sum()))/reduce(lambda x,y:x*y, map(ftl, x))) #discard by long int to float overflow
            for i in range(0, len(x)):
                if x[i] > 0:
                    p += log(self.condprob[y][i]) * x[i]
            return p
        if self.model == 'bernoulli':
            for i in range(0, len(x)):
                if x[i] > 0:
                    p += log(self.condprob[y][i])
                else:
                    p += log(1 - self.condprob[y][i])
            return p
if __name__ == '__main__':
    # Benchmark driver. NOTE: Python 2 print statements -- run with Python 2.
    # Expects pre-vectorised datasets saved as .npz archives next to the script.
    train = np.load('train.npz')
    test = np.load('test.npz')
    train_samples_full, train_labels = train['samples'], train['labels']
    test_samples_full, test_labels = test['samples'], test['labels']
    for model in ['multinomial', 'bernoulli']:
        print '\n%s model'%model
        # Evaluate with a growing prefix of feature columns (presumably the
        # most frequent features come first -- confirm the saved ordering).
        for dim in [50, 100, 200, 500, 1000, 2000, 5000, 10000]:
            train_samples = train_samples_full[:, 0:dim]
            test_samples = test_samples_full[:, 0:dim]
            start = time()
            nb = NaiveBayes(model=model)
            nb.fit(train_samples, train_labels)
            nb_predicts = nb.predict(test_samples)
            print 'naivebayes dim:%d\taccu:%.4f, time:%4f'%(dim, (nb_predicts == test_labels).mean(), time() - start)
    # Drop into the debugger so results can be inspected interactively.
    pdb.set_trace()
|
992,775 | a0761b30df9296e0fdaf3971cee55984ee05d9aa | import sys
import time
from selenium import webdriver
from selenium.common.exceptions import TimeoutException, WebDriverException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from pyvirtualdisplay import Display
import scrapy
NEXT_WAIT_TIMEOUT = 120
MAX_RETRY = 3
class BrowserRobot(object):
    """
    provide interface for spiders to scrape pages that needs to "manual" click
    """

    def __init__(self, profile_preference=None):
        # Run Firefox inside a virtual (headless) X display.
        self.display = Display(visible=0, size=(1024, 768))
        profile = webdriver.FirefoxProfile()
        if profile_preference:
            # Apply caller-supplied Firefox preferences (e.g. download dirs).
            for key in profile_preference:
                profile.set_preference(key, profile_preference[key])
        self.display.start()
        self.browser = webdriver.Firefox(profile)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Tear down browser and display when used as a context manager.
        self.stop()

    def stop(self):
        # Quit the browser before stopping the display it renders into.
        self.browser.quit()
        self.display.stop()

    def load(self, url):
        # Fetch url and wrap the rendered page source in a scrapy Selector.
        self.browser.get(url)
        return scrapy.Selector(text=self.browser.page_source)

    def load_until(self, url, until):
        """
        wait until some specified element is loaded by xpath
        :param url: page to fetch
        :param until: xpath of the element to wait for (clickable)
        :return: scrapy.Selector over the rendered page source
        """
        self.browser.get(url)
        WebDriverWait(self.browser, NEXT_WAIT_TIMEOUT) \
            .until(EC.element_to_be_clickable((By.XPATH, until)))
        return scrapy.Selector(text=self.browser.page_source)

    def explore(self, start_url, scrape_func, next_xpath, max_count=-1):
        # Generator: repeatedly scrape the current page and click the
        # element at next_xpath, until the page stops changing or max_count
        # pages have been processed.
        # NOTE(review): on the first iteration old_page can compare equal to
        # the freshly waited-on page, ending the loop before any yield --
        # confirm this is intended for the target sites.
        try:
            self.browser.get(start_url)
            old_page = self.browser.page_source
            counter = 0
            while True:
                # refer to the following blog for wait trick:
                # http://www.obeythetestinggoat.com/how-to-get-selenium-to-wait-for-page-load-after-a-click.html
                WebDriverWait(self.browser, NEXT_WAIT_TIMEOUT) \
                    .until(EC.element_to_be_clickable((By.XPATH, next_xpath)))
                # always sleep for a while to be polite
                time.sleep(0.3)
                if old_page == self.browser.page_source or \
                        (max_count != -1 and counter >= max_count):
                    break
                else:
                    old_page = self.browser.page_source
                    counter += 1
                response = scrapy.Selector(text=self.browser.page_source)
                yield scrape_func(response)
                next_elem = self.browser.find_element_by_xpath(next_xpath)
                cnt = 0
                # Clicks can fail transiently (element obscured, mid-render);
                # retry a few times before giving up on this page.
                while cnt < MAX_RETRY:
                    try:
                        ActionChains(self.browser).move_to_element(next_elem).click().perform()
                        break
                    except WebDriverException as we:
                        time.sleep(1)
                        cnt += 1
        except TimeoutException as te:
            sys.stderr.write("Fail to wait for page to be loaded. Error:{}\n".format(te))
        except Exception as oe:
            sys.stderr.write("unexpected exception:{}".format(oe))
            import traceback
            traceback.print_exc()
            raise
def uniq(elements):
    """
    return unique elements with original order
    :param elements: iterable of hashable items
    :return: list of first occurrences, in input order
    """
    seen = set()
    ordered = []
    for element in elements:
        if element in seen:
            continue
        seen.add(element)
        ordered.append(element)
    return ordered
|
992,776 | 0d7cfa7118fd46e5abca25e7e6ad4f3c454bda1e |
from collections import Counter
import operator
import numpy as np
import re
from sklearn.model_selection import train_test_split
def get_vocabs_chars(data):
    """Build vocabulary and character inventories for a tokenised dataset.

    data: list of sentences, each a list of word strings.
    Returns (vocab, chars, max word length, max sentence length), where
    vocab is sorted by descending frequency and ends with "<unk>", "<pad>".

    Fixed: no longer shadows the builtin ``all``; dropped the redundant
    list comprehension wrapped around Counter.
    """
    flat_words = [word for sent in data for word in sent]
    chars = list(set(char for word in flat_words for char in word))
    word_counts = Counter(flat_words)
    # Most frequent words first; ties keep insertion order (sorted is stable).
    words, counts = zip(*sorted(word_counts.items(), key=operator.itemgetter(1), reverse=True))
    vocab = list(words) + ["<unk>", "<pad>"]
    return vocab, chars, max(len(word) for word in word_counts.keys()), max(len(sent) for sent in data)
def tags_to_id(tags, unique_tags):
    """Convert NER tag strings to integer ids in place and return the list."""
    tag_index = {tag: idx for idx, tag in enumerate(unique_tags)}
    for sent_pos in range(len(tags)):
        sentence = tags[sent_pos]
        for tag_pos in range(len(sentence)):
            sentence[tag_pos] = tag_index[sentence[tag_pos]]
    return tags
def words_to_id(vocab, data):
    """Replace each token with its word's vocab index, in place.

    Each data[i][j] is expected to be a tuple whose first element is the
    word string; anything that fails the lookup maps to "<unk>".
    """
    word_index = {word: idx for idx, word in enumerate(vocab)}
    for sent_pos in range(len(data)):
        sentence = data[sent_pos]
        for word_pos in range(len(sentence)):
            try:
                sentence[word_pos] = word_index[sentence[word_pos][0]]
            except Exception:
                sentence[word_pos] = word_index["<unk>"]
    return data
def read_embedding(vocab, file):
    """Load 300-dimensional word vectors from a text embedding file.

    Each line holds a token (possibly multi-part) followed by 300 floats.
    Words absent from the file all share one random fallback vector.
    Returns an array of shape (len(vocab), 300) in vocab order.

    Fixed: the open file handle no longer shadows the ``file`` argument,
    and the broad try/except lookup is replaced with ``dict.get``.
    """
    vectors = dict()
    with open(file, 'r') as handle:
        for line in handle:
            tokens = line.split()
            # The last 300 fields are the vector; everything before is the token.
            vec = np.array(tokens[len(tokens) - 300:], dtype=np.float32)
            token = "".join(tokens[:len(tokens) - 300])
            vectors[token] = vec
    # One shared random vector for all out-of-vocabulary words, as before.
    unk_embedding = np.random.rand(300) * 2. - 1.
    embedding = dict()
    for v in vocab:
        embedding[v] = vectors.get(v, unk_embedding)
    return np.array(list(embedding.values()))
def just_batch(corpus):
    """Split corpus into consecutive batches of up to 100 rows (no padding)."""
    batches = []
    # Mirrors the original slicing: when len(corpus) is a multiple of 100
    # (including 0), a final empty batch is produced.
    for batch_idx in range(len(corpus) // 100 + 1):
        rows = corpus[batch_idx * 100:(batch_idx + 1) * 100]
        batches.append(np.array([np.array(row) for row in rows]))
    return batches
def get_batches(corpus, labels=None, max_length=None, pad_idx=None):
    """Group corpus (and optional labels) into batches of up to 100 rows.

    Returns a list of (text_batch, lengths, labels_batch) triples. When
    pad_idx is given, rows are padded/truncated to max_length via
    add_padding (labels are padded with -1).

    Fixed: ``lengths`` was undefined (NameError) whenever padding was
    skipped, and ``if pad_idx:`` wrongly skipped padding for the perfectly
    valid pad index 0 -- the check is now ``pad_idx is not None``.
    """
    batches = []
    for idx in range(len(corpus) // 100 + 1):
        text_batch = corpus[idx * 100: min((idx + 1) * 100, len(corpus))]
        labels_batch = np.array(labels[idx * 100: min((idx + 1) * 100,
                                len(labels))] if labels is not None else [])
        # Record the true (pre-padding) row lengths for every batch.
        lengths = np.array([len(line) for line in text_batch])
        if pad_idx is not None:
            text_batch = add_padding(text_batch, pad_idx, max_length)
            labels_batch = add_padding(labels_batch, -1, max_length)
        batches.append((np.array([np.array(line) for line in text_batch]),
                        lengths,
                        np.array([np.array(label) for label in labels_batch])))
    return batches
def add_padding(corpus, pad_idx, max_length):
    """Pad (and truncate) every row of corpus to max_length, in place."""
    for row_idx in range(len(corpus)):
        # Keep at most max_length - 1 original items, then fill with pad_idx.
        # NOTE(review): the -1 makes a full-length row lose its final item;
        # confirm whether max_length was intended instead.
        trimmed = corpus[row_idx][:min(len(corpus[row_idx]), max_length) - 1]
        while len(trimmed) < max_length:
            trimmed.append(pad_idx)
        corpus[row_idx] = trimmed
    return corpus
def clean(sentences):
    """Lower-case every word of every sentence in place and return the list."""
    for sent_idx, sentence in enumerate(sentences):
        for word_idx, word in enumerate(sentence):
            sentences[sent_idx][word_idx] = word.lower()
    return sentences
def get_features(sent, idx, POS, POS2):
    """Compute a normalised hand-crafted feature vector for sent[idx].

    sent[idx] is a (word, pos_tag, pos2_tag) tuple; POS and POS2 map tag
    strings to numeric codes. Returns the feature vector divided by its sum.

    Fixed: the fallback vector was assigned unconditionally after the
    try/except, discarding the computed features on every call; it now
    applies only when feature extraction actually fails.
    """
    word = sent[idx][0]
    try:
        features = {
            "first": int(idx == 0),
            # NOTE(review): idx == len(sent) can never be true for a valid
            # index; len(sent) - 1 was probably meant -- confirm before changing.
            "last": int(idx == len(sent)),
            "upper": int(word.isupper()),
            "title": int(word.istitle()),
            "POS": POS[sent[idx][1]],
            "POS2": POS2[sent[idx][2]],
            "digit": int(word.isdigit()),
            "hasdigit": int(re.match(".*\\d+.*", word) is not None),
            "hasnonalpha": int(re.match(".*[^a-zA-Z].*", word) is not None),
            "padding": 0
        }
        vec = np.array(list(features.values()))
    except Exception:
        # Fallback when extraction fails (e.g. a tag missing from POS maps).
        vec = np.array([3, 3, 3, 3, 3, 3, 3, 3, 3, 0])
    return vec / sum(vec)
def ids_to_char(chars, vocab, data, max, sent_max):
    # generated the list of character ids
    # For every word, builds a fixed-length (`max`) list of character
    # indices into `chars`, padding both words (to `max` characters) and
    # sentences (to `sent_max` words) with index len(chars)+1.
    # Returns (char-id array, per-word true-length array).
    # NOTE(review): `max` shadows the builtin and `vocab` is unused here;
    # renaming the parameter would touch keyword callers, so left as is.
    new_data = list()
    lengths = list()
    padding = [len(chars)+1 for i in range(max)]
    for sent in range(len(data)):
        new_data.append(list())
        lengths.append(list())
        for word in range(len(data[sent])):
            my_word = data[sent][word][0]
            temp = []
            # Record the unpadded word length, capped at `max`.
            lengths[sent].append(min(len(my_word), max))
            for l in list(my_word):
                try:
                    temp.append(chars.index(l))
                except Exception:
                    # Unknown character: use the padding/unknown index.
                    temp.append(len(chars)+1)
            while len(temp) < max:
                temp.append(len(chars)+1)
            temp = temp[:max]
            new_data[sent].append(temp)
        # Pad the sentence with all-padding words up to sent_max.
        while(len(new_data[sent]) < sent_max):
            new_data[sent].append(padding)
            lengths[sent].append(0)
        new_data[sent] = np.array([np.array(x) for x in new_data[sent]])
    return np.array([np.array(d) for d in new_data]), np.array([np.array(l) for l in lengths])
992,777 | 5e04d1bf6848767b91e68cfd6ce3338611ab168f | from django.contrib import admin
from slotting.models import Vendor
from slotting.models import MarketDay
from slotting.models import Stall
from profiles.models import VendorProfile
from slotting.models import Assignment
class VendorProfileInline(admin.StackedInline):
    # Edit a vendor's profile inline on the Vendor admin page.
    model = VendorProfile


class VendorAdmin(admin.ModelAdmin):
    # Vendor admin with its profile shown inline.
    inlines = (VendorProfileInline,)


class StallAdmin(admin.ModelAdmin):
    # Stall changelist displayed and ordered by location fields.
    model = Stall
    list_display = ('building', 'section', 'stall_number')
    ordering = ('building', 'section', 'stall_number')


# Register the slotting models with the default admin site.
admin.site.register(Vendor, VendorAdmin)
admin.site.register(MarketDay)
admin.site.register(Stall, StallAdmin)
admin.site.register(Assignment)
|
992,778 | 792e21dffd7a7e0feaaeb8b7c5b98cc44ad517d2 | import math
import pytest
from AutoDiff import autodiff as ad
from AutoDiff import *
################# Test functions #####################
# Checks below assert (derivative, value) pairs from AD_eval / AD_Object.

def test_negation():
    ad1 = ad.AD_eval('-x', "x", 2)
    assert (ad1.derivative('x'), ad1.val) == (-1, -2)

# --- addition ---

def test_add():
    ad1 = ad.AD_eval('x + 2', "x", 2)
    assert (ad1.derivative('x'), ad1.val) == (1,4)

def test_add2():
    ad1 = ad.AD_eval('x + x', "x", 2)
    assert (ad1.derivative('x'), ad1.val) == (2,4)

def test_add3():
    # Multi-variable: partials with respect to both x and y.
    x = ad.AD_Object(2, "x", 1)
    y = ad.AD_Object(3, "y", 1)
    f = 2*x**2 + y
    assert (f.derivative('x'), f.derivative('y'), f.val) == (8, 1, 11)

def test_radd():
    ad1 = ad.AD_eval('2 + x', "x", 2)
    assert (ad1.derivative('x'), ad1.val) == (1,4)

# --- subtraction ---

def test_sub():
    ad1 = ad.AD_eval('x - 2', "x", 2)
    assert (ad1.derivative('x'), ad1.val) == (1,0)

def test_sub2():
    ad1 = ad.AD_eval('x - 2*x', "x", 2)
    assert (ad1.derivative('x'), ad1.val) == (-1,-2)

def test_sub3():
    x = ad.AD_Object(2, "x", 1)
    y = ad.AD_Object(3, "y", 1)
    f = 2*x**2 - y
    # NOTE(review): d/dy of (2x^2 - y) is -1 analytically, yet +1 is
    # expected here -- confirm the library's sign convention.
    assert (f.derivative('x'), f.derivative('y'), f.val) == (8, 1, 5)

def test_rsub():
    ad1 = ad.AD_eval('2 - x', "x", 2)
    assert (ad1.derivative('x'), ad1.val) == (-1,0)

# --- multiplication / division ---

def test_mul():
    ad1 = ad.AD_eval('x * 2', 'x', 2)
    assert (ad1.derivative('x'), ad1.val) == (2,4)

def test_mul2():
    ad1 = ad.AD_eval('x * x', 'x', 2)
    assert (ad1.derivative('x'), ad1.val) == (4,4)

def test_mul3():
    x = ad.AD_Object(2, "x", 1)
    y = ad.AD_Object(3, "y", 1)
    f = x*y
    assert (f.derivative('x'), f.derivative('y'), f.val) == (3, 2, 6)

def test_rmul():
    ad1 = ad.AD_eval('2 * x', 'x', 2)
    assert (ad1.derivative('x'), ad1.val) == (2,4)

def test_truediv():
    ad1 = ad.AD_eval('x/2', 'x', 2)
    assert (ad1.derivative('x'), ad1.val) == (1/2,1)

def test_truediv2():
    ad1 = ad.AD_eval('(x+2)/x', 'x', 2)
    assert (ad1.derivative('x'), ad1.val) == (-1/2,2)

def test_truediv3():
    x = ad.AD_Object(2, "x", 1)
    y = ad.AD_Object(1, "y", 1)
    f = x/y
    assert (f.derivative('x'), f.derivative('y'), f.val) == (1, -2, 2)

def test_rtruediv():
    ad1 = ad.AD_eval('3/x', 'x', 2)
    assert (ad1.derivative('x'), ad1.val) == (-3/4,3/2)

# --- powers ---

def test_pow():
    ad1 = ad.AD_eval('x**3', 'x', 2)
    assert (ad1.derivative('x'), ad1.val) == (12,8)

def test_pow2():
    ad1 = ad.AD_eval('x**x','x',2)
    assert (ad1.derivative('x'), ad1.val) == (4+ 4*math.log(2),4)

def test_pow3():
    x = ad.AD_Object(2, "x", 1)
    y = ad.AD_Object(1, "y", 1)
    f = x**y
    assert (f.derivative('x'), f.derivative('y'), f.val) == (1, 2*math.log(2), 2)

def test_rpow():
    ad1 = ad.AD_eval('3**x','x', 2)
    assert (ad1.derivative('x'), ad1.val) == (9*math.log(3),9)

def test_rpow2():
    ad1 = ad.AD_eval('0**x','x', 2)
    assert (ad1.derivative('x'), ad1.val) == (0,0)

def test_powerrule():
    ad1 = ad.AD_eval('x**(x+1)','x', 0)
    assert (ad1.derivative('x'), ad1.val) == (0,0)

# --- elementary functions (exp is spelled 'e' in this library) ---

def test_exp():
    ad1 = ad.AD_eval('e(x)','x', 2)
    ad2 = ad.AD_eval('e(2*x + 1)','x',1)
    assert (ad1.derivative('x'), ad1.val) == (math.exp(2),math.exp(2))
    assert (ad2.derivative('x'), ad2.val) == (2* math.exp(3),math.exp(3))

def test_log():
    ad1 = ad.AD_eval('log(x)','x', 2)
    assert (ad1.derivative('x'), ad1.val) == (0.5, math.log(2))

def test_sin():
    ad1 = ad.AD_eval('sin(x)','x', 2)
    assert (ad1.derivative('x'), ad1.val) == (math.cos(2), math.sin(2))

def test_arcsin():
    ad1 = ad.AD_eval('arcsin(x)','x',1/2)
    assert (ad1.derivative('x'),ad1.val) == (1 / math.sqrt(1 - 0.5**2),math.asin(1/2))

def test_sinh():
    ad1 = ad.AD_eval('sinh(x)','x',2)
    assert (ad1.derivative('x'), ad1.val) == (math.cosh(2), math.sinh(2))

def test_cos():
    ad1 = ad.AD_eval('cos(x)','x', 2)
    assert (ad1.derivative('x'), ad1.val) == (-math.sin(2), math.cos(2))

def test_arccos():
    ad1 = ad.AD_eval('arccos(x)','x',0.5)
    assert (ad1.derivative('x'), ad1.val) == (-1 / math.sqrt(1 - 0.25),math.acos(0.5))

def test_cosh():
    ad1 = ad.AD_eval('cosh(x)','x',2)
    assert (ad1.derivative('x'), ad1.val) == (math.sinh(2),math.cosh(2))

def test_tan():
    ad1 = ad.AD_eval('tan(x)','x', 2)
    assert (ad1.derivative('x'), ad1.val) == (1/ math.cos(2)**2, math.tan(2))

def test_arctan():
    ad1 = ad.AD_eval('arctan(x)','x', 2)
    assert (ad1.derivative('x'), ad1.val) == (1 / 5,math.atan(2))

def test_tanh():
    ad1 = ad.AD_eval('tanh(x)','x',2)
    assert (ad1.derivative('x'), ad1.val) == (2 / (1 + math.cosh(4)),math.tanh(2))

#def test_log():
#    ad1 = ad.AD_eval('log(x,2)','x',2)
#    assert (ad1.derivative('x'), ad1.val) == (1/(2*math.log(2)), math.log(2,2))

def test_log2():
    # log of zero must raise.
    with pytest.raises(ValueError):
        ad.AD_eval('log(x)','x',0)

#def test_log3():
#    with pytest.raises(ValueError):
#        ad.AD_eval('log(x,0)','x',1)

def test_sigmoid():
    ad1 = ad.AD_eval('sigmoid(x)','x',2)
    val = 1.0/(1.0 + math.exp(-2))
    assert (round(ad1.derivative('x'),5), ad1.val) == (round(val*(1-val),5),val)

def test_sqrt():
    # sqrt via both the expression parser and the method API.
    ad0 = ad.AD_eval('sqrt(x)','x', 4)
    ad1 = ad.AD_Object(4, 'x').sqrt()
    assert (ad0.derivative('x'), ad0.val) == (1/4,2)
    assert (ad1.der['x'], ad1.val) == (1/4,2)
def test_jacobian():
    """Jacobian of scalar and multi-variable AD objects.

    Fixed: a stray trailing backslash after the ``ad3`` assignment glued
    the following ``assert`` onto the same logical line, which is a
    SyntaxError -- the whole module failed to import.
    """
    ad1 = ad.AD_Object(1, 'x', 2)
    ad2 = ad.AD_Object(1, {'x':'x', 'y':'y'},{'x':4, 'y':5})
    ad3 = ad.AD_Object(1, {'x':'x', 'y':'y'},{'x':6, 'y':7})
    assert ad.jacobian(ad1, ['x']) == [2]
    res = ad.jacobian([ad1, ad2, ad3], ['x','y'])
    print(np.sum(res - np.array([[2, 0],[4,5],[6,7]])))
    assert np.sum(res - np.array([[2, 0],[4,5],[6,7]])) == 0
    # Non-AD input must be rejected.
    with pytest.raises(TypeError):
        ad.jacobian(res, 'x')
############ Test AD_eval class assertions ################

def test_repr_AD_eval():
    ad1 = repr(ad.AD_eval('x', 'x', 1))
    val = 1
    der = 1
    assert ad1 == "AD Object: Value = %.3f, Derivative: d(x)= %.3f ; "%(val, der)

def test_AD_eval_multiple1_var1():
    # Mismatched variable/value list lengths must be rejected.
    with pytest.raises(AssertionError):
        ad.AD_eval('2*x+y', ['x', 'y'],[1])

def test_AD_eval_multiple1_var2():
    # Non-numeric values must be rejected.
    with pytest.raises(AssertionError):
        ad.AD_eval('2*x+y', ['x', 'y'],[1, '2'])

def test_AD_eval_multiple1_var3():
    # 'exp' is not a recognised function name (this library spells it 'e').
    with pytest.raises(NameError):
        ad.AD_eval('exp(x)+y', ['x', 'y'],[1,2])

def test_AD_eval_multiple1_var4():
    ad1 = ad.AD_eval('x**2+y', ['x', 'y'],[1,2])
    assert (ad1.der['x'], ad1.der['y'], ad1.val) == (2,1,3)

############ Test AD_Vector class assertions ################

def test_AD_Vector_iterable():
    # Scalars are not valid vector payloads.
    with pytest.raises(AssertionError):
        ad1 = AD_Vector(1, 'x')

def test_AD_Vector():
    x = ad.AD_Vector(np.arange(1,10), label='x')
    z = x**2
    der = ad.derivative(z,'x')
    val = ad.value(z)
    print(der)
    for i in range(1,10):
        assert der[i-1] == 2*i
        assert val[i-1] == i**2

def test_AD_Vector2():
    # Vector-valued function of two vector variables.
    x = ad.AD_Vector(np.arange(1,5), label='x')
    y = ad.AD_Vector(np.arange(1,5), label='y')
    z = AD_FuncVector([2*x + ad.e(y),x**2*y])
    assert ad.value(z) == [[2+np.exp(1), 4+np.exp(2), 6+np.exp(3), 8+np.exp(4)],[1,8,27,64]]
    assert ad.derivative(z, 'x') == [[2,2,2,2], [2,8,18,32]]
    assert ad.derivative(z, 'y') == [[np.exp(1), np.exp(2), np.exp(3), np.exp(4)],[1,4,9,16]]

########### Test AD_object assertions and comparisions ###################

def test_input_AD_Object():
    with pytest.raises(AssertionError):
        ad.AD_Object(value = '1', label = 'x')

def test_AD_object_input1():
    x = 1
    with pytest.raises(TypeError):
        ad1 = ad.AD_Object(value = 1, label = x, der_initial= 1)

def test_AD_object_input2():
    with pytest.raises(TypeError):
        ad1 = ad.AD_Object(value = 1, label = 'x', der_initial = '1')

def test_AD_object_derivative():
    # derivative() requires a string variable name.
    with pytest.raises(AssertionError):
        ad1 = ad.AD_Object(1,'x')
        ad1.derivative(1)

def test_AD_object_eq1():
    # Comparison against a plain number is not allowed.
    ad1 = ad.AD_Object(1,'x',1)
    with pytest.raises(AssertionError):
        ad1 == 1

def test_AD_object_eq2():
    ad1 = ad.AD_Object(1,'x',1)
    ad2 = ad.AD_Object(2,'x',1)
    assert((ad1 == ad2) == False)

def test_AD_object_eq3():
    # Same value but different variable labels -> not equal.
    ad1 = ad.AD_Object(1, 'x', 1)
    ad2 = ad.AD_Object(1, 'y', 1)
    assert((ad1 == ad2) == False)

def test_AD_object_eq4():
    # Same value but different derivatives -> not equal.
    ad1 = ad.AD_Object(1, 'x', 2)
    ad2 = ad.AD_Object(1, 'x', 4)
    assert((ad1 == ad2) == False)

def test_AD_object_ne():
    ad1 = ad.AD_Object(1,'x',1)
    ad1 == ad1
    assert((ad1 != ad1) == False)

def test_AD_object_lt():
    ad1 = ad.AD_Object(1,'x',1)
    ad2 = ad.AD_Object(2,'x',1)
    assert(ad1 < ad2)

def test_AD_object_lt2():
    ad1 = AD_Object(1,'x',1)
    with pytest.raises(AssertionError):
        ad1 < 1

def test_AD_object_gt():
    ad1 = ad.AD_Object(1,'x',1)
    ad2 = ad.AD_Object(2,'x',1)
    assert(ad2 > ad1)

def test_AD_object_gt2():
    ad1 = ad.AD_Object(1,'x',1)
    with pytest.raises(AssertionError):
        ad1 > 1

def test_AD_object_le():
    ad1 = ad.AD_Object(1,'x',1)
    ad2 = ad.AD_Object(2,'x',1)
    assert(ad1 <= ad2)

def test_AD_object_le2():
    ad1 = ad.AD_Object(1,'x',1)
    with pytest.raises(AssertionError):
        ad1 <= 1

def test_AD_object_ge():
    ad1 = ad.AD_Object(1,'x',1)
    ad2 = ad.AD_Object(2,'x',1)
    assert(ad2 >= ad1)

def test_AD_object_ge2():
    ad1 = ad.AD_Object(1,'x',1)
    with pytest.raises(AssertionError):
        ad1 >= 1

def test_repr_AD_Object():
    ad1 = repr(ad.AD_Object(1, 'x'))
    val = 1
    der = 1
    assert ad1 == "AD Object: Value = %.3f, Derivative: d(x)= %.3f ; "%(val, der)

def test_input_function_types():
    # The function argument must be a string expression.
    x = 2
    with pytest.raises(AssertionError):
        ad.AD_eval(3*x+2, "x", 2)

def test_input_label_types():
    x = 2
    with pytest.raises(AssertionError):
        ad.AD_eval('3x+2', x, 2)

def test_input_value_types():
    with pytest.raises(AssertionError):
        ad.AD_eval('3x+2', 'x', '3')

def test_division_by_zero():
    with pytest.raises(ValueError):
        ad.AD_eval('x/0', 'x', 3)

def test_division_by_zero2():
    with pytest.raises(ValueError):
        ad.AD_eval('1/x', 'x', 0)

def test_division_by_zero3():
    with pytest.raises(ValueError):
        ad.AD_eval('(x+1)/x', 'x', 0)

def test_ln_with_negative():
    with pytest.raises(ValueError):
        ad.AD_eval('log(-1*x)', 'x', 3)

def test_exponential_function_name():
    with pytest.raises(NameError):
        ad.AD_eval('3*log(5)/exp(x)', 'x', 3)
992,779 | e02c2baa425e2bbb01bd8801916860171e285dba | from django.http import HttpResponse
def showStatus404(request):
    """Plain-text view returning an HTTP 404 response."""
    body = "page is not exist :("
    return HttpResponse(body, status=404)
992,780 | e10da47ab7e7a6cea5dabb84fcbef9df32c365e4 | def ticker(filename):
dict = {}
infile = open(filename, "r+")
lst = infile.readlines()
for item in lst:
item = item[:-2]
seperate = item.split(":")
dict.update({seperate[0]:seperate[1]})
return dict
# Interactive lookup between company names and ticker symbols.
# NOTE: runs at import time and expects beurs.txt next to the script.
beursDict = ticker("beurs.txt")
print("1. You have a company name: ")
print("2. You have a ticker symbol: ")
functionType = input("Type number of you action: ")
if functionType == "1":
    # Forward lookup: company name -> ticker symbol.
    companyName = input("Enter company name: ")
    if companyName in beursDict:
        print("Ticker symbol: "+beursDict[companyName])
    else:
        print("Not a correct company name")
if functionType == "2":
    # Reverse lookup: scan all entries for a matching symbol.
    tickerSymbol = input("Enter ticker symbol: ")
    for name, symbol in beursDict.items():
        if symbol == tickerSymbol:
            print("company name: " + name)
992,781 | 28898676c1678b18cddbd7395da2235880882696 | from settings import *
# Production overrides layered on top of the base settings.
DEBUG = False
TEMPLATE_DEBUG = DEBUG

EMAIL_PORT = 25

STATIC_URL = '/facegame-static/'
MEDIA_URL = '/facegame-media/'

# PROJECT_ROOT -> DEPLOY_ROOT
# ROOT/releases/release/settings.prod
# Deployment paths resolved four directory levels above this module.
PACKAGE_ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', '..', '..'))
PROJECT_ROOT = os.path.normpath(os.path.join(PACKAGE_ROOT, 'www'))
DEPLOY_ROOT = PACKAGE_ROOT

MEDIA_ROOT = os.path.join(DEPLOY_ROOT, 'media') + os.sep
STATIC_ROOT = os.path.join(DEPLOY_ROOT, 'static') + os.sep

# SQLite database lives at the deployment root.
DATABASES['default']['NAME'] = os.path.abspath(os.path.join(DEPLOY_ROOT, 'sqlite.db'))

URLS_BASE = '/facegame/'
USER_GROUPS = ['helsinki', 'tampere', 'berlin', 'london', 'stockholm', 'munich']

# Secrets live outside version control; their absence is tolerated.
# NOTE: Python 2 print statement below -- this settings file targets Python 2.
try:
    from secret_facegame_settings import *
except ImportError:
    print "no secret production settings"
992,782 | 337f7ca805977f6f367acf5a6ab574d739850a7e | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-01-01 15:24
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the zabbix_host table,
    # linking a Zabbix host entry one-to-one with a server record.
    # Do not hand-edit field definitions; generate a follow-up migration instead.

    initial = True

    dependencies = [
        ('resources', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='ZabbixHostModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('hostid', models.CharField(db_index=True, max_length=10, unique=True, verbose_name='zabbix主机编号')),
                ('host', models.CharField(db_index=True, max_length=50, verbose_name='zabbix主机名')),
                ('status', models.CharField(choices=[('0', '监控中'), ('1', '未监控')], max_length=5, verbose_name='主机监控状态')),
                ('ip', models.GenericIPAddressField(db_index=True, protocol='IPv4', unique=True, verbose_name='监控的IP')),
                ('server', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='resources.ServerModel', verbose_name='关联到服务器表')),
            ],
            options={
                'db_table': 'zabbix_host',
                'ordering': ['ip'],
            },
        ),
    ]
|
992,783 | a894532594927c44621bd408a0d9bfd612205826 | from .base import BaseMessageQueue
import redis
import json
class RedisMessageQueue(BaseMessageQueue):
    """Message queue backed by a Redis list; payloads are JSON-encoded."""

    def __init__(self, connUrl, listener=None):
        super(RedisMessageQueue, self).__init__(connUrl, listener)
        # Only forward host/port that are actually present in the URL so
        # the redis client falls back to its own defaults otherwise.
        conn_opts = {}
        if self.connUrl.hostname:
            conn_opts['host'] = self.connUrl.hostname
        if self.connUrl.port:
            conn_opts['port'] = int(self.connUrl.port)
        self.redis = redis.Redis(**conn_opts)
        # The queue key is the URL path without its leading slash.
        self.key = self.connUrl.path[1:]

    def send(self, msg):
        # RPUSH appends the serialised message to the tail of the list.
        self.redis.rpush(self.key, json.dumps(msg))
|
992,784 | 9f0053d2f860e0d25338c70c407f2e1e271e8bc9 | import argparse
from getpass import getpass
from cryptography.exceptions import InvalidSignature
from crypto_agile.agility import encipher, decipher, VERSION_CLASSES
def encrypt(version, key, input_file, output_file):
    """Encipher *input_file* with *key* using algorithm *version* and write the result."""
    cipher_text = encipher(key=key,
                           plain_text_stream=input_file,
                           version_number=version)
    output_file.write(cipher_text)
def decrypt(key, input_file, output_file):
    """Decipher *input_file* with *key* and write the recovered plain text."""
    plain_text = decipher(key=key, cipher_text_stream=input_file)
    output_file.write(plain_text)
if __name__ == '__main__':
    # Command-line entry point. NOTE: Python 2 print statements below.
    VERSIONS = VERSION_CLASSES.keys()
    parser = argparse.ArgumentParser(description='A crypto agile app which can encrypt a files.')
    parser.add_argument('action', choices=['encrypt', 'decrypt'])
    # Files are opened in binary mode by argparse itself.
    parser.add_argument('input_file', type=argparse.FileType('rb'))
    parser.add_argument('output_file', type=argparse.FileType('wb'))
    # NOTE(review): VERSIONS[0] relies on dict.keys() being an indexable
    # list (Python 2 only) and on dict ordering for the default -- confirm.
    parser.add_argument('-V', '--algorithm_version', type=int, choices=VERSIONS, default=VERSIONS[0])
    parser.add_argument('--key')
    args = parser.parse_args()
    if not args.key:
        # Prompt without echoing when no key was passed on the command line.
        args.key = getpass('encryption key or password ?:')
    if args.action == 'encrypt':
        encrypt(args.algorithm_version, args.key, args.input_file, args.output_file)
    elif args.action == 'decrypt':
        try:
            decrypt(args.key, args.input_file, args.output_file)
        except ValueError as e:
            # Wrong key / authentication failure surfaces as ValueError.
            print "wrong password or corrupted file"
            print e
        except InvalidSignature as e:
            print "corrupted file"
            print e
992,785 | 75bab440c2645eeb0c70d18ae689e078744f633f | # -*- coding: utf-8 -*-
"""
Calculate one and two halo two point correlation functions.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
####import modules########################################################################
import sys
import numpy as np
from math import pi, gamma
from .clustering_helpers import *
from .tpcf_estimators import *
from .pair_counters.double_tree_pairs import npairs
from .pair_counters.marked_double_tree_pairs import marked_npairs
from warnings import warn
##########################################################################################
__all__=['tpcf_one_two_halo_decomp']
__author__ = ['Duncan Campbell']
np.seterr(divide='ignore', invalid='ignore') #ignore divide by zero in e.g. DD/RR
def tpcf_one_two_halo_decomp(sample1, sample1_host_halo_id, rbins,
sample2=None, sample2_host_halo_id=None,
randoms=None, period=None,
do_auto=True, do_cross=True, estimator='Natural',
num_threads=1, max_sample_size=int(1e6),
approx_cell1_size = None, approx_cell2_size = None,
approx_cellran_size = None):
"""
Calculate the real space one-halo and two-halo decomposed two-point correlation
functions, :math:`\\xi^{1h}(r)` and :math:`\\xi^{2h}(r)`.
This returns the correlation function for galaxies which reside in the same halo, and
those that reside in seperate halos, as indicated by a host halo ID.
Example calls to this function appear in the documentation below.
See the :ref:`mock_obs_pos_formatting` documentation page for
instructions on how to transform your coordinate position arrays into the
format accepted by the ``sample1`` and ``sample2`` arguments.
See also :ref:`galaxy_catalog_analysis_tutorial2`.
Parameters
----------
sample1 : array_like
Npts x 3 numpy array containing 3-D positions of points.
sample1_host_halo_id : array_like, optional
*len(sample1)* integer array of host halo ids.
rbins : array_like
array of boundaries defining the real space radial bins in which pairs are
counted.
rbins : array_like
array of boundaries defining the real space radial bins in which pairs are
counted.
sample2 : array_like, optional
Npts x 3 array containing 3-D positions of points.
sample2_host_halo_id : array_like, optional
*len(sample2)* integer array of host halo ids.
randoms : array_like, optional
Npts x 3 array containing 3-D positions of points. If no randoms are provided
analytic randoms are used (only valid for periodic boundary conditions).
period : array_like, optional
Length-3 array defining axis-aligned periodic boundary conditions. If only
one number, Lbox, is specified, period is assumed to be [Lbox]*3.
do_auto : boolean, optional
do auto-correlation?
do_cross : boolean, optional
do cross-correlation?
estimator : string, optional
options: 'Natural', 'Davis-Peebles', 'Hewett' , 'Hamilton', 'Landy-Szalay'
num_threads : int, optional
number of threads to use in calculation. Default is 1. A string 'max' may be used
to indicate that the pair counters should use all available cores on the machine.
max_sample_size : int, optional
Defines maximum size of the sample that will be passed to the pair counter.
If sample size exeeds max_sample_size, the sample will be randomly down-sampled
such that the subsample is equal to max_sample_size.
approx_cell1_size : array_like, optional
Length-3 array serving as a guess for the optimal manner by which
the `~halotools.mock_observables.pair_counters.FlatRectanguloidDoubleTree`
will apportion the sample1 points into subvolumes of the simulation box.
The optimum choice unavoidably depends on the specs of your machine.
Default choice is to use max(rbins) in each dimension,
which will return reasonable result performance for most use-cases.
Performance can vary sensitively with this parameter, so it is highly
recommended that you experiment with this parameter when carrying out
performance-critical calculations.
approx_cell2_size : array_like, optional
Analogous to ``approx_cell1_size``, but for ``sample2``. See comments for
``approx_cell1_size`` for details.
approx_cellran_size : array_like, optional
Analogous to ``approx_cell1_size``, but for ``randoms``. See comments for
``approx_cell1_size`` for details.
Returns
-------
correlation_function(s) : numpy.array
Two *len(rbins)-1* length arrays containing the one and two halo correlation
functions, :math:`\\xi^{1h}(r)` and :math:`\\xi^{2h}(r)`, computed in each of the
radial bins defined by input ``rbins``.
.. math::
1 + \\xi(r) \\equiv \\mathrm{DD} / \\mathrm{RR},
if ``estimator`` is set to 'Natural', where :math:`\\mathrm{DD}` is calculated
by the pair counter, and :math:`\\mathrm{RR}` is counted internally using
"analytic randoms" if no ``randoms`` are passed as an argument
(see notes for an explanation). If a different ``estimator`` is specified, the
appropiate formula is used.
If ``sample2`` is passed as input, six arrays of length *len(rbins)-1* are
returned:
.. math::
\\xi^{1h}_{11}(r), \\ \\xi^{2h}_{11}(r),
.. math::
\\xi^{1h}_{12}(r), \\ \\xi^{2h}_{12}(r),
.. math::
\\xi^{1h}_{22}(r), \\ \\xi^{2h}_{22}(r),
the autocorrelation of one and two halo autocorrelation of ``sample1``,
the one and two halo cross-correlation between ``sample1`` and ``sample2``,
and the one and two halo autocorrelation of ``sample2``.
If ``do_auto`` or ``do_cross`` is set to False, only the appropriate result(s)
is returned.
Notes
-----
Pairs are counted using
`~halotools.mock_observables.pair_counters.marked_npairs`.
This pair counter is optimized to work on points distributed in a rectangular cuboid
volume, e.g. a simulation box. This optimization restricts this function to work
on 3-D point distributions.
If the points are distributed in a continuous "periodic box", then ``randoms`` are not
necessary, as the geometry is very simple, and the monte carlo integration that
randoms are used for in complex geometries can be done analytically.
If the ``period`` argument is passed in, all points' ith coordinate
must be between 0 and period[i].
Examples
--------
>>> #randomly distributed points in a unit cube.
>>> Npts = 1000
>>> x,y,z = (np.random.random(Npts),np.random.random(Npts),np.random.random(Npts))
>>> coords = np.vstack((x,y,z)).T
>>> period = np.array([1.0,1.0,1.0])
>>> rbins = np.logspace(-2,-1,10)
>>> host_halo_IDs = np.random.random_integers(1,10,size=Npts)
>>> xi_1h, xi_2h = tpcf_one_two_halo_decomp(coords, host_halo_IDs, rbins, period=period)
See also
-----------
:ref:`galaxy_catalog_analysis_tutorial3`.
"""
#check input arguments using clustering helper functions
function_args = [sample1, sample1_host_halo_id, rbins, sample2, sample2_host_halo_id,
randoms, period, do_auto, do_cross, estimator, num_threads,
max_sample_size, approx_cell1_size, approx_cell2_size,
approx_cellran_size]
#pass arguments in, and get out processed arguments, plus some control flow variables
sample1, sample1_host_halo_id, rbins, sample2, sample2_host_halo_id, randoms, period,\
do_auto, do_cross, num_threads, _sample1_is_sample2, PBCs =\
_tpcf_one_two_halo_decomp_process_args(*function_args)
def random_counts(sample1, sample2, randoms, rbins, period, PBCs, num_threads,
                  do_RR, do_DR, _sample1_is_sample2, approx_cell1_size,
                  approx_cell2_size, approx_cellran_size):
    """
    Count random pairs.  There are two high-level branches:
        1. externally supplied ``randoms`` (works for any geometry), or
        2. PBCs with *analytic* randoms (no ``randoms`` passed).
    There are also logical bits to do RR and DR pair counts, as not all
    estimators need one or the other, and skipping them can save a lot of
    calculation.
    Analytic counts are N**2*dv*rho, where dv is the volume of the spherical
    shells, which is the correct volume to use for a continuous cubic volume
    with PBCs.
    """
    def nball_volume(R, k=3):
        """
        Calculate the volume of an n-sphere of radius R (k=3 gives a ball).
        This is used for the analytical randoms.
        """
        return (np.pi**(k/2.0)/gamma(k/2.0+1.0))*R**k
    # randoms provided, so calculate random pair counts by brute force.
    if randoms is not None:
        if do_RR==True:
            RR = npairs(randoms, randoms, rbins, period=period,
                        num_threads=num_threads,
                        approx_cell1_size=approx_cellran_size,
                        approx_cell2_size=approx_cellran_size)
            # np.diff converts cumulative counts within rbins into per-bin counts
            RR = np.diff(RR)
        else: RR=None
        if do_DR==True:
            D1R = npairs(sample1, randoms, rbins, period=period,
                         num_threads=num_threads,
                         approx_cell1_size=approx_cell1_size,
                         approx_cell2_size=approx_cellran_size
                         )
            D1R = np.diff(D1R)
        else: D1R=None
        if _sample1_is_sample2:
            # sample2 is the same set of points; its cross term is redundant
            D2R = None
        else:
            if do_DR==True:
                D2R = npairs(sample2, randoms, rbins, period=period,
                             num_threads=num_threads,
                             approx_cell1_size=approx_cell2_size,
                             approx_cell2_size=approx_cellran_size)
                D2R = np.diff(D2R)
            else: D2R=None
        return D1R, D2R, RR
    # PBCs and no randoms--calculate randoms analytically.
    elif randoms is None:
        # set the number of randoms equal to the number of points in sample1
        NR = len(sample1)
        # do volume calculations
        v = nball_volume(rbins)        # volume of spheres
        dv = np.diff(v)                # volume of shells
        global_volume = period.prod()  # volume of simulation
        # calculate randoms for sample1
        N1 = np.shape(sample1)[0]      # number of points in sample1
        rho1 = N1/global_volume        # number density of points
        D1R = (NR)*(dv*rho1)           # random counts are N**2*dv*rho
        # calculate randoms for sample2
        N2 = np.shape(sample2)[0]      # number of points in sample2
        rho2 = N2/global_volume        # number density of points
        D2R = (NR)*(dv*rho2)           # random counts are N**2*dv*rho
        # calculate the random-random pairs.
        rhor = (NR**2)/global_volume
        RR = (dv*rhor)
        return D1R, D2R, RR
def marked_pair_counts(sample1, sample2, rbins, period, num_threads,
                       do_auto, do_cross, marks1, marks2, wfunc, _sample1_is_sample2):
    """
    Count weighted data pairs using the requested weighting function *wfunc*.
    Returns the per-bin D1D1, D1D2, D2D2 counts (None for any count that was
    not requested via do_auto/do_cross).
    """
    # Append a column of ones so the pair counter's product of weights
    # reduces to mark * 1.0 for each member of a pair.
    marks1 = np.vstack((marks1, np.ones(len(marks1)))).T
    marks2 = np.vstack((marks2, np.ones(len(marks2)))).T
    D1D1 = D1D2 = D2D2 = None
    if do_auto == True:
        raw = marked_npairs(sample1, sample1, rbins,
                            weights1=marks1, weights2=marks1,
                            wfunc=wfunc,
                            period=period, num_threads=num_threads)
        D1D1 = np.diff(raw)
    if _sample1_is_sample2:
        # Identical samples: the cross and second auto counts are the same.
        D1D2 = D1D1
        D2D2 = D1D1
    else:
        if do_cross == True:
            raw = marked_npairs(sample1, sample2, rbins,
                                weights1=marks1, weights2=marks2,
                                wfunc=wfunc,
                                period=period, num_threads=num_threads)
            D1D2 = np.diff(raw)
        if do_auto == True:
            raw = marked_npairs(sample2, sample2, rbins,
                                weights1=marks2, weights2=marks2,
                                wfunc=wfunc,
                                period=period, num_threads=num_threads)
            D2D2 = np.diff(raw)
    return D1D1, D1D2, D2D2
#What needs to be done?
do_DD, do_DR, do_RR = _TP_estimator_requirements(estimator)
# How many points are there (for normalization purposes)?
N1 = len(sample1)
N2 = len(sample2)
if randoms is not None:
NR = len(randoms)
else:
#set the number of randoms equal to the number of points in sample1
#this is arbitrarily set, but must remain consistent!
NR = N1
#calculate 1-halo pairs
wfunc=3
one_halo_D1D1,one_halo_D1D2, one_halo_D2D2 =\
marked_pair_counts(sample1, sample2, rbins, period, num_threads,\
do_auto, do_cross, sample1_host_halo_id,\
sample2_host_halo_id, wfunc, _sample1_is_sample2)
#calculate 2-halo pairs
wfunc=4
two_halo_D1D1,two_halo_D1D2, two_halo_D2D2 =\
marked_pair_counts(sample1, sample2, rbins, period, num_threads,\
do_auto, do_cross, sample1_host_halo_id,\
sample2_host_halo_id, wfunc, _sample1_is_sample2)
#count random pairs
D1R, D2R, RR = random_counts(sample1, sample2, randoms, rbins, period,
PBCs, num_threads, do_RR, do_DR, _sample1_is_sample2,
approx_cell1_size,approx_cell2_size,approx_cellran_size)
#check to see if any of the random counts contain 0 pairs.
if D1R is not None:
if np.any(D1R==0):
msg = ("sample1 cross randoms has radial bin(s) which contain no points. \n"
"Consider increasing the number of randoms, or using larger bins.")
warn(msg)
if D2R is not None:
if np.any(D2R==0):
msg = ("sample2 cross randoms has radial bin(s) which contain no points. \n"
"Consider increasing the number of randoms, or using larger bins.")
warn(msg)
if RR is not None:
if np.any(RR==0):
msg = ("randoms cross randoms has radial bin(s) which contain no points. \n"
"Consider increasing the number of randoms, or using larger bins.")
warn(msg)
#run results through the estimator and return relavent/user specified results.
if _sample1_is_sample2:
one_halo_xi_11 = _TP_estimator(one_halo_D1D1,D1R,RR,N1,N1,NR,NR,estimator)
two_halo_xi_11 = _TP_estimator(two_halo_D1D1,D1R,RR,N1,N1,NR,NR,estimator)
return one_halo_xi_11, two_halo_xi_11
else:
if (do_auto==True) & (do_cross==True):
one_halo_xi_11 = _TP_estimator(one_halo_D1D1,D1R,RR,N1,N1,NR,NR,estimator)
one_halo_xi_12 = _TP_estimator(one_halo_D1D2,D1R,RR,N1,N2,NR,NR,estimator)
one_halo_xi_22 = _TP_estimator(one_halo_D2D2,D2R,RR,N2,N2,NR,NR,estimator)
two_halo_xi_11 = _TP_estimator(two_halo_D1D1,D1R,RR,N1,N1,NR,NR,estimator)
two_halo_xi_12 = _TP_estimator(two_halo_D1D2,D1R,RR,N1,N2,NR,NR,estimator)
two_halo_xi_22 = _TP_estimator(two_halo_D2D2,D2R,RR,N2,N2,NR,NR,estimator)
return one_halo_xi_11, two_halo_xi_11, one_halo_xi_12,\
two_halo_xi_12, one_halo_xi_22, two_halo_xi_22
elif (do_cross==True):
one_halo_xi_12 = _TP_estimator(one_halo_D1D2,D1R,RR,N1,N2,NR,NR,estimator)
two_halo_xi_12 = _TP_estimator(two_halo_D1D2,D1R,RR,N1,N2,NR,NR,estimator)
return one_halo_xi_12, two_halo_xi_12
elif (do_auto==True):
one_halo_xi_11 = _TP_estimator(one_halo_D1D1,D1R,D1R,N1,N1,NR,NR,estimator)
one_halo_xi_22 = _TP_estimator(one_halo_D2D2,D2R,D2R,N2,N2,NR,NR,estimator)
two_halo_xi_11 = _TP_estimator(two_halo_D1D1,D1R,D1R,N1,N1,NR,NR,estimator)
two_halo_xi_22 = _TP_estimator(two_halo_D2D2,D2R,D2R,N2,N2,NR,NR,estimator)
return one_halo_xi_11, two_halo_xi_11, one_halo_xi_22, two_halo_xi_22
|
992,786 | f0d496b56dc448a0762f10dd3d9ef33cc789cc8d | import subprocess
import pandas as pd
import re, threading
import PySimpleGUI as sg
# Create a file to save the output of the pip command listing the packages needing upgrade
fhandle = open(r'C:\temp\update.txt', 'w')
# subprocess.run('pip list --outdated', shell = True, stdout = fhandle)
# Run pip in a background thread so the loading animation stays responsive.
thread = threading.Thread(target=lambda: subprocess.run('pip list --outdated', shell=True, stdout=fhandle), daemon=True)
thread.start()
while True:
    sg.popup_animated(sg.DEFAULT_BASE64_LOADING_GIF, 'Loading list of packages', time_between_frames=100)
    thread.join(timeout=.1)
    if not thread.is_alive():
        break
sg.popup_animated(None)
fhandle.close()
# All the packages from pip needing updating have been saved in the file
# Create a data frame, and then massage and load the output data in the file to the expected format
df1 = pd.DataFrame(columns=['Package', 'Version', 'Latest', 'Type'])
fhandle = open(r'C:\temp\update.txt', 'r')
AnyPackagesToUpgrade = 0
for i, line in enumerate(fhandle):
    if i not in (0, 1):  # first two lines are pip's header/separator, no packages
        # NOTE(review): DataFrame.append is deprecated and removed in pandas 2.x;
        # pd.concat would be the modern replacement — confirm the pandas version in use.
        df1 = df1.append({
            'Package': re.findall('(.+?)\s', line)[0],
            'Version': re.findall('([0-9].+?)\s', line)[0],
            'Latest': re.findall('([0-9].+?)\s', line)[1],
            'Type': re.findall('\s([a-zA-Z]+)', line)[0]
        }, ignore_index=True)
        AnyPackagesToUpgrade = 1  # if no packages, then don't bring up full UI later on
# We now have a dataframe with all the relevant packages to update
# Moving onto the UI
formlists = []  # This will be the list to be displayed on the UI
i = 0
while i < len(df1):  # this is the checkbox magic that will show up on the UI
    formlists.append([sg.Checkbox(df1.iloc[i, :])])
    formlists.append([sg.Text('-'*50)])
    i += 1
layout = [
    [sg.Column(layout=[
        *formlists], vertical_scroll_only=True, scrollable=True, size=(704, 400)
    )],
    [sg.Output(size=(100, 10))],
    [sg.Submit('Upgrade'), sg.Cancel('Exit')]
]
window = sg.Window('Choose Package to Upgrade', layout, size=(800, 650))
if AnyPackagesToUpgrade == 0:
    sg.Popup('No Packages requiring upgrade found')
    quit()
# The logic executed when clicking things on the UI
definedkey = []
while True:  # The Event Loop
    event, values = window.read()
    # print(event, values) # debug
    if event in (None, 'Exit', 'Cancel'):
        break
    elif event == 'Upgrade':
        # values maps checkbox index -> bool; checkbox order matches df1 rows.
        for index, value in enumerate(values):
            if values[index] == True:
                # print(df1.iloc[index][0])
                sg.popup_animated(sg.DEFAULT_BASE64_LOADING_GIF, 'Installing Updates', time_between_frames=100)
                subprocess.run('pip install --upgrade ' + df1.iloc[index][0])
                sg.popup_animated(None)
                print('Upgrading', df1.iloc[index][0])
print('Upgrading process finished.')
|
992,787 | 0945f77003c48bbd05995ce24ae27893de618830 | import os
import sys
import random
import pickle
from pathlib import Path
import xgboost as xgb
import preprocessing
import os
# Work around the duplicate-OpenMP-runtime crash common with xgboost on macOS.
os.environ['KMP_DUPLICATE_LIB_OK']='True'
from passage_retrieval.supervised_xgboost import predict_passage
from answer_extraction.answer_extraction import find_answer
# All data/model paths are resolved relative to this script's directory.
SCRIPT_DIR = os.path.dirname(__file__)
PASSAGES_PATH = os.path.join(SCRIPT_DIR, "preprocessed_data/passages.pickle")
PASSAGES_STANFORD_PATH = os.path.join(SCRIPT_DIR, "preprocessed_data/passages_stanford.pickle")
XGBOOST_MODEL_PATH = os.path.join(SCRIPT_DIR, "xgb_model.model")
print(XGBOOST_MODEL_PATH)
# CLI: <script> <test_questions_path> <task1_pred_path> <task2_pred_path>
_, test_questions_path, task1_pred_path, task2_pred_path = sys.argv
task1_preds, task2_preds = [], []
with open(test_questions_path, 'r', encoding='utf16') as f:
    questions = f.readlines()
# NOTE(review): pickle.load trusts these files completely — only load pickles
# produced by the local preprocessing step.
with open(PASSAGES_PATH, "rb") as f:
    passages = pickle.load(f)
with open(PASSAGES_STANFORD_PATH, "rb") as f:
    passages_stanford = pickle.load(f)
xgb_model = xgb.XGBRegressor()
xgb_model.load_model(XGBOOST_MODEL_PATH)
# Task 1: rank/select the best passage id per question.
task1_preds = [predict_passage(question, passages, xgb_model) for question in questions]
# Task 2: extract an answer from the passage chosen in task 1.
task2_preds = [find_answer(question, passages_stanford[p_id]) for question, p_id in zip(questions, task1_preds)]
task1_preds = [str(id_) for id_ in task1_preds]
with open(Path(task1_pred_path) / 'submission_2.txt', 'w', encoding='utf16') as f:
    f.write('\n'.join(task1_preds))
with open(Path(task2_pred_path) / 'submission_2.txt', 'w', encoding='utf16') as f:
    f.write('\n'.join(task2_preds))
|
992,788 | e261cadb108d91bf8956674883536ab0ce7102cf | """A function to abbreviate strings."""
__author__ = "730407570"
def main() -> None:
    """Entry point when run as a module: prompt for text, print its abbreviation."""
    user_text: str = input("Write some text with some uppercase letters: ")
    shortened: str = abbreviate(user_text)
    print(f"The abbreviation is \"{shortened}\".")
    return None
# TODO 1: Define the abbreviate function, and its logic, here.
def abbreviate(x: str) -> str:
    """Return the capital letters of *x*, in order of appearance.

    Replaces the original while-loop with manual index bookkeeping and
    quadratic string concatenation: str.join over a generator is the
    idiomatic (and linear-time) form.
    """
    return "".join(ch for ch in x if ch.isupper())
if __name__ == "__main__":
main() |
992,789 | 192236ce79f6fc7d24c2fc9d7490ce4db22f1e04 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "David S. Batista"
__email__ = "dsbatista@inesc-id.pt"
"""
In computer science, the maximum subarray problem is the task of finding the
contiguous subarray within a one-dimensional array of numbers
(containing at least one positive number) which has the largest sum.
For example, for the sequence of values −2, 1, −3, 4, −1, 2, 1, −5, 4;
the contiguous subarray with the largest sum is 4, −1, 2, 1, with sum 6.
{2,-1, 3,-5,3} output: 4
"""
def max_sum_sub_array(arr):
    # Kadane's algorithm: keep the best sum of a subarray ending at the
    # current element; reset the running sum whenever it turns negative.
    # An array with no positive element yields 0, matching the original.
    best = 0
    running = 0
    for value in arr:
        running += value
        if running > best:
            best = running
        elif running < 0:
            running = 0
    return best
# find the largest sub-sequence given an array that yields the largest sum.
def max_sum_seq(arr):
    # Kadane's algorithm, additionally tracking where the winning window
    # starts/ends so the subsequence itself can be returned.
    best = running = 0
    start = end = window_start = -1
    for idx, value in enumerate(arr):
        running += value
        if running > best:
            # New best window: it began just after the last reset point.
            best = running
            start, end = window_start, idx
        elif running < 0:
            running = 0
            window_start = idx
    # start is exclusive, end inclusive — hence the +1 offsets on the slice.
    return best, start, end, arr[start + 1:end + 1]
# TODO: Find the largest subsequence of the given array that yields the largest PRODUCT.
# TODO: Maximum product subset with negative and positive integers
def main():
    """Exercise both helpers on the example array from the module docstring."""
    x_array = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    # print() as a function is valid on both Python 2 and Python 3; the
    # original "print expr" statements are a SyntaxError under Python 3.
    print(max_sum_sub_array(x_array))
    print(max_sum_seq(x_array))
if __name__ == "__main__":
main() |
992,790 | e46365f8b3347a9643473ba678a101fe33181d81 | import uuid
from src.common.database import Database
import src.models.friends.constants as FriendConstant
class Friends:
    """A username -> friend edge persisted in the friends Mongo collection."""

    def __init__(self, username, friend, _id=None):
        self.username = username
        self.friend = friend
        # Fresh hex UUID unless an id was supplied (e.g. when rehydrating
        # a record loaded back from the database).
        self._id = _id if _id is not None else uuid.uuid4().hex

    def json(self):
        """Return the document representation stored in Mongo."""
        return {
            'username': self.username,
            'friend': self.friend,
            '_id': self._id,
        }

    def save_to_mongo(self):
        """Upsert this friendship document, keyed by its _id."""
        Database.update(FriendConstant.COLLECTION, {'_id': self._id}, self.json())

    @classmethod
    def find_all_by_username(cls, username):
        """Load every friendship record belonging to *username*."""
        records = Database.find(FriendConstant.COLLECTION, {'username': username})
        return [cls(**record) for record in records]

    @staticmethod
    def delete(friend):
        """Remove every friendship document whose friend field equals *friend*."""
        Database.remove(FriendConstant.COLLECTION, {'friend': friend})
|
992,791 | 91d115bc7a6eb99390675fe9ce24fb77b616fa10 | from django.urls import path
from vaccination.admin_views import IndexView, NewCenterView, ApproveView, RejectView, CenterView, WorkerRegister, \
WorkerView, ViewChildren, AddEvent, ViewEvents, AddVaccine, ViewAllocation,TakeChildren, RealloRequest, VaccineView,RequestStatus, \
UpdateReq
# Route table for the vaccination admin sub-site; every route maps to a
# class-based view imported from vaccination.admin_views.
urlpatterns = [
    path('', IndexView.as_view()),
    path('newcenter', NewCenterView.as_view()),
    path('approve', ApproveView.as_view()),
    path('reject', RejectView.as_view()),
    path('centerview', CenterView.as_view()),
    path('workerreg', WorkerRegister.as_view()),
    path('workerview', WorkerView.as_view()),
    path('ViewChildren', ViewChildren.as_view()),
    path('TakeChildren', TakeChildren.as_view()),
    path('RealloRequest', RealloRequest.as_view()),
    path('ViewAllocation', ViewAllocation.as_view()),
    path('AddEvent', AddEvent.as_view()),
    path('ViewEvents', ViewEvents.as_view()),
    path('AddVaccine', AddVaccine.as_view()),
    path('VaccineView', VaccineView.as_view()),
    path('RequestStatus', RequestStatus.as_view()),
    path('UpdateReq', UpdateReq.as_view()),
]
def urls():
    """Return this sub-site's URL configuration tuple.

    NOTE(review): the 3-tuple appears to be
    (urlpatterns, app_namespace, instance_namespace) — confirm it matches
    how the project's root URLconf consumes it.
    """
    return urlpatterns, 'admin', 'admin'
992,792 | f473a85f402fb0dfc36c851d77647ecfac7beb8d |
from collections import Counter
'''
Revature is building a new API! This API contains functions for validating data,
solving problems, and encoding data.
The API consists of 10 functions that you must implement.
Guidelines:
1) Edit the file to match your first name and last name with the format shown.
2) Provide tests in the main method for all functions, We should be able to run
this script and see the outputs in an organized manner.
3) You can leverage the operating system if needed, however, do not use any non
legacy command that solves the problem by just calling the command.
4) We believe in self commenting code, however, provide comments to your solutions
and be organized.
5) Leverage resources online if needed, but remember, be able to back your solutions
up since you can be asked.
6) Plagiarism is a serious issue, avoid it at all costs.
7) Don't import external libraries which are not python native
8) Don't change the parameters or returns, follow the directions.
9) Assignment is optional, but totally recommend to achieve before Monday for practice.
Happy Scripting!
© 2018 Revature. All rights reserved.
'''
'''
Use the main function for testing purposes and to show me results for all functions.
'''
import re
from collections import Counter
def main():
    """Smoke-test driver: call each implemented API function once and rely on
    their printed output for manual verification."""
    reverse('example')
    acronym('Portable Network Graphics')
    whichTriangle(2,2,2) #equi
    whichTriangle(1,2,2) #isoceles
    whichTriangle(2,1,2) #isoceles
    whichTriangle(3,2,1) #scalene
    scrabble("lebowski") # 17: l,e,o,s,i = 1 each; b = 3; w = 4; k = 5
    armstrong(153)
    armstrong(9)
    armstrong(154)
    # primeFactors(52) # [1, 2, 4, 13, 26, 52]
    # primeFactors(24) # [1, 2, 3, 4, 6, 8, 12, 24]
    pangram("The quick brown fox jumps over the lazy dog")
    pangram("abc def g")
'''
1. Reverse a String. Example: reverse("example"); -> "elpmaxe"
Rules:
- Do NOT use built-in tools
- Reverse it your own way
param: str
return: str
'''
def reverse(s):
    """Reverse *s* without built-in helpers; print and return the result.

    The exercise contract documents "return: str", but the original only
    printed — returning the value keeps printing behavior while honoring
    the contract.
    """
    reversed_str = ""
    for ch in s:
        # Prepending each character builds the string back-to-front.
        reversed_str = ch + reversed_str
    print(reversed_str)
    return reversed_str
'''
2. Convert a phrase to its acronym. Techies love their TLA (Three Letter
Acronyms)! Help generate some jargon by writing a program that converts a
long name like Portable Network Graphics to its acronym (PNG).
param: str
return: str
'''
def acronym(phrase):
    """Build an acronym from the first letter of each word; print and return it.

    Fixes two issues in the original: it never returned the value despite the
    documented "return: str", and it raised IndexError on consecutive spaces
    (an empty word has no char[0]).
    """
    result = ''.join(word[0] for word in phrase.split(' ') if word)
    print(result)
    return result
'''
3. Determine if a triangle is equilateral, isosceles, or scalene. An
equilateral triangle has all three sides the same length. An isosceles
triangle has at least two sides the same length. (It is sometimes specified
as having exactly two sides the same length, but for the purposes of this
exercise we'll say at least two.) A scalene triangle has all sides of
different lengths.
param: float, float, float
return: str -> 'equilateral', 'isoceles', 'scalene'
'''
def whichTriangle(sideOne, sideTwo, sideThree):
    """Classify a triangle by its side lengths; print and return the kind.

    Returns one of 'equilateral', 'isoceles', 'scalene' per the documented
    contract (the original printed 'Isoceles' with inconsistent casing and
    returned nothing).
    """
    if sideOne == sideTwo == sideThree:
        result = 'equilateral'
    elif sideOne == sideTwo or sideTwo == sideThree or sideOne == sideThree:
        # "At least two" equal sides counts as isosceles for this exercise.
        result = 'isoceles'  # spelling kept from the documented contract
    else:
        result = 'scalene'
    print(result)
    return result
'''
4. Given a word, compute the scrabble score for that word.
--Letter Values-- Letter Value A, E, I, O, U, L, N, R, S, T = 1; D, G = 2; B,
C, M, P = 3; F, H, V, W, Y = 4; K = 5; J, X = 8; Q, Z = 10; Examples
"cabbage" should be scored as worth 14 points:
3 points for C, 1 point for A, twice 3 points for B, twice 2 points for G, 1
point for E And to total:
3 + 2*1 + 2*3 + 2 + 1 = 3 + 2 + 6 + 3 = 5 + 9 = 14
param: str
return: int
'''
def scrabble(word):
    """Compute and return the scrabble score of *word*.

    Prints each scoring letter with the running tally (as the original did)
    and returns the total, satisfying the documented "return: int" that the
    original omitted.  A dict lookup replaces the seven-way elif chain.
    """
    # Letter values per the exercise table.
    letter_values = {}
    for letters, value in (('AEIOULNRST', 1), ('DG', 2), ('BCMP', 3),
                           ('FHVWY', 4), ('K', 5), ('JX', 8), ('QZ', 10)):
        for ch in letters:
            letter_values[ch] = value
    score = 0
    for letter in word.upper():
        if letter in letter_values:
            score = score + letter_values[letter]
            print(letter,end=', ')
            print(score)
        else:
            print("NOT A LETTER! = 0 points")
    print('Your total score is: ' + str(score))
    return score
'''
5. An Armstrong number is a number that is the sum of its own digits each
raised to the power of the number of digits.
For example:
9 is an Armstrong number, because 9 = 9^1 = 9 10 is not an Armstrong number,
because 10 != 1^2 + 0^2 = 2 153 is an Armstrong number, because: 153 = 1^3 +
5^3 + 3^3 = 1 + 125 + 27 = 153 154 is not an Armstrong number, because: 154
!= 1^3 + 5^3 + 4^3 = 1 + 125 + 64 = 190 Write some code to determine whether
a number is an Armstrong number.
param: int
return: bool
'''
def armstrong(number):
    """Return True iff *number* is an Armstrong number; also print the verdict.

    An Armstrong number equals the sum of its digits each raised to the
    number of digits (153 = 1**3 + 5**3 + 3**3).  The original printed the
    verdict but returned None despite the documented "return: bool", and
    carried an unused ``output`` list.
    """
    digits = str(number)
    power = len(digits)
    total = sum(int(d) ** power for d in digits)
    is_armstrong = total == number
    if is_armstrong:
        print(str(is_armstrong) + ', THIS IS AN ARMSTRONG NUMBER!')
    else:
        print(str(is_armstrong) + ', NOT AN ARMSTRONG NUMBER!')
    return is_armstrong
'''
6. Compute the prime factors of a given natural number.
A prime number is only evenly divisible by itself and 1.
Note that 1 is not a prime number.
EX: the number 52 and its factors [1, 2, 4, 13, 26, 52]
param: int
return: list
'''
def primeFactors(number):
    """Return every factor of *number* in ascending order.

    Matches the documented example: primeFactors(52) -> [1, 2, 4, 13, 26, 52]
    (despite the name, the exercise's example lists *all* divisors).  The
    original returned None, and its "prime" filter appended even loop indices,
    which is not a primality test at all.
    """
    # Enumerate divisor pairs up to sqrt(number): O(sqrt(n)) instead of O(n).
    small, large = [], []
    d = 1
    while d * d <= number:
        if number % d == 0:
            small.append(d)
            if d != number // d:
                large.append(number // d)
        d += 1
    return small + large[::-1]
'''
7. Determine if a sentence is a pangram. A pangram (Greek: παν γράμμα, pan
gramma, "every letter") is a sentence using every letter of the alphabet at
least once. The best known English pangram is:
The quick brown fox jumps over the lazy dog.
The alphabet used consists of ASCII letters a to z, inclusive, and is case
insensitive. Input will not contain non-ASCII symbols.
param: str
return: bool
'''
def pangram(sentence):
    """Return True iff *sentence* uses every ASCII letter a-z (case-insensitive).

    Prints the same verdict lines as before, and additionally returns the
    bool per the documented "return: bool" the original omitted.  Subset
    testing replaces the manual set-building loop.
    """
    alphabet = set('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
    used = set(sentence.upper())
    is_pangram = alphabet <= used
    if is_pangram:
        print('The sentence is a PANAGRAM')
    else:
        print('The sentence is NOT a PANAGRAM')
    return is_pangram
'''
8. Sort list of integers.
f([2,4,5,1,3,1]) = [1,1,2,3,4,5]
Rules:
- Do NOT sort it with .sort() or sorted(list) or any built-in tools.
- Sort it your own way
param: list
return: list
'''
def sort(numbers):
    """Return a new list with *numbers* in ascending order.

    Implements the exercise stub with insertion sort — no built-in sorting
    (list.sort / sorted) is used, per the rules.  The input list is not
    mutated.
    """
    result = []
    for n in numbers:
        # Walk back from the end to find the insertion point for n.
        i = len(result)
        while i > 0 and result[i - 1] > n:
            i -= 1
        result.insert(i, n)
    return result
'''
9. Create an implementation of the rotational cipher, also sometimes called
the Caesar cipher.
The Caesar cipher is a simple shift cipher that relies on transposing all the
letters in the alphabet using an integer key between 0 and 26. Using a key of
0 or 26 will always yield the same output due to modular arithmetic. The
letter is shifted for as many values as the value of the key.
The general notation for rotational ciphers is ROT + <key>. The most commonly
used rotational cipher is ROT13.
A ROT13 on the Latin alphabet would be as follows:
Plain: abcdefghijklmnopqrstuvwxyz Cipher: nopqrstuvwxyzabcdefghijklm It is
stronger than the Atbash cipher because it has 27 possible keys, and 25
usable keys.
Ciphertext is written out in the same formatting as the input including
spaces and punctuation.
Examples: ROT5 omg gives trl ROT0 c gives c ROT26 Cool gives Cool ROT13 The
quick brown fox jumps over the lazy dog. gives Gur dhvpx oebja sbk whzcf bire
gur ynml qbt. ROT13 Gur dhvpx oebja sbk whzcf bire gur ynml qbt. gives The
quick brown fox jumps over the lazy dog.
param: int, str
return: str
'''
def rotate(key, string):
    """Return *string* encoded with the rotational (Caesar) cipher ROT<key>.

    Each Latin letter is shifted by *key* positions (mod 26); case is
    preserved and non-letters pass through unchanged, so formatting,
    spaces and punctuation survive intact.  Implements the exercise stub.
    """
    encoded = []
    for ch in string:
        if 'a' <= ch <= 'z':
            encoded.append(chr((ord(ch) - ord('a') + key) % 26 + ord('a')))
        elif 'A' <= ch <= 'Z':
            encoded.append(chr((ord(ch) - ord('A') + key) % 26 + ord('A')))
        else:
            encoded.append(ch)
    return ''.join(encoded)
'''
10. Take 10 numbers as input from the user and store all the even numbers in a file called even.txt and
the odd numbers in a file called odd.txt.
param: none, from the keyboard
return: nothing
'''
def evenAndOdds():
    """Read 10 integers from the keyboard; write the even ones to even.txt
    and the odd ones to odd.txt, one number per line.

    Implements the exercise stub.  Files are created in the current working
    directory and overwritten on each run.
    """
    evens, odds = [], []
    for i in range(10):
        n = int(input('Enter number {} of 10: '.format(i + 1)))
        (evens if n % 2 == 0 else odds).append(n)
    with open('even.txt', 'w') as f:
        f.write('\n'.join(str(n) for n in evens))
    with open('odd.txt', 'w') as f:
        f.write('\n'.join(str(n) for n in odds))
if __name__ == "__main__":
main()
|
992,793 | 01a5823a3825baabe3530817bc36d6e52856e378 | """
This scripts performs some basic data quality checks
to ensure that the transformed data was correctly loaded
Airflow will be used for orchestration
"""
import pandas as pd
import numpy as np
import os
import sys
import time
import pathlib
from datetime import datetime, timedelta
import configparser
import json
from functools import reduce
import logging
import boto3
import argparse
from pyspark import SparkContext
from pyspark.sql import SparkSession, DataFrame
from pyspark.sql import functions as F
from pyspark.sql.functions import date_add
from pyspark.sql.types import (StructType as R,
StructField as Fld, DoubleType as Dbl, StringType as Str,
IntegerType as Int, DateType as Date, TimestampType as TimeStamp
)
DATE_FMT = datetime.strftime(datetime.today(), '%Y%m%d')
FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
# CFG_FILE = r'/usr/local/airflow/config/etl_config.cfg'
# CFG_FILE = "s3://immigrations-analytics1/config/etl_config.cfg"
# SAS_JAR = 'saurfang:spark-sas7bdat:3.0.0-s_2.12'
# SAS_JAR = "saurfang:spark-sas7bdat:2.0.0-s_2.11"
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def create_client(service, region, access_key_id, secret_access_key):
    """
    Create a client to access an AWS resource.

    :params service - any AWS service name (e.g. "s3")
    :params region - AWS specific region
    :params access_key_id - AWS credential
    :params secret_access_key - AWS credential
    Returns - a boto3 client
    """
    # Credentials are passed explicitly rather than relying on the ambient
    # environment/instance profile.
    return boto3.client(
        service,
        region_name=region,
        aws_access_key_id=access_key_id,
        aws_secret_access_key=secret_access_key,
    )
def create_spark_session():
    """
    Build (or fetch the already-running) Hive-enabled SparkSession.

    Returns - a SparkSession object
    """
    try:
        builder = SparkSession.builder.enableHiveSupport()
        session = builder.getOrCreate()
    except Exception:
        # Surface the failure in the log, then let the caller see the error.
        logger.error('Pyspark session failed to be created...')
        raise
    return session
def check_empty_table(spark, df):
    """
    Report how many rows the given table has (0 means it is empty; callers
    expect at least 1).

    :params spark - a Pyspark object (unused; kept for a uniform signature)
    :params df - a Pyspark DataFrame
    Returns - the number of records
    """
    row_count = df.count()
    return row_count
def check_null_columns(spark, df, cols):
    """
    Check whether primary-key columns contain null (or NaN) values.

    :params spark - a Pyspark object (unused; kept for a uniform signature)
    :params df - a Pyspark DataFrame
    :params cols - a list of column names
    Returns - the list of columns containing nulls; the one-element sentinel
              list ['failed'] if the column selection itself errored
    """
    null_cols_list = []
    # this assumes the col list matches the original schema
    # df = df.toDF(*cols)
    try:
        df = df.select(*cols)
        # Count, per column, the rows whose value is NaN or NULL, then collect
        # the single-row result into a {col: {0: count}} pandas-style dict.
        col_null_count = df.select([F.count(F.when(F.isnan(col) | F.col(col).isNull(), col)).alias(col) for col in cols]).toPandas().to_dict()
        null_cols_list = [k for k, v in col_null_count.items() if v[0] > 0]
    except Exception as e:
        logger.error('Probably and invalid column(s) was passed...')
        # Sentinel distinguishes "the check itself failed" from "no nulls".
        return ['failed']
    return null_cols_list
def enable_logging(log_dir, log_file):
    """
    Create the file handler shared across modules for logging.

    The log file is <log_dir>/<log_file><DATE_FMT>.log, i.e. the base name
    suffixed with today's date.

    :params log_dir - location of the log directory
    :params log_file - base file name for the log
    Returns - a configured logging.FileHandler
    """
    log_path = os.path.join(log_dir, log_file + DATE_FMT + '.log')
    handler = logging.FileHandler(log_path)
    handler.setFormatter(logging.Formatter(FORMAT))
    return handler
def main():
    """
    Run the data-quality checks:

    - parse CLI arguments (environment, optional S3 credentials, table lists)
    - load etl_config.cfg from the location matching the environment
    - verify each requested table exists and is non-empty
    - verify the named primary-key columns contain no nulls
    """
    t0 = time.time()
    # ---- command-line interface ----
    parser = argparse.ArgumentParser()
    parser.add_argument('-e', '--env', default='LOCAL', help='Enter one of DOCKER, LOCAL or S3')
    parser.add_argument('--bucket-name', help='Enter S3 bucket')
    parser.add_argument('--aws-access-key-id', help='Enter AWS access key id')
    parser.add_argument('--aws-secret-access-key', help='Enter AWS secrest access key')
    parser.add_argument('--aws-region', default='us-west-2', help='Enter AWS region')
    parser.add_argument('--tables', default='[]', type=json.loads, help='Enter list of tables to check')
    parser.add_argument('--table-col', default='{}', type=json.loads, help='Enter list of tables to check')
    # subparser = parser.add_subparsers(dest='subcommand', help='Can choose bucket name if S3 is chosen')
    # parser_bucket = subparser.add_parser('S3')
    # parser_bucket.add_argument('bucket', help='S3 bucket name')
    args = vars(parser.parse_args())
    args['env'] = args['env'].upper()
    # A bucket only makes sense with S3; S3 additionally requires credentials.
    if args['env'] != 'S3' and args['bucket_name']:
        parser.error('Can specify a bucket name with only S3...')
    if args['env'] == 'S3' and not (args['bucket_name'] and
                                    args['aws_access_key_id'] and
                                    args['aws_secret_access_key']):
        parser.error('Specify a bucket, access key and secret access key...')
        # NOTE(review): parser.error() already exits the process, so this bare
        # raise appears unreachable — confirm before relying on it.
        raise
    # print(args)
    # print(args['env'])
    # print(args['subcommand'])
    if args['env'] == 'S3':
        s3_client = create_client(
            "s3",
            region=args['aws_region'],
            access_key_id=args['aws_access_key_id'],
            secret_access_key=args['aws_secret_access_key']
        )
        # Exported so downstream AWS-aware libraries can pick the credentials up.
        os.environ['AWS_ACCESS_KEY_ID'] = args['aws_access_key_id'].strip()
        os.environ['AWS_SECRET_ACCESS_KEY'] = args['aws_secret_access_key'].strip()
    tables = args['tables']
    table_col_dict = args['table_col']
    # ---- load configuration from the environment-specific location ----
    config = configparser.ConfigParser()
    if args['env'] == 'DOCKER':
        CFG_FILE = r'/usr/local/airflow/config/etl_config.cfg'
        try:
            config.read(CFG_FILE)
        except Exception as e:
            print('Configuration file is missing or cannot be read...')
            raise
    elif args['env'] == 'S3':
        obj = s3_client.get_object(Bucket=args['bucket_name'], Key='config/etl_config.cfg')
        try:
            config.read_string(obj['Body'].read().decode())
        except Exception as e:
            print('Configuration file is missing or cannot be read...')
            raise
    else:
        CFG_FILE = r'/Users/home/Documents/dend/Data-Engineering-ND/Capstone/config/etl_config.cfg'
        try:
            config.read(CFG_FILE)
        except Exception as e:
            print('Configuration file is missing or cannot be read...')
            raise
    # ---- resolve the output directory that holds the tables to check ----
    if args['env'] == 'DOCKER':
        base_dir = config['DOCKER']['base_dir']
        log_dir = os.path.join(base_dir, config['LOCAL']['log_dir'])
        log_file = config['LOCAL']['dq_log_file']
        output_dir = os.path.join(base_dir, config['DOCKER']['output_dir'])
    elif args['env'] == 'S3':
        bucket = args['bucket_name']
        output_dir = config['S3']['s3_output_key']
        # NOTE(review): "s3a//" is missing the colon ("s3a://") — confirm
        # whether downstream path handling compensates for this.
        output_dir = os.path.join("s3a//", bucket, output_dir)
    else:
        base_dir = config['LOCAL']['base_dir']
        # log_dir = os.path.join(base_dir, config['LOCAL']['log_dir'])
        # log_file = config['LOCAL']['log_file']
        output_dir = os.path.join(base_dir, config['LOCAL']['output_dir'])
    # ---- logging: prefer the Hadoop/EMR location, fall back to LOCAL ----
    try:
        # Log file written to Hadoop EMR env
        base_dir = config['HADOOP']['base_dir']
        log_dir = os.path.join(base_dir, config['HADOOP']['log_dir'])
        log_file = config['HADOOP']['dq_log_file']
        pathlib.Path(log_dir).mkdir(exist_ok=True)
        file_handler = enable_logging(log_dir, log_file)
        logger.addHandler(file_handler)
        print("Create log dir if it doesn't exist...")
    except:
        base_dir = config['LOCAL']['base_dir']
        log_dir = os.path.join(base_dir, config['LOCAL']['log_dir'])
        log_file = config['LOCAL']['dq_log_file']
        pathlib.Path(log_dir).mkdir(exist_ok=True)
        file_handler = enable_logging(log_dir, log_file)
        logger.addHandler(file_handler)
        print("Create log dir if it doesn't exist...")
    logger.info('Data quality check has started...')
    spark = create_spark_session()
    logger.info('Pyspark session created...')
    logger.info("Check whether table exists...")
    # ---- existence check: keep only tables that are actually present ----
    valid_tables = []
    if args['env'] == 'S3':
        for table in tables:
            res = s3_client.list_objects(Bucket=bucket, Prefix=os.path.join(output_dir, table))
            if 'Contents' in res:
                valid_tables.append(table)
            else:
                logger.error(f'Table {table} is invalid...')
    else:
        for table in tables:
            try:
                if os.path.isdir(os.path.join(output_dir, table)):
                    valid_tables.append(table)
            except Exception as e:
                logger.error(f'Table {table} is invalid...')
                logger.error(e)
    # assume the table names are the same in the
    # list and dict
    if len(table_col_dict) > 0:
        valid_table_cols = {table: table_col_dict[table] for table in valid_tables}
    else:
        valid_table_cols = {}
    logger.info('Checking for empty Dataframes...')
    if len(valid_tables) > 0:
        # NOTE(review): this loop iterates `tables`, not `valid_tables`, so
        # tables that failed the existence check are still read — confirm intent.
        for table in tables:
            try:
                df = spark.read.parquet(os.path.join(output_dir, table), header=True)
                logger.info(f'Table {table} being checked is a parquet table')
            except:
                df = spark.read.csv(os.path.join(output_dir, table), header=True)
                logger.info(f'Table {table} being checked is a csv table...')
            if check_empty_table(spark, df) == 0:
                logger.error(f'Table {table} has empty rows...')
            else:
                logger.info(f'Table {table} has at least 1 record...')
    else:
        logger.info('No tables to check...')
    logger.info('Checking for null columns in tables...')
    if len(valid_table_cols) > 0:
        for table, col_list in table_col_dict.items():
            try:
                df = spark.read.parquet(os.path.join(output_dir, table), header=True)
                logger.info(f'Table {table} being checked is a parquet table')
            except:
                df = spark.read.csv(os.path.join(output_dir, table), header=True)
                logger.info(f'Table {table} being checked is a csv table...')
            # NOTE(review): check_null_columns may run up to three times per
            # table below; caching the result once would avoid repeated Spark jobs.
            if len(check_null_columns(spark, df, col_list)) != 0 and check_null_columns(spark, df, col_list)[0] == 'failed':
                logger.error('The null column check failed possibly due to invalid column selection...')
            elif len(check_null_columns(spark, df, col_list)) > 0:
                logger.info(f'Columns with nulls {col_list}')
                logger.error(f'Table {table} has columns with null values...')
            else:
                logger.info(f'Table {table} has no null values in the primary key(s)...')
    else:
        logger.info('No table columns to check...')
    logger.info('Data quality check has completed...')
    logger.info('Time taken to complete job {} minutes'.format((time.time() - t0) / 60))
if __name__ == '__main__':
main()
|
992,794 | fb685c8d80c4a28a1bf7fed443d3429bb676add5 | # Various examples
# constants = {"pi": 3.14, "e": 2.71, "root 2": 1.41}
# print(constants)
# results = {'pass': 0, 'fail': '0'}
# print(results)
# results['withdrawl'] = 1
# print(results)
# results['pass'] = 3
# results['fail'] = results['fail'] + '1'
# print(results)
# print('Fail = {}'.format(type(results['fail'])))
# print('Pass = {}'.format(type(results['pass'])))
# print(results['pass'])
# print(results['fail'])
# print(results['withdrawl'])
# a = results.pop('fail')
# print(results)
# print('a = {}'.format(a))
# print('lunghezza dizionario: {} elementi'.format(len(results)))
# if 'pass' in results:
# print(results['pass'])
# if 1 in results.values():
# print(results.keys())
# while loop example
# Count how many times each string is entered by the user
counts = {}
# Loop until 5 distinct strings have been entered; re-entering a known string
# only bumps its count and does not advance the loop condition.
while len(counts) < 5:
    s = input("Enter a string: ")
    # if s is already a key in the dictionary then increase its count by 1.
    # Otherwise add s to the dictionary with a count of 1.
    if s in counts:
        counts[s] += 1
    else:
        counts[s] = 1
# Displays all of the strings and their counts
for k in counts:
    print('"{}" occurred "{}" times'.format(k, counts[k]))
|
992,795 | 809b513956c33a5a149cc3410c9f8cf8a01859c2 | import pandas as pd
def checkForeignTables(query, path, database):
    """Validate foreign-key references against a data dictionary CSV.

    :param query: list of dicts, each with 'relationshipTable' and
        'relationshipAttribute' keys; the string 'FALSE' marks an entry
        with no relationship to verify.
    :param path: directory holding the data dictionary files.
    :param database: database name; the dictionary file read is
        ``<path>\\dataDict-<database>.csv`` with 'table'/'attribute' columns.
    :return: True if every referenced (table, attribute) pair exists in the
        data dictionary, False otherwise.
    """
    dict_path = path + "\\dataDict-" + database + ".csv"
    df = pd.read_csv(dict_path)
    for dic in query:
        rel_table = dic['relationshipTable']
        rel_attr = dic['relationshipAttribute']
        # 'FALSE' means this attribute declares no foreign relationship.
        if rel_table != 'FALSE':
            # Fix: original evaluated this expression twice and used a dead
            # `dummy = 1` placeholder branch; check the negation directly.
            exists = ((df['table'] == rel_table)
                      & (df['attribute'] == rel_attr)).any()
            if not exists:
                return False
    return True
def createTableFile(query, path, database, tableName):
    """Write an empty CSV table file whose header row lists the
    'attributeN' values taken from each dict in *query*.

    ``database`` is accepted for interface parity but not used here.
    """
    header = [dic['attributeN'] for dic in query]
    frame = pd.DataFrame(columns=header)
    frame.to_csv(path + "\\" + tableName, index=False)
|
992,796 | 38a4b1639a9d75b4ff95317e3253e3e4c28c07d4 | # Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from __future__ import annotations
from typing import Union, TYPE_CHECKING
from datadog_api_client.model_utils import (
ModelNormal,
cached_property,
unset,
UnsetType,
)
if TYPE_CHECKING:
from datadog_api_client.v1.model.logs_filter import LogsFilter
class LogsCategoryProcessorCategory(ModelNormal):
    """A single category for the logs category processor.

    Auto-generated OpenAPI model pairing a log ``filter`` with the ``name``
    assigned to the target attribute when that filter matches.
    """

    @cached_property
    def openapi_types(_):
        # Imported lazily (here rather than at module top) to avoid a
        # circular import at module load time.
        from datadog_api_client.v1.model.logs_filter import LogsFilter

        return {
            "filter": (LogsFilter,),
            "name": (str,),
        }

    # Maps python attribute names to their JSON wire names (identical here).
    attribute_map = {
        "filter": "filter",
        "name": "name",
    }

    def __init__(self_, filter: Union[LogsFilter, UnsetType] = unset, name: Union[str, UnsetType] = unset, **kwargs):
        """
        Object describing the logs filter.

        :param filter: Filter for logs.
        :type filter: LogsFilter, optional

        :param name: Value to assign to the target attribute.
        :type name: str, optional
        """
        # Only forward explicitly-supplied values; ``unset`` sentinels are
        # dropped so the ModelNormal base does not store them as attributes.
        if filter is not unset:
            kwargs["filter"] = filter
        if name is not unset:
            kwargs["name"] = name
        super().__init__(kwargs)
|
992,797 | 31287674a6974825d45bb32389eb17c9447007ab | """
항구에 들어오는 배
민석이는 항구가 있는 작은 마을에 산다.
이 항구에는 배가 아주 드물게 지나다닌다.
민석이는 어느날 모든 배들이 항구에 들어온 것을 보았다.
민석이는 이 날을 1일차로 지정하였다.
민석이는 배가 한 척이라도 항구에 들렀던 날을 “즐거운 날"로 이름짓고, 1일차부터 즐거운 날들을 모두 기록하였다.
그러던 중, 한 가지 규칙을 발견했는데, 그 규칙은 각 배들은 항구에 주기적으로 들른다는 것이었다.
예를 들어, 주기가 3인 배는 항구에 1일차, 4일차, 7일차, 10일차 등에 방문하게 된다.
민석이가 1일차부터 기록한 “즐거운 날"들의 목록이 주어질 때, 항구에 들렀던 배의 최소 수를 알아내자.
이 때, 항상 답이 존재하는 입력만 주어진다.
[입력]
첫 번째 줄에 테스트 케이스의 수 T가 주어진다.
각 테스트 케이스의 첫 번째 줄에 즐거운 날의 수 N이 주어진다. (2 ≤ N ≤ 5000)
각 테스트 케이스의 두 번째 줄부터 N개의 줄에 걸쳐 즐거운 날의 정보가 오름차순으로 정렬되어 주어진다.
시작하는 날은 항상 1일이고, 마지막 날은 109보다 작은 값이다.
[출력]
각 테스트 케이스마다 항구에 들렀던 배의 최소 수를 출력한다.
입력
3
3
1
3
4
5
1
7
10
13
19
3
1
500000000
999999999
출력
#1 2
#2 2
#3 1
"""
import sys
sys.stdin = open('input.txt', 'r')
# Greedy solution for the "ships visiting the harbor" problem: every ship
# visits on day 1, then periodically; a ship with period p visits day d
# iff (d - 1) % p == 0.  Walk the happy days in order and add a new ship
# (with period day - 1) only when no already-chosen period covers the day.
# (Removed a large body of dead commented-out attempts from the original.)
T = int(input())
for tc in range(1, T + 1):
    N = int(input())
    happy = [int(input()) for _ in range(N)]

    cnt = 0        # minimum number of ships found so far
    periods = []   # periods of the ships chosen so far
    # Day 1 (happy[0]) is covered by every ship, so start from the second day.
    for day in happy[1:]:
        gap = day - 1
        # A period of exactly `gap` always covers this day; only add it when
        # no existing period already does.
        if not any(gap % p == 0 for p in periods):
            periods.append(gap)
            cnt += 1
    print(f"#{tc} {cnt}")
992,798 | 15e51f3c4d661ed888e5323ec64f91c661bebc82 | import requests
import json
import sys
# Query the shopfloor service for a unit's process-check record, identified
# by the serial number passed on the command line.
SerialNumber = sys.argv[1]
file = "/vault/data_collection/test_station_config/gh_station_info.json"


def read_json_config(file):
    """Load and return the JSON configuration stored at *file*."""
    with open(file) as json_file:
        config = json.load(json_file)
    return config


stationInfo = read_json_config(file)
shopfloorURL = stationInfo["ghinfo"]["SFC_IP"]
Tsid = stationInfo["ghinfo"]["STATION_ID"]
# Bug fix: the original URL contained a stray " /" after the host
# ("http://host /BobcatService..."), which is not a valid URL.
URL = ("http://" + shopfloorURL + "/BobcatService.svc/request?sn="
       + SerialNumber + "&p=unit_process_check&c=QUERY_RECORD&tsid=" + Tsid)
res = requests.get(url=URL)
print(res.text)
992,799 | 38a3c3acaadcf15de99fa619a50d64685e734272 | def hello_world():
print("Hello World from Python")
def calEMI():
    """Print the (hard-coded) EMI amount."""
    message = "EMI is 20K"
    print(message)
def homeEMI():
    """Print the (hard-coded) home-loan EMI amount."""
    message = "Home EMI is 50K"
    print(message)
def personalLoanEMI():
    """Print the personal-loan EMI placeholder message."""
    message = "Personal Loan EMI**"
    print(message)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.