seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
34825925053 | # todo: add hash sum to judge solution file name on web-app side cuz it can break everything
import os
import shutil
import subprocess
from typing import List
import source.models
from .service import sequence_to_dict
from .static import *
from .config import BUILD_SOURCE_MAX_TIME, SQL_GET_TASK_ATTRIBUTE, SQL_GET_CODE_FILE
class TaskManager(object):
    """Runs a submitted solution against a task's tests inside a scratch directory."""
    # Scratch directory (relative to cwd) holding sources, I/O files and logs.
    env_dir: str = 'environment'
    # File the solution reads its test input from.
    input_file_name: str = 'input.in'
    # File the solution writes its output to.
    output_file_name: str = 'output.out'
def __init__(self, event: source.models.Event):
    """Capture the event's solution, code file and tests for later processing."""
    self.working_dir = os.getcwd()
    self.solution = event.solution
    self.code_file: source.models.CodeFile = event.code_file
    self.tests = event.tests
    # Replaced by the judge's reference solution in prepare_environment().
    self.judge_solution_source: source.models.CodeFile = source.models.CodeFile()
def prepare_environment(self):
    """Recreate the scratch directory and stage everything needed for a run.

    Writes the user's source plus empty input/output placeholders, then
    fetches the judge's reference solution from the database and writes
    it alongside.
    """
    env_path: str = os.path.join(self.working_dir, self.env_dir)
    # Start from a clean slate: drop leftovers from any previous run.
    if os.path.exists(env_path):
        shutil.rmtree(env_path)
    os.mkdir(env_path)
    code_file: str = os.path.join(env_path, self.code_file.file_name)
    input_file: str = os.path.join(env_path, self.input_file_name)
    output_file: str = os.path.join(env_path, self.output_file_name)
    # write_mode comes from `from .static import *` -- presumably 'w'; verify.
    __code = open(code_file, write_mode)
    __code.write(self.code_file.code)
    __code.close()
    # Empty placeholders so later read-mode opens succeed.
    __input = open(input_file, write_mode)
    __input.close()
    __output = open(output_file, write_mode)
    __output.close()
    db = source.models.DataBase()
    db.execute(SQL_GET_CODE_FILE, self.__get_task_solution_file_id__())
    code_file_attributes = {}
    # The query is expected to yield one row; the loop keeps the last one.
    for result in db.result():
        code_file_attributes = sequence_to_dict(result, class_CodeFile_attributes)
    code_file_obj: source.models.CodeFile = source.models.CodeFile(**code_file_attributes)
    __judge_code = open(os.path.join(self.working_dir, self.env_dir, code_file_obj.file_name), write_mode)
    __judge_code.write(code_file_obj.code)
    __judge_code.close()
    self.judge_solution_source = code_file_obj
    print(os.listdir(env_path))
# noinspection DuplicatedCode
def check_solution_event(self):
    """Judge the user's solution on every test and record verdict and points.

    Flow: build the user's code, build the judge's checker, run each test,
    compare (or judge-score) the output, then apply the task's grading system.
    """
    self.prepare_environment()
    if self.__build__() != 0:
        self.solution.__update__('verdict', source.models.Verdict.BUILD_FAILED)
        self.solution.__update__('verdict_text', 'Ошибка компиляции')
        return
    # Temporarily swap BuildHandler's class-level output names so the judge
    # solution compiles into its own executable/log, then restore them.
    _1 = BuildHandler.executable_file_name
    _2 = BuildHandler.build_log_file_name
    BuildHandler.executable_file_name = BuildHandler.executable_judge_file_name
    BuildHandler.build_log_file_name = BuildHandler.judge_build_log_file_name
    self.build_judge_solution(path=os.path.join(self.working_dir,
                                                self.env_dir,
                                                self.judge_solution_source.file_name),
                              lang=self.judge_solution_source.language)
    BuildHandler.executable_file_name = _1
    BuildHandler.build_log_file_name = _2
    correct_tests_number: int = 0
    points: int = 0
    grading_system = None
    for test_number, test in enumerate(self.tests):
        input_file = open(os.path.join(self.env_dir, self.input_file_name), write_mode)
        input_file.write(test.content)
        input_file.close()
        print(f'Running on test {test_number + 1}')
        exec_code = self.__execute__(test_number=test_number + 1)
        print('execute code', exec_code)
        # Positive status -> runtime error; negative -> time limit
        # (ExecuteHandler.execute returns -1 on timeout).
        if exec_code > 0:
            self.solution.__update__('verdict', source.models.Verdict.RUNTIME_ERROR)
            self.solution.__update__('verdict_text', f'Ошибка исполнения на тесте {test_number + 1}')
            return
        elif exec_code < 0:
            self.solution.__update__('verdict', source.models.Verdict.TIME_LIMIT_ERROR)
            self.solution.__update__('verdict_text', f'Превышен лимит времени на тесте {test_number + 1}')
            return
        output_file = open(os.path.join(self.env_dir, self.output_file_name), read_mode)
        user_output = output_file.readlines()
        output_file.close()
        answer_type = self.__get_task_answer_type__()
        grading_system = self.__get_task_grading_system__()
        if answer_type == source.models.TaskAnswerType.CONSTANT_ANSWER:
            # Fixed expected answer: plain comparison against the stored one.
            if self.is_constant_answer_valid(user_output=user_output, test_number=test_number):
                correct_tests_number += 1
        elif answer_type == source.models.TaskAnswerType.VARIABLE_ANSWER:
            # Free-form answer: the judge program scores the user's output.
            judge_verdict: int = self.is_variable_answer_valid(user_output=user_output, test_number=test_number)
            if grading_system == source.models.TaskGradingSystem.BINARY:
                correct_tests_number += 1 if judge_verdict > 0 else 0
            elif grading_system == source.models.TaskGradingSystem.BINARY_FOR_EACH_TEST:
                correct_tests_number += 1 if judge_verdict > 0 else 0
            elif grading_system == source.models.TaskGradingSystem.N_POINTS_FOR_EACH_TEST:
                correct_tests_number += 1 if judge_verdict > 0 else 0
                points += judge_verdict
    # Final verdict/points according to the task's grading system.
    if grading_system == source.models.TaskGradingSystem.BINARY:
        self.solution.__update__('points', 1 if correct_tests_number == len(self.tests) else 0)
        if correct_tests_number == len(self.tests):
            self.solution.__update__('verdict', source.models.Verdict.CORRECT_SOLUTION)
            self.solution.__update__('verdict_text', f'Все тесты пройдены')
        else:
            self.solution.__update__('verdict', source.models.Verdict.WRONG_ANSWER)
    else:
        if correct_tests_number == len(self.tests):
            self.solution.__update__('verdict', source.models.Verdict.CORRECT_SOLUTION)
            self.solution.__update__('verdict_text', f'Все тесты пройдены')
        elif 0 < correct_tests_number < len(self.tests):
            self.solution.__update__('verdict', source.models.Verdict.PARTIAL_SOLUTION)
        elif correct_tests_number == 0:
            self.solution.__update__('verdict', source.models.Verdict.WRONG_ANSWER)
        if grading_system == source.models.TaskGradingSystem.BINARY_FOR_EACH_TEST:
            self.solution.__update__('points', correct_tests_number)
        else:
            self.solution.__update__('points', points)
# noinspection DuplicatedCode
def validate_task_event(self):
    """Run the reference solution on each test to generate expected answers.

    Used when a task author uploads tests: the output produced on every
    test is stored back as that test's `right_answer`.
    """
    self.solution.__update__('status', source.models.Status.IN_PROGRESS)
    self.prepare_environment()
    if self.__build__() != 0:
        self.solution.__update__('verdict', source.models.Verdict.BUILD_FAILED)
        self.solution.__update__('status', source.models.Status.CHECK_FAILED)
        return
    for test_number, test in enumerate(self.tests):
        input_file = open(os.path.join(self.env_dir, self.input_file_name), write_mode)
        input_file.write(test.content)
        input_file.close()
        print(f'Running on test {test_number + 1}')
        # NOTE(review): check_solution_event passes test_number + 1 here;
        # the 0-based value only changes the per-test log file name.
        exec_code = self.__execute__(test_number=test_number)
        if exec_code > 0:
            self.solution.__update__('verdict', source.models.Verdict.RUNTIME_ERROR)
            self.solution.__update__('status', source.models.Status.CHECK_FAILED)
            return
        elif exec_code < 0:
            self.solution.__update__('verdict', source.models.Verdict.TIME_LIMIT_ERROR)
            self.solution.__update__('status', source.models.Status.CHECK_FAILED)
            return
        output_file = open(os.path.join(self.env_dir, self.output_file_name), read_mode)
        judge_output = output_file.read()
        output_file.close()
        test.__update__('right_answer', judge_output)
    self.solution.__update__('verdict', source.models.Verdict.CORRECT_SOLUTION)
    self.solution.__update__('status', source.models.Status.CHECK_SUCCESS)
def __get_task_grading_system__(self):
    """Fetch the task's grading system (binary / per-test / points)."""
    return source.models.Task.get_attribute('grading_system', self.solution.task_id)
def __get_task_time_limit__(self):
    """Fetch the task's per-test time limit in seconds."""
    return source.models.Task.get_attribute('time_limit_seconds', self.solution.task_id)
def __get_task_answer_type__(self):
    """Fetch whether the task expects a constant or a judge-scored answer."""
    return source.models.Task.get_attribute('answer_type', self.solution.task_id)
def __get_task_solution_file_id__(self):
    """Fetch the id of the judge's reference solution file for the task."""
    return source.models.Task.get_attribute('solution_file_id', self.solution.task_id)
def build_judge_solution(self, path, lang):
    """Compile the judge's reference solution located at *path* in *lang*."""
    return self.__build__(path=path, lang=lang)
def __build__(self, **kwargs) -> 'Return code':
    """:param kwargs - can contain 'lang' and 'path' """
    # Defaults to the user's submission; judge builds override via kwargs.
    print('lang', 0 if 'lang' not in kwargs else kwargs['lang'])
    if BuildHandler.get_execution_type(
            self.code_file.language
            if 'lang' not in kwargs else kwargs['lang']
    ) == source.models.CodeExecuteType.BUILD_AND_RUN:
        # NOTE(review): when kwargs['path'] is absolute, os.path.join discards
        # the working_dir/env_dir prefix -- that is what makes this work.
        build_handler = BuildHandler(source_file_path=os.path.join(self.working_dir,
                                                                   self.env_dir,
                                                                   self.code_file.file_name
                                                                   if 'path' not in kwargs else kwargs['path']),
                                     language=self.code_file.language
                                     if 'lang' not in kwargs else kwargs['lang'])
        return build_handler.build()
    else:
        # Interpreted languages need no build step.
        return 0
def __execute__(self, test_number: int):
    """Run the built solution once; return its exit status (-1 on timeout)."""
    execute_handler = ExecuteHandler(executable_file_path=self.get_execute_path(self.code_file.language),
                                     language=self.code_file.language,
                                     time_limit=self.__get_task_time_limit__(),
                                     test_number=test_number)
    return execute_handler.execute()
def is_constant_answer_valid(self, user_output, test_number: int) -> bool:
    """True when the user's output matches the test's stored answer.

    Both sides are normalised via handle_output_array before comparing, so
    whitespace layout does not matter. Note that handle_output_array mutates
    its argument, so *user_output* is tokenised in place as a side effect.
    """
    expected_lines = TaskManager.string_to_array(self.tests[test_number].right_answer)
    print('right ans:\n', expected_lines)
    print('user ans: \n', user_output)
    expected = TaskManager.handle_output_array(
        TaskManager.string_to_array(self.tests[test_number].right_answer))
    return expected == TaskManager.handle_output_array(user_output)
def is_variable_answer_valid(self, user_output, test_number: int) -> int or None:
    """Score the user's output with the judge program.

    The judge executable receives the original test input followed by the
    user's output, and is expected to print an integer score (0 = wrong).
    Returns 0 if the judge itself fails to run.
    """
    judge_execution_handler: ExecuteHandler = ExecuteHandler(
        executable_file_path=os.path.join(self.env_dir,
                                          BuildHandler.executable_judge_file_name),
        language=self.judge_solution_source.language,
        time_limit=2,
        test_number=test_number)
    # NOTE(review): several file handles below are never closed explicitly.
    user_out: str = open(f'{os.path.join(self.working_dir, self.env_dir, self.output_file_name)}', read_mode).read()
    test_content: str = self.tests[test_number].content
    print('user output:', user_out)
    judge_input_content: str = f'{test_content}\n{user_out}'
    print('judge input:\n=====\n', judge_input_content, '\n=====')
    __input = open(f'{os.path.join(self.working_dir, self.env_dir, self.input_file_name)}', write_mode)
    __input.write(judge_input_content)
    __input.close()
    if judge_execution_handler.execute() != 0:
        return 0
    output = open(os.path.join(self.working_dir, self.env_dir, self.output_file_name), read_mode)
    print('Judge output:', output.read())
    # Rewind so the score can be re-read after the debug print above.
    output.seek(0)
    return int(output.read())
@staticmethod
def string_to_array(string) -> List[str]:
result: list = []
for line in string.split('\n'):
result.append(line)
return result
@staticmethod
def string_drop_special_symbols(string: str) -> str:
return string.replace('\n', '').replace('\r', '').replace('\t', '')
@staticmethod
def handle_output_array(array: List[str] or List[List[str]]) -> List[List[str]]:
    """Tokenise each line in place: strip control chars, split on whitespace.

    Mutates and returns *array*; afterwards each element is a token list.
    """
    for index, line in enumerate(array):
        array[index] = TaskManager.string_drop_special_symbols(line).split()
    return array
# noinspection DuplicatedCode
def get_execute_path(self, language: source.models.Language):
    """Return what ExecuteHandler should run for *language*.

    Compiled languages get the built executable, Python gets the source
    path and Java gets the class name; None for unsupported languages.
    """
    execute_path = {
        source.models.Language.GNU_ASM: self.get_gnu_exe_path,
        source.models.Language.GNU_C99: self.get_gnu_exe_path,
        source.models.Language.GNU_C11: self.get_gnu_exe_path,
        source.models.Language.GNU_CXX_11: self.get_gnu_exe_path,
        source.models.Language.GNU_CXX_14: self.get_gnu_exe_path,
        source.models.Language.GNU_CXX_17: self.get_gnu_exe_path,
        source.models.Language.GNU_CXX_20: self.get_gnu_exe_path,
        source.models.Language.PYTHON_2_7: self.get_absolute_path,
        source.models.Language.PYTHON_3_9: self.get_absolute_path,
        source.models.Language.JAVA_8: self.get_class_name,
    }
    try:
        return execute_path[language]()
    except KeyError:
        return None
def get_absolute_path(self):
    """Absolute path of the user's source file (run directly, e.g. Python)."""
    return os.path.join(self.working_dir, self.env_dir, self.code_file.file_name)
def get_gnu_exe_path(self):
    """Relative path of the compiled executable produced by BuildHandler."""
    return os.path.join(self.env_dir, BuildHandler.executable_file_name)
def get_class_name(self):
    """Java class name: the source file name without its extension."""
    return self.code_file.file_name.split('.')[0]
class BuildHandler(object):
    """Compiles a source file into an executable inside the environment dir."""
    # Output names; check_solution_event temporarily swaps these for judge builds.
    executable_file_name: str = 'solution_executable'
    executable_judge_file_name: str = 'judge_solution_executable'
    build_log_file_name: str = 'build_log.out'
    judge_build_log_file_name: str = 'judge_build_log.out'
    def __init__(self, source_file_path: str, language: source.models.Language):
        """Remember the source path and language; cwd is the project root."""
        print('build init')
        self.working_dir = os.getcwd()
        self.source_file = source_file_path
        self.language = language
@staticmethod
def get_execution_type(language: source.models.Language):
    """Classify *language* as BUILD_AND_RUN or JUST_RUN (None if unknown)."""
    print('get_execution_type')
    lang_info = {
        source.models.Language.GNU_ASM: source.models.CodeExecuteType.BUILD_AND_RUN,
        source.models.Language.GNU_C99: source.models.CodeExecuteType.BUILD_AND_RUN,
        source.models.Language.GNU_C11: source.models.CodeExecuteType.BUILD_AND_RUN,
        source.models.Language.GNU_CXX_11: source.models.CodeExecuteType.BUILD_AND_RUN,
        source.models.Language.GNU_CXX_14: source.models.CodeExecuteType.BUILD_AND_RUN,
        source.models.Language.GNU_CXX_17: source.models.CodeExecuteType.BUILD_AND_RUN,
        source.models.Language.GNU_CXX_20: source.models.CodeExecuteType.BUILD_AND_RUN,
        source.models.Language.PYTHON_2_7: source.models.CodeExecuteType.JUST_RUN,
        source.models.Language.PYTHON_3_9: source.models.CodeExecuteType.JUST_RUN,
        source.models.Language.JAVA_8: source.models.CodeExecuteType.BUILD_AND_RUN,
    }
    try:
        return lang_info[language]
    except KeyError:
        return None
def build(self) -> 'Return code':
    """Compile the source file and append compiler output to the build log.

    Returns the compiler's exit status, or 1 if compilation exceeded
    BUILD_SOURCE_MAX_TIME.
    """
    print('build')
    build_command: str = self.get_build_command(source_path=self.source_file,
                                                exe_path=os.path.join(
                                                    self.working_dir,
                                                    TaskManager.env_dir,
                                                    self.executable_file_name
                                                ),
                                                language=self.language)
    print()
    print(build_command)
    build_process = subprocess.Popen(
        build_command,
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    try:
        build_process.wait(BUILD_SOURCE_MAX_TIME)
    except subprocess.TimeoutExpired:
        # Bug fix: reap the stuck compiler instead of leaving it running.
        build_process.kill()
        return 1
    # Bug fix: communicate() may only be called once per process; the
    # original called it twice, so stderr was read from drained pipes.
    stdout, stderr = build_process.communicate()
    with open(os.path.join(self.working_dir,
                           TaskManager.env_dir,
                           self.build_log_file_name), 'a') as log:
        log.write(stdout.decode('utf-8'))
        log.write(stderr.decode('utf-8'))
    print('code is', build_process.poll())
    return build_process.poll()
def get_build_command(self, source_path: str,
                      exe_path: str,
                      language: source.models.Language):
    """Build the full compiler invocation for *language*; None if unsupported."""
    print('get_build_command')
    build_command = {
        source.models.Language.GNU_ASM: self.gbc_gnu_asm,
        source.models.Language.GNU_C99: self.gbc_gnu_gcc_c99,
        source.models.Language.GNU_C11: self.gbc_gnu_gcc_c11,
        source.models.Language.GNU_CXX_11: self.gbc_gnu_gxx_cxx11,
        source.models.Language.GNU_CXX_14: self.gbc_gnu_gxx_cxx14,
        source.models.Language.GNU_CXX_17: self.gbc_gnu_gxx_cxx17,
        source.models.Language.GNU_CXX_20: self.gbc_gnu_gxx_cxx20,
        source.models.Language.JAVA_8: self.gbc_java8,
    }
    try:
        return build_command[language](source_path=source_path,
                                       exe_path=exe_path,
                                       log=os.path.join(self.working_dir,
                                                        TaskManager.env_dir,
                                                        self.build_log_file_name))
    except KeyError:
        print('key error')
        return None
# Build-command templates: each returns a shell line that compiles
# source_path into exe_path and redirects compiler stdout to the log file.
@staticmethod
def gbc_gnu_asm(source_path: str, exe_path: str, log: str):
    return f'gcc -s {source_path} -o {exe_path} -Wall -v -O3 -Ofast > {log}'
@staticmethod
def gbc_gnu_gcc_c99(source_path: str, exe_path: str, log: str):
    return f'gcc -std=c99 {source_path} -o {exe_path} -Wall -v -O3 -Ofast > {log}'
@staticmethod
def gbc_gnu_gcc_c11(source_path: str, exe_path: str, log: str):
    return f'gcc -std=c11 {source_path} -o {exe_path} -Wall -v -O3 -Ofast > {log}'
@staticmethod
def gbc_gnu_gxx_cxx11(source_path: str, exe_path: str, log: str):
    return f'g++ -std=c++11 {source_path} -o {exe_path} -Wall -v -O3 -Ofast > {log}'
@staticmethod
def gbc_gnu_gxx_cxx14(source_path: str, exe_path: str, log: str):
    return f'g++ -std=c++14 {source_path} -o {exe_path} -Wall -v -O3 -Ofast > {log}'
@staticmethod
def gbc_gnu_gxx_cxx17(source_path: str, exe_path: str, log: str):
    return f'g++ -std=c++17 {source_path} -o {exe_path} -Wall -v -O3 -Ofast > {log}'
@staticmethod
def gbc_gnu_gxx_cxx20(source_path: str, exe_path: str, log: str):
    # -std=c++2a is the pre-GCC-10 spelling of C++20.
    return f'g++ -std=c++2a {source_path} -o {exe_path} -Wall -v -O3 -Ofast > {log}'
@staticmethod
def gbc_java8(source_path: str, exe_path: str, log: str):
    # javac writes .class files next to the source; exe_path is unused here.
    return f'javac -cp "{os.path.join(os.getcwd(), TaskManager.env_dir)}" {source_path} > {log}'
class ExecuteHandler(object):
    """Runs an executable under a time limit with redirected stdin/stdout."""
    execute_log_file_name: str = 'execute_log.out'
    def __init__(self, executable_file_path: str,
                 language: source.models.Language,
                 time_limit: int,
                 test_number: int):
        """Store run parameters; test_number only affects the log file name."""
        self.executable_path = executable_file_path
        self.language = language
        self.time_limit = time_limit
        self.test_number = test_number
        try:
            # Java: derive the class name from the path's last component.
            # NOTE(review): split() never raises IndexError here, so the
            # except clause below is effectively dead code.
            self.executable_class = executable_file_path.split('/')[-1].split('.')[0]
            print(self.executable_class)
        except IndexError:
            pass
def get_execute_command(self):
    """Map self.language to a run command; None if the language is unknown."""
    execute_command: dict = {
        source.models.Language.GNU_ASM: self.gec_gnu_asm,
        source.models.Language.GNU_C99: self.gec_gnu_gcc_c99,
        source.models.Language.GNU_C11: self.gec_gnu_gcc_c11,
        source.models.Language.GNU_CXX_11: self.gec_gnu_gxx_cxx11,
        source.models.Language.GNU_CXX_14: self.gec_gnu_gxx_cxx14,
        source.models.Language.GNU_CXX_17: self.gec_gnu_gxx_cxx17,
        source.models.Language.GNU_CXX_20: self.gec_gnu_gxx_cxx20,
        source.models.Language.PYTHON_2_7: self.gec_python2,
        source.models.Language.PYTHON_3_9: self.gec_python3,
        source.models.Language.JAVA_8: self.gec_java8,
    }
    try:
        return execute_command[self.language](self.executable_path)
    except KeyError:
        return None
@staticmethod
def get_iostream_route():
    """Shell redirection suffix: read input.in and write output.out."""
    wd: str = os.getcwd()
    env: str = TaskManager.env_dir
    in_s: str = TaskManager.input_file_name
    out_s: str = TaskManager.output_file_name
    return f' < {os.path.join(wd, env, in_s)} > {os.path.join(wd, env, out_s)}'
# Run-command templates: each returns a shell line launching the solution
# with stdin/stdout redirected through get_iostream_route().
@staticmethod
def gec_gnu_asm(executable_path: str):
    # Bug fix: the original emitted '.{path}' (e.g. '.environment/exe'),
    # which is not a valid command; every other compiled language uses './'.
    return f'./{executable_path}' + ExecuteHandler.get_iostream_route()
@staticmethod
def gec_gnu_gcc_c99(executable_path: str):
    return f'./{executable_path}' + ExecuteHandler.get_iostream_route()
@staticmethod
def gec_gnu_gcc_c11(executable_path: str):
    return f'./{executable_path}' + ExecuteHandler.get_iostream_route()
@staticmethod
def gec_gnu_gxx_cxx11(executable_path: str):
    return f'./{executable_path}' + ExecuteHandler.get_iostream_route()
@staticmethod
def gec_gnu_gxx_cxx14(executable_path: str):
    return f'./{executable_path}' + ExecuteHandler.get_iostream_route()
@staticmethod
def gec_gnu_gxx_cxx17(executable_path: str):
    return f'./{executable_path}' + ExecuteHandler.get_iostream_route()
@staticmethod
def gec_gnu_gxx_cxx20(executable_path: str):
    return f'./{executable_path}' + ExecuteHandler.get_iostream_route()
@staticmethod
def gec_python2(executable_path: str):
    return f'python2 {executable_path}' + ExecuteHandler.get_iostream_route()
@staticmethod
def gec_python3(executable_path: str):
    return f'python3 {executable_path}' + ExecuteHandler.get_iostream_route()
@staticmethod
def gec_java8(executable_path: str, **kwargs):
    # The classpath includes the environment dir and any jars inside it.
    env_dir_path: str = os.path.join(os.getcwd(), TaskManager.env_dir)
    return f'java -cp "{env_dir_path}/:{env_dir_path}/*" {executable_path}' + ExecuteHandler.get_iostream_route()
def execute(self):
    """Run the command; return its exit status, or -1 on time-limit expiry."""
    execute_command: str = self.get_execute_command()
    print(execute_command)
    print(os.listdir(os.path.join(os.getcwd(), TaskManager.env_dir)))
    execute_process = subprocess.Popen(execute_command,
                                       shell=True,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)
    # NOTE(review): this flag is never set to True anywhere below.
    time_limit_expired: bool = False
    try:
        execute_process.wait(self.time_limit)
    except subprocess.TimeoutExpired:
        execute_process.terminate()
        execute_process.kill()
        print('Time limit exceeded!')
        print(execute_process.pid)
        # Negative value signals a time-limit verdict to the caller.
        return -1
    status = execute_process.poll()
    print('status is', status)
    execute_process.kill()
    print(time_limit_expired)
    stdout, stderr = execute_process.communicate()
    # Per-test log keeps the process's captured stdout and stderr.
    log = open(
        os.path.join(os.getcwd(),
                     TaskManager.env_dir,
                     f'test_{self.test_number}_' + self.execute_log_file_name), 'a')
    log.write(stdout.decode('utf-8'))
    log.write(stderr.decode('utf-8'))
    log.close()
    return status
| TolimanStaR/AtomicJudge | source/task_manager.py | task_manager.py | py | 24,126 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "source.models.models",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "source.models",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "os.getcwd",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "source.models.model... |
27773611300 | import requests
import os
from telepyrobot.setclient import TelePyroBot
from pyrogram import filters
from pyrogram.types import Message
from telepyrobot import COMMAND_HAND_LER
# Short plugin name derived from this module's file name (extension dropped).
__PLUGIN__ = os.path.basename(__file__.replace(".py", ""))
# Help text shown to users; COMMAND_HAND_LER is the bot's command prefix.
__help__ = f"""
Url Shortner Plugin for https://da.gd
**Usage:**
`{COMMAND_HAND_LER}short <long-link>`: Will return shortlink of the long-link.
`{COMMAND_HAND_LER}unshort <shortlink>`: Will return long url of the shortlink.
"""
@TelePyroBot.on_message(filters.command("short", COMMAND_HAND_LER) & filters.me)
async def short_link(c: TelePyroBot, m: Message):
    """Shorten a URL via da.gd and edit the message with the result."""
    input_str = m.text.split(None, 1)[1]
    # Bug fix: let requests URL-encode the target; interpolating it into the
    # query string broke for links containing '&', '?' or spaces.
    response_api = requests.get("https://da.gd/s", params={"url": input_str}).text
    if response_api:
        await m.edit_text(f"**Generated Link:**\n {response_api} for {input_str}.")
    else:
        await m.edit_text("something is wrong. please try again later.")
@TelePyroBot.on_message(filters.command("unshort", COMMAND_HAND_LER) & filters.me)
async def unshort_link(c: TelePyroBot, m: Message):
    """Resolve a da.gd short link back to its original URL."""
    input_str = m.text.split(None, 1)[1]
    if not input_str.startswith("http"):
        input_str = "http://" + input_str
    if not input_str.startswith("http://da.gd"):
        await m.edit_text("`I can only unshort da.gd links`")
        # Bug fix: the original fell through and requested the non-da.gd
        # URL anyway, overwriting the rejection message.
        return
    # Don't follow the redirect: the Location header IS the answer.
    r = requests.get(input_str, allow_redirects=False)
    if str(r.status_code).startswith("3"):
        await m.edit_text(
            f"Input URL: {input_str}\nReDirected URL: {r.headers['Location']}"
        )
    else:
        await m.edit_text(f"Input URL {input_str} returned status_code {r.status_code}")
| Divkix/TelePyroBot | telepyrobot/plugins/url_shortner.py | url_shortner.py | py | 1,615 | python | en | code | 40 | github-code | 6 | [
{
"api_name": "os.path.basename",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "telepyrobot.COMMAND_HAND_LER",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "telepyrobot.... |
8829027988 | ########################################################
# Rodrigo Leite - drigols #
# Last update: 21/09/2021 #
########################################################
def OLS(dic):
    """Fit a simple linear regression (ordinary least squares) of Salary on Grade.

    Expects *dic* to contain 'Grade' and 'Salary' lists of equal length;
    prints the fitted coefficients, plots the data with the regression line
    and saves the figure to ../images/plot-02.png.
    """
    from matplotlib import pyplot as plt
    import pandas as pd
    df = pd.DataFrame(dic)
    # Deviation columns for the closed-form OLS slope:
    # m = sum((x_i - x_mean)(y_i - y_mean)) / sum((x_i - x_mean)^2)
    df['(x_i - x_mean)'] = df['Grade'] - df['Grade'].mean()
    df['(y_i - y_mean)'] = df['Salary'] - df['Salary'].mean()
    df['(x_i - x_mean)(y_i - y_mean)'] = df['(x_i - x_mean)'] * df['(y_i - y_mean)']
    df['(x_i - x_mean)^2'] = (df['Grade'] - df['Grade'].mean())**2
    m = (sum(df['(x_i - x_mean)'] * df['(y_i - y_mean)'])) / sum(df['(x_i - x_mean)^2'])
    # Intercept from the normal equations: b = y_mean - m * x_mean
    b = df['Salary'].mean() - (m * df['Grade'].mean())
    print("Angular Coefficient (m): {0}\nLinear Coefficient (b): {1}".format(round(m), round(b)))
    # Predicted salary for every observed grade (the fitted line).
    regression_line = [(m*x) + b for x in df['Grade']]
    plt.figure(figsize=(10, 7))
    plt.scatter(df.Grade, df.Salary, color='g')
    plt.plot(df.Grade, regression_line, color='b')
    plt.title('Grades vs Salaries | Ordinary Least Squares: OLS')
    plt.xlabel('Grade')
    plt.ylabel('Salary')
    plt.grid()
    plt.savefig('../images/plot-02.png', format='png')
    plt.show()
if __name__ =="__main__":
    # Sample dataset: 15 students' grades and salaries.
    students_dic = {
        'Grade':[50, 50, 46, 95, 50, 5, 57, 42, 26, 72, 78, 60, 40, 17, 85],
        'Salary':[50000, 54000, 50000, 189000, 55000, 40000, 59000, 42000, 47000, 78000, 119000, 95000, 49000, 29000, 130000]
    }
    OLS(students_dic)
| drigols/studies | modules/ai-codes/modules/linear-regression/src/students_ols_bestLineFit.py | students_ols_bestLineFit.py | py | 1,511 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.DataFrame",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "matplotli... |
41745621937 | import os
import sqlite3
from bs4 import BeautifulSoup
def scan_folder(parentfile, diff):
    """Recursively walk *parentfile*, checking every entry whose name contains '_'.

    Entries with '_' in the name are assumed to be scraped data files and are
    compared against the database by eachfile(); other entries are recursed
    into when they are directories. Mismatching file names accumulate in
    *diff*, which is also returned.
    """
    for file_name in os.listdir(parentfile):
        if "_" in file_name:
            # NOTE(review): a *directory* containing '_' would also be passed
            # to eachfile() and fail on open(); assumed not to occur here.
            diff = eachfile(file_name, parentfile, diff)
        else:
            current_path = "".join((parentfile, "/", file_name))
            if os.path.isdir(current_path):
                scan_folder(current_path, diff)
    return diff
def eachfile(file_name, parentfile, diff):
    """Compare one scraped file's content with the matching database column.

    The file name encodes a 5-character story id prefix and a field suffix;
    the suffix selects the (table, column) pair to check. If the stored value
    differs from the file's content, *file_name* is appended to *diff*.
    Returns *diff*.
    """
    conn = sqlite3.connect('healthReviewsChangeDB.db')
    try:
        cur = conn.cursor()
        dbcol = ''
        dbTable = ''
        story_ID = file_name[:5]
        filename = file_name[6:]
        # Map the file-name suffix to the table/column it mirrors.  Order
        # matters: e.g. 'Response_Header' must be tested before plain
        # endswith('Response') would never match it.
        if "About" in filename:
            dbcol = 'About'
            dbTable = 'Review'
        elif "Activity" in filename:
            dbcol = 'Activity'
            dbTable = 'Review'
        elif filename == 'Date':
            dbcol = 'StoryTime'
            dbTable = 'Review'
        elif "Feel_Tag" in filename:
            dbcol = 'feelTag'
            dbTable = 'Review'
        elif "Good_Tag" in filename:
            dbcol = 'goodTag'
            dbTable = 'Review'
        elif "Improved_Tag" in filename:
            dbcol = 'improvedTag'
            dbTable = 'Review'
        elif "Progress" in filename:
            dbcol = 'Progress'
            dbTable = 'Review'
        elif "Similar" in filename:
            dbcol = 'similarTag'
            dbTable = 'Review'
        elif "Story" in filename:
            dbcol = 'Story'
            dbTable = 'Review'
        elif "Title" in filename:
            dbcol = 'Title'
            dbTable = 'Review'
        elif "Username" in filename:
            dbcol = 'Username'
            dbTable = 'Review'
        elif filename.endswith("Response"):
            dbcol = 'Response'
            story_ID = filename[:5]
            dbTable = 'Response'
        elif "Response_Header" in filename:
            dbcol = 'ResponseInfo'
            story_ID = filename[:5]
            dbTable = 'Response'
        elif "Response_Time" in filename:
            dbcol = 'ResponseTime'
            story_ID = filename[:5]
            dbTable = 'Response'
        elif filename.endswith("Update"):
            dbcol = 'UpdateText'
            story_ID = filename[:5]
            dbTable = 'userUpdates'
        elif "Update_date" in filename:
            dbcol = 'updateTime'
            story_ID = filename[:5]
            dbTable = 'userUpdates'
        # Fix: parameterize the story id instead of concatenating it into the
        # SQL text; only the vetted table/column names above are interpolated.
        exeStat = "SELECT " + dbcol + " FROM " + dbTable + " WHERE StoryID = ?;"
        AllDBcontent = cur.execute(exeStat, (story_ID,))
        for eachcontent in AllDBcontent:
            with open(parentfile + "/" + file_name, 'r') as reader:
                content = reader.read()
            if eachcontent[0] != content:
                diff.append(file_name)
        cur.close()
    finally:
        # Fix: the original only closed the cursor, leaking the connection.
        conn.close()
    return diff
def main():
    """Diff the scraped files under the hard-coded folder against the database."""
    # NOTE(review): machine-specific WSL path; parameterise before reuse.
    parentfile = "/mnt/c/Users/Juju/DB/project/webscrape/realScrape"
    diff = []
    scan_folder(parentfile, diff)
    if diff:
        print("The file name that the content is different from the Database:", diff)
        print("Number of files: ", str(len(diff)))
    else:
        print("Test OK")
if __name__ == "__main__":
    main()
| 22650684/Webscraping-Project | testing/dbMatchFile.py | dbMatchFile.py | py | 3,089 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.listdir",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "sqlite3.connect",
"line_numb... |
73118020027 | import os
import torch
import matplotlib.pyplot as plt
from config.config import cfg
def get_supports(m):
    """
    Returns the number of samples and the percentage of support for each activity in the ground truth data of a given dataset.
    Args:
    - m (str): the name of the dataset
    Returns:
    - supports (dict): a dictionary containing the number of samples for each activity in the ground truth data
    - supports_perc (dict): a dictionary containing the percentage of support for each activity in the ground truth data
    """
    c = cfg()
    cfg_ = c.get_config()
    dataset_path = cfg_.dataset_path
    # Labels cover 24 activity classes.
    num_activities = 24
    supports = { i : 0 for i in range(num_activities) }
    path_gt = os.path.join(dataset_path, m, f"gt_{m}.pt")
    data_gt = torch.load(path_gt)
    for label in data_gt:
        # argmax recovers the class index -- assumes one-hot labels; verify.
        supports[torch.argmax(label).item()] += 1
    # Fractions rounded to 4 decimals; they sum to ~1 over all classes.
    supports_perc = { i : round(supports[i]/len(data_gt),4) for i in range(num_activities) }
    print(f"{len(data_gt)} samples.\n")
    return supports, supports_perc
def ig_stats(m):
    """
    Computes statistics about the instance graphs in the specified dataset split.
    Args:
        m (str): the dataset split; "train" maps to "_data/training.g",
            any other value maps to "_data/<m>.g".
    Returns:
        None (statistics are printed).
    """
    file = m + ".g"
    if m == "train":
        file = "training.g"
    with open('_data/' + file, 'r') as f:
        contents = f.read()
    # Graphs are separated by blank lines; inside a graph, 'v ...' lines
    # declare nodes and 'e ...' lines declare edges.
    graphs = contents.split('\n\n')
    print(f"Number of instance graphs: {len(graphs)}")
    num_nodes_per_graph = []
    for graph in graphs:
        # Drop the 3-character graph header before scanning its lines.
        graph = graph[3:]
        # Bug fix: skip empty lines -- the original indexed elem[0] and
        # raised IndexError whenever a graph contained a blank line.
        node_count = sum(1 for elem in graph.split('\n') if elem.startswith('v'))
        num_nodes_per_graph.append(node_count)
    print(f"Mean number of nodes per ig: {sum(num_nodes_per_graph)/len(num_nodes_per_graph):.2f}")
    print(f"Standard deviation of number of nodes per ig: {torch.std(torch.tensor(num_nodes_per_graph, dtype = torch.float64)):.2f}")
    print(f"Min number of nodes per ig: {min(num_nodes_per_graph)}")
    print(f"Max number of nodes per ig: {max(num_nodes_per_graph)}")
if __name__ == '__main__':
    # Report instance-graph and support statistics for every split.
    modes = ["train","val", "test"]
    plot_supports = True
    for m in modes:
        print(f"{m.capitalize()} dataset")
        ig_stats(m)
        print("After preprocessing:", end = " ")
        supports, supports_perc = get_supports(m)
        if plot_supports:
            """
            plot barchart of supports percentages per class with 24 classes.
            Each bar is labeled with value of support per class and the class number
            Set the title with the mode name
            """
            # Percentage-support bar chart, one bar per class.
            plt.figure()
            plt.bar(range(24), supports_perc.values())
            plt.xlabel("Class")
            plt.ylabel("Support %")
            plt.xticks(range(24))
            for i, v in enumerate(supports_perc.values()):
                plt.text(i-0.25, v + 0.005, f"{(v*100):.2f}", color='black', fontweight='bold', size = 'small')
            plt.title(f"Support percentage per class in {m} dataset")
            plt.show()
            # Absolute-count bar chart, one bar per class.
            plt.figure()
            plt.bar(range(24), supports.values())
            plt.xlabel("Class")
            plt.ylabel("Support")
            plt.xticks(range(24))
            for i, v in enumerate(supports.values()):
                plt.text(i-0.25, v + 10, f"{v}", color='black', fontweight='bold', size = 'small')
            plt.title(f"Support per class in {m} dataset")
            plt.show()
{
"api_name": "config.config.cfg",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "torch.load",
"line_nu... |
1883485650 | import sys
import os
import pefile
# Prints an executable's PE section names.
def print_sections(directory, executable):
    """Print the executable's section names as a bracketed, quoted list."""
    target = executable if directory is None else directory + "/" + executable
    pe = pefile.PE(target)
    names = [section.Name.decode('utf-8') for section in pe.sections]
    listing = ", ".join("'{}'".format(name) for name in names)
    print("{}: [{}]".format(executable, listing))
# Check whether the input argument is a file or a folder: for a folder,
# process every .exe inside it; otherwise treat the argument as one file.
if os.path.isdir(sys.argv[1]):
    for filename in os.listdir(sys.argv[1]):
        if filename.endswith(".exe"):
            print_sections(sys.argv[1], filename)
else:
    print_sections(None, sys.argv[1])
| kkatzer/CDadosSeg | T2/Parte2/T2P2a.py | T2P2a.py | py | 791 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pefile.PE",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 23,... |
19773909067 | # -*- coding: utf-8 -*-
"""
IBEIS CORE
Defines the core dependency cache supported by the image analysis api
Extracts annotation chips from imaages and applies optional image
normalizations.
TODO:
* interactive callback functions
* detection interface
* identification interface
NOTES:
HOW TO DESIGN INTERACTIVE PLOTS:
decorate as interactive
depc.get_property(recompute=True)
instead of calling preproc as a generator and then adding,
calls preproc and passes in a callback function.
preproc spawns interaction and must call callback function when finished.
callback function adds the rowids to the table.
Needed Tables:
Chip
NormChip
Feats
Keypoints
Descriptors
ProbChip
IdentifyQuery
NeighborIndex
QualityClassifier
ViewpointClassifier
CommandLine:
python -m ibeis.control.IBEISControl --test-show_depc_annot_graph --show
Setup:
>>> from ibeis.core_annots import * # NOQA
>>> import ibeis
>>> import plottool as pt
>>> ibs = ibeis.opendb('testdb1')
>>> depc = ibs.depc_annot
>>> aid_list = ibs.get_valid_aids()[0:2]
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from six.moves import zip
import dtool
import utool as ut
import vtool as vt
import numpy as np
import cv2
from ibeis.control.controller_inject import register_preprocs, register_subprops
from ibeis.algo.hots.chip_match import ChipMatch
from ibeis.algo.hots import neighbor_index
# utool boilerplate: project-aware print / reload / profile helpers.
(print, rrr, profile) = ut.inject2(__name__, '[core_annots]')
# Decorators registering derived properties on the 'annot' dependency table.
derived_attribute = register_preprocs['annot']
register_subprop = register_subprops['annot']
# dtool.Config.register_func = derived_attribute
def testdata_core(defaultdb='testdb1', size=2):
    """Open a test database for doctests.

    Returns an ``(ibs, depc, aid_list)`` tuple. ``size`` caps how many valid
    annotation rowids are returned unless the user overrides them on the
    command line via ``--aids`` / ``--aid``.
    """
    import ibeis
    ibs = ibeis.opendb(defaultdb=defaultdb)
    default_aids = ibs.get_valid_aids()[0:size]
    aid_list = ut.get_argval(('--aids', '--aid'), type_=list,
                             default=default_aids)
    return ibs, ibs.depc_annot, aid_list
class ChipConfig(dtool.Config):
    # Configuration for annotation chip extraction (consumed by compute_chip).
    _param_info_list = [
        #ut.ParamInfo('dim_size', 128, 'sz', hideif=None),
        #ut.ParamInfo('dim_size', 960, 'sz', hideif=None),
        # dim_size: target size in pixels; None means keep the bbox size.
        ut.ParamInfo('dim_size', 700, 'sz', hideif=None),  # TODO: allow types to vary
        ut.ParamInfo(
            # resize_dim: which dimension dim_size constrains.
            # NOTE(review): compute_chip only implements 'width', 'area' and
            # 'wh' of these valid_values — confirm before relying on others.
            'resize_dim', 'width', '',
            #'resize_dim', 'area', '',
            valid_values=['area', 'width', 'height', 'diag', 'maxwh', 'wh'],
            hideif=lambda cfg: cfg['dim_size'] is None),
        # dim_tol: tolerance passed to the vtool scaling helpers.
        ut.ParamInfo('dim_tol', 0, 'tol', hideif=0),
        ut.ParamInfo('preserve_aspect', True, hideif=True),
        # Optional image normalizations applied after warping.
        ut.ParamInfo('histeq', False, hideif=False),
        ut.ParamInfo('adapteq', False, hideif=False),
        ut.ParamInfo('histeq_thresh', False, hideif=False),
        # pad: extra margin in pixels around the chip.
        ut.ParamInfo('pad', 0, hideif=0),
        # ext: on-disk image extension used by the extern column type.
        ut.ParamInfo('ext', '.png', hideif='.png'),
    ]
# External column type: chip images are stored on disk with vt.imwrite and
# read back with vt.imread; the file extension comes from the 'ext' config key.
ChipImgType = dtool.ExternType(vt.imread, vt.imwrite, extkey='ext')


@derived_attribute(
    tablename='chips', parents=['annotations'],
    colnames=['img', 'width', 'height', 'M'],
    coltypes=[ChipImgType, int, int, np.ndarray],
    configclass=ChipConfig,
    #depprops=['image_uuid', 'verts', 'theta'],
    fname='chipcache4',
    rm_extern_on_delete=True,
    chunksize=256,
)
def compute_chip(depc, aid_list, config=None):
    r"""
    Extracts the annotation chip from the bounding box.

    For each annotation: compute the chip's target size from the config,
    build the image->chip affine transform (bbox + theta), warp the parent
    image, apply optional filters, and yield the chip plus its size and
    transform matrix.

    Args:
        depc (ibeis.depends_cache.DependencyCache):
        aid_list (list): list of annotation rowids
        config (dict): (default = None)

    Yields:
        (ndarray, int, int, ndarray): (chipBGR, width, height, M)

    CommandLine:
        ibeis --tf compute_chip --show
        ibeis --tf compute_chip --show --pad=64 --dim_size=256 --db PZ_MTEST
        ibeis --tf compute_chip --show --pad=64 --dim_size=None --db PZ_MTEST
        ibeis --tf compute_chip --show --db humpbacks

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.core_annots import * # NOQA
        >>> import ibeis
        >>> defaultdb = 'testdb1'
        >>> ibs = ibeis.opendb(defaultdb=defaultdb)
        >>> depc = ibs.depc_annot
        >>> config = ChipConfig.from_argv_dict(dim_size=None)
        >>> aid_list = ibs.get_valid_aids()[0:8]
        >>> chips = depc.get_property('chips', aid_list, 'img', config={'dim_size': 256})
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> #interact_obj = pt.interact_multi_image.MultiImageInteraction(chips, nPerPage=4)
        >>> import ibeis.viz.interact.interact_chip
        >>> interact_obj = ibeis.viz.interact.interact_chip.interact_multichips(ibs, aid_list, config2_=config)
        >>> interact_obj.start()
        >>> pt.show_if_requested()
    """
    print('Preprocess Chips')
    print('config = %r' % (config,))
    ibs = depc.controller
    # The '2' suffix keeps this cache separate from the legacy chip dir.
    chip_dpath = ibs.get_chipdir() + '2'
    ut.ensuredir(chip_dpath)
    #ext = config['ext']
    pad = config['pad']
    dim_size = config['dim_size']
    dim_tol = config['dim_tol']
    resize_dim = config['resize_dim']
    #cfghashid = config.get_hashid()
    #avuuid_list = ibs.get_annot_visual_uuids(aid_list)
    # TODO: just hash everything together
    #_fmt = 'chip_aid_{aid}_avuuid_{avuuid}_{cfghashid}{ext}'
    #cfname_list = [_fmt.format(aid=aid, avuuid=avuuid, ext=ext, cfghashid=cfghashid)
    #                for aid, avuuid in zip(aid_list, avuuid_list)]
    #cfpath_list = [ut.unixjoin(chip_dpath, chip_fname)
    #               for chip_fname in cfname_list]
    #gfpath_list = ibs.get_annot_image_paths(aid_list)
    gid_list = ibs.get_annot_gids(aid_list)
    # TODO: use verts instead
    bbox_list = ibs.get_annot_bboxes(aid_list)
    theta_list = ibs.get_annot_thetas(aid_list)
    bbox_size_list = ut.take_column(bbox_list, [2, 3])
    # Checks: a zero-area bounding box cannot produce a chip.
    invalid_flags = [w == 0 or h == 0 for (w, h) in bbox_size_list]
    invalid_aids = ut.compress(aid_list, invalid_flags)
    assert len(invalid_aids) == 0, 'invalid aids=%r' % (invalid_aids,)
    if resize_dim == 'wh':
        assert isinstance(dim_size, tuple), (
            'must specify both width and height in dim_size when resize_dim=wh')
        # Aspect ratio is not preserved. Use exact specifications.
        newsize_list = [dim_size for _ in range(len(bbox_size_list))]
    else:
        # NOTE(review): only 'width' and 'area' are implemented here even
        # though ChipConfig advertises more valid_values — others KeyError.
        scale_func_dict = {
            'width': vt.get_scaled_size_with_width,
            'area': vt.get_scaled_size_with_area,  # actually root area
        }
        scale_func = scale_func_dict[resize_dim]
        if dim_size is None:
            # No resizing requested; keep native bbox sizes.
            newsize_list = bbox_size_list
        else:
            if resize_dim == 'area':
                # The area helper expects squared magnitudes.
                dim_size = dim_size ** 2
                dim_tol = dim_tol ** 2
            newsize_list = [scale_func(dim_size, w, h, dim_tol) for (w, h) in bbox_size_list]
    if pad > 0:
        halfoffset_ms = (pad, pad)
        extras_list = [vt.get_extramargin_measures(bbox, new_size, halfoffset_ms)
                       for bbox, new_size in zip(bbox_list, newsize_list)]
        # Overwrite bbox and new size with margined versions
        bbox_list = ut.take_column(extras_list, 0)
        newsize_list = ut.take_column(extras_list, 1)
    # Build transformation from image to chip
    M_list = [vt.get_image_to_chip_transform(bbox, new_size, theta) for
              bbox, theta, new_size in zip(bbox_list, theta_list, newsize_list)]
    #arg_iter = zip(cfpath_list, gid_list, newsize_list, M_list)
    arg_iter = zip(gid_list, newsize_list, M_list)
    arg_list = list(arg_iter)
    # Optional post-warp normalization filters selected by config.
    filterfn_list = []
    from vtool import image_filters
    if config['histeq']:
        filterfn_list.append(image_filters.histeq_fn)
    if config['adapteq']:
        filterfn_list.append(image_filters.adapteq_fn)
    warpkw = dict(flags=cv2.INTER_LANCZOS4, borderMode=cv2.BORDER_CONSTANT)
    last_gid = None
    for tup in ut.ProgIter(arg_list, lbl='computing chips', backspace=True):
        # FIXME: THE GPATH SHOULD BE PASSED HERE WITH AN ORIENTATION FLAG
        #cfpath, gid, new_size, M = tup
        gid, new_size, M = tup
        # Read parent image # TODO: buffer this
        # Cache the last image: assumes gids arrive grouped/ordered so the
        # same parent image is not re-read for consecutive annotations.
        if gid != last_gid:
            imgBGR = ibs.get_images(gid)
            last_gid = gid
        # Warp chip: M maps image coords to chip coords; top 2 rows = affine.
        chipBGR = cv2.warpAffine(imgBGR, M[0:2], tuple(new_size), **warpkw)
        for filtfn in filterfn_list:
            chipBGR = filtfn(chipBGR)
        width, height = vt.get_size(chipBGR)
        yield (chipBGR, width, height, M)
        # Write chip to disk
        #vt.imwrite(cfpath, chipBGR)
        #yield (cfpath, width, height, M)
@register_subprop('chips', 'dlen_sqrd')
def compute_dlen_sqrd(depc, aid_list, config=None):
    """Squared diagonal length (w**2 + h**2) of each annotation's chip."""
    wh_arr = np.array(depc.get('chips', aid_list, ('width', 'height'), config))
    return np.sum(wh_arr ** 2, axis=1).tolist()
class AnnotMaskConfig(dtool.Config):
    # Config for manually painted annotation masks (see compute_annotmask).
    # The chip config is nested because the mask is painted over the chip.
    _param_info_list = [
        ut.ParamInfo('manual', True)
    ]
    _sub_config_list = [
        ChipConfig
    ]
@derived_attribute(
    tablename='annotmask', parents=['annotations'],
    colnames=['img', 'width', 'height'],
    coltypes=[('extern', vt.imread), int, int],
    configclass=AnnotMaskConfig,
    fname='../maskcache2',
    # isinteractive=True,
)
def compute_annotmask(depc, aid_list, config=None):
    r"""
    Interaction dispatcher for annotation masks.

    Opens an interactive paint window for each annotation's chip; the
    painted mask is saved to a manual cache directory and its path and
    size are yielded for the dependency cache.

    Args:
        depc (ibeis.depends_cache.DependencyCache):
        aid_list (list): list of annotation rowids
        config (AnnotMaskConfig): (default = None)

    Yields:
        (uri, int, int): tup

    CommandLine:
        python -m ibeis.core_annots --exec-compute_annotmask --show
        python -m ibeis.core_annots --exec-compute_annotmask --show --edit

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.core_annots import * # NOQA
        >>> ibs, depc, aid_list = testdata_core()
        >>> config = AnnotMaskConfig(dim_size=None)
        >>> chip_config = config.chip_cfg
        >>> edit = ut.get_argflag('--edit')
        >>> mask = depc.get_property('annotmask', aid_list, 'img', config, recompute=edit)[0]
        >>> chip = depc.get_property('chips', aid_list, 'img', config=chip_config)[0]
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> resized = vt.resize_mask(mask, chip)
        >>> blended = vt.blend_images_multiply(chip, resized)
        >>> pt.imshow(blended, title='mask')
        >>> pt.show_if_requested()
    """
    from plottool import interact_impaint
    # TODO: Ensure interactive required cache words
    # Keep manual things above the cache dir so they survive cache deletion.
    mask_dpath = ut.unixjoin(depc.cache_dpath, '../ManualChipMask')
    ut.ensuredir(mask_dpath)
    ibs = depc.controller
    chip_config = config.chip_cfg
    chip_imgs = depc.get('chips', aid_list, 'img', config=chip_config)
    cfghashid = config.get_hashid()
    avuuid_list = ibs.get_annot_visual_uuids(aid_list)
    # TODO: just hash everything together
    # Filenames key on aid + visual uuid + config hash so a previously
    # painted mask can be found again for editing.
    ext = '.png'
    _fmt = 'mask_aid_{aid}_avuuid_{avuuid}_{cfghashid}{ext}'
    fname_list = [_fmt.format(aid=aid, avuuid=avuuid, ext=ext, cfghashid=cfghashid)
                  for aid, avuuid in zip(aid_list, avuuid_list)]
    for img, fname, aid in zip(chip_imgs, fname_list, aid_list):
        mask_fpath = ut.unixjoin(mask_dpath, fname)
        if ut.checkpath(mask_fpath):
            # Allow for editing on recompute
            init_mask = vt.imread(mask_fpath)
        else:
            init_mask = None
        # Blocks on an interactive painting session per annotation.
        mask = interact_impaint.impaint_mask2(img, init_mask=init_mask)
        vt.imwrite(mask_fpath, mask)
        print('imwrite')
        w, h = vt.get_size(mask)
        yield mask_fpath, w, h
        # Remove the old chips
        #ibs.delete_annot_chips([aid])
        #ibs.delete_annot_chip_thumbs([aid])
class ProbchipConfig(dtool.Config):
    # TODO: incorporate into base
    # Named preset: selecting 'rf' switches to the random-forest detector
    # with mask smoothing disabled.
    _named_defaults = {
        'rf': {
            'fw_detector': 'rf',
            'smooth_thresh': None,
            'smooth_ksize': None,
        }
    }
    _param_info_list = [
        #ut.ParamInfo('preserve_aspect', True, hideif=True),
        # fw_detector: which foreground detector produces the probchip
        # ('cnn' or 'rf' — see compute_probchip).
        ut.ParamInfo('fw_detector', 'cnn', 'detector='),
        # fw_dim_size: chip size used as detector input.
        ut.ParamInfo('fw_dim_size', 256, 'sz'),
        # Smoothing parameters forwarded to postprocess_mask; None disables.
        ut.ParamInfo('smooth_thresh', 20, 'thresh='),
        ut.ParamInfo('smooth_ksize', 20, 'ksz=', hideif=lambda cfg: cfg['smooth_thresh'] is None),
        #ut.ParamInfo('ext', '.png'),
    ]
    #_sub_config_list = [
    #    ChipConfig
    #]
# External column type: probchips are written as .png and read back grayscale.
ProbchipImgType = dtool.ExternType(ut.partial(vt.imread, grayscale=True),
                                   vt.imwrite, extern_ext='.png')


@derived_attribute(
    tablename='probchip', parents=['annotations'],
    colnames=['img'],
    coltypes=[ProbchipImgType],
    configclass=ProbchipConfig,
    fname='chipcache4',
    # isinteractive=True,
)
def compute_probchip(depc, aid_list, config=None):
    """ Computes probability chips using pyrf

    Annotations are grouped by species, each group is run through the
    configured foreground detector ('rf' or 'cnn'), and the resulting
    probability chips are un-grouped back into the input order.

    CommandLine:
        python -m ibeis.core_annots --test-compute_probchip --nocnn --show --db PZ_MTEST
        python -m ibeis.core_annots --test-compute_probchip --show --fw_detector=cnn
        python -m ibeis.core_annots --test-compute_probchip --show --fw_detector=rf --smooth_thresh=None

    Example1:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.core_annots import * # NOQA
        >>> import ibeis
        >>> ibs, depc, aid_list = testdata_core()
        >>> aid_list = ibs.get_valid_aids(species='zebra_plains')[0:10]
        >>> config = ProbchipConfig.from_argv_dict(fw_detector='rf', smooth_thresh=None)
        >>> probchip_list_ = ut.take_column(list(compute_probchip(depc, aid_list, config)), 0)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> xlabel_list = [str(vt.get_size(img)) for img in probchip_list_]
        >>> iteract_obj = pt.interact_multi_image.MultiImageInteraction(probchip_list_, nPerPage=4, xlabel_list=xlabel_list)
        >>> iteract_obj.start()
        >>> ut.show_if_requested()
    """
    print('[core] COMPUTING FEATWEIGHTS')
    print('config = %r' % (config,))
    ibs = depc.controller
    # Use the labeled species for the fw_detector
    species_list = ibs.get_annot_species_texts(aid_list)
    fw_detector = config['fw_detector']
    dim_size = config['fw_dim_size']
    smooth_thresh = config['smooth_thresh']
    smooth_ksize = config['smooth_ksize']
    # The rf detector needs an extra margin which is cropped off afterwards.
    if fw_detector == 'rf':
        pad = 64
    else:
        pad = 0
    probchip_dir = ibs.get_probchip_dir() + '2'
    cfghashid = config.get_hashid()
    # TODO: just hash everything together
    ut.ensuredir(probchip_dir)
    _fmt = 'probchip_avuuid_{avuuid}_' + cfghashid + '.png'
    annot_visual_uuid_list = ibs.get_annot_visual_uuids(aid_list)
    probchip_fpath_list = [ut.unixjoin(probchip_dir, _fmt.format(avuuid=avuuid))
                           for avuuid in annot_visual_uuid_list]
    chip_config = ChipConfig(pad=pad, dim_size=dim_size)
    mchip_path_list = depc.get('chips', aid_list, 'img', config=chip_config, read_extern=False)
    aid_list = np.array(aid_list)
    species_list = np.array(species_list)
    species_rowid = np.array(ibs.get_species_rowids_from_text(species_list))
    # Group by species
    unique_species_rowids, groupxs = vt.group_indices(species_rowid)
    grouped_aids = vt.apply_grouping(aid_list, groupxs)
    grouped_species = vt.apply_grouping(species_list, groupxs)
    grouped_mpaths = ut.apply_grouping(mchip_path_list, groupxs)
    grouped_ppaths = ut.apply_grouping(probchip_fpath_list, groupxs)
    unique_species = ut.get_list_column(grouped_species, 0)
    if ut.VERBOSE:
        print('[preproc_probchip] +--------------------')
        print(('[preproc_probchip.compute_and_write_probchip] '
               'Preparing to compute %d probchips of %d species')
              % (len(aid_list), len(unique_species)))
        print(config)
    grouped_probchips = []
    _iter = zip(grouped_aids, unique_species, grouped_ppaths, grouped_mpaths)
    _iter = ut.ProgIter(_iter, nTotal=len(grouped_aids),
                        lbl='probchip for species', enabled=ut.VERBOSE, backspace=True)
    if fw_detector == 'rf':
        for aids, species, probchip_fpaths, inputchip_fpaths in _iter:
            if len(aids) == 0:
                continue
            gen = rf_probchips(ibs, aids, species, probchip_fpaths, inputchip_fpaths, pad,
                               smooth_thresh, smooth_ksize)
            grouped_probchips.append(list(gen))
    elif fw_detector == 'cnn':
        for aids, species, probchip_fpaths, inputchip_fpaths in _iter:
            if len(aids) == 0:
                continue
            # BUGFIX: previously passed the full ungrouped probchip_fpath_list
            # here instead of this species-group's probchip_fpaths (the rf
            # branch already used the group-local list).
            gen = cnn_probchips(ibs, species, probchip_fpaths, inputchip_fpaths,
                                smooth_thresh, smooth_ksize)
            grouped_probchips.append(list(gen))
    else:
        raise NotImplementedError('unknown fw_detector=%r' % (fw_detector,))
    if ut.VERBOSE:
        print('[preproc_probchip] Done computing probability images')
        print('[preproc_probchip] L_______________________')
    # Ungroup results back into the original aid_list order.
    probchip_result_list = vt.invert_apply_grouping2(
        grouped_probchips, groupxs, dtype=object)
    for probchip in probchip_result_list:
        yield (probchip,)
def cnn_probchips(ibs, species, probchip_fpath_list, inputchip_fpaths, smooth_thresh, smooth_ksize):
    """Yield CNN background-mask probability chips for ``inputchip_fpaths``.

    Smoothing (postprocess_mask) is applied only when both smoothing
    parameters are provided.  No extra margin is used here (for now).
    """
    do_smooth = smooth_thresh is not None and smooth_ksize is not None
    masks = ibs.generate_species_background_mask(inputchip_fpaths, species)
    for batch in ut.ichunks(zip(probchip_fpath_list, masks), 256):
        progiter = ut.ProgIter(batch, lbl='compute probchip chunk',
                               adjust=True, time_thresh=30.0, backspace=True)
        for _probchip_fpath, mask in progiter:
            if do_smooth:
                mask = postprocess_mask(mask, smooth_thresh, smooth_ksize)
            yield mask
def rf_probchips(ibs, aids, species, probchip_fpaths, inputchip_fpaths, pad,
                 smooth_thresh, smooth_ksize):
    """Yield random-forest probability chips with the ``pad`` margin cropped off.

    The detector writes margined probchips to disk next to the requested
    paths; each is read back grayscale, cropped, optionally smoothed, and
    yielded in order.
    """
    from ibeis.algo.detect import randomforest
    margined_fpaths = [ut.augpath(path, '_margin') for path in probchip_fpaths]
    rfconfig = {'scale_list': [1.0], 'mode': 1,
                'output_gpath_list': margined_fpaths}
    detect_gen = randomforest.detect_gpath_list_with_species(
        ibs, inputchip_fpaths, species, **rfconfig)
    # Exhaust the generator so every margined probchip is written to disk.
    ut.evaluate_generator(detect_gen)
    do_smooth = smooth_thresh is not None and smooth_ksize is not None
    for margined_fpath in margined_fpaths:
        margined = vt.imread(margined_fpath, grayscale=True)
        # Crop the extra margin off of the new probchip.
        probchip = margined[pad:-pad, pad:-pad]
        if do_smooth:
            probchip = postprocess_mask(probchip, smooth_thresh, smooth_ksize)
        yield probchip
def postprocess_mask(mask, thresh=20, kernel_size=20):
    r"""
    Smooths a probability mask: zeroes values below ``thresh``, then applies
    morphological close/open/close with a square kernel to remove speckle
    and fill small holes.

    Args:
        mask (ndarray): grayscale probability chip
        thresh (int): values strictly below this are zeroed (default = 20)
        kernel_size (int): side of the square morphology kernel (default = 20)

    Returns:
        ndarray: mask2

    CommandLine:
        python -m ibeis.core_annots --exec-postprocess_mask --cnn --show --aid=1 --db PZ_MTEST
        python -m ibeis --tf postprocess_mask --cnn --show --db PZ_MTEST --adapteq=True

    SeeAlso:
        python -m ibeis_cnn --tf generate_species_background_mask --show --db PZ_Master1 --aid 9970

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.core_annots import * # NOQA
        >>> import plottool as pt
        >>> ibs, depc, aid_list = testdata_core()
        >>> config = ChipConfig.from_argv_dict()
        >>> probchip_config = ProbchipConfig(smooth_thresh=None)
        >>> chip = ibs.depc_annot.get('chips', aid_list, 'img', config)[0]
        >>> mask = ibs.depc_annot.get('probchip', aid_list, 'img', probchip_config)[0]
        >>> mask2 = postprocess_mask(mask)
        >>> ut.quit_if_noshow()
        >>> fnum = 1
        >>> pt.imshow(chip, pnum=(1, 3, 1), fnum=fnum, xlabel=str(chip.shape))
        >>> pt.imshow(mask, pnum=(1, 3, 2), fnum=fnum, title='before', xlabel=str(mask.shape))
        >>> pt.imshow(mask2, pnum=(1, 3, 3), fnum=fnum, title='after', xlabel=str(mask2.shape))
        >>> ut.show_if_requested()
    """
    import cv2
    # BUGFIX: the previous revision unconditionally reassigned thresh and
    # kernel_size to 20 here, silently ignoring the caller's arguments
    # (e.g. the smooth_thresh/smooth_ksize config values). The parameters
    # are now honored; defaults keep the old behavior.
    mask2 = mask.copy()
    # light threshold
    mask2[mask2 < thresh] = 0
    # open and close to clean up speckle / fill holes
    kernel = np.ones((kernel_size, kernel_size), np.uint8)
    mask2 = cv2.morphologyEx(mask2, cv2.MORPH_CLOSE, kernel)
    mask2 = cv2.morphologyEx(mask2, cv2.MORPH_OPEN, kernel)
    mask2 = cv2.morphologyEx(mask2, cv2.MORPH_CLOSE, kernel)
    return mask2
class FeatConfig(dtool.Config):
    r"""
    Configuration for hesaff+sift feature extraction.

    Example:
        >>> from ibeis.core_annots import * # NOQA
        >>> feat_cfg = FeatConfig()
        >>> result = str(feat_cfg)
        >>> print(result)
        <FeatConfig(hesaff+sift)>
    """
    # TODO: FIXME
    #_parents = [ChipConfig]

    def get_param_info_list(self):
        # Mirror pyhesaff's own defaults so every hesaff keyword can be
        # overridden through this config (hidden while left at default).
        import pyhesaff
        # Query the defaults once (previously fetched twice via .keys()
        # and .items()).
        hesaff_defaults = pyhesaff.get_hesaff_default_params()
        param_info_list = [
            ut.ParamInfo('feat_type', 'hesaff+sift', ''),
            ut.ParamInfo('maskmethod', None, hideif=None)
        ]
        param_info_dict = {
            name: ut.ParamInfo(name, default, hideif=default)
            for name, default in hesaff_defaults.items()
        }
        #param_info_dict['scale_max'].default = -1
        #param_info_dict['scale_max'].default = 50
        param_info_list += ut.dict_take(param_info_dict, list(hesaff_defaults.keys()))
        return param_info_list

    def get_hesaff_params(self):
        # Get subset of these params that correspond to hesaff keyword args.
        import pyhesaff
        default_keys = list(pyhesaff.get_hesaff_default_params().keys())
        hesaff_param_dict = ut.dict_subset(self, default_keys)
        return hesaff_param_dict
@derived_attribute(
    tablename='feat', parents=['chips'],
    colnames=['num_feats', 'kpts', 'vecs'],
    coltypes=[int, np.ndarray, np.ndarray],
    configclass=FeatConfig,
    fname='featcache', chunksize=1024,
)
def compute_feats(depc, cid_list, config=None):
    r"""
    Computes features and yields results asynchronously: TODO: Remove IBEIS from
    this equation. Move the firewall towards the controller

    Args:
        depc (dtool.DependencyCache):
        cid_list (list): chip rowids
        config (None):

    Returns:
        generator : generates (num_feats, kpts, vecs) param tups

    SeeAlso:
        ~/code/ibeis_cnn/ibeis_cnn/_plugin.py

    CommandLine:
        python -m ibeis.core_annots --test-compute_feats:0 --show
        python -m ibeis.core_annots --test-compute_feats:1

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.core_annots import * # NOQA
        >>> ibs, depc, aid_list = testdata_core()
        >>> chip_config = {}
        >>> config = FeatConfig()
        >>> cid_list = depc.get_rowids('chips', aid_list, config=chip_config)
        >>> featgen = compute_feats(depc, cid_list, config)
        >>> feat_list = list(featgen)
        >>> assert len(feat_list) == len(aid_list)
        >>> (nFeat, kpts, vecs) = feat_list[0]
        >>> assert nFeat == len(kpts) and nFeat == len(vecs)
        >>> assert kpts.shape[1] == 6
        >>> assert vecs.shape[1] == 128
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> chip = depc.get_native('chips', cid_list[0:1], 'img')[0]
        >>> pt.interact_keypoints.KeypointInteraction(chip, kpts, vecs, autostart=True)
        >>> ut.show_if_requested()

    Example:
        >>> # TIMING
        >>> from ibeis.core_annots import * # NOQA
        >>> ibs, depc, aid_list = testdata_core('PZ_MTEST', 100)
        >>> config = {'dim_size': 450}
        >>> num_feats = depc.get('feat', aid_list, 'num_feats', config=config, recompute=True)

        ibs.delete_annot_feats(aid_list)
        ibs.get_annot_feat_rowids(aid_list)
    """
    nInput = len(cid_list)
    hesaff_params = config.get_hesaff_params()
    feat_type = config['feat_type']
    maskmethod = config['maskmethod']
    ut.assert_all_not_None(cid_list, 'cid_list')
    chip_fpath_list = depc.get_native('chips', cid_list, 'img', read_extern=False)
    if maskmethod is not None:
        # Masked feature extraction is not wired up in this code path yet.
        assert False
        #aid_list = ibs.get_chip_aids(cid_list)
        #probchip_fpath_list = ibs.get_annot_probchip_fpath(aid_list)
    else:
        probchip_fpath_list = (None for _ in range(nInput))
    if ut.NOT_QUIET:
        print('[preproc_feat] config = %s' % config)
    if ut.VERYVERBOSE:
        # BUGFIX: ut.dict_str() was called with no argument, which raised a
        # TypeError whenever VERYVERBOSE was enabled; hesaff_params is the
        # plausible intended payload (the full extraction parameters).
        print('full_params = ' + ut.dict_str(hesaff_params))
    ibs = depc.controller
    if feat_type == 'hesaff+sift':
        # Multiprocessing parallelization
        dictargs_iter = (hesaff_params for _ in range(nInput))
        arg_iter = zip(chip_fpath_list, probchip_fpath_list, dictargs_iter)
        # eager evaluation.
        # TODO: Check if there is any benefit to just passing in the iterator.
        arg_list = list(arg_iter)
        featgen = ut.generate(gen_feat_worker, arg_list, nTasks=nInput, freq=10,
                              ordered=True, force_serial=ibs.force_serial)
    elif feat_type == 'hesaff+siam128':
        from ibeis_cnn import _plugin
        assert maskmethod is None, 'not implemented'
        assert False, 'not implemented'
        featgen = _plugin.generate_siam_l2_128_feats(ibs, cid_list, config=config)
    else:
        raise AssertionError('unknown feat_type=%r' % (feat_type,))
    for nFeat, kpts, vecs in featgen:
        yield (nFeat, kpts, vecs,)
def gen_feat_worker(tup):
    r"""
    Function to be parallelized by multiprocessing / joblib / whatever.
    Must take in one argument to be used by multiprocessing.map_async

    NOTE: the local variable names (masked_chip, num_kpts, kpts, vecs) and
    the literal line ``num_kpts = kpts.shape[0]`` are load-bearing: the
    doctest below extracts them via ut.exec_func_src. Do not rename them.

    Args:
        tup (tuple): (chip_fpath, probchip_fpath, hesaff_params)

    Returns:
        tuple: (num_kpts, kpts, vecs)

    CommandLine:
        python -m ibeis.core_annots --exec-gen_feat_worker --show
        python -m ibeis.core_annots --exec-gen_feat_worker --show --aid 1988 --db GZ_Master1 --affine-invariance=False --scale_max=30
        python -m ibeis.core_annots --exec-gen_feat_worker --show --aid 1988 --db GZ_Master1 --affine-invariance=False --maskmethod=None --scale_max=30

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.core_annots import * # NOQA
        >>> ibs, depc, aid_list = testdata_core()
        >>> aid = aid_list[0]
        >>> config = {}
        >>> feat_config = FeatConfig.from_argv_dict()
        >>> chip_fpath = ibs.depc_annot.get('chips', aid_list[0], 'img', config=config, read_extern=False)
        >>> maskmethod = ut.get_argval('--maskmethod', type_=str, default='cnn')
        >>> probchip_fpath = ibs.depc_annot.get('probchip', aid_list[0], 'img', config=config, read_extern=False) if feat_config['maskmethod'] == 'cnn' else None
        >>> hesaff_params = feat_config.asdict()
        >>> # Exec function source
        >>> tup = (chip_fpath, probchip_fpath, hesaff_params)
        >>> masked_chip, num_kpts, kpts, vecs = ut.exec_func_src(
        >>>     gen_feat_worker, key_list=['masked_chip', 'num_kpts', 'kpts', 'vecs'],
        >>>     sentinal='num_kpts = kpts.shape[0]')
        >>> result = ('(num_kpts, kpts, vecs) = %s' % (ut.repr2((num_kpts, kpts, vecs)),))
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> from plottool.interactions import ExpandableInteraction
        >>> interact = ExpandableInteraction()
        >>> interact.append_plot(pt.interact_keypoints.KeypointInteraction(masked_chip, kpts, vecs))
        >>> interact.append_plot(lambda **kwargs: pt.plot_score_histograms([vt.get_scales(kpts)], **kwargs))
        >>> interact.start()
        >>> ut.show_if_requested()
    """
    import pyhesaff
    #import numpy as np
    #import vtool as vt
    chip_fpath, probchip_fpath, hesaff_params = tup
    chip = vt.imread(chip_fpath)
    if probchip_fpath is not None:
        # Attenuate the chip by the foreground probability before detection
        # so background keypoints are suppressed.
        probchip = vt.imread(probchip_fpath, grayscale=True)
        probchip = vt.resize_mask(probchip, chip)
        #vt.blend_images_multiply(chip, probchip)
        masked_chip = (chip * (probchip[:, :, None].astype(np.float32) / 255)).astype(np.uint8)
    else:
        masked_chip = chip
    kpts, vecs = pyhesaff.detect_feats_in_image(masked_chip, **hesaff_params)
    num_kpts = kpts.shape[0]
    return (num_kpts, kpts, vecs)
class FeatWeightConfig(dtool.Config):
    # Toggles foreground feature weighting (see compute_fgweights).
    _param_info_list = [
        ut.ParamInfo('featweight_enabled', True, 'enabled='),
    ]
    # FIXME: incorporate config dependencies in dtool
    #_parents = [FeatConfig, ProbchipConfig]
@derived_attribute(
    tablename='featweight', parents=['feat', 'probchip'],
    colnames=['fwg'],
    coltypes=[np.ndarray],
    configclass=FeatWeightConfig,
    fname='featcache', chunksize=512,
)
def compute_fgweights(depc, fid_list, pcid_list, config=None):
    """
    Computes a foreground weight for every keypoint of each feature rowid
    by sampling the corresponding probability chip.

    Args:
        depc (dtool.DependencyCache): depc
        fid_list (list): feature rowids
        pcid_list (list): probchip rowids aligned with fid_list
        config (None): (default = None)

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.core_annots import * # NOQA
        >>> ibs, depc, aid_list = testdata_core()
        >>> full_config = {}
        >>> config = FeatConfig()
        >>> fid_list = depc.get_rowids('feat', aid_list, config=full_config)
        >>> pcid_list = depc.get_rowids('probchip', aid_list, config=full_config)
        >>> prop_list = list(compute_fgweights(depc, fid_list, pcid_list))
        >>> featweight_list = ut.take_column(prop_list, 0)
        >>> result = np.array_str(featweight_list[0][0:3], precision=3)
        >>> print(result)
    """
    ibs = depc.controller
    nTasks = len(fid_list)
    print('[compute_fgweights] Computing %d fgweights' % (nTasks,))
    probchip_list = depc.get_native('probchip', pcid_list, 'img')
    cid_list = depc.get_ancestor_rowids('feat', fid_list, 'chips')
    chipsize_list = depc.get_native('chips', cid_list, ('width', 'height'))
    kpts_list = depc.get_native('feat', fid_list, 'kpts')
    # Fan the per-annotation work out to gen_featweight_worker.
    worker_args = zip(kpts_list, probchip_list, chipsize_list)
    weight_gen = ut.generate(gen_featweight_worker, worker_args,
                             nTasks=nTasks, ordered=True, freq=10,
                             force_serial=ibs.force_serial
                             )
    # Materialize before reporting completion, then yield in order.
    weight_results = list(weight_gen)
    print('[compute_fgweights] Done computing %d fgweights' % (nTasks,))
    for weights in weight_results:
        yield (weights,)
def gen_featweight_worker(tup):
    """
    Function to be parallelized by multiprocessing / joblib / whatever.
    Must take in one argument to be used by multiprocessing.map_async

    Samples the probability chip under a gaussian window at each keypoint
    and returns one float32 weight in [0, 1] per keypoint.

    Args:
        tup (aid, tuple(kpts(ndarray), probchip_fpath )): keypoints and
            probability chip file path aid, kpts, probchip_fpath

    CommandLine:
        python -m ibeis.core_annots --test-gen_featweight_worker --show
        python -m ibeis.core_annots --test-gen_featweight_worker --show --dpath figures --save ~/latex/crall-candidacy-2015/figures/gen_featweight.jpg
        python -m ibeis.core_annots --test-gen_featweight_worker --show --db PZ_MTEST --qaid_list=1,2,3,4,5,6,7,8,9

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.core_annots import * # NOQA
        >>> #test_featweight_worker()
        >>> ibs, depc, aid_list = testdata_core()
        >>> aid_list = aid_list[0:1]
        >>> config = {'dim_size': 450, 'resize_dim': 'area', 'smooth_thresh': 0, 'smooth_ksize': 0}
        >>> probchip = depc.get('probchip', aid_list, 'img', config=config)[0]
        >>> chipsize = depc.get('chips', aid_list, ('width', 'height'), config=config)[0]
        >>> kpts = depc.get('feat', aid_list, 'kpts', config=config)[0]
        >>> tup = (kpts, probchip, chipsize)
        >>> weights = gen_featweight_worker(tup)
        >>> assert np.all(weights <= 1.0), 'weights cannot be greater than 1'
        >>> chip = depc.get('chips', aid_list, 'img', config=config)[0]
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> fnum = 1
        >>> pnum_ = pt.make_pnum_nextgen(1, 3)
        >>> pt.figure(fnum=fnum, doclf=True)
        >>> pt.imshow(chip, pnum=pnum_(0), fnum=fnum)
        >>> pt.imshow(probchip, pnum=pnum_(2), fnum=fnum)
        >>> pt.imshow(chip, pnum=pnum_(1), fnum=fnum)
        >>> color_list = pt.draw_kpts2(kpts, weights=weights, ell_alpha=.3)
        >>> cb = pt.colorbar(weights, color_list)
        >>> cb.set_label('featweights')
        >>> pt.show_if_requested()
    """
    (kpts, probchip, chipsize) = tup
    if probchip is None:
        # hack for undetected chips. SETS ALL FEATWEIGHTS TO .25 = 1/4
        # (only reachable with asserts disabled, e.g. under python -O)
        assert False, 'should not be in this state'
        weights = np.full(len(kpts), .25, dtype=np.float32)
    else:
        # Scale keypoints from chip coordinates into probchip coordinates.
        sfx, sfy = (probchip.shape[1] / chipsize[0], probchip.shape[0] / chipsize[1])
        kpts_ = vt.offset_kpts(kpts, (0, 0), (sfx, sfy))
        # CLEANUP: removed an unreachable `if False:` branch that computed
        # the same weights with a per-keypoint warped-patch loop
        # (vt.get_warped_patch + vt.gaussian_average_patch) — "VERY SLOW".
        weight_list = vt.patch_gaussian_weighted_average_intensities(probchip, kpts_)
        weights = np.array(weight_list, dtype=np.float32)
    return weights
class VsOneRequest(dtool.base.VsOneSimilarityRequest):
    # Request class for the 'vsone' depcache table (see compute_one_vs_one).
    _tablename = 'vsone'

    def postprocess_execute(request, parent_rowids, result_list):
        """Combine per-(qaid, daid) vsone results into one ChipMatch per query.

        ``result_list`` rows are (score, match) tuples aligned with
        ``parent_rowids``; the ChipMatch column is grouped by query aid,
        combined, and rescored.
        """
        import ibeis
        depc = request.depc
        ibs = depc.controller
        qaid_list, daid_list = list(zip(*parent_rowids))
        # Group the pairwise results by their query annotation.
        unique_qaids, groupxs = ut.group_indices(qaid_list)
        grouped_daids = ut.apply_grouping(daid_list, groupxs)
        unique_qnids = ibs.get_annot_nids(unique_qaids)
        # Column 1 holds the ChipMatch objects (column 0 is the score).
        single_cm_list = ut.take_column(result_list, 1)
        grouped_cms = ut.apply_grouping(single_cm_list, groupxs)
        _iter = zip(unique_qaids, unique_qnids, grouped_daids, grouped_cms)
        cm_list = []
        # NOTE(review): qaid/qnid/daids are unpacked but only cms is used in
        # the loop body — presumably kept for debugging; verify before removal.
        for qaid, qnid, daids, cms in _iter:
            # Hacked in version of creating an annot match object
            chip_match = ibeis.ChipMatch.combine_cms(cms)
            chip_match.score_maxcsum(request)
            cm_list.append(chip_match)
        #import utool
        #utool.embed()
        #cm = cm_list[0]
        #cm.print_inspect_str(request)
        #cm.assert_self(request, assert_feats=False)
        return cm_list
class VsOneConfig(dtool.Config):
    """
    Configuration for one-vs-one (vsone) annotation matching.

    Example:
        >>> from ibeis.core_annots import * # NOQA
        >>> cfg = VsOneConfig()
        >>> result = str(cfg)
        >>> print(result)
    """
    _param_info_list = [
        #ut.ParamInfo('sver_xy_thresh', .01),
        # spatial-verification xy threshold — presumably a fraction of the
        # chip diagonal; confirm against the vsone matching code.
        ut.ParamInfo('sver_xy_thresh', .001),
        ut.ParamInfo('ratio_thresh', .625),
        ut.ParamInfo('refine_method', 'homog'),
        ut.ParamInfo('symmetric', False),
        ut.ParamInfo('K', 1),
        ut.ParamInfo('Knorm', 1),
        ut.ParamInfo('version', 0),
        ut.ParamInfo('augment_queryside_hack', False),
    ]
    # Nested configs: vsone depends on chips/feats/featweights upstream.
    _sub_config_list = [
        FeatConfig,
        ChipConfig,  # TODO: infer chip config from feat config
        FeatWeightConfig,
    ]
def test_cut(ibs, parent_rowids_T, score_list2):
    """Exploratory/dev routine: multicut clustering of vsone pair scores.

    Builds a dense (num x num) cost matrix from the pairwise scores, then,
    per viewpoint group, runs unsupervised multicut labeling at several
    thresholds and prints how the resulting grouping differs from the
    ground-truth name grouping. Not part of the production pipeline; the
    `if True:` / `if False:` blocks are scratch experiments left as-is.
    """
    unique_aids = ut.unique(ut.flatten(parent_rowids_T))
    # Earlier per-viewpoint variant kept for reference:
    #for view in set(ibs.get_annot_yaw_texts(unique_aids)):
    #    aid2_idx = ut.make_index_lookup(unique_aids)
    #    #idx2_aid = ut.invert_dict(aid2_idx)
    #    idx_pairs = np.array(ut.unflat_take(aid2_idx, zip(*parent_rowids_T)))
    #    num = len(aid2_idx)
    #    flat_idx = np.ravel_multi_index(idx_pairs.T, (num, num))
    #    score_list2 = np.array(score_list2)
    #    cost_matrix = np.zeros(num * num)
    #    cost_matrix[flat_idx] = score_list2
    #    cost_matrix = cost_matrix.reshape((num, num))
    #    thresh = np.median(cost_matrix)
    #    thresh = 20
    #    labels = vt.unsupervised_multicut_labeling(cost_matrix, thresh)
    #    grouping = ut.group_items(unique_aids, labels)
    if True:
        #vp2_name2_aids = ibs.group_annots_by_multi_prop(unique_aids, [ibs.get_annot_yaw_texts, ibs.get_annot_name_texts])
        # Map aids to dense indices and scatter pair scores into a square
        # cost matrix via flat indexing.
        aid2_idx = ut.make_index_lookup(unique_aids)
        num = len(aid2_idx)
        idx_pairs = np.array(ut.unflat_take(aid2_idx, zip(*parent_rowids_T)))
        flat_idx = np.ravel_multi_index(idx_pairs.T, (num, num))
        score_list2 = np.array(score_list2)
        cost_matrix = np.zeros(num * num)
        cost_matrix[flat_idx] = score_list2
        cost_matrix = cost_matrix.reshape((num, num))
        # Evaluate clustering quality separately per viewpoint group.
        vp2_aids = ibs.group_annots_by_multi_prop(unique_aids, [ibs.get_annot_yaw_texts])
        for view, aids in vp2_aids.items():
            print('---')
            print('view = %r' % (view,))
            print('len(aids) = %r' % (len(aids),))
            idxs = ut.take(aid2_idx, aids)
            if len(idxs) == 1:
                continue
            real_group = ibs.group_annots_by_name(aids)[0]
            # Select the submatrix for this viewpoint's aids.
            sub_cost_matrix = cost_matrix[idxs].T[idxs].T
            #ibs = ut.search_stack_for_localvar('ibs')
            for thresh in [5, 7, 10, 15, 25, 50]:
                labels = vt.unsupervised_multicut_labeling(sub_cost_matrix, thresh)
                grouping = ut.group_items(aids, labels)
                diff = ut.compare_groupings(real_group, grouping.values())
                print('thresh = %r, diff=%r' % (thresh, diff))
            #print('--')
    if False:
        # synthetic data: sanity-check multicut on random cost matrices with
        # known encounter labels (disabled scratch experiment).
        size = 100
        thresh = 50
        np.random.randint(0, 1)
        np.zeros((size, size))
        #np.random.rand(size, size)
        size = 40
        for size in range(2, 100):
            aids = np.arange(size)
            encounter_lbls = np.random.randint(0, size, size)
            grid1 = np.tile(encounter_lbls, (size, 1))
            is_match = grid1.T == grid1
            good_pos = np.where(is_match)
            bad_pos = np.where(~is_match)
            # Matching pairs get high scores, non-matching low scores.
            sub_cost_matrix = np.empty((size, size))
            sub_cost_matrix[good_pos] = np.random.randn(len(good_pos[0])) + 20
            sub_cost_matrix[bad_pos] = np.random.randn(len(bad_pos[0])) - 20
            sub_cost_matrix[np.diag_indices_from(sub_cost_matrix)] = np.inf
            labels = vt.unsupervised_multicut_labeling(sub_cost_matrix, 0)
            diff = ut.compare_groupings(
                list(ut.group_items(aids, encounter_lbls).values()),
                list(ut.group_items(aids, labels).values()))
            print('diff = %r' % (diff,))
@derived_attribute(
    tablename='vsone', parents=['annotations', 'annotations'],
    colnames=['score', 'match'], coltypes=[float, ChipMatch],
    requestclass=VsOneRequest,
    configclass=VsOneConfig,
    chunksize=128,
    #chunksize=16,
    fname='vsone',
)
def compute_one_vs_one(depc, qaids, daids, config):
    r"""
    Compute one-vs-one (vsone) matches; yields (score, ChipMatch) per pair.

    CommandLine:
        python -m ibeis.core_annots --test-compute_one_vs_one --show
        python -m ibeis.control.IBEISControl --test-show_depc_annot_graph --show
        python -m ibeis.control.IBEISControl --test-show_depc_annot_table_input --show --tablename=vsone

    Ignore:
        >>> from ibeis.core_annots import * # NOQA
        >>> import ibeis
        >>> ibs, aid_list = ibeis.testdata_aids('PZ_Master1', 'default:')
        >>> occurid2_aids = ibs.temp_group_annot_occurrences(aid_list)
        >>> aids_list = [np.unique(aids) for aids in occurid2_aids.values()]
        >>> aids_list = [aids for aids in aids_list if len(aids) > 1 and len(aids) < 100]
        aids = ut.sortedby([a.tolist() for a in aids_list], ut.lmap(len, aids_list))[-1]
        depc = ibs.depc_annot
        progiter = ut.ProgIter(aids_list, freq=1)
        for aids in progiter:
            request = depc.new_request('vsone', aids, aids, {'dim_size': 450})
            qaids, daids = request.parent_rowids_T
            config = request.config
            parent_rowids_T = request.parent_rowids_T
            rawres_list2 = request.execute(postprocess=False)
            #score_list2 = ut.take_column(rawres_list2, 0)
        ut.list_T = ut.list_transpose
        #test_cut(ibs, parent_rowids_T, score_list2)
        # x = 44
        #test_cut(ibs, ut.list_T(ut.list_T(parent_rowids_T)[0:x]), score_list2[0:x])

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.core_annots import * # NOQA
        >>> #ibs, depc, aid_list = testdata_core(size=5)
        >>> import ibeis
        >>> #ibs, aid_list = ibeis.testdata_aids('wd_peter2', 'timectrl:pername=2,view=left,view_ext=0,exclude_reference=True')
        >>> ibs, aid_list = ibeis.testdata_aids('testdb2', 'default:')
        >>> _, aids = ut.items_sorted_by_value(ut.group_items(aid_list, ibs.get_annot_occurrence_text(aid_list)), key=len)[-1]
        >>> aid_list = aids[0:4]
        >>> depc = ibs.depc_annot
        >>> request = depc.new_request('vsone', aid_list, aid_list, {'resize_dim': 'width', 'dim_size': 450})
        >>> config = request.config
        >>> parent_rowids_T = request.parent_rowids_T
        >>> qaids, daids = request.parent_rowids_T
        >>> # Compute using request
        >>> print('...Test vsone cache')
        >>> rawres_list2 = request.execute(postprocess=False)
        >>> score_list2 = ut.take_column(rawres_list2, 0)
        >>> res_list2 = request.execute()
        >>> print(res_list2)
        >>> # Compute using function
        >>> #print('...Test vsone function')
        >>> #rawres_list1 = list(compute_one_vs_one(depc, qaids, daids, config))
        >>> #score_list1 = ut.take_column(rawres_list1, 0)
        >>> #print(score_list1)
        >>> #assert np.all(score_list1 == score_list2)
        >>> ut.quit_if_noshow()
        >>> ut.ensure_pylab_qt4()
        >>> match = res_list2[0]
        >>> match.print_inspect_str(request)
        >>> #match.show_analysis(qreq_=request)
        >>> #match.ishow_analysis(qreq_=request)
        >>> #match.ishow_single_annotmatch(qreq_=request)
        >>> match.show_single_annotmatch(qreq_=request, vert=False)
        >>> ut.show_if_requested()

    Example:
        >>> # Example of a one-vs-one query
        >>> import ibeis
        >>> ibs = ibeis.opendb('testdb1')
        >>> config = {'codename': 'vsone'}
        >>> qreq_ = ibs.new_query_request([1], [2], cfgdict=config)
        >>> cm_list = qreq_.execute()
        >>> match = cm_list[0]
        >>> match.print_inspect_str(qreq_)
        >>> match.show_single_annotmatch(qreq_=qreq_, vert=False)
        >>> import utool as ut
        >>> ut.show_if_requested()

    Example:
        >>> # Example of a one-vs-many query
        >>> import ibeis
        >>> ibs = ibeis.opendb('testdb1')
        >>> config = {'codename': 'vsmany'}
        >>> qreq_ = ibs.new_query_request([1], ibs.get_valid_aids(), cfgdict=config)
        >>> cm_list = qreq_.execute()
        >>> match = cm_list[0]
        >>> match.print_inspect_str(qreq_)
        >>> match.show_single_annotmatch(qreq_=qreq_, vert=False)
        >>> import utool as ut
        >>> ut.show_if_requested()
    """
    import ibeis
    ibs = depc.controller
    qconfig2_ = config
    dconfig2_ = config
    unique_qaids = np.unique(qaids)
    unique_daids = np.unique(daids)
    # TODO: Ensure entire pipeline can use new dependencies
    # DEPC Precompute
    ibs.depc.d.get_feat_rowids(unique_qaids, config=qconfig2_)
    ibs.depc.d.get_feat_rowids(unique_daids, config=dconfig2_)
    if True:
        annot1_list = [ibs.get_annot_lazy_dict2(qaid, config=qconfig2_)
                       for qaid in unique_qaids]
        annot2_list = [ibs.get_annot_lazy_dict2(daid, config=dconfig2_)
                       for daid in unique_daids]
    else:
        #config.chip_cfgstr = config.chip_cfg.get_cfgstr()
        #config.chip_cfg_dict = config.chip_cfg.asdict()
        annot1_list = [ibs.get_annot_lazy_dict(qaid, config2_=qconfig2_)
                       for qaid in unique_qaids]
        annot2_list = [ibs.get_annot_lazy_dict(daid, config2_=dconfig2_)
                       for daid in unique_daids]
    # precache flann structures
    # TODO: Make depcache node
    flann_params = {'algorithm': 'kdtree', 'trees': 8}
    for annot1 in annot1_list:
        if 'flann' not in annot1:
            # FIX: bind the loop variable as a default argument.  The original
            # lambda closed over ``annot1`` late, so every deferred flann
            # builder would have used the *last* annotation's vectors.
            annot1['flann'] = lambda annot1=annot1: vt.flann_cache(
                annot1['vecs'], flann_params=flann_params, quiet=True,
                verbose=False)
    qaid_to_annot = dict(zip(unique_qaids, annot1_list))
    daid_to_annot = dict(zip(unique_daids, annot2_list))
    #all_aids = np.unique(ut.flatten([qaids, daids]))
    verbose = False
    #yeild_ = []
    #print("START VSONE")
    for qaid, daid in ut.ProgIter(zip(qaids, daids), nTotal=len(qaids),
                                  lbl='compute vsone', backspace=True, freq=1):
        annot1 = qaid_to_annot[qaid]
        annot2 = daid_to_annot[daid]
        metadata = {
            'annot1': annot1,
            'annot2': annot2,
        }
        # Run the actual one-vs-one matching; 'TOP+SV' are the spatially
        # verified top matches, H_TOP the estimated homography.
        vt_match = vt.vsone_matching2(metadata, cfgdict=config, verbose=verbose)
        matchtup = vt_match.matches['TOP+SV']
        H = vt_match.metadata['H_TOP']
        score = matchtup.fs.sum()
        fm = matchtup.fm
        fs = matchtup.fs
        match = ibeis.ChipMatch(
            qaid=qaid,
            daid_list=[daid],
            fm_list=[fm],
            fsv_list=[vt.atleast_nd(fs, 2)],
            H_list=[H],
            fsv_col_lbls=['L2_SIFT'])
        match._update_daid_index()
        match.evaluate_dnids(ibs)
        # NOTE(review): _update_daid_index is called again after
        # evaluate_dnids; presumably the index must be rebuilt — confirm.
        match._update_daid_index()
        match.set_cannonical_name_score([score], [score])
        #import utool
        #utool.embed()
        if False:
            # interactive debugging branch (never executed)
            ut.ensure_pylab_qt4()
            ibs, depc, aid_list = testdata_core(size=3)
            request = depc.new_request('vsone', aid_list, aid_list, {'dim_size': 450})
            match.ishow_analysis(request)
        #match = SingleMatch_IBEIS(qaid, daid, score, fm)
        #yeild_.append((score, match))
        yield (score, match)
class IndexerConfig(dtool.Config):
    """
    Configuration for the approximate nearest-neighbor (flann) indexer.

    Example:
        >>> from ibeis.core_annots import * # NOQA
        >>> cfg = IndexerConfig()
        >>> result = str(cfg)
        >>> print(result)
    """
    _param_info_list = [
        ut.ParamInfo('algorithm', 'kdtree', 'alg'),
        ut.ParamInfo('random_seed', 42, 'seed'),
        # 'trees' only applies to the kdtree algorithm; hide it otherwise.
        ut.ParamInfo('trees', 4, hideif=lambda cfg: cfg['algorithm'] != 'kdtree'),
        ut.ParamInfo('version', 1),
    ]
    _sub_config_list = [
        #FeatConfig,
        #ChipConfig,  # TODO: infer chip config from feat config
        #FeatWeightConfig,
    ]

    def get_flann_params(cfg):
        """Return flann parameters for the configured algorithm, with any
        matching keys overridden by values from this config."""
        default_params = vt.get_flann_params(cfg['algorithm'])
        flann_params = ut.update_existing(default_params, cfg.asdict())
        return flann_params
# Extra experimental tables are only registered when --testmode is given.
testmode = ut.get_argflag('--testmode')

#if 1 or testmode:
@derived_attribute(
    #tablename='neighbor_index', parents=['annotations*'],
    #tablename='neighbor_index', parents=['annotations'],
    #tablename='neighbor_index', parents=['feat*'],
    tablename='neighbor_index', parents=['featweight*'],
    # tablename='neighbor_index', parents=['feat*'],
    #tablename='neighbor_index', parents=['feat'],
    colnames=['indexer'], coltypes=[neighbor_index.NeighborIndex2],
    configclass=IndexerConfig,
    chunksize=1, fname='indexer',
)
def compute_neighbor_index(depc, fids_list, config):
    r"""
    Builds a nearest-neighbor (flann) index over one set of feature rowids.

    Args:
        depc (dtool.DependencyCache):
        fids_list (list): a single-element list containing the feature rowids
            to index (only one indexer is built at a time)
        config (dtool.Config):

    CommandLine:
        python -m ibeis.core_annots --exec-compute_neighbor_index --show
        python -m ibeis.control.IBEISControl --test-show_depc_annot_table_input --show --tablename=neighbor_index

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.core_annots import * # NOQA
        >>> import ibeis
        >>> ibs, aid_list = ibeis.testdata_aids('testdb1')
        >>> depc = ibs.depc_annot
        >>> fid_list = depc.get_rowids('feat', aid_list)
        >>> aids_list = tuple([aid_list])
        >>> fids_list = tuple([fid_list])
        >>> # Compute directly from function
        >>> config = ibs.depc_annot['neighbor_index'].configclass()
        >>> result1 = list(compute_neighbor_index(depc, fids_list, config))
        >>> nnindexer1 = result1[0][0]
        >>> # Compute using depcache
        >>> result2 = ibs.depc_annot.get('neighbor_index', [aids_list], 'indexer', config, recompute=False, _debug=True)
        >>> #result3 = ibs.depc_annot.get('neighbor_index', [tuple(fids_list)], 'indexer', config, recompute=False)
        >>> print(result2)
        >>> print(result3)
        >>> assert result2[0] is not result3[0]
        >>> assert nnindexer1.knn(ibs.get_annot_vecs(1), 1) is not None
        >>> assert result3[0].knn(ibs.get_annot_vecs(1), 1) is not None
    """
    print('[IBEIS] COMPUTE_NEIGHBOR_INDEX:')
    # TODO: allow augment
    assert len(fids_list) == 1, 'only working with one indexer at a time'
    fid_list = fids_list[0]
    # Resolve the feature rowids back to their root annotation rowids.
    aid_list = depc.get_root_rowids('feat', fid_list)
    flann_params = config.get_flann_params()
    cfgstr = config.get_cfgstr()
    verbose = True
    nnindexer = neighbor_index.NeighborIndex2(flann_params, cfgstr)
    # Initialize neighbor with unindexed data
    support = nnindexer.get_support(depc, aid_list, config)
    nnindexer.init_support(aid_list, *support, verbose=verbose)
    nnindexer.config = config
    nnindexer.reindex()
    yield (nnindexer,)
#class FeatNeighborConfig(dtool.Config)
if testmode:
    # NOT YET READY
    @derived_attribute(
        tablename='feat_neighbs', parents=['featweight', 'neighbor_index'],
        colnames=['qfx2_idx', 'qfx2_dist'], coltypes=[np.ndarray, np.ndarray],
        #configclass=IndexerConfig,
        chunksize=1, fname='neighbors',
    )
    def compute_feature_neighbors(depc, fid_list, indexer_rowid_list, config):
        """
        Finds nearest neighbors for each query feature set, excluding
        candidates from the same encounter as the query annotation.

        Args:
            depc (dtool.DependencyCache):
            fid_list (list): feature rowids for the queries
            indexer_rowid_list (list): rowids of the neighbor_index to search
            config (dtool.Config):

        CommandLine:
            python -m ibeis.core_annots --exec-compute_feature_neighbors --show
            python -m ibeis.control.IBEISControl --test-show_depc_annot_table_input --show --tablename=feat_neighbs

        Example:
            >>> # DISABLE_DOCTEST
            >>> from ibeis.core_annots import * # NOQA
            >>> #ibs, depc, aid_list = testdata_core(size=5)
            >>> import ibeis
            >>> ibs, qaid_list = ibeis.testdata_aids('seaturtles')
            >>> daid_list = qaid_list
            >>> depc = ibs.depc_annot
            >>> index_config = ibs.depc_annot['neighbor_index'].configclass()
            >>> fid_list = depc.get_rowids('feat', qaid_list)
            >>> indexer_rowid_list = ibs.depc_annot.get_rowids('neighbor_index', [daid_list], index_config)
            >>> config = ibs.depc_annot['feat_neighbs'].configclass()
            >>> compute_feature_neighbors(depc, fid_list, indexer_rowid_list, config)
        """
        print('[IBEIS] NEAREST NEIGHBORS')
        #assert False
        # do computation
        #num_neighbors = (config['K'] + config['Knorm'])
        ibs = depc.controller
        num_neighbors = 1
        #b = np.broadcast([1, 2, 3], [1])
        #list(b)
        #[(1, 1), (2, 1), (3, 1)]
        # FIXME: not sure how depc should handle this case
        # Maybe it groups by indexer_rowid_list and then goes from there.
        indexer = depc.get_native('neighbor_index', indexer_rowid_list, 'indexer')[0]
        qvecs_list = depc.get_native('feat', fid_list, 'vecs', eager=False, nInput=len(fid_list))
        #qvecs_list = depc.get('feat', qaid_list, 'vecs', config, eager=False, nInput=len(qaid_list))
        qaid_list = depc.get_ancestor_rowids('feat', fid_list)
        # Encounter label for every indexed annotation; used to exclude
        # same-encounter database entries from each query's neighbors.
        ax2_encid = np.array(ibs.get_annot_encounter_text(indexer.ax2_aid))
        for qaid, qfx2_vec in zip(qaid_list, qvecs_list):
            qencid = ibs.get_annot_encounter_text([qaid])[0]
            invalid_axs = np.where(ax2_encid == qencid)[0]
            #indexer.ax2_aid[invalid_axs]
            nnindxer = indexer
            # iter_count (number of knn retries) is intentionally discarded.
            qfx2_idx, qfx2_dist, iter_count = nnindxer.conditional_knn(qfx2_vec,
                                                                      num_neighbors,
                                                                      invalid_axs)
            yield qfx2_idx, qfx2_dist

    # NOT YET READY
    @derived_attribute(
        tablename='sver', parents=['feat_neighbs'],
        colnames=['chipmatch'], coltypes=[ChipMatch],
        #configclass=IndexerConfig,
        chunksize=1, fname='vsmany',
    )
    def compute_sver(depc, fid_list, config):
        # Placeholder: spatial verification stage not yet implemented.
        pass

    @derived_attribute(
        tablename='vsmany', parents=['sver'],
        colnames=['chipmatch'], coltypes=[ChipMatch],
        #configclass=IndexerConfig,
        chunksize=1, fname='vsmany',
    )
    def compute_vsmany(depc, fid_list, config):
        # Placeholder: vsmany aggregation stage not yet implemented.
        pass
class LabelerConfig(dtool.Config):
    """Configuration for the annotation labeler table."""
    _param_info_list = [
        # Detection sensitivity threshold for the labeler network.
        ut.ParamInfo('labeler_sensitivity', 0.2),
    ]
    _sub_config_list = [
        ChipConfig
    ]
@derived_attribute(
    tablename='labeler', parents=['annotations'],
    colnames=['score', 'species', 'viewpoint', 'quality', 'orientation', 'probs'],
    coltypes=[float, str, str, str, float, dict],
    configclass=LabelerConfig,
    fname='chipcache4',
    chunksize=128,
)
def compute_labels_annotations(depc, aid_list, config=None):
    r"""
    Computes labeler predictions (species / viewpoint / quality / orientation
    and class probabilities) for each annotation chip.

    Args:
        depc (ibeis.depends_cache.DependencyCache):
        aid_list (list): list of annotation rowids
        config (dict): (default = None)

    Yields:
        (float, str, str, str, float, dict): one result tuple per annotation

    CommandLine:
        ibeis compute_labels_annotations

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.core_annots import * # NOQA
        >>> import ibeis
        >>> defaultdb = 'PZ_MTEST'
        >>> ibs = ibeis.opendb(defaultdb=defaultdb)
        >>> depc = ibs.depc_annot
        >>> aid_list = ibs.get_valid_aids()[0:8]
        >>> # depc.delete_property('labeler', aid_list)
        >>> results = depc.get_property('labeler', aid_list, None)
        >>> print(results)
    """
    from ibeis.algo.detect.labeler.labeler import label_chip_list
    print('[ibs] Process Annotation Labels')
    print('config = %r' % (config,))
    # Get controller
    ibs = depc.controller
    depc = ibs.depc_annot
    # NOTE(review): the caller-supplied ``config`` is discarded here and
    # replaced by a fixed chip-extraction config (128x128 chips) — confirm
    # this is intended and that LabelerConfig values need not be forwarded.
    config = {
        'dim_size': (128, 128),
        'resize_dim': 'wh',
    }
    chip_list = depc.get_property('chips', aid_list, 'img', config=config)
    result_list = label_chip_list(chip_list)
    # yield detections
    for result in result_list:
        yield result
if __name__ == '__main__':
    r"""
    CommandLine:
        python -m ibeis.core_annots
        python -m ibeis.core_annots --allexamples
        utprof.py -m ibeis.core_annots --allexamples
    """
    import multiprocessing
    multiprocessing.freeze_support()  # for win32
    import utool as ut  # NOQA
    # Run all enabled doctests in this module via the utool test harness.
    ut.doctest_funcs()
| smenon8/ibeis | ibeis/core_annots.py | core_annots.py | py | 57,012 | python | en | code | null | github-code | 6 | [
{
"api_name": "utool.inject2",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "ibeis.control.controller_inject.register_preprocs",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "ibeis.control.controller_inject.register_subprops",
"line_number": 63,
"u... |
19314688451 | # -*- coding: utf-8 -*-
'''
A module for startup settings
'''
from __future__ import absolute_import
import logging
import os.path
import sys
from requests.structures import CaseInsensitiveDict # pylint: disable=import-error,3rd-party-local-module-not-gated
# Import local libs
# This file may be loaded out of __pycache__, so the
# directory of its .py may not be in the search path.
IMPORT_PATH = os.path.dirname(__file__)
if IMPORT_PATH.endswith('__pycache__'):
IMPORT_PATH = os.path.dirname(IMPORT_PATH)
sys.path.append(IMPORT_PATH)
try:
import _nisysmgmt_grains # pylint: disable=import-error,3rd-party-local-module-not-gated
finally:
# Remove the extra search path that we added to sys.path
sys.path.remove(IMPORT_PATH)
log = logging.getLogger(__name__)
try:
import salt.modules.cmdmod as cmd
import salt.serializers.json as json
import salt.ext.six.moves.configparser as configparser
except ImportError:
log.critical("Salt is not available")
# Define the module's virtual name
__virtualname__ = 'startup'
NIRTINI_PATH = '/etc/natinst/share/ni-rt.ini'
NIRTCFG_PATH = '/usr/local/natinst/bin/nirtcfg'
FWSETENV_PATH = '/sbin/fw_setenv'
def __virtual__():
    '''
    Decide whether this execution module may be loaded.

    Loads only on older NILinuxRT targets (os_family NILinuxRT with the
    'nilrt' lsb distribution id), where the nirtcfg utility exists.

    :return: True if environment is set up and False otherwise
    '''
    on_nilrt_family = 'NILinuxRT' in __grains__['os_family']
    on_nilrt_distrib = __grains__['lsb_distrib_id'] == 'nilrt'
    if not (on_nilrt_family and on_nilrt_distrib):
        return False, 'The startup_settings module cannot be loaded.'
    return True
def get_all(json_format=False):
    '''
    Collect every supported startup setting.

    Reported settings: NoFPGAApp, NoApp, ConsoleOut, LabVIEWAccess and,
    on x86_64 controllers only, EmbeddedUI.

    :param json_format: If true, returns the result in JSON format
    :return: Returns settings

    CLI Example:

    .. code-block:: bash

        salt '*' startup.get_all
        salt '*' startup.get_all True
    '''
    settings = {
        'NoFPGAApp': get('nofpgaapp'),
        'NoApp': get('noapp'),
        'ConsoleOut': get('consoleout'),
        'LabVIEWAccess': get('labviewaccess'),
    }
    # The embedded UI option only exists on x86_64 targets.
    if __grains__.get('cpuarch') == 'x86_64':
        settings['EmbeddedUI'] = get('embeddedui')
    if json_format:
        return json.serialize(settings)
    return settings
def get(setting):
    '''
    Read a single startup setting from ni-rt.ini.

    Recognized (case-insensitive) names: nofpgaapp, noapp, consoleout,
    embeddedui, labviewaccess.

    :param setting: Name of setting.
    :return: Returns value of that setting or -1 if error.

    CLI Example:

    .. code-block:: bash

        salt '*' startup.get noapp
    '''
    name = setting.strip().lower()
    # ini tokens stored in the [systemsettings] section.
    system_settings = {'nofpgaapp': 'NoFPGAApp.enabled',
                       'noapp': 'NoApp.enabled',
                       'consoleout': 'ConsoleOut.enabled',
                       'embeddedui': 'ui.enabled'}
    # ini tokens stored in the [lvrt] section.
    lvrt_settings = {'labviewaccess': 'RTTarget.RTProtocolAllowed'}
    ini = configparser.RawConfigParser(dict_type=CaseInsensitiveDict)
    ini.read(NIRTINI_PATH)
    if name in system_settings:
        return ini.get('systemsettings', system_settings[name]).strip('\"')
    if name in lvrt_settings:
        return ini.get('lvrt', lvrt_settings[name]).strip('\"')
    return -1
def enable_console_out(enable=True):
    '''
    Turn the ConsoleOut startup option on or off.

    Writes both the ni-rt.ini token and the firmware environment variable,
    updates the cached startup settings, and triggers a grains refresh.

    :param enable: If true enable ConsoleOut else disable ConsoleOut. Default is True.

    CLI Example:

    .. code-block:: bash

        salt '*' startup.enable_console_out
    '''
    nirtcfg_cmd = '{0} --set section=systemsettings,token={1},value={2}'.format(
        NIRTCFG_PATH, 'ConsoleOut.enabled', enable)
    cmd.run(nirtcfg_cmd)
    cmd.run('{0} consoleoutenable={1}'.format(FWSETENV_PATH, enable))
    cached_settings = _nisysmgmt_grains.get_last_known_startup_settings(__grains__)
    cached_settings['ConsoleOut'] = str(enable)
    __salt__['event.fire']({'force_refresh': True}, 'grains_refresh')
    return True
def enable_no_fpga_app(enable=True):
    '''
    Turn the NoFPGAApp startup option on or off.

    :param enable: If true enable NoFPGAApp else disable NoFPGAApp. Default is True.

    CLI Example:

    .. code-block:: bash

        salt '*' startup.enable_no_fpga_app
    '''
    nirtcfg_cmd = '{0} --set section=systemsettings,token={1},value={2}'.format(
        NIRTCFG_PATH, 'NoFPGAApp.enabled', enable)
    cmd.run(nirtcfg_cmd)
    cached_settings = _nisysmgmt_grains.get_last_known_startup_settings(__grains__)
    cached_settings['NoFPGAApp'] = str(enable)
    __salt__['event.fire']({'force_refresh': True}, 'grains_refresh')
    return True
def enable_no_app(enable=True):
    '''
    Turn the NoApp startup option on or off.

    :param enable: If true enable NoApp else disable NoApp. Default is True.

    CLI Example:

    .. code-block:: bash

        salt '*' startup.enable_no_app
    '''
    nirtcfg_cmd = '{0} --set section=systemsettings,token={1},value={2}'.format(
        NIRTCFG_PATH, 'NoApp.enabled', enable)
    cmd.run(nirtcfg_cmd)
    cached_settings = _nisysmgmt_grains.get_last_known_startup_settings(__grains__)
    cached_settings['NoApp'] = str(enable)
    __salt__['event.fire']({'force_refresh': True}, 'grains_refresh')
    return True
def enable_embedded_ui(enable=True):
    '''
    Turn the Embedded UI startup option on or off.

    :param enable: If true enable Embedded UI else disable Embedded UI. Default is True.

    CLI Example:

    .. code-block:: bash

        salt '*' startup.enable_embedded_ui
    '''
    nirtcfg_cmd = '{0} --set section=systemsettings,token={1},value={2}'.format(
        NIRTCFG_PATH, 'ui.enabled', enable)
    cmd.run(nirtcfg_cmd)
    cached_settings = _nisysmgmt_grains.get_last_known_startup_settings(__grains__)
    cached_settings['EmbeddedUI'] = str(enable)
    __salt__['event.fire']({'force_refresh': True}, 'grains_refresh')
    return True
def enable_labview_access(enable=True):
    '''
    Turn LabVIEW Project Access on or off.

    :param enable: If true enable LabVIEW Project Access else disable LabVIEW Project Access. Default is True.

    CLI Example:

    .. code-block:: bash

        salt '*' startup.enable_labview_access
    '''
    nirtcfg_cmd = '{0} --set section=lvrt,token={1},value={2}'.format(
        NIRTCFG_PATH, 'RTTarget.RTProtocolAllowed', enable)
    cmd.run(nirtcfg_cmd)
    cached_settings = _nisysmgmt_grains.get_last_known_startup_settings(__grains__)
    cached_settings['LabVIEWAccess'] = str(enable)
    __salt__['event.fire']({'force_refresh': True}, 'grains_refresh')
    return True
| BKnight760/ubuntu-systemlink-salt-minion | var/lib/salt/minion/extmods/modules/startup_settings.py | startup_settings.py | py | 6,757 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "os.path.path.dirname",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "os.path.path.dirname",... |
36525273442 | """Parser object that performs coarse-to-fine and postprocessing.
Additionally, a simple command line interface similar to bitpar."""
from __future__ import print_function
import io
import os
import re
import sys
import time
import gzip
import codecs
import logging
import tempfile
import traceback
import string # pylint: disable=W0402
import multiprocessing
if sys.version[0] > '2':
imap = map
else:
from itertools import imap
from math import exp, log
from heapq import nlargest
from getopt import gnu_getopt, GetoptError
from operator import itemgetter
from functools import wraps
import numpy as np
from discodop import plcfrs, pcfg
from discodop.grammar import defaultparse
from discodop.containers import Grammar
from discodop.coarsetofine import prunechart, whitelistfromposteriors
from discodop.disambiguation import getderivations, marginalize, doprerank
from discodop.tree import Tree
from discodop.lexicon import replaceraretestwords, UNKNOWNWORDFUNC, UNK
from discodop.treebank import WRITERS, writetree
from discodop.treebanktransforms import reversetransform, rrbacktransform, \
saveheads, NUMBERRE, readheadrules
from discodop.treetransforms import mergediscnodes, unbinarize, \
removefanoutmarkers
USAGE = '''
usage: %(cmd)s [options] <grammar/> [input [output]]
or: %(cmd)s --simple [options] <rules> <lexicon> [input [output]]
'grammar/' is a directory with a model produced by "discodop runexp".
When no filename is given, input is read from standard input and the results
are written to standard output. Input should contain one sentence per line
with space-delimited tokens. Output consists of bracketed trees in
selected format. Files must be encoded in UTF-8.
General options:
-x Input is one token per line, sentences separated by two
newlines (like bitpar).
-b k Return the k-best parses instead of just 1.
--prob Print probabilities as well as parse trees.
--tags Tokens are of the form "word/POS"; give both to parser.
--fmt=(export|bracket|discbracket|alpino|conll|mst|wordpos)
Format of output [default: discbracket].
--numproc=k Launch k processes, to exploit multiple cores.
--simple Parse with a single grammar and input file; similar interface
to bitpar. The files 'rules' and 'lexicon' define a binarized
grammar in bitpar or PLCFRS format.
--verbosity=x 0 <= x <= 4. Same effect as verbosity in parameter file.
Options for simple mode:
-s x Use "x" as start symbol instead of default "TOP".
--bt=file Apply backtransform table to recover TSG derivations.
--obj=(mpd|mpp|mrp|mcc|shortest|sl-dop)
Objective function to maximize [default: mpd].
-m x Use x derivations to approximate objective functions;
mpd and shortest require only 1.
--bitpar Use bitpar to parse with an unbinarized grammar.
''' % dict(cmd=sys.argv[0], fmt=','.join(WRITERS))
DEFAULTSTAGE = dict(
name='stage1', # identifier, used for filenames
mode='plcfrs', # use the agenda-based PLCFRS parser
prune=False, # whether to use previous chart to prune this stage
split=False, # split disc. nodes VP_2[101] as { VP*[100], VP*[001] }
splitprune=False, # treat VP_2[101] as {VP*[100], VP*[001]} for pruning
markorigin=False, # mark origin of split nodes: VP_2 => {VP*1, VP*2}
collapselabels=None, # options: None, 'head', 'all'. TODO: implement.
k=50, # no. of coarse pcfg derivations to prune with; k=0: filter only
dop=None, # DOP mode (DOP reduction / double DOP)
binarized=True, # for double dop, whether to binarize extracted grammar
# (False requires use of bitpar)
sample=False, kbest=True,
m=10, # number of derivations to sample/enumerate
estimator='rfe', # choices: rfe, ewe
objective='mpp', # choices: mpp, mpd, shortest, sl-dop[-simple]
# NB: w/shortest derivation, estimator only affects tie breaking.
sldop_n=7, # number of trees to consider when using sl-dop[-simple]
mcc_labda=1.0, # weight to assign to recall vs. mistake rate with mcc
mcc_labels=None, # optionally, set of labels to optimize for with mcc
packedgraph=False, # use packed graph encoding for DOP reduction
iterate=False, # for double dop, whether to add fragments of fragments
complement=False, # for double dop, whether to include fragments which
# form the complement of the maximal recurring fragments extracted
neverblockre=None, # do not prune nodes with label that match regex
estimates=None, # compute, store & use outside estimates
)
class DictObj(object):
    """Attribute-style wrapper around a plain dictionary (syntactic sugar)."""

    def __init__(self, *a, **kw):
        """Seed the namespace from dict / keyword arguments."""
        self.__dict__.update(*a, **kw)

    def update(self, *a, **kw):
        """Merge additional attributes into the namespace."""
        self.__dict__.update(*a, **kw)

    def __getattr__(self, name):
        """Raise a descriptive error for unknown attributes (pylint E1101)."""
        msg = '%r instance has no attribute %r' % (
            self.__class__.__name__, name)
        raise AttributeError(msg)

    def __repr__(self):
        body = ',\n\t'.join('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, body)


PARAMS = DictObj()  # used for multiprocessing when using CLI of this module
def main():
    """Handle command line arguments."""
    flags = 'prob tags bitpar simple'.split()
    options = flags + 'obj= bt= numproc= fmt= verbosity='.split()
    try:
        opts, args = gnu_getopt(sys.argv[1:], 'b:s:m:x', options)
    except GetoptError as err:
        print(err, USAGE)
        return
    if not 1 <= len(args) <= 4:
        print('ERROR: incorrect number of arguments')
        print(USAGE)
        return
    # All positional arguments must be existing files/directories.
    for n, filename in enumerate(args):
        if not os.path.exists(filename):
            raise ValueError('file %d not found: %r' % (n + 1, filename))
    opts = dict(opts)
    numparses = int(opts.get('-b', 1))
    top = opts.get('-s', 'TOP')
    prob = '--prob' in opts
    tags = '--tags' in opts
    oneline = '-x' not in opts
    if '--simple' in opts:
        # Simple (bitpar-like) mode: a single grammar given as rules+lexicon.
        if not 2 <= len(args) <= 4:
            print('ERROR: incorrect number of arguments')
            print(USAGE)
            return
        rules = (gzip.open if args[0].endswith('.gz') else open)(args[0]).read()
        lexicon = codecs.getreader('utf-8')((gzip.open if args[1].endswith('.gz')
                else open)(args[1])).read()
        # A leading digit identifies the bitpar grammar format.
        bitpar = rules[0] in string.digits
        if '--bitpar' in opts:
            if not bitpar:
                raise ValueError('bitpar requires bitpar grammar format.')
            mode = 'pcfg-bitpar-nbest'
        else:
            mode = 'pcfg' if bitpar else 'plcfrs'
        grammar = Grammar(rules, lexicon, start=top, bitpar=bitpar,
                binarized='--bitpar' not in opts)
        stages = []
        stage = DEFAULTSTAGE.copy()
        backtransform = None
        if opts.get('--bt'):
            backtransform = (gzip.open if opts.get('--bt').endswith('.gz')
                    else open)(opts.get('--bt')).read().splitlines()
        stage.update(
            name='grammar',
            mode=mode,
            grammar=grammar,
            binarized='--bitpar' not in opts,
            backtransform=backtransform if len(args) < 4 else None,
            m=numparses,
            objective='mpd')
        if '--obj' in opts:
            stage.update(
                dop='reduction' if backtransform is None else 'doubledop',
                objective=opts['--obj'],
                m=int(opts.get('-m', 1)))
        stages.append(DictObj(stage))
        if backtransform:
            # Never block fragment-internal nodes (labels containing '}<').
            _ = stages[-1].grammar.getmapping(None,
                    neverblockre=re.compile(b'.+}<'))
        parser = Parser(stages, verbosity=int(opts.get('--verbosity', 2)))
        morph = None
        del args[:2]
    else:
        # Full mode: load a model directory produced by "discodop runexp".
        from discodop.runexp import readparam
        directory = args[0]
        if not os.path.isdir(directory):
            raise ValueError('expected directory produced by "discodop runexp"')
        params = readparam(os.path.join(directory, 'params.prm'))
        params['resultdir'] = directory
        stages = params['stages']
        postagging = params['postagging']
        readgrammars(directory, stages, postagging,
                top=params.get('top', top))
        parser = Parser(stages,
                transformations=params.get('transformations'),
                binarization=params['binarization'],
                postagging=postagging if postagging and
                postagging.method == 'unknownword' else None,
                relationalrealizational=params.get('relationalrealizational'),
                verbosity=int(opts.get('--verbosity',
                    params.get('verbosity', 2))))
        morph = params['morphology']
        del args[:1]
    # Remaining positional arguments select input/output; default to stdio.
    infile = (io.open(args[0], encoding='utf-8')
            if len(args) >= 1 else sys.stdin)
    out = (io.open(args[1], 'w', encoding='utf-8')
            if len(args) == 2 else sys.stdout)
    doparsing(parser, infile, out, prob, oneline, tags, numparses,
            int(opts.get('--numproc', 1)), opts.get('--fmt', 'discbracket'),
            morph)
def doparsing(parser, infile, out, printprob, oneline, usetags, numparses,
        numproc, fmt, morphology):
    """Parse sentences from file and write results to file, log to stdout.

    :param oneline: when False, input uses bitpar style (one token per line,
        blank line as sentence separator) and is re-joined first.
    :param numproc: number of worker processes; 1 parses in this process.
    """
    times = []
    unparsed = 0
    if not oneline:
        infile = readinputbitparstyle(infile)
    # Drop blank lines so workers only see actual sentences.
    infile = (line for line in infile if line.strip())
    if numproc == 1:
        initworker(parser, printprob, usetags, numparses, fmt, morphology)
        mymap = imap
    else:
        pool = multiprocessing.Pool(processes=numproc, initializer=initworker,
                initargs=(parser, printprob, usetags, numparses, fmt,
                    morphology))
        mymap = pool.imap
    for output, noparse, sec, msg in mymap(worker, enumerate(infile)):
        if output:
            print(msg, file=sys.stderr)
            out.write(output)
            if noparse:
                unparsed += 1
            times.append(sec)
            sys.stderr.flush()
            out.flush()
    print('average time per sentence', sum(times) / len(times),
        '\nunparsed sentences:', unparsed,
        '\nfinished',
        file=sys.stderr)
    out.close()
def initworker(parser, printprob, usetags, numparses,
        fmt, morphology):
    """Store per-process parsing parameters in the global PARAMS object."""
    # Dependency output formats need head rules; other formats do not.
    needs_heads = fmt in ('mst', 'conll')
    headrules = (readheadrules(parser.binarization.headrules)
            if needs_heads else None)
    PARAMS.update(parser=parser, printprob=printprob, usetags=usetags,
            numparses=numparses, fmt=fmt, morphology=morphology,
            headrules=headrules)
def workerfunc(func):
    """Decorator for multiprocessing workers: re-raise with full traceback.

    Exceptions raised in a worker process normally lose their traceback when
    transported to the parent; this wrapper embeds the formatted traceback
    text in the exception message instead."""
    @wraps(func)
    def wrapper(*args, **kwds):
        """Apply decorated function."""
        try:
            import faulthandler
            faulthandler.enable()  # Dump information on segfault.
        except (ImportError, io.UnsupportedOperation):
            pass
        # NB: only concurrent.futures on Python 3.3+ will exit gracefully.
        try:
            return func(*args, **kwds)
        except Exception:  # pylint: disable=W0703
            # Put traceback as string into an exception and raise that
            tbtext = ''.join(traceback.format_exception(*sys.exc_info()))
            raise Exception('in worker process\n%s' % tbtext)
    return wrapper
@workerfunc
def worker(args):
    """Parse a single sentence.

    :param args: an (index, line) pair as produced by ``enumerate``.
    :returns: tuple of (output string, noparse flag, seconds, log message).
    """
    n, line = args
    line = line.strip()
    if not line:
        return '', True, 0, ''
    # NOTE(review): time.clock() is deprecated and removed in Python 3.8;
    # time.perf_counter() would be the modern replacement.
    begin = time.clock()
    sent = line.split(' ')
    tags = None
    if PARAMS.usetags:
        # Split "word/POS" tokens into parallel word and tag sequences.
        sent, tags = zip(*(a.rsplit('/', 1) for a in sent))
    msg = 'parsing %d: %s' % (n, ' '.join(sent))
    # Take the result of the final coarse-to-fine stage.
    result = list(PARAMS.parser.parse(sent, tags=tags))[-1]
    output = ''
    if result.noparse:
        msg += '\nNo parse for "%s"' % ' '.join(sent)
        if PARAMS.printprob:
            output += 'prob=%.16g\n' % result.prob
        output += '%s\t%s\n' % (result.parsetree, ' '.join(sent))
    else:
        # Write the k best parse trees in the requested output format.
        output += ''.join(
                writetree(
                    PARAMS.parser.postprocess(tree)[0], sent,
                    n if PARAMS.numparses == 1 else ('%d-%d' % (n, k)),
                    PARAMS.fmt, headrules=PARAMS.headrules,
                    morphology=PARAMS.morphology,
                    comment=('prob=%.16g' % prob) if PARAMS.printprob else None)
                for k, (tree, prob, _) in enumerate(nlargest(
                    PARAMS.numparses, result.parsetrees, key=itemgetter(1))))
    sec = time.clock() - begin
    msg += '\n%g s' % sec
    return output, result.noparse, sec, msg
def readinputbitparstyle(infile):
    """Yields lists of tokens, where '\\n\\n' identifies a sentence break.

    Lazy version of ``infile.read().split('\\n\\n')``."""
    sent = []
    for line in infile:
        line = line.strip()
        if not line:
            # Sentence boundary: emit the tokens collected so far.
            yield ' '.join(sent)
            sent = []
        else:
            # FIX: collect only non-blank lines.  The original unconditionally
            # appended ``line`` after the boundary reset, so every sentence
            # after the first began with an empty token (' a b' not 'a b'),
            # contradicting the documented split('\n\n') semantics.
            sent.append(line)
    if sent:
        # Final sentence without a trailing blank line.
        yield ' '.join(sent)
class Parser(object):
    """A coarse-to-fine parser based on a given set of parameters.

    :param stages: a list of coarse-to-fine stages containing grammars and
        parameters.
    :param transformations: treebank transformations to reverse on parses.
    :param binarization: settings used for binarization; used for the
        tailmarker attribute which identifies heads in parser output.
    :param postagging: if given, an unknown word model is used to assign POS
        tags during parsing. The model consists of a DictObj with (at least)
        the following attributes:

        - unknownwordfun: function to produces signatures for unknown words.
        - lexicon: the set of known words in the grammar.
        - sigs: the set of word signatures occurring in the grammar.
    :param relationalrealizational: whether to reverse the RR-transform."""

    def __init__(self, stages, transformations=None, postagging=None,
            binarization=DictObj(tailmarker=None),
            relationalrealizational=None, verbosity=2):
        self.stages = stages
        self.transformations = transformations
        self.binarization = binarization
        self.postagging = postagging
        self.relationalrealizational = relationalrealizational
        self.verbosity = verbosity
        for stage in stages:
            if stage.mode.startswith('pcfg-bitpar'):
                # bitpar reads grammars from files; export them up front
                exportbitpargrammar(stage)
            # select the probability model matching estimator/objective
            model = u'default'
            if stage.dop:
                if (stage.estimator == 'ewe'
                        or stage.objective.startswith('sl-dop')):
                    model = u'ewe'
                elif stage.estimator == 'bon':
                    model = u'bon'
                if stage.objective == 'shortest':
                    model = u'shortest'
            stage.grammar.switch(model, logprob=stage.mode != 'pcfg-posterior')
            if verbosity >= 3:
                logging.debug(stage.name)
                logging.debug(stage.grammar)

    def parse(self, sent, tags=None):
        """Parse a sentence and perform postprocessing.

        Yields a dictionary from parse trees to probabilities for each stage.

        :param sent: a sequence of tokens.
        :param tags: if given, will be given to the parser instead of trying
            all possible tags.
        NOTE(review): uses ``time.clock()``, removed in Python 3.8."""
        if self.postagging:
            # replace rare/unknown words by their signatures before parsing
            if self.transformations and 'FOLD-NUMBERS' in self.transformations:
                sent = ['000' if NUMBERRE.match(a) else a for a in sent]
            sent = replaceraretestwords(sent,
                    self.postagging.unknownwordfun,
                    self.postagging.lexicon, self.postagging.sigs)
        sent = list(sent)
        if tags is not None:
            tags = list(tags)
        chart = start = inside = outside = lastsuccessfulparse = None
        # run each coarse-to-fine stage in turn; each prunes the next
        for n, stage in enumerate(self.stages):
            begin = time.clock()
            noparse = False
            parsetrees = fragments = None
            msg = '%s:\t' % stage.name.upper()
            # choose the probability model for this stage
            model = u'default'
            if stage.dop:
                if (stage.estimator == 'ewe'
                        or stage.objective.startswith('sl-dop')):
                    model = u'ewe'
                elif stage.estimator == 'bon':
                    model = u'bon'
                if stage.objective == 'shortest':
                    model = u'shortest'
            x = stage.grammar.currentmodel
            stage.grammar.switch(model, logprob=stage.mode != 'pcfg-posterior')
            if stage.mode.startswith('pcfg-bitpar') and (
                    not hasattr(stage, 'rulesfile')
                    or x != stage.grammar.currentmodel):
                # re-export grammar files if the weights changed
                exportbitpargrammar(stage)
            if not stage.binarized and not stage.mode.startswith('pcfg-bitpar'):
                raise ValueError('non-binarized grammar requires use of bitpar')
            # only parse if this stage is unpruned, or the previous stage
            # produced a chart to prune with
            if not stage.prune or chart:
                if n != 0 and stage.prune and stage.mode != 'dop-rerank':
                    # build whitelist of items from the previous stage
                    beginprune = time.clock()
                    if self.stages[n - 1].mode == 'pcfg-posterior':
                        whitelist, msg1 = whitelistfromposteriors(
                                inside, outside, start,
                                self.stages[n - 1].grammar, stage.grammar,
                                stage.k, stage.splitprune,
                                self.stages[n - 1].markorigin,
                                stage.mode.startswith('pcfg'))
                    else:
                        whitelist, msg1 = prunechart(
                                chart, stage.grammar, stage.k,
                                stage.splitprune,
                                self.stages[n - 1].markorigin,
                                stage.mode.startswith('pcfg'),
                                self.stages[n - 1].mode == 'pcfg-bitpar-nbest')
                    msg += '%s; %gs\n\t' % (msg1, time.clock() - beginprune)
                else:
                    whitelist = None
                # dispatch on the parsing backend for this stage
                if stage.mode == 'pcfg':
                    chart, msg1 = pcfg.parse(
                            sent, stage.grammar, tags=tags,
                            whitelist=whitelist if stage.prune else None)
                elif stage.mode == 'pcfg-posterior':
                    inside, outside, start, msg1 = pcfg.doinsideoutside(
                            sent, stage.grammar, tags=tags)
                    chart = start
                elif stage.mode.startswith('pcfg-bitpar'):
                    if stage.mode == 'pcfg-bitpar-forest':
                        numderivs = 0
                    elif (n == len(self.stages) - 1
                            or not self.stages[n + 1].prune):
                        numderivs = stage.m
                    else:  # request 1000 nbest parses for CTF pruning
                        numderivs = 1000
                    chart, cputime, msg1 = pcfg.parse_bitpar(stage.grammar,
                            stage.rulesfile.name, stage.lexiconfile.name,
                            sent, numderivs,
                            stage.grammar.start,
                            stage.grammar.toid[stage.grammar.start], tags=tags)
                    # do not count bitpar's own CPU time against this stage
                    begin -= cputime
                elif stage.mode == 'plcfrs':
                    chart, msg1 = plcfrs.parse(
                            sent, stage.grammar, tags=tags,
                            exhaustive=stage.dop or (
                                n + 1 != len(self.stages)
                                and self.stages[n + 1].prune),
                            whitelist=whitelist,
                            splitprune=stage.splitprune
                                and self.stages[n - 1].split,
                            markorigin=self.stages[n - 1].markorigin,
                            estimates=(stage.estimates, stage.outside)
                                if stage.estimates in ('SX', 'SXlrgaps')
                                else None)
                elif stage.mode == 'dop-rerank':
                    if chart:
                        parsetrees = doprerank(chart, sent, stage.k,
                                self.stages[n - 1].grammar, stage.grammar)
                        msg1 = 're-ranked %d parse trees. ' % len(parsetrees)
                else:
                    raise ValueError('unknown mode specified.')
                msg += '%s\n\t' % msg1
            if (n != 0 and not chart and not noparse
                    and stage.split == self.stages[n - 1].split):
                logging.error('ERROR: expected successful parse. '
                        'sent: %s\nstage: %s.', ' '.join(sent), stage.name)
                # raise ValueError('ERROR: expected successful parse. '
                #         'sent %s, %s.' % (nsent, stage.name))
            # disambiguation: extract derivations and marginalize
            if chart and stage.mode not in ('pcfg-posterior', 'dop-rerank'
                    ) and not (self.relationalrealizational and stage.split):
                begindisamb = time.clock()
                if stage.mode == 'pcfg-bitpar-nbest':
                    if not stage.kbest or stage.sample:
                        raise ValueError('sampling not possible with bitpar '
                                'in nbest mode.')
                    derivations = chart.rankededges[chart.root()]
                    entries = [None] * len(derivations)
                else:
                    derivations, entries = getderivations(chart, stage.m,
                            kbest=stage.kbest, sample=stage.sample,
                            derivstrings=stage.dop != 'doubledop'
                                or self.verbosity >= 3
                                or stage.objective == 'mcc')
                if self.verbosity >= 3:
                    print('sent: %s\nstage: %s' % (' '.join(sent), stage.name))
                    print('%d-best derivations:\n%s' % (
                        min(stage.m, 100),
                        '\n'.join('%d. %s %s' % (n + 1,
                            ('subtrees=%d' % abs(int(prob / log(0.5))))
                            if stage.objective == 'shortest'
                            else ('p=%g' % exp(-prob)), deriv)
                        for n, (deriv, prob) in enumerate(derivations[:100]))))
                    print('sum of probabitilies: %g\n' %
                            sum(exp(-prob) for _, prob in derivations[:100]))
                if stage.objective == 'shortest':
                    # switch to a probability model to break ties among
                    # shortest derivations
                    stage.grammar.switch(u'ewe' if stage.estimator == 'ewe'
                            else u'default', True)
                parsetrees, msg1 = marginalize(
                        stage.objective if stage.dop else 'mpd',
                        derivations, entries, chart,
                        sent=sent, tags=tags,
                        backtransform=stage.backtransform,
                        k=stage.m, sldop_n=stage.sldop_n,
                        mcc_labda=stage.mcc_labda, mcc_labels=stage.mcc_labels,
                        bitpar=stage.mode == 'pcfg-bitpar-nbest')
                msg += 'disambiguation: %s, %gs\n\t' % (
                        msg1, time.clock() - begindisamb)
                if self.verbosity >= 3:
                    besttrees = nlargest(100, parsetrees, key=itemgetter(1))
                    print('100-best parse trees:\n%s' % '\n'.join(
                            '%d. %s %s' % (n + 1, probstr(prob), treestr)
                            for n, (treestr, prob, _) in enumerate(besttrees)))
                    print('sum of probabitilies: %g\n' %
                            sum((prob[1] if isinstance(prob, tuple) else prob)
                                for _, prob, _ in besttrees))
            if self.verbosity >= 4:
                print('Chart:\n%s' % chart)
            if parsetrees:
                try:
                    # pick the highest scoring tree and postprocess it
                    resultstr, prob, fragments = max(
                            parsetrees, key=itemgetter(1))
                    parsetree, noparse = self.postprocess(resultstr, n)
                    if not all(a for a in parsetree.subtrees()):
                        raise ValueError('empty nodes in tree: %s' % parsetree)
                    if not len(parsetree.leaves()) == len(sent):
                        raise ValueError('leaves missing. original tree: %s\n'
                                'postprocessed: %r' % (resultstr, parsetree))
                except Exception:  # pylint: disable=W0703
                    logging.error("something's amiss. %s", ''.join(
                            traceback.format_exception(*sys.exc_info())))
                    parsetree, prob, noparse = self.noparse(
                            stage, sent, tags, lastsuccessfulparse)
                else:
                    lastsuccessfulparse = parsetree
                msg += probstr(prob) + ' '
            else:
                fragments = None
                parsetree, prob, noparse = self.noparse(
                        stage, sent, tags, lastsuccessfulparse)
            elapsedtime = time.clock() - begin
            msg += '%.2fs cpu time elapsed\n' % (elapsedtime)
            yield DictObj(name=stage.name, parsetree=parsetree, prob=prob,
                    parsetrees=parsetrees, fragments=fragments,
                    noparse=noparse, elapsedtime=elapsedtime, msg=msg)

    def postprocess(self, treestr, stage=-1):
        """Take a parse tree string and reverse transformations applied
        before/during parsing (splitting, binarization, RR-transform,
        treebank transformations).

        :returns: a ``(tree, noparse)`` pair; noparse is always False here."""
        parsetree = Tree.parse(treestr, parse_leaf=int)
        if self.stages[stage].split:
            mergediscnodes(unbinarize(parsetree, childchar=':',
                    expandunary=False))
        saveheads(parsetree, self.binarization.tailmarker)
        unbinarize(parsetree, expandunary=False)
        removefanoutmarkers(parsetree)
        if self.relationalrealizational:
            parsetree = rrbacktransform(parsetree,
                    self.relationalrealizational['adjunctionlabel'])
        if self.transformations:
            reversetransform(parsetree, self.transformations)
        return parsetree, False

    def noparse(self, stage, sent, tags, lastsuccessfulparse):
        """Return parse from previous stage or a dummy parse.

        :returns: a ``(parsetree, prob, noparse)`` triple with prob 1.0 and
            noparse True."""
        # use successful parse from earlier stage if available
        if lastsuccessfulparse is not None:
            parsetree = lastsuccessfulparse.copy(True)
        else:  # Produce a dummy parse for evaluation purposes.
            default = defaultparse([(n, t) for n, t
                    in enumerate(tags or (len(sent) * ['NONE']))])
            parsetree = Tree.parse('(%s %s)' % (stage.grammar.start,
                    default), parse_leaf=int)
        noparse = True
        prob = 1.0
        return parsetree, prob, noparse
def readgrammars(resultdir, stages, postagging=None, top='ROOT'):
    """Read the grammars from a previous experiment.

    Expects a directory ``resultdir`` which contains the relevant grammars and
    the parameter file ``params.prm``, as produced by ``runexp``.
    Each stage object is updated in place with its grammar, backtransform,
    and outside estimates; *postagging* (if given) gets its unknown word
    model attributes filled in."""
    for n, stage in enumerate(stages):
        logging.info('reading: %s', stage.name)
        rules = gzip.open('%s/%s.rules.gz' % (resultdir, stage.name)).read()
        lexicon = codecs.getreader('utf-8')(gzip.open('%s/%s.lex.gz' % (
                resultdir, stage.name)))
        grammar = Grammar(rules, lexicon.read(),
                start=top, bitpar=stage.mode.startswith('pcfg')
                or re.match(r'[-.e0-9]+\b', rules), binarized=stage.binarized)
        backtransform = outside = None
        if stage.dop:
            if stage.estimates is not None:
                raise ValueError('not supported')
            if stage.dop == 'doubledop':
                backtransform = gzip.open('%s/%s.backtransform.gz' % (
                        resultdir, stage.name)).read().splitlines()
                if n and stage.prune:
                    # map labels of this grammar to the coarser previous one
                    _ = grammar.getmapping(stages[n - 1].grammar,
                            striplabelre=re.compile(b'@.+$'),
                            neverblockre=re.compile(b'^#[0-9]+|.+}<'),
                            splitprune=stage.splitprune and stages[n - 1].split,
                            markorigin=stages[n - 1].markorigin)
                else:
                    # recoverfragments() relies on this mapping to identify
                    # binarization nodes
                    _ = grammar.getmapping(None,
                            neverblockre=re.compile(b'.+}<'))
            elif n and stage.prune:  # dop reduction
                _ = grammar.getmapping(stages[n - 1].grammar,
                        striplabelre=re.compile(b'@[-0-9]+$'),
                        neverblockre=re.compile(stage.neverblockre)
                            if stage.neverblockre else None,
                        splitprune=stage.splitprune and stages[n - 1].split,
                        markorigin=stages[n - 1].markorigin)
                if stage.mode == 'dop-rerank':
                    grammar.getrulemapping(
                            stages[n - 1].grammar, re.compile(br'@[-0-9]+\b'))
            # load any additional stored probability models (ewe, bon, ...)
            probsfile = '%s/%s.probs.npz' % (resultdir, stage.name)
            if os.path.exists(probsfile):
                probmodels = np.load(probsfile)  # pylint: disable=no-member
                for name in probmodels.files:
                    if name != 'default':
                        grammar.register(unicode(name), probmodels[name])
        else:  # not stage.dop
            if n and stage.prune:
                _ = grammar.getmapping(stages[n - 1].grammar,
                        neverblockre=re.compile(stage.neverblockre)
                            if stage.neverblockre else None,
                        splitprune=stage.splitprune and stages[n - 1].split,
                        markorigin=stages[n - 1].markorigin)
            if stage.estimates in ('SX', 'SXlrgaps'):
                if stage.estimates == 'SX' and grammar.maxfanout != 1:
                    raise ValueError('SX estimate requires PCFG.')
                if stage.mode != 'plcfrs':
                    raise ValueError('estimates require parser w/agenda.')
                outside = np.load(  # pylint: disable=no-member
                        '%s/%s.outside.npz' % (resultdir, stage.name))['outside']
                logging.info('loaded %s estimates', stage.estimates)
            elif stage.estimates:
                raise ValueError('unrecognized value; specify SX or SXlrgaps.')
        if stage.mode.startswith('pcfg-bitpar'):
            if grammar.maxfanout != 1:
                raise ValueError('bitpar requires a PCFG.')
        _sumsto1, msg = grammar.testgrammar()
        logging.info('%s: %s', stage.name, msg)
        stage.update(grammar=grammar, backtransform=backtransform,
                outside=outside)
    if postagging and postagging.method == 'unknownword':
        # split the first stage's lexicon into known words and signatures
        postagging.unknownwordfun = UNKNOWNWORDFUNC[postagging.model]
        postagging.lexicon = {w for w in stages[0].grammar.lexicalbyword
                if not w.startswith(UNK)}
        postagging.sigs = {w for w in stages[0].grammar.lexicalbyword
                if w.startswith(UNK)}
def exportbitpargrammar(stage):
    """(re-)export bitpar grammar with current weights.

    The external bitpar binary reads its grammar from files, so the rules
    and lexicon of ``stage.grammar`` are written to two named temporary
    files; these are created on first use and cached on the stage object."""
    if not hasattr(stage, 'rulesfile'):
        stage.rulesfile = tempfile.NamedTemporaryFile()
        stage.lexiconfile = tempfile.NamedTemporaryFile()
    stage.rulesfile.seek(0)
    stage.rulesfile.truncate()
    if stage.grammar.currentmodel == 0:
        # default model: original rules can be written out unchanged
        stage.rulesfile.write(stage.grammar.origrules)
    else:
        # substitute the weights of the active model into each rule line
        stage.rulesfile.writelines(
                '%g\t%s\n' % (weight, line.split(None, 1)[1])
                for weight, line in
                zip(stage.grammar.models[stage.grammar.currentmodel],
                    stage.grammar.origrules.splitlines()))
    stage.rulesfile.flush()
    stage.lexiconfile.seek(0)
    stage.lexiconfile.truncate()
    # escape parentheses; bitpar treats them as special characters
    lexicon = stage.grammar.origlexicon.replace(
            '(', '-LRB-').replace(')', '-RRB-')
    lexiconfile = codecs.getwriter('utf-8')(stage.lexiconfile)
    if stage.grammar.currentmodel == 0:
        lexiconfile.write(lexicon)
    else:
        # lexical weights of the active model follow the rule weights
        weights = iter(stage.grammar.models[stage.grammar.currentmodel,
                stage.grammar.numrules:])
        lexiconfile.writelines('%s\t%s\n' % (line.split(None, 1)[0],
                '\t'.join('%s %g' % (tag, next(weights))
                    for tag in line.split()[1::2]))
                for line in lexicon.splitlines())
    stage.lexiconfile.flush()
def probstr(prob):
    """Render a probability, or a (subtree count, probability) pair, as text."""
    if isinstance(prob, tuple):
        subtrees, p = prob
        return 'subtrees=%d, p=%.4g ' % (abs(subtrees), p)
    return 'p=%.4g' % prob
def which(program):
    """Return the first match for *program* in the executable search path.

    :param program: the file name of an executable to look up.
    :raises ValueError: if *program* is not found in any PATH entry."""
    # bug fix: split on os.pathsep (':' on POSIX, ';' on Windows) instead of
    # a hard-coded ':', so the lookup also works on non-POSIX platforms.
    for path in os.environ.get('PATH', os.defpath).split(os.pathsep):
        candidate = os.path.join(path, program)
        if path and os.path.exists(candidate):
            return candidate
    raise ValueError('%r not found in path; please install it.' % program)
# Public API of this module.
__all__ = ['DictObj', 'Parser', 'doparsing', 'exportbitpargrammar',
        'initworker', 'probstr', 'readgrammars', 'readinputbitparstyle',
        'which', 'worker', 'workerfunc']

if __name__ == '__main__':
    main()
| pombredanne/disco-dop | discodop/parser.py | parser.py | py | 27,717 | python | en | code | null | github-code | 6 | [
{
"api_name": "sys.version",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "discodop.treebank.WRITERS",
"line_number": 72,
"usage_type": "argument"
},
{
"api_name": "getopt.g... |
36935275213 | # this is focused on speed
# it may not run everything
import pathlib
import numpy as np
from tinygrad.ops import MovementOps, ProcessingOps
from tinygrad.llops.ops_gpu import require_init_gpu, clbuild, get_cl_queue, get_cl_ctx
from tinygrad.llops.ops_gpu import contiguous
from tinygrad.llops.ops_gpu import unary_op as unary_op_gpu, binary_op as binary_op_gpu, reduce_op as reduce_op_gpu
from tinygrad.helpers import prod
from tinygrad.shapetracker import ShapeTracker
import pyopencl as cl
from copy import deepcopy
def roundup(x, n=4):
    """Round *x* up to the next multiple of *n* (default 4)."""
    return ((x + n - 1) // n) * n
def flip(x):
    """Swap the two entries of a pair, e.g. a (width, height) shape."""
    first, second = x[0], x[1]
    return (second, first)
class OpenCLBuffer:
    """A float32 GPU tensor backed by either a cl.Buffer or a cl.Image.

    The two storages are interconverted lazily: accessing ``.cl``
    materializes a linear buffer, accessing ``.image`` materializes a 2D
    RGBA-float image; only one of the two is kept at a time."""

    def __init__(self, shape, hostbuf=None, _buf=None, _image=None):
        require_init_gpu()
        # shape may be a ShapeTracker (copied) or a plain shape tuple
        self.shapetracker = deepcopy(shape) if isinstance(shape, ShapeTracker) else ShapeTracker(*shape)
        self._buf = _buf
        self._image = _image
        self.dtype = np.float32
        if hostbuf is not None:
            # TODO: lazy?
            self._buf = cl.Buffer(get_cl_ctx(), cl.mem_flags.READ_WRITE, 4*roundup(prod(shape)))
            cl.enqueue_copy(get_cl_queue(), self._buf, hostbuf.astype(np.float32).ravel())

    def clone(self):
        # shallow clone: shares the underlying buffer/image storage
        return OpenCLBuffer(self.shapetracker, _buf=self._buf, _image=self._image)

    @property
    def shape(self): return self.shapetracker.shape

    @staticmethod
    def fromCPU(x):
        # x: a numpy array; copied to the device
        return OpenCLBuffer(x.shape, x)

    def toCPU(self):
        """Copy the buffer back to the host as a numpy float32 array."""
        data = np.empty(self.shape, dtype=np.float32)
        if self.shapetracker.contiguous == False:
            # non-contiguous views must be compacted before the copy back
            tmp = OpenCLBuffer(self.shapetracker.shape)
            contiguous(None, self, self.shapetracker, tmp)
        else:
            tmp = self
        cl.enqueue_copy(get_cl_queue(), data, tmp.cl, is_blocking=True)
        return data

    @property
    def cl(self):
        """Linear cl.Buffer view; converts from image storage if needed."""
        if self._buf is None:
            self._buf = cl.Buffer(get_cl_ctx(), cl.mem_flags.READ_WRITE, 4*roundup(prod(self.shape)))
            if self._image is not None:
                assert prod(self.shape) == prod(self._image.shape)*4
                print(f"converting {self.shape} back to buffer, image shape is {self._image.shape}")
                clbuild("from_image", """
                __kernel void from_image(
                    read_only image2d_t in,
                    __global float4 *out) {
                  const sampler_t smp = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST;
                  int2 l;
                  l.y = get_global_id(1);
                  l.x = get_global_id(0);
                  int W = get_image_width(in);
                  out[l.y*W + l.x] = read_imagef(in, smp, l);
                }
                """)(self._image.shape, None, self._image, self._buf)
            self._image = None
        return self._buf

    @property
    def image(self):
        """2D cl.Image (RGBA float4) view; converts from buffer if needed.

        Requires the shape to be (H, W, 4)."""
        if self._image is None:
            assert self.shape[2] == 4 and len(self.shape) == 3
            fmt = cl.ImageFormat(cl.channel_order.RGBA, cl.channel_type.FLOAT)
            self._image = cl.Image(get_cl_ctx(), cl.mem_flags.READ_WRITE, fmt, shape=flip(self.shape))
            if self._buf is not None:
                assert prod(self.shape) == prod(self._image.shape)*4
                print(f"converting {self.shape} to image with shape {self._image.shape}")
                clbuild("to_image", """
                __kernel void to_image(
                    __global const float4 *in,
                    write_only image2d_t out) {
                  int2 l;
                  l.y = get_global_id(1);
                  l.x = get_global_id(0);
                  int W = get_image_width(out);
                  write_imagef(out, l, in[l.y*W + l.x]);
                }
                """)(self._image.shape, None, self._buf, self._image)
            self._buf = None
        return self._image
def unary_op(ctx, op, x):
    """Elementwise unary op; compacts a non-contiguous input first."""
    # TODO: this doesn't actually have to be contiguous
    if not x.shapetracker.contiguous:
        x = contiguous(ctx, x, x.shapetracker)
    return unary_op_gpu(ctx, op, x)
def binary_op(ctx, op, x, y):
    """Elementwise binary op; compacts non-contiguous operands first."""
    if not x.shapetracker.contiguous:
        x = contiguous(ctx, x, x.shapetracker)
    if not y.shapetracker.contiguous:
        y = contiguous(ctx, y, y.shapetracker)
    return binary_op_gpu(ctx, op, x, y)
def reduce_op(ctx, op, x, new_shape):
    """Reduce *x* down to *new_shape*; compacts a non-contiguous input first."""
    if not x.shapetracker.contiguous:
        x = contiguous(ctx, x, x.shapetracker)
    return reduce_op_gpu(ctx, op, x, new_shape)
def movement_op(ctx, op, x, arg=None):
    """Apply a shape/movement op, avoiding a data copy when possible."""
    result = x.clone()
    # EXPAND/SLICE may change the buffer size: force image -> buffer first
    if op in (MovementOps.EXPAND, MovementOps.SLICE):
        result.cl
    result.shapetracker.movement_op(op, arg)
    if result.shapetracker.contiguous:
        return result
    return contiguous(ctx, result, result.shapetracker)
def load(x):
    """Read and return the full text contents of the file at path *x*."""
    with open(x) as f:
        return f.read()
def conv(x,w,ret,C):
    """Run the OpenCL convolution kernel: ret = conv(x, w).

    :param x: input OpenCLBuffer (accessed via its image view).
    :param w: weight OpenCLBuffer (accessed via its image view).
    :param ret: preallocated output OpenCLBuffer.
    :param C: conv parameter bundle (bs, groups, cin, rcout, cout, H, W,
        iy, ox, oy, px, py, xs, ys, dx, dy) -- assumed attributes; confirm
        against the caller's conv-args structure."""
    print(x.shapetracker.expr(), w.shapetracker.expr())
    print(x.shape, w.shape, ret.shape)
    # compile-time options select depthwise/batched kernel variants
    options = []
    if C.cin == 1: options.append("-DDEPTHWISE")
    if C.bs > 1:
        options.append("-DBATCH")
        assert C.py == 0, "batched conv doesn't work with y-padding"
    conv_prg = clbuild("conv", load(pathlib.Path(__file__).parent.parent.parent / 'accel/opencl/conv.cl'), tuple(options))
    # output channels are packed as float4, hence the divisibility requirement
    assert C.cout%4 == 0
    kernel_args = [C.cout//4, (C.ox+3)//4, C.bs*C.oy]
    conv_args = [max(1, C.cin//4), C.groups*C.cin//4, max(1, C.rcout//4), C.cout//4, C.ox, C.oy, C.iy, C.W, C.H, C.px, C.py, C.xs, C.ys, C.dx, C.dy]
    print(conv_args, kernel_args)
    # NOTE: the comprehension variable x shadows the input buffer x; harmless
    # here because x.image was already evaluated, but confusing to read.
    conv_prg(kernel_args, None, x.image, w.image, ret.image, *[np.int16(x) for x in conv_args])
def processing_op(ctx,op,x,w,out_shape,C):
    """Dispatch a processing op; only convolution is implemented."""
    assert op == ProcessingOps.CONV, f"{op} isn't supported"
    # output laid out as (bs*oy, ox*cout//4, 4) to fit image (float4) storage
    ret = ctx.buffer((C.bs*C.oy, C.ox*C.cout//4, 4))
    conv(x, w, ret, C)
    return ret
def test_image():
    """Round-trip sanity check: buffer -> image -> buffer preserves data."""
    hostbuf = np.random.randn(5,8,4).astype(np.float32)
    x = OpenCLBuffer((5,8,4), hostbuf)
    assert np.allclose(x.toCPU(), hostbuf)
    print(x.image)  # forces conversion to image storage
    assert np.allclose(x.toCPU(), hostbuf)

if __name__ == "__main__":
    test_image()
| henrylao/tinygrad | accel/opencl/ops_opencl.py | ops_opencl.py | py | 5,655 | python | en | code | null | github-code | 6 | [
{
"api_name": "tinygrad.llops.ops_gpu.require_init_gpu",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "tinygrad.shapetracker.ShapeTracker",
"line_number": 20,
"usage_type": "argument"
},
{
"api_name": "copy.deepcopy",
"line_number": 20,
"usage_type": "call"
... |
74436640828 | import glob
import numpy as np
import pandas as pd
import nibabel as nib
import torch
from torch.utils.data import Dataset
# dataset class for the GenericObjectDecoding dataset
class GODData(Dataset):
    # locations of the preprocessed fMRI runs and the event/stimulus metadata
    FEATURES_PATH = "data/ds001246/derivatives/preproc-spm/output"
    TARGETS_PATH = "data/ds001246"
    TRAIN_CATEGORIES_PATH = "data/ds001246/stimulus_ImageNetTraining.csv"
    TEST_CATEGORIES_PATH = "data/ds001246/stimulus_ImageNetTest.csv"

    def __init__(
        self,
        subject="01",
        session_id="01",
        task="perception",
        train=True,
        limit_size=None,
    ):
        """Load one subject/session of fMRI volumes with category labels.

        :param subject: zero-padded subject id, e.g. "01".
        :param session_id: zero-padded session number within the task.
        :param task: task name prefix of the session directories.
        :param train: select the Training split (and its stimulus list)
            instead of the Test split.
        :param limit_size: if given, truncate the dataset to this many
            samples (useful for quick experiments)."""
        session = f"{task}{'Training' if train else 'Test'}{session_id}"

        # load data: one preprocessed functional file and one events file
        # per run, plus the stimulus-id -> category mapping
        feature_runs = sorted(glob.glob(f"{self.FEATURES_PATH}/sub-{subject}/ses-{session}/func/*"))
        target_runs = sorted(glob.glob(f"{self.TARGETS_PATH}/sub-{subject}/ses-{session}/func/*events*"))
        categories = pd.read_csv(self.TRAIN_CATEGORIES_PATH if train else self.TEST_CATEGORIES_PATH, sep="\t", header=None)

        # process features and targets run by run
        features = []
        targets = []
        for f_run, t_run in zip(feature_runs, target_runs):
            features_run = nib.load(f_run).get_fdata()
            targets_run = pd.read_csv(t_run, sep="\t")

            # remove resting states (first 8 and last 2 volumes / rest events)
            features_run_pp = features_run[:, :, :, 8:-2]
            targets_run_pp = targets_run[targets_run["event_type"] != "rest"]

            # reshape features into (N, C, D, W, H)
            features_run_pp = features_run_pp.transpose(3, 2, 1, 0).reshape(-1, 3, 50, 64, 64)

            # extract category labels by joining events with the stimulus list
            # (column 1 = stimulus id, column 2 = category index)
            targets_run_pp = targets_run_pp.merge(categories, left_on="stim_id", right_on=1)[2]
            targets_run_pp = targets_run_pp.to_numpy().reshape(-1, 1)

            features.append(features_run_pp)
            targets.append(targets_run_pp)

        features = np.vstack(features)
        targets = np.vstack(targets)

        # convert and store as tensors; labels shifted to start at 0
        self.features = torch.from_numpy(features).float()
        self.targets = torch.from_numpy(targets).long() - 1

        # flatten targets to a 1-D label vector
        self.targets = self.targets.squeeze()

        # limit dataset size
        if limit_size is not None:
            self.features = self.features[:limit_size]
            self.targets = self.targets[:limit_size]

    def __len__(self):
        return len(self.features)

    def __getitem__(self, index):
        feature = self.features[index]
        target = self.targets[index]
        return feature, target
{
"api_name": "torch.utils.data.Dataset",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "glob.glob",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"l... |
37256463861 | import torch
import torch.nn as nn
from torch.utils import data
from torch.optim import Adam, SGD
from tensorboardX import SummaryWriter
from tqdm import trange
from datetime import datetime
import argparse
from dataset import LoadADHD200
from model import SpatialActivation
def train(lr=0.001, device='cuda', epochs=10,
          img_path="./data/adhd/data/", save_path="./model/",
          load_model=True, batch_size=4, load_epochs=1, encoder="se",
          optim='sgd', momentum=0.9, step_size=2, gamma=0.95, parallel=False):
    """Train the SpatialActivation autoencoder on ADHD200 fMRI data.

    :param lr: learning rate.
    :param device: torch device string ('cuda' or 'cpu').
    :param epochs: number of epochs to train for.
    :param img_path: directory containing the ADHD200 images.
    :param save_path: directory where checkpoints are written.
    :param load_model: resume from checkpoint <encoder>_<load_epochs>.pth.
    :param batch_size: mini-batch size for the DataLoader.
    :param load_epochs: epoch count of the checkpoint to resume from;
        also offsets the epoch number in saved checkpoint names.
    :param encoder: tag used in checkpoint file names.
    :param optim: 'sgd' or 'adam'.
    :param momentum: SGD momentum (ignored for adam).
    :param step_size, gamma: StepLR schedule parameters.
    :param parallel: wrap the model in DataParallel over GPUs 0 and 1.
    """
    AutoEncoder = SpatialActivation()
    AutoEncoder.to(device)
    if load_model:
        # resume from checkpoint named <save_path><encoder>_<load_epochs>.pth
        AutoEncoder.load_state_dict(torch.load("{}{}_{}.pth".format(save_path, encoder, load_epochs)))
    if parallel:
        AutoEncoder = nn.DataParallel(AutoEncoder, device_ids=[0, 1])
    if optim == 'sgd':
        optimizer = SGD(AutoEncoder.parameters(), lr=lr, momentum=momentum)
    elif optim == 'adam':
        optimizer = Adam(AutoEncoder.parameters(), lr=lr)
    mse_loss = nn.MSELoss()
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)
    print("loading data......")
    data_loader = data.DataLoader(LoadADHD200(img_path=img_path),
                                  batch_size=batch_size,
                                  shuffle=True)
    print("data load complete.")
    # one TensorBoard run directory per training invocation
    TIMESTAMP = "{0:%Y-%m-%dT%H-%M-%S/}".format(datetime.now())
    writer = SummaryWriter("./logdir/" + TIMESTAMP)
    for epoch in trange(1, epochs + 1):
        total_loss = 0
        for img, target_img in data_loader:
            img = img.to(device)
            decode, _, _ = AutoEncoder(img)
            # autoencoder objective: reconstruct the input volume itself
            loss = mse_loss(decode, img)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
        total_loss = total_loss / len(data_loader)
        writer.add_scalar("loss", total_loss, global_step=epoch)
        writer.add_scalar("learning rate", optimizer.state_dict()['param_groups'][0]['lr'], global_step=epoch)
        scheduler.step()
        # save a checkpoint every epoch; names continue from load_epochs
        AutoEncoder_path = save_path + "{}_{}.pth".format(encoder, load_epochs + epoch)
        if parallel:
            # unwrap DataParallel so the checkpoint loads on a single device
            torch.save(AutoEncoder.module.state_dict(), AutoEncoder_path)
        else:
            torch.save(AutoEncoder.state_dict(), AutoEncoder_path)
if __name__ == '__main__':
    def _str2bool(value):
        """Parse a command-line boolean flag value.

        Bug fix: argparse's ``type=bool`` treats ANY non-empty string as
        True, so ``--load_model False`` used to enable loading. This parser
        honours the intended meaning while keeping the existing
        ``--flag True/False`` command-line syntax backward-compatible."""
        return str(value).strip().lower() in ('true', '1', 'yes', 'y', 't')

    parser = argparse.ArgumentParser()
    parser.add_argument('--lr', default=0.001, type=float)
    parser.add_argument('--device', default='cuda', type=str)
    parser.add_argument('--epochs', default=20, type=int)
    parser.add_argument('--img_path', default="./data/adhd/data/", type=str)
    parser.add_argument('--save_path', default="./model/", type=str)
    parser.add_argument('--load_model', default=False, type=_str2bool)
    parser.add_argument('--load_epochs', default=0, type=int)
    parser.add_argument('--encoder', default='se', type=str)
    parser.add_argument('--batch_size', default=4, type=int)
    parser.add_argument('--optim', default='sgd', type=str)
    parser.add_argument('--momentum', default=0.9, type=float)
    parser.add_argument('--step_size', default=2, type=int)
    parser.add_argument('--gamma', default=0.95, type=float)
    parser.add_argument('--parallel', default=False, type=_str2bool)
    args = parser.parse_args()

    train(lr=args.lr, device=args.device, epochs=args.epochs,
          img_path=args.img_path, save_path=args.save_path,
          load_model=args.load_model, batch_size=args.batch_size,
          load_epochs=args.load_epochs, encoder=args.encoder,
          optim=args.optim, momentum=args.momentum,
          step_size=args.step_size, gamma=args.gamma, parallel=args.parallel)
| WhatAboutMyStar/SCAAE | train.py | train.py | py | 3,360 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "model.SpatialActivation",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.nn.DataParallel",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.nn",
... |
7897642460 | import pygsheets
def init(secret_path, sheet_name):
    """Authorize with Google Sheets and return the first worksheet of the
    spreadsheet named *sheet_name*."""
    client = pygsheets.authorize(client_secret=secret_path)
    spreadsheet = client.open(sheet_name)
    return spreadsheet.sheet1
def get_all_table_data(wks):
    """Collect every data row (first 10 columns) from the worksheet,
    skipping the header row and rows with an empty first cell."""
    return [row[:10] for row in wks.get_all_values()
            if row[0] and row[0] != 'register']
#def get_rows_ids(data): # Получение id всех записей
#ids = []
#for row in data:
#ids.append(row[2])
#return ids
def get_row_data_by_id(data, id):
    """Return the first row whose id column (index 2) equals *id*, or a
    not-found message string."""
    wanted = str(id)
    for row in data:
        if row[2] == wanted:
            return row
    return 'Запись не найдена'
def update_row_data_by_id(wks, id, value_update):
    """Write *value_update* into column J of every row whose id column
    (index 2) equals *id*.

    Fix: match on the id column only, consistent with
    ``get_row_data_by_id``. The previous membership test (``str(id) in
    row``) also fired when the id happened to appear in an unrelated
    column. Rows shorter than three cells are skipped safely."""
    wanted = str(id)
    data = wks.get_all_values()
    for i in range(len(data)):
        if len(data[i]) > 2 and data[i][2] == wanted:
            # sheet rows are 1-indexed, hence i + 1
            wks.update_value(f'J{i+1}', value_update)
# Ad-hoc smoke test: open the 'Test' sheet, dump its rows, then write
# 'test' into column J of the row with id 202.
wks = init('client_secret.json', 'Test')
table_data = get_all_table_data(wks)
print(table_data)
#print(get_row_data_by_id(table_data, 2028))
update_row_data_by_id(wks, 202, 'test')
| FMaslina/gsheets | gsheets_integration.py | gsheets_integration.py | py | 1,302 | python | ru | code | 0 | github-code | 6 | [
{
"api_name": "pygsheets.authorize",
"line_number": 5,
"usage_type": "call"
}
] |
38254642090 | from django.test import TestCase
from hknweb.candidate.tests.models.utils import ModelFactory
class DuePaymentRequirementModelTests(TestCase):
    """Unit tests for the due-payment requirement model."""

    def setUp(self):
        # a minimal semester plus a due-payment requirement tied to it
        semester = ModelFactory.create_semester(
            semester="Spring",
            year=0,
        )
        duepayment = ModelFactory.create_duepayment_requirement(
            candidateSemesterActive=semester,
        )
        self.semester = semester
        self.duepayment = duepayment

    def test_str(self):
        # __str__ combines the requirement name and its semester
        expected = "{} - {}".format(self.duepayment.name, self.semester)
        actual = str(self.duepayment)
        self.assertEqual(expected, actual)
| Gabe-Mitnick/hknweb | hknweb/candidate/tests/models/requirements/payment/test_due_payment.py | test_due_payment.py | py | 649 | python | en | code | null | github-code | 6 | [
{
"api_name": "django.test.TestCase",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "hknweb.candidate.tests.models.utils.ModelFactory.create_semester",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "hknweb.candidate.tests.models.utils.ModelFactory",
"line_... |
17972375760 | import dash
from dash import Dash, html, Output, Input, dcc, callback
import dash_bootstrap_components as dbc
import pandas as pd
import plotly.express as px
import dash_ag_grid as dag
# Dash app setup. Callbacks target components that only exist once a tab is
# rendered, hence suppress_callback_exceptions=True.
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.LUX], suppress_callback_exceptions=True,
                meta_tags=[{'name': 'viewport', 'content': 'width=device-width, initial-scale=1.0, maximum-scale=1.2, minimum-scale=0.5,'}])
server = app.server  # exposed for WSGI deployment

# Nobel laureate records; one row per prize entry.
df = pd.read_csv('data/nobel.csv')

# Fixed colors per category/gender so charts stay consistent across tabs.
color_mapping = {
    'medicine': 'limegreen',
    'physics': 'darkkhaki',
    'chemistry': 'goldenrod',
    'peace': 'darkslategray',
    'literature': 'darkviolet',
    'economics': 'darkcyan',
    'male': 'royalblue',
    'female': ' fuchsia',
    'org': 'silver',
}

# Column definitions for the AG Grid table on the Country tab.
columnDefs = [
    {'field': 'nobel year'},
    {'field': 'firstname'},
    {'field': 'lastname'},
    {'field': 'category'},
    {'field': 'motivation'},
    {'field': 'gender'},
    {'field': 'age'},
    {'field': 'birth_country'},
]
def figMap():
    """Build a world choropleth of laureate counts per birth country."""
    counts = df.groupby(['alpha-3', 'birth_country']).size().reset_index(name='count')
    fig = px.choropleth(counts, locations='alpha-3', color='count',
                        hover_name='birth_country')
    fig.update_layout(paper_bgcolor='rgb(248,248,255)')
    return fig
def figGender():
    """Histogram of laureate counts per year, split by gender."""
    counts = df.groupby(['nobel year', 'category', 'gender']).size().reset_index(name='count')
    fig = px.histogram(counts, x="nobel year", y='count', color="gender",
                       marginal="rug",  # or violin, rug
                       hover_data=counts.columns,
                       labels={'count': 'Count of Gender'})
    fig.update_layout(yaxis_title='Count of Gender', paper_bgcolor='#F8F8FF')
    return fig
def figCat():
    """Return (sunburst, grouped-bar) figures for the Category tab."""
    # sunburst: category -> gender breakdown, recolored per category/gender
    sun = px.sunburst(df, path=['category', 'gender'])
    sun.update_layout(margin=dict(l=0, r=0, t=0, b=0), paper_bgcolor='#F8F8FF')
    sun.update_traces(marker_colors=[color_mapping[cat] for cat in sun.data[-1].labels])
    # grouped bars: number of prizes per year and category
    counts = df.groupby(['nobel year', 'category']).size().reset_index(name='count')
    bar = px.histogram(counts, x='nobel year', y='count', color='category',
                       barmode='group',
                       labels={'count': 'Number of Nobel Prize Received'},
                       color_discrete_map=color_mapping)
    bar.update_layout(yaxis_title='Number of Nobel Prize Received',
                      paper_bgcolor='#F8F8FF')
    return sun, bar
# Layout for the "Gender" tab: one card holding the gender histogram.
gender_layout = dbc.Row([
    dbc.Col([
        dbc.Card([
            dbc.CardBody([
                dbc.Row([
                    dbc.Col([
                        html.H3(['Gender'])
                    ]),
                ]),
                html.Hr(),
                dbc.Row([
                    dbc.Col([
                        dcc.Graph(figure=figGender())
                    ])
                ])
            ])
        ],
        class_name='bg-card mb-5'
        )
    ])
])
# Layout for the "Category" tab: year dropdown plus sunburst and bar charts
# (filled in by the update_cat callback).
category_layout = dbc.Row([
    dbc.Col([
        dbc.Card([
            dbc.CardBody([
                dbc.Row([
                    dbc.Col([
                        html.H3(['Category'])
                    ]),
                ]),
                html.Hr(),
                dbc.Row([
                    dbc.Col([
                        html.P(['Year']),
                        dcc.Dropdown(options=[x for x in df['nobel year'].unique()], id='dropdown_year'),
                    ], width=2),
                    dbc.Col([
                        dcc.Loading(children=[dcc.Graph(figure={}, id='cat-sun')])
                    ], width=3),
                    dbc.Col([
                        dcc.Loading(children=[dcc.Graph(figure={}, id='cat-bar')])
                    ], width=7)
                ])
            ])
        ],
        class_name='bg-card mb-5'
        )
    ])
])
# Layout for the "Country" tab: country dropdown, world map and data grid.
country_layout = dbc.Row([
    dbc.Col([
        dbc.Card([
            dbc.CardBody([
                dbc.Row([
                    dbc.Col([
                        html.H3(['Country']),
                        html.Label(['Based on birth country'])
                    ])
                ]),
                html.Hr(),
                dbc.Row([
                    dbc.Col([
                        html.P(['Country']),
                        # sort key pushes NaN (float) values before strings;
                        # [1:] then drops that leading NaN entry
                        dcc.Dropdown(options=sorted([x for x in df['birth_country'].unique()], key=lambda x: (str(type(x)), x))[1:],
                                     id='dropdown-country')
                    ], width=4)
                ]),
                html.Br(),
                dbc.Row([
                    dbc.Col([
                        dcc.Graph(figure=figMap()),
                        html.Br(),
                        dag.AgGrid(
                            id='grid-table',
                            rowData=df.to_dict('records'),
                            columnDefs=columnDefs,
                            defaultColDef={"resizable": True, "sortable": True, "filter": True, "minWidth": 115},
                            dashGridOptions={"pagination": True, "paginationPageSize": 8, "domLayout": "autoHeight"},
                        )
                    ])
                ])
            ])
        ],
        class_name='bg-card mb-5'
        )
    ])
])
# Top-level page: title, tab bar, and a container the tab callback fills.
app.layout = dbc.Container([
    dbc.Row([
        dbc.Col([
            html.H1(['The Nobel Prize'])
        ])
    ],
    class_name='mt-3 mb-2'
    ),
    dcc.Tabs(id='input-tabs', value='gender-tab', children=[
        dcc.Tab(label='Gender', value='gender-tab'),
        dcc.Tab(label='Category', value='category-tab'),
        dcc.Tab(label='Country', value='country-tab')
    ]),
    html.Div(id='output-tabs', children={})
])
# callback for updating interactive Category
@callback(
    Output('cat-sun', 'figure'),
    Output('cat-bar', 'figure'),
    Input('dropdown_year', 'value'),
)
def update_cat(select_year):
    """Rebuild the Category tab figures for the selected year, or for all
    years when no year is chosen."""
    if not select_year:
        return figCat()
    dff = df.copy()
    # sunburst restricted to the chosen year
    sun = px.sunburst(dff[dff['nobel year'] == select_year], path=['category', 'gender'])
    sun.update_layout(margin=dict(l=0, r=0, t=0, b=0), paper_bgcolor='#F8F8FF')
    sun.update_traces(marker_colors=[color_mapping[cat] for cat in sun.data[-1].labels])
    # single-year grouped bars; the x axis is redundant, so hide it
    counts = dff.groupby(['nobel year', 'category']).size().reset_index(name='count')
    counts = counts[counts['nobel year'] == select_year]
    bar = px.histogram(counts, x='nobel year', y='count', color='category',
                       barmode='group',
                       labels={'count': 'Number of Nobel Prize Received'},
                       color_discrete_map=color_mapping)
    bar.update_layout(yaxis_title='Number of Nobel Prize Received',
                      paper_bgcolor='#F8F8FF')
    bar.update_xaxes(visible=False)
    return sun, bar
# callback for updating interactive Country
@callback(
    Output('grid-table', 'rowData'),
    Input('dropdown-country', 'value')
)
def update_country(select_country):
    """Return AgGrid rows, filtered to the selected birth country.

    Simplified from two branches that both copied the whole frame and
    returned .to_dict('records'); the df.copy() was unnecessary because
    to_dict() never mutates the frame.
    """
    rows = df[df['birth_country'] == select_country] if select_country else df
    return rows.to_dict('records')
# callback for swapping the visible tab content
@callback(
    Output('output-tabs', 'children'),
    Input('input-tabs', 'value')
)
def content(tab):
    """Map the selected tab id to its layout (None for unknown ids)."""
    return {
        'gender-tab': gender_layout,
        'category-tab': category_layout,
        'country-tab': country_layout,
    }.get(tab)
if __name__ == '__main__':
app.run_server(debug=True)
| Natcha-Phonkamhaeng/nobel-viz | src/app.py | app.py | py | 6,264 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "dash.Dash",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.themes",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "plotly.... |
7794747310 | from flask import Flask, render_template, request
import mushroom_data as md
import random
app = Flask(__name__)
def _template_context(prediction):
    """All dropdown option lists from mushroom_data plus the verdict label.

    Extracted because the GET and POST branches previously duplicated this
    21-keyword render_template() call verbatim.
    """
    return dict(
        cap_shape=md.cap_shape,
        cap_surface=md.cap_surface,
        cap_color=md.cap_color,
        bruises=md.bruises,
        odor=md.odor,
        gill_attachment=md.gill_attachment,
        gill_spacing=md.gill_spacing,
        gill_size=md.gill_size,
        gill_color=md.gill_color,
        stalk_shape=md.stalk_shape,
        stalk_root=md.stalk_root,
        stalk_surface_above_ring=md.stalk_surface_above_ring,
        stalk_surface_below_ring=md.stalk_surface_below_ring,
        stalk_color_above_ring=md.stalk_color_above_ring,
        stalk_color_below_ring=md.stalk_color_below_ring,
        veil_color=md.veil_color,
        ring_number=md.ring_number,
        ring_type=md.ring_type,
        spore_print_color=md.spore_print_color,
        population=md.population,
        habitat=md.habitat,
        prediction=prediction,
    )


@app.route('/', methods=['POST', 'GET'])
def main():
    """Render the classifier form; on POST, classify and show the verdict."""
    if request.method != 'POST':
        return render_template('index.html', **_template_context(None))
    collected_values = [collect_form_values()]
    prediction = classify_mushroom(collected_values)
    # Model output: 1 -> poisonous, anything else -> edible.
    prediction_value = 'Poisonous' if prediction == 1 else 'Edible'
    return render_template('index.html', **_template_context(prediction_value))
def collect_form_values():
    """Read the 21 mushroom feature values from the submitted form.

    The order must match the column order the model was trained with.
    Fix: the original requested 'stalk-surface-below-ring' twice and never
    read 'stalk-color-below-ring', so the model always received a stale
    duplicate in place of that feature.
    """
    field_names = ['cap-shape', 'cap-surface', 'cap-color', 'bruises',
                   'odor', 'gill-attachment', 'gill-spacing', 'gill-size',
                   'gill-color', 'stalk-shape', 'stalk-root',
                   'stalk-surface-above-ring', 'stalk-surface-below-ring',
                   'stalk-color-above-ring', 'stalk-color-below-ring',
                   'veil-color', 'ring-number', 'ring-type',
                   'spore-print-color', 'population', 'habitat']
    return [request.form.get(name) for name in field_names]
def classify_mushroom(values):
    """Run the trained model on one encoded feature row.

    Returns the raw predict() output; the caller maps 1 -> 'Poisonous'
    and anything else -> 'Edible'.
    """
    prediction = md.model.predict(values)
    return prediction
if __name__ == '__main__':
app.run()
| sharmas1ddharth/Mushroom_Classification | app.py | app.py | py | 4,515 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "flask.render_templ... |
43344955993 | '''
This application service return tracks data to visualisation.
'''
import time
import math
from collections import defaultdict
from twisted.application.service import Service
from twisted.internet import defer
from twisted.python import log
import simplejson as json
__author__ = 'Boris Tsema'
# Select track data.
SELECT_DATA = """
SELECT
t.timestamp,
string_agg(
concat_ws(',', tg.track_label, t.lat::text, t.lon::text, t.alt::text, t.v_speed::text, t.g_speed::text, t.distance::text),
';')
FROM
track_data t,
tracks_group tg
WHERE
t.id = tg.track_id AND
tg.group_id = %s AND
t.timestamp BETWEEN %s AND %s
GROUP BY
t.timestamp
ORDER BY
t.timestamp;
"""
# Select track state.
SELECT_DATA_SNAPSHOTS = """
SELECT
s.timestamp,
s.snapshot,
tg.track_label
FROM
track_snapshot s,
tracks_group tg
WHERE
s.id = tg.track_id AND
tg.group_id = %s AND
s.timestamp BETWEEN %s AND %s;
"""
# Select track data just for some tracks.
SELECT_DATA_BY_LABEL = """
SELECT
t.timestamp,
string_agg(
concat_ws(',', tg.track_label, t.lat::text, t.lon::text, t.alt::text, t.v_speed::text, t.g_speed::text, t.distance::text),
';')
FROM
track_data t,
tracks_group tg
WHERE
t.id = tg.track_id AND
tg.group_id = %s AND
t.timestamp BETWEEN %s AND %s AND
tg.track_label in %s
GROUP BY
t.timestamp
ORDER BY
t.timestamp;
"""
# Select track state changes for some tracks.
SELECT_DATA_SNAPSHOTS_BY_LABEL = """
SELECT
s.timestamp,
s.snapshot,
tg.track_label
FROM
track_snapshot s,
tracks_group tg
WHERE
s.id = tg.track_id AND
tg.group_id = %s AND
s.timestamp BETWEEN %s AND %s AND
tg.track_label in %s;
"""
# Select last track point in the past for every track.
GET_HEADERS_DATA = """
WITH tdata AS (
SELECT
timestamp,
concat_ws(',', lat::text, lon::text, alt::text, v_speed::text, g_speed::text, distance::text) as data,
td.id,
row_number() OVER(PARTITION BY td.id ORDER BY td.timestamp DESC) AS rk
FROM track_data td,
tracks_group tg
WHERE
td.id = tg.track_id
AND tg.group_id = %s
AND td."timestamp" BETWEEN %s AND %s)
SELECT
tg.track_label, t.data, t.timestamp
FROM
tdata t,
tracks_group tg
WHERE
t.rk = 1 AND
tg.track_id = t.id;
"""
# Select last state in the past for every track.
GET_HEADERS_SNAPSHOTS = """
WITH
snaps AS (
SELECT
snapshot,
timestamp,
ts.id AS id,
tg.track_label as track_label,
row_number() OVER(PARTITION BY ts.id ORDER BY ts.timestamp DESC) AS rk
FROM
track_snapshot ts,
tracks_group tg
WHERE
ts.id = tg.track_id AND
tg.group_id = %s
AND ts.timestamp <= %s)
SELECT
s.track_label, s.snapshot, s.timestamp
FROM
snaps s,
tracks_group tg
WHERE
s.rk < 4
AND s.id = tg.track_id;
"""
class TrackVisualizationService(Service):
    """Twisted service that returns track data for visualisation.

    Fixes over the original: Python-2-only dict.has_key() replaced with
    the `in` operator, and bare `except:` clauses narrowed to
    `except Exception` so KeyboardInterrupt/SystemExit are not swallowed.
    """
    # Don't show pilots earlier then time - track_gap. In seconds
    track_gap = 15000

    def __init__(self, pool):
        self.pool = pool

    def startService(self):
        Service.startService(self)
        log.msg("Starting DB pool")
        return self.pool.start()

    def stopService(self):
        Service.stopService(self)
        return self.pool.close()

    @defer.inlineCallbacks
    def get_track_data(self, params):
        '''
        Return dict with track data according to specified protocol.
        @param params: request parameters, consist of group_id (domain id of
        tracks group), from_time (unixtime) to_time (unixtime),
        start_positions (show of not last track's position in the past),
        track_labels (return result only for tracks with specified labels).
        @type params: dict
        @return: {'timeline': {...}, 'start': {...}} ('start' only when
        start_positions was requested)
        @rtype: dict
        '''
        # TODO: pass keyword arguments into function instead of dictionary.
        result = dict()
        group_id = params['group_id']
        from_time = int(params['from_time'])
        to_time = int(params['to_time'])
        start_positions = params.get('start_positions')
        track_labels = params.get('track_labels', '')
        t1 = time.time()
        if track_labels:
            track_labels = track_labels.split(',')
            tracks = yield self.pool.runQuery(
                SELECT_DATA_BY_LABEL,
                (group_id, from_time, to_time, tuple(track_labels)))
            snaps = yield self.pool.runQuery(
                SELECT_DATA_SNAPSHOTS_BY_LABEL,
                (group_id, from_time, to_time, tuple(track_labels)))
        else:
            tracks = yield self.pool.runQuery(
                SELECT_DATA, (group_id, from_time, to_time))
            snaps = yield self.pool.runQuery(
                SELECT_DATA_SNAPSHOTS, (group_id, from_time, to_time))
        t2 = time.time()
        result['timeline'] = self.prepare_result(tracks, snaps)
        log.msg("data requested in %0.3f" % (t2 - t1))
        if start_positions:
            ts1 = time.time()
            hdata = yield self.pool.runQuery(
                GET_HEADERS_DATA,
                (group_id, from_time - self.track_gap, from_time))
            hsnaps = yield self.pool.runQuery(
                GET_HEADERS_SNAPSHOTS, (group_id, from_time))
            ts2 = time.time()
            start_data = self.prepare_start_data(hdata, hsnaps)
            result['start'] = start_data
            log.msg("start positions requested in %0.3f" % (ts2 - ts1))
        defer.returnValue(result)

    def prepare_start_data(self, hdata, hsnaps):
        '''
        Prepare last state of tracks from their coordinates and snapshots.
        @param hdata: (contest_number, data, timestamp)
        @type hdata: list of tuples
        @param hsnaps: (contest_number, snapshot, timestamp)
        @type hsnaps: list of tuples
        @return: {'contest_number': {'dist': ..., 'spds': [...],
        'crds': [...], 'state': ..., 'in_air': ...}}
        @rtype: dict
        '''
        # TODO: make this method static.
        result = defaultdict(dict)
        # Add last coords and speeds to result.
        for row in hdata:
            cont_number, data, timestamp = row
            result[cont_number] = parse_result(data.split(','))
        # Add last state to result.
        for row in hsnaps:
            cont_number, state, state_ts = row
            try:
                state = json.loads(state)
                if 'in_air_true' in state:
                    result[cont_number]['in_air'] = True
                    del state[state.index('in_air_true')]
                if 'in_air_false' in state:
                    result[cont_number]['in_air'] = False
                    del state[state.index('in_air_false')]
                if 'es_taken' in state:
                    result[cont_number]['finish_time'] = int(state_ts)
                if len(state) > 0:
                    # XXX: workaround
                    if result[cont_number].get('state') == 'finished':
                        continue
                    if result[cont_number].get('state') == 'es_taken' and \
                            not state[0] == 'finished':
                        continue
                    result[cont_number]['state'] = state[0]
            except Exception:
                # Narrowed from a bare except: skip malformed snapshots only.
                continue
        for contest_number in result:
            if 'state' not in result[contest_number]:
                result[contest_number]['state'] = 'not started'
            if 'in_air' not in result[contest_number]:
                # TODO: everybody in air by default, is it ok?
                result[contest_number]['in_air'] = True
        return result

    def prepare_result(self, tracks, snaps):
        '''
        @param tracks: [(timestamp, contest_number,lat,lon,
        ...;contest_number,lat,lon..),...]
        @param snaps: [(timestamp, snapshot, contest_number), ...]
        @type tracks: list of tuple
        @return: {timestamp: {'contnumber': {...parsed point...}, }, }
        @rtype: dict
        '''
        # TODO: does this method need to be part of interface or it can be
        # static ?
        result = defaultdict(dict)
        for row in tracks:
            for data in row[1].split(';'):
                result[int(row[0])][str(data.split(',')[0])
                                    ] = parse_result(data.split(',')[1:])
        for row in snaps:
            timestamp, snapshot, contest_number = row
            if contest_number in result[timestamp]:
                concrete_pilot = result[timestamp][contest_number]
                try:
                    snapshot = json.loads(snapshot)
                    if 'in_air_true' in snapshot:
                        concrete_pilot['in_air'] = True
                        del snapshot[snapshot.index('in_air_true')]
                    elif 'in_air_false' in snapshot:
                        concrete_pilot['in_air'] = False
                        del snapshot[snapshot.index('in_air_false')]
                except Exception:
                    # Narrowed from a bare except: malformed JSON only.
                    continue
                if len(snapshot) > 0:
                    concrete_pilot['state'] = snapshot[0]
        return result
def parse_result(data):
    """Convert one raw track point (six strings) into the wire format.

    *data* is [lat, lon, alt, v_speed, g_speed, distance]; floats are
    rounded to 6 decimals with NaN coerced to 0 and infinity to 1,
    alt/distance are plain ints. Returns {'dist', 'spds', 'crds'}.
    """
    lat_raw, lon_raw, alt_raw, vspd_raw, gspd_raw, dist_raw = data

    def _sanitized_float(raw):
        value = round(float(raw), 6)
        if math.isnan(value):
            log.msg("Nan found in float.")
            value = 0
        if math.isinf(value):
            log.msg("Infinity found in float.")
            value = 1
        return value

    lat = _sanitized_float(lat_raw)
    lon = _sanitized_float(lon_raw)
    vspd = _sanitized_float(vspd_raw)
    gspd = _sanitized_float(gspd_raw)
    alt = int(alt_raw)
    dist = int(dist_raw)
    return {'dist': dist,
            'spds': [gspd, vspd],
            'crds': [lat, lon, alt]}
| DmitryLoki/gorynych | gorynych/processor/services/visualization.py | visualization.py | py | 10,219 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "twisted.application.service.Service",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "twisted.application.service.Service.startService",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "twisted.application.service.Service",
"line_number": 1... |
30265323373 | import arcade
SPACING = 20
MARGIN = 110

arcade.open_window(400, 400, "Square of diamonds")
arcade.set_background_color(arcade.color.AMARANTH_PINK)
arcade.start_render()
# 10x10 checkerboard of 45-degree-rotated squares (diamonds).
# The original duplicated the x/y computation in two complementary
# parity branches; (row + column) % 2 expresses the same condition.
for row in range(10):
    for column in range(10):
        x = column * SPACING + MARGIN
        y = row * SPACING + MARGIN
        color = arcade.color.LEMON if (row + column) % 2 == 0 else arcade.color.GREEN_YELLOW
        arcade.draw_rectangle_filled(x, y, 10, 10, color, 45)
arcade.finish_render()
arcade.run()
| maryamsaeedi17/PyLearningWorks1 | assignment13/drawsquare.py | drawsquare.py | py | 723 | python | en | code | 6 | github-code | 6 | [
{
"api_name": "arcade.open_window",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "arcade.set_background_color",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "arcade.color",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "arcade.... |
32576643958 | from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login
from django.views.generic import View
from django.http import HttpResponse
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import logout as log_out
from .models import Task
from .forms import TaskCreateForm
from .forms import TaskEditForm
from .forms import TaskDeleteForm
from .forms import TaskSearchForm
#Display the home page once user has logged in
def home(request):
    # Anonymous/inactive users are bounced to the sign-in page.
    if request.user.is_active == 0:
        return redirect('signin')
    # Newest-first list of the signed-in user's own tasks only.
    tasks = Task.objects.filter(userid_id=request.user.id).order_by('-created_at')
    args = {'tasks': tasks}
    return render(request, 'home.html', args)
#Display the creation page and handle creation requests to insert into the database
def create(request):
    """Render the task-creation form (GET) or persist a new task (POST)."""
    if request.user.is_active == 0:
        return redirect('signin')
    if request.method == 'POST':
        form = TaskCreateForm(data=request.POST)
        if form.is_valid():
            task = form.save(commit=False)
            task.userid_id = request.user.id
            task.title = form.cleaned_data.get('title')
            # Fix: was `task.bodym`, a typo that set a non-existent
            # attribute and left the task body unset (the edit view
            # assigns `task.body`).
            task.body = form.cleaned_data.get('body')
            task.completed_at = form.cleaned_data.get('date')
            task.save()
            return redirect('home')
        else:
            error = form.errors
            return render(request, 'create.html', {'error': error})
    else:
        return render(request, 'create.html', context=None)
#Search task bodies with given string
def search(request):
    if request.user.is_active == 0:
        return redirect('signin')
    if request.method == 'POST':
        form = TaskSearchForm(data=request.POST)
        if form.is_valid():
            search = form.cleaned_data.get('search')
            # Case-sensitive substring match (__contains), scoped to the
            # requesting user's own tasks.
            tasks = Task.objects.filter(body__contains=search, userid_id = request.user.id)
            args = {'tasks': tasks, 'search' : search}
            return render(request, 'search.html', args)
        # NOTE(review): an invalid form falls through and returns None
        # (HTTP 500 in Django); confirm this is intended.
    else:
        return render(request, 'home.html', context=None)
#Handle edits to task title, body, completion status and completion date and save to the database if exists
#Display the edit page when for a GET request
def edit(request, task_id):
    # Inactive/anonymous users may not edit anything.
    if request.user.is_active == 0:
        return redirect('signin')
    if request.method == 'POST':
        form = TaskEditForm(data=request.POST)
        if form.is_valid():
            # Ownership check: only act when exactly one matching task
            # belongs to the requesting user.
            count = Task.objects.filter(id = form.cleaned_data.get('id'), userid_id = request.user.id).count()
            if count == 1:
                task = Task.objects.get(id = form.cleaned_data.get('id'), userid_id = request.user.id)
                task.title = form.cleaned_data.get('title')
                task.body = form.cleaned_data.get('body')
                task.completed_at = form.cleaned_data.get('date')
                # Checkbox arrives as a truthy/falsy form value; stored as 1/0.
                if bool(form.cleaned_data.get('completed')):
                    task.complete = 1
                else:
                    task.complete = 0
                task.save()
                return redirect(home)
            else:
                return redirect('home')
        else:
            return render(request, 'home.html', context=None)
    elif request.method == "GET":
        count = Task.objects.filter(id = task_id, userid_id = request.user.id).count()
        if count == 1:
            task = Task.objects.get(id = task_id, userid_id = request.user.id)
            # Strip the "+HH:MM" timezone suffix so the stringified
            # datetime fits the edit form's expected format.
            task.completed_at = str(task.completed_at)
            if "+" in task.completed_at:
                splitdate = task.completed_at.split("+")
                task.completed_at = splitdate[0]
            args = {'task': task}
            return render(request, 'edit.html', args)
        else:
            return render(request, 'home.html', context = None)
    else:
        return render(request, 'home.html', context = None)
#Delete tasks belonging to the user if exists
def delete(request):
    """Delete a task owned by the signed-in user (POST only)."""
    if request.user.is_active == 0:
        return redirect('signin')
    if request.method == 'POST':
        form = TaskDeleteForm(data=request.POST)
        if form.is_valid():
            # Fix: scope the delete itself to the requesting user. The
            # previous code checked ownership with a separate count() and
            # then deleted by id alone (a check-then-act gap); filtering
            # on both columns deletes zero rows for non-owners.
            Task.objects.filter(
                id=form.cleaned_data.get('taskid'),
                userid_id=request.user.id
            ).delete()
            return redirect('home')
        else:
            return render(request, 'home.html', context=None)
    else:
        return render(request, 'home.html', context=None)
#Register, uses built-in methods
def register(request):
    if request.method == 'POST':
        form = UserCreationForm(request.POST)
        if form.is_valid():
            form.save()
            # Authenticate with the raw password so the new user is
            # signed in immediately after registering.
            username = form.cleaned_data.get('username')
            raw_password = form.cleaned_data.get('password1')
            user = authenticate(username=username, password=raw_password)
            login(request, user)
            return redirect('home')
    else:
        form = UserCreationForm()
    return render(request, 'register.html', {'form': form})
#Signin uses built-in methods
def signin(request):
    # Already-authenticated users go straight to the home page.
    if request.user.is_active == 1:
        return redirect('home')
    if request.method == 'POST':
        form = AuthenticationForm(data=request.POST)
        if form.is_valid():
            username = form.cleaned_data.get('username')
            password = form.cleaned_data.get('password')
            user = authenticate(username=username, password=password)
            if user is not None:
                if user.is_active == 1:
                    request.session.set_expiry(86400) #sets the exp. value of the session
                    login(request, user)
                    return redirect('home')
    else:
        form = AuthenticationForm()
    return render(request, 'signin.html', {'form': form})
#Log out uses built-in methods
def logout(request):
    """End the session and return to the sign-in page.

    Removed an unused `AuthenticationForm()` local that the original
    constructed but never passed anywhere (the sign-in view builds its own).
    """
    log_out(request)
    return redirect('signin')
| S4ADO/ADW_Django_A1 | TaskManager/tasks/views.py | views.py | py | 5,269 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.shortcuts.redirect",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "models.Task.objects.filter",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "models.Task.objects",
"line_number": 18,
"usage_type": "attribute"
},
{
"api... |
13109904746 | import pandas as pd
import yfinance as yf
import json
#csv_list = pd.read_csv('japan_all_stock.csv')
success_list = []
# Probe every 4-digit Tokyo Stock Exchange code. yf.download usually does
# NOT raise for an unknown ticker — it logs the failure and returns an
# empty DataFrame — so emptiness must be checked in addition to catching
# real errors. The old bare `except` also swallowed KeyboardInterrupt.
for num in range(1301, 10000):
    try:
        stock_data = yf.download(f'{num}.T', period = '1d', interval='1d')
    except Exception:
        # Network/parse failure for this ticker: skip it.
        continue
    if not stock_data.empty:
        success_list.append(f'{num}.T')

with open('japanese_success_stock.json', 'w') as json_file:
    json.dump(success_list, json_file)
{
"api_name": "yfinance.download",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 15,
"usage_type": "call"
}
] |
27125372338 | import logging
from datetime import datetime
import smtplib
from notifications import *
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from db.client import db_create_close, r
logging.config.fileConfig('/opt/TopPatch/conf/logging.config')
logger = logging.getLogger('rvapi')
@db_create_close
def email_config_exists(customer_name=None, conn=None):
    """Check whether an e-mail plugin config exists for *customer_name*.

    NOTE(review): the return type is intentionally inconsistent — False
    when no config exists, or the tuple (True, config_id) when one does;
    callers index [1] after a truthiness check. `conn` is injected by
    the @db_create_close decorator.
    """
    mail_exists = False
    try:
        mail_config = list(
            r
            .table(NotificationCollections.NotificationPlugins)
            .get_all(customer_name, index=NotificationPluginIndexes.CustomerName)
            .filter(
                {
                    NotificationPluginKeys.PluginName: 'email'
                }
            )
            .run(conn)
        )
        if mail_config:
            mail_exists = (True, mail_config[0][NotificationPluginKeys.Id])
    except Exception as e:
        msg = 'Failed to get mail config: %s' % (e)
        logger.error(msg)
    return(mail_exists)
@db_create_close
def get_email_config(customer_name=None, conn=None):
    """Fetch the e-mail plugin configuration for *customer_name*.

    Returns {'pass': bool, 'message': str, 'data': config}, where `data`
    is a one-row list when a config exists, or a dict of empty-string
    defaults otherwise. `conn` is injected by @db_create_close.
    """
    mail_config = None
    config_exists = False
    msg = ''
    try:
        mail_config = list(
            r
            .table(NotificationCollections.NotificationPlugins)
            .get_all(customer_name, index=NotificationPluginIndexes.CustomerName)
            .filter(
                {
                    NotificationPluginKeys.PluginName: 'email'
                }
            )
            .run(conn)
        )
        if not mail_config:
            # Empty-string defaults keep the web form renderable before
            # any config has been saved.
            mail_config = {
                'modified_time': '',
                'username': '',
                'password': '',
                'server': '',
                'port': '',
                'is_tls': '',
                'is_ssl': '',
                'from_email': '',
                'to_email': '',
                'last_modify_user': '',
            }
            msg = 'mail_config does not exist'
        else:
            config_exists = True
    except Exception as e:
        msg = 'Failed to get mail config: %s' % (str(e))
        logger.exception(e)
    return(
        {
            'pass': config_exists,
            'message': msg,
            'data': mail_config
        }
    )
@db_create_close
def delete_email_config(customer_name=None, conn=None):
    """Remove the e-mail plugin configuration for *customer_name*.

    Returns True when at least one config row was deleted, False
    otherwise (including on query failure). `conn` is injected by the
    @db_create_close decorator.
    """
    try:
        status = (
            r
            .table(NotificationCollections.NotificationPlugins)
            .get_all(customer_name, index=NotificationPluginIndexes.CustomerName)
            .filter({NotificationPluginKeys.PluginName: 'email'})
            .delete()
            .run(conn)
        )
    except Exception as e:
        logger.error(
            'Failed to delete mail config for customer %s: %s' %
            (customer_name, e)
        )
        return False
    return 'deleted' in status and status['deleted'] > 0
@db_create_close
def create_or_modify_mail_config(modifying_username=None, customer_name=None,
                                 server=None, username=None, password=None,
                                 port=25, is_tls=False, is_ssl=False,
                                 from_email=None, to_email=None, conn=None):
    """Create or update the e-mail plugin config for *customer_name*.

    Returns {'pass': bool, 'message': str, 'data': [config]}.
    `conn` is injected by the @db_create_close decorator.

    Fixes over the original:
      * the update-failure message was `'...%s' (e)` — calling the string —
        which raised TypeError instead of formatting; now uses `%`.
      * the generated id was read as is_created['generated_keys'[0]]
        (i.e. is_created['g'], a KeyError); now indexes the list.
    """
    created = False
    msg = ''
    base_config = []
    email_uuid = None
    if (server and username and password and port and customer_name
            and modifying_username and from_email and len(to_email) > 0):
        modified_time = str(datetime.now())
        # Recipient list is persisted as a comma-separated string.
        to_email = ','.join(to_email)
        base_config = {
            NotificationPluginKeys.ModifiedTime: modified_time,
            NotificationPluginKeys.UserName: username,
            NotificationPluginKeys.Password: password,
            NotificationPluginKeys.Server: server,
            NotificationPluginKeys.Port: port,
            NotificationPluginKeys.IsTls: is_tls,
            NotificationPluginKeys.IsSsl: is_ssl,
            NotificationPluginKeys.FromEmail: from_email,
            NotificationPluginKeys.ToEmail: to_email,
            NotificationPluginKeys.PluginName: 'email',
            NotificationPluginKeys.CustomerName: customer_name,
            NotificationPluginKeys.ModifiedBy: modifying_username,
        }
        config_exists = email_config_exists(customer_name=customer_name)
        if config_exists:
            # config_exists is (True, id) when found.
            email_uuid = config_exists[1]
            try:
                (
                    r
                    .table(NotificationCollections.NotificationPlugins)
                    .get(config_exists[1])
                    .update(base_config)
                    .run(conn)
                )
                created = True
                msg = (
                    'Email config for customer %s has been updated' %
                    (customer_name)
                )
            except Exception as e:
                msg = 'Failed to update mail config: %s' % (e)
                logger.error(msg)
        else:
            try:
                is_created = (
                    r
                    .table(NotificationCollections.NotificationPlugins)
                    .insert(base_config, upsert=True)
                    .run(conn)
                )
                if 'inserted' in is_created:
                    if 'generated_keys' in is_created:
                        if len(is_created['generated_keys']) > 0:
                            email_uuid = is_created['generated_keys'][0]
                    created = True
                    msg = (
                        'Email config for customer %s has been created' %
                        (customer_name)
                    )
            except Exception as e:
                msg = 'Failed to update mail config: %s' % (e)
                logger.exception(e)
    return(
        {
            'pass': created,
            'message': msg,
            'data': [base_config]
        }
    )
class MailClient():
    """smtplib wrapper configured from the per-customer DB e-mail config.

    Usable only when a config row exists (`self.config_exists`);
    otherwise every connection attribute is None.
    """

    def __init__(self, customer_name):
        # Connection/validation state; connect()/server_status() update these.
        self.CONFIG = None
        self.validated = False
        self.connected = False
        self.error = None
        data = get_email_config(customer_name=customer_name)
        self.config_exists = False
        if data['pass']:
            config = data['data'][0]
            self.config_exists = data['pass']
        if self.config_exists:
            self.server = config['server']
            self.username = config['username']
            self.password = config['password']
            self.port = config['port']
            self.from_email = config['from_email']
            # Persisted as a comma-separated string; used as a list here.
            self.to_email = config['to_email'].split(",")
            self.is_tls = config['is_tls']
            self.is_ssl = config['is_ssl']
        else:
            self.server = None
            self.username = None
            self.password = None
            self.port = None
            self.from_email = None
            self.to_email = None
            self.is_tls = None
            self.is_ssl = None

    def server_status(self):
        """EHLO the connected server and return its reply (or error) text.

        Must be called after connect(); sets self.connected on a 250 reply.
        """
        msg = ''
        try:
            ehlo = self.mail.ehlo()
            if ehlo[0] == 250:
                self.connected = True
                self.server_reply_code = ehlo[0]
                self.server_reply_message = ehlo[1]
                msg = self.server_reply_message
                logger.info(msg)
        except Exception as e:
            msg = (
                'Connection to mail server %s has not been initialized: %s' %
                (self.server, e)
            )
            logger.exception(msg)
        return(msg)

    def connect(self):
        """Open the SMTP (or SMTP_SSL) connection and log in.

        Returns (connected, error_or_None, logged_in, smtplib_client) and
        caches the same values on self.
        """
        connected = False
        logged_in = False
        msg = None
        mail = None
        try:
            if self.is_ssl:
                mail = smtplib.SMTP_SSL(self.server, int(self.port), timeout=10)
            else:
                mail = smtplib.SMTP(self.server, int(self.port), timeout=10)
            connected = True
        except Exception as e:
            logger.exception(e)
            msg = e
        if connected:
            try:
                if self.is_tls:
                    mail.starttls()
                mail.login(self.username, self.password)
                logged_in = True
            except Exception as e:
                logger.exception(e)
                msg = e
        self.connected = connected
        self.error = msg
        self.logged_in = logged_in
        self.mail = mail
        return(connected, msg, logged_in, mail)

    def disconnect(self):
        """QUIT the SMTP session; returns (True, status_message).

        NOTE(review): self.disconnected is set True on both the success
        and the failure path, so the first tuple element never reports a
        failed logout — confirm that is intended.
        """
        msg = ''
        self.disconnected = False
        try:
            loggedout = self.mail.quit()
            msg = (
                'Logged out of Email Server %s: %s' %
                (self.server, loggedout)
            )
            self.disconnected = True
            logger.info(msg)
        except Exception as e:
            msg = (
                'Failed to log out of %s: %s' %
                (self.server, e)
            )
            self.disconnected = True
            logger.exception(e)
        return(self.disconnected, msg)

    def send(self, subject, msg_body, to_addresses=None, body_type='html'):
        """Send one MIME message; returns True unless an error occurred.

        to_addresses defaults to the configured recipient list.
        NOTE(review): if to_addresses ends up a non-empty non-list value
        the send is silently skipped yet True is still returned — confirm
        callers always pass a list (or None).
        """
        completed = True
        from_address = None
        try:
            from_address = self.from_email
        except Exception as e:
            msg = 'From_address has not been set'
            logger.exception(msg)
        if not to_addresses:
            try:
                to_addresses = self.to_email
            except Exception as e:
                msg = 'Pass a valid email address:%s' % (e)
                logger.exception(msg)
                completed = False
        if isinstance(to_addresses, list):
            # `msg` is rebound here as the MIME message object.
            msg = MIMEMultipart('alternative')
            msg['From'] = from_address
            msg['To'] = ','.join(list(to_addresses))
            msg['Subject'] = subject
            formatted_body = MIMEText(msg_body, body_type)
            msg.attach(formatted_body)
            try:
                self.mail.sendmail(
                    from_address,
                    to_addresses,
                    msg.as_string()
                )
            except Exception as e:
                completed = False
                msg = (
                    'Could not send mail to %s: %s' %
                    (','.join(to_addresses), e)
                )
                logger.exception(msg)
        return(completed)
| SteelHouseLabs/vFense | tp/src/emailer/mailer.py | mailer.py | py | 10,469 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "logging.config.fileConfig",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "logging.config",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "db.cl... |
45308723306 | import csv
import tqdm
import zoomeye.sdk as zoomeye
import json
import os
# 第一步读取文件
# 获得IP地址
# 使用SDK查询信息
# 保存信息
# 过滤信息
INPUT_FILE_NAME = '../csv/firewall_ip.csv'
# OUTPUT_FILE_NAME = 'csv/result.csv'
def read_csv(ip_list, csv_name):
    """Append the first column of every data row of *csv_name* to *ip_list*.

    The first row is treated as a header and discarded; *ip_list* is
    mutated in place (nothing is returned), matching the call sites.
    """
    with open(csv_name) as handle:
        rows = csv.reader(handle)
        next(rows)  # discard the header row
        ip_list.extend(row[0] for row in rows)
def init_zoomeye():
    """Create and log in a ZoomEye SDK client.

    SECURITY: credentials were hard-coded placeholders in source control;
    read them from the environment instead, falling back to the old
    placeholders so behavior is unchanged when the variables are unset.
    """
    zm = zoomeye.ZoomEye()
    zm.username = os.environ.get('ZOOMEYE_USERNAME', '********')
    zm.password = os.environ.get('ZOOMEYE_PASSWORD', '********')
    zm.login()
    return zm
def zoomeye_sdk(ip, zm):
    """Run a ZoomEye host search for *ip* using the logged-in client *zm*."""
    data = zm.dork_search(ip, resource="host", facets=None)
    return data
def write_file(data, ip, file_name):
    """Persist one host's ZoomEye records as JSON under ../result_5688/.

    Output shape: {"ip": ip, "result": {service_name: raw_record, ...}}.
    Cleaned up from the original: an unused `json_data = {}` initializer,
    a no-op `ip = ip` self-assignment, and commented-out debug code were
    removed; the output path and JSON shape are unchanged.
    """
    services = {}
    for record in data:
        # Keyed by port service name; later records with the same service
        # name overwrite earlier ones, exactly as before.
        services[record['portinfo']['service']] = record
    payload = {"ip": ip, "result": services}
    with open(os.path.join("../result_5688", file_name), "w") as f:
        json.dump(payload, f)
    print("写入"+file_name+"文件")
def search_ip(ip_list):
    """Query ZoomEye for every IP that has no saved result file yet.

    Results are currently only printed; the write_file() call is left
    commented out exactly as in the original's debugging state.
    """
    # Stems of files already present under ../result_5688/ — these IPs
    # are skipped.
    dir_list = []
    read_result_5688(dir_list)
    # print(len(dir_list))
    zm = init_zoomeye()
    for ip in tqdm.tqdm(ip_list):
        print("正在处理的IP为", ip)
        if ip not in dir_list:
            data = zoomeye_sdk(ip, zm)
            for i in data:
                print(i)
                print("----------------")
            #write_file(data, ip, ip+".json")
            print(data)
        else:
            print(ip+"存在文件中")
def read_result_5688(dir_list):
    """Append the stem (filename without extension) of every file saved
    under ../result_5688/ to *dir_list* (mutated in place)."""
    result_dir = "../result_5688/"
    dir_list.extend(
        os.path.splitext(entry)[0] for entry in os.listdir(result_dir)
    )
def main():
    """Load the firewall IP list from CSV and run the ZoomEye lookup."""
    targets = []
    read_csv(targets, INPUT_FILE_NAME)
    print("准备查找的列表为:"+str(targets))
    search_ip(targets)
if __name__ == '__main__':
main() | Judgegao/bitcoin_data | code/Cyberspace search engine/main.py | main.py | py | 2,283 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "csv.reader",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "zoomeye.sdk.ZoomEye",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "zoomeye.sdk",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "json.dump",
"line_numb... |
26498728774 | import sys
from typing import Set, TextIO, Any, Tuple
from collections import *
from functools import *
from itertools import *
Data = Tuple[Set[Tuple[int, int]], Set[Tuple[int, int]], int, int]
Result = int
def parse_input(buffer: TextIO) -> Data:
    """Parse the sea-cucumber grid: '>' east herd, 'v' south herd.

    Returns (east_positions, south_positions, width, height) with
    positions as (x, y) tuples; blank lines are ignored.
    """
    rows = [line.strip() for line in buffer.readlines() if line.strip()]
    herds = {">": set(), "v": set()}
    for y, row in enumerate(rows):
        for x, cell in enumerate(row):
            if cell in herds:
                herds[cell].add((x, y))
    return (herds[">"], herds["v"], len(rows[0]), len(rows))
def part_1(data: Data) -> Result:
    """Count steps until no sea cucumber can move (AoC 2021 day 25).

    Each step moves the whole east herd first (wrapping in x), then the
    south herd (wrapping in y, against the already-updated east herd).
    The final, motionless step is included in the count.
    """
    east, south, width, height = data
    steps = 0
    while True:
        steps += 1
        any_moved = False

        next_east = set()
        for x, y in east:
            target = ((x + 1) % width, y)
            if target in east or target in south:
                next_east.add((x, y))
            else:
                next_east.add(target)
                any_moved = True
        east = next_east

        next_south = set()
        for x, y in south:
            target = (x, (y + 1) % height)
            if target in east or target in south:
                next_south.add((x, y))
            else:
                next_south.add(target)
                any_moved = True
        south = next_south

        if not any_moved:
            return steps
if __name__ == "__main__":
if sys.stdin.isatty():
import os
data = parse_input(
open(os.path.join(os.path.dirname(__file__), "test_input.txt"))
)
else:
data = parse_input(sys.stdin)
print(f"Part 1: {part_1(data)}")
print(f"Part 2: 🎄")
| arjandepooter/advent-of-code-2021 | python/25/solution.py | solution.py | py | 1,697 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.Tuple",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "typing.Set",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "typing.TextIO",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "sys.stdin.isatty",
"line_numb... |
71522207867 | from django.db import models
class Home(models.Model):
    """A blog post shown on the home page."""
    # NOTE(review): `decriptions` looks like a typo for `descriptions`,
    # but renaming the field would require a schema migration, so it is
    # only flagged here.
    title = models.CharField(max_length = 100)
    body = models.TextField()
    decriptions = models.TextField(blank=True)
    author = models.CharField(max_length = 200,blank=True)
    img = models.ImageField(upload_to='posts',blank=True)
    # No auto_now_add: the publish time is supplied explicitly.
    created = models.DateTimeField()

    def __str__(self):
        return self.title
class Comment(models.Model):
    """A reader comment attached to a Home post; hidden until moderated."""
    post = models.ForeignKey(Home,on_delete=models.CASCADE,related_name='comments')
    name = models.CharField(max_length=80)
    email = models.EmailField()
    body = models.TextField()
    created_on = models.DateTimeField(auto_now_add=True)
    # Comments start inactive so they can be reviewed before being displayed.
    active = models.BooleanField(default=False)
    class Meta:
        # Oldest first, so comment threads read chronologically.
        ordering = ['created_on']
    def __str__(self):
        return 'Comment {} by {}'.format(self.body, self.name)
class Aboutme(models.Model):
    """Site-owner bio shown on the about page."""
    name = models.CharField(max_length = 200,blank=True)
    text = models.TextField()
    img = models.ImageField(blank=True,upload_to='posts')
created = models.DateTimeField(auto_now_add=True) | linux-coffee/web | home/models.py | models.py | py | 1,075 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.db.models.Model",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "... |
35525386991 | """
Module containing ports and adapters for forward curve suppliers.
Contains both the abstract interface and concrete implementation.
"""
import abc
import datetime as dt
from typing import Collection
from volfitter.adapters.option_metrics_helpers import create_expiry
from volfitter.adapters.sample_data_loader import AbstractDataFrameSupplier
from volfitter.domain.datamodel import ForwardCurve
class AbstractForwardCurveSupplier(abc.ABC):
    """
    Abstract base class for forward curve suppliers.

    Concrete implementations look up (or compute) forward prices for the
    requested expiries and package them into a ForwardCurve value object.
    """

    @abc.abstractmethod
    def get_forward_curve(
        self, datetime: dt.datetime, expiries: Collection[dt.datetime]
    ) -> ForwardCurve:
        """
        Returns a forward curve.

        :param datetime: The datetime for which to return a forward curve.
        :param expiries: The expiries for which to return forward prices.
        :return: ForwardCurve.
        """
        raise NotImplementedError
class OptionMetricsForwardCurveSupplier(AbstractForwardCurveSupplier):
    """
    Constructs a ForwardCurve from a DataFrame containing OptionMetrics data.

    OptionMetrics is a vendor supplying historical options data; the DataFrame
    is expected to be in their format (see
    papers/option_metrics_reference_manual.pdf).
    """

    def __init__(self, dataframe_supplier: AbstractDataFrameSupplier):
        self.dataframe_supplier = dataframe_supplier

    def get_forward_curve(
        self, datetime: dt.datetime, expiries: Collection[dt.datetime]
    ) -> ForwardCurve:
        """
        Constructs a ForwardCurve from a DataFrame containing OptionMetrics data.

        :param datetime: The datetime for which to return a forward curve.
        :param expiries: The expiries for which to return forward prices.
        :return: ForwardCurve.
        :raises ValueError: if any requested expiry has no forward price.
        """
        frame = self.dataframe_supplier.get_dataframe(datetime)
        # Index every available forward price by its (possibly AM-settled) expiry.
        forwards_by_expiry = {
            create_expiry(expiration, am_flag): forward
            for expiration, am_flag, forward in zip(
                frame["expiration"].values,
                frame["AMSettlement"].values,
                frame["ForwardPrice"].values,
            )
        }

        selected = {}
        for expiry in expiries:
            if expiry not in forwards_by_expiry:
                raise ValueError(f"Missing forward price for {expiry}!")
            selected[expiry] = forwards_by_expiry[expiry]

        return ForwardCurve(datetime, selected)
| docadam78/vf_project | src/volfitter/adapters/forward_curve_supplier.py | forward_curve_supplier.py | py | 2,503 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "abc.ABC",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "typing.Collection",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "abc.abstractmet... |
8019970208 | import json
from server.nordic import COMMANDS
import static.app.instructions.translations as tr
# from static.app.instructions.translations import _yes
from static.app.instructions.helpers import TXT, NumberedText
class NextPrevious:
    """Base for wizard navigation buttons: a caption, a step offset, and an
    enabled flag. Stored as ``caption``/``goto``/``active``, the keys the
    serialized instruction format uses."""

    def __init__(self, button_text, goto, active):
        self.caption, self.goto, self.active = button_text, goto, active
class Next(NextPrevious):
    # "Next" button: advances one step forward by default.
    def __init__(self, button_text="next", goto=1, active=True):
        NextPrevious.__init__(self, button_text, goto, active)
class Previous(NextPrevious):
    # "Previous" button: goes one step back by default.
    def __init__(self, button_text="previous", goto=-1, active=True):
        NextPrevious.__init__(self, button_text, goto, active)
class DelayedCommand:
    """A command paired with the delay to wait before issuing it."""

    def __init__(self, command, delay):
        self.command, self.delay = command, delay
# class ApiCommand:
# def __init__(self, commands, delay=None):
# for command in commands:
# if command not in COMMANDS.keys():
# raise UserWarning("{} not a valid nordic command".format(command))
# self.commands = {'commands': commands}
# if delay is not None:
# # self.delay = delay
# self.commands['delay'] = delay
class Commands:
    """A validated nordic command sequence.

    :param first_command: a command key that must exist in COMMANDS.
    :param next_commands: follow-up DelayedCommand objects whose ``command``
        attribute must also exist in COMMANDS.
    :raises UserWarning: if any command is not a known nordic command.
    """

    def __init__(self, first_command, *next_commands):
        # Membership tests go straight against the dict (no ``.keys()`` needed).
        if first_command not in COMMANDS:
            raise UserWarning("{} not a valid nordic command".format(first_command))
        for _command in next_commands:
            if _command.command not in COMMANDS:
                raise UserWarning("{} not a valid nordic command".format(_command.command))
        # First command string followed by the delayed follow-ups, in order.
        self.commands = [first_command, *next_commands]
class ToJson(json.JSONEncoder):
    # JSON encoder that localizes TXT/NumberedText objects into the encoder's
    # language and serializes every other object via its ``__dict__``.
    def __init__(self, lang='en'):
        json.JSONEncoder.__init__(self, sort_keys=True)
        self.lang = lang

    def default(self, o):
        # Translatable text renders as a plain string in the chosen language.
        if isinstance(o, TXT) or isinstance(o, NumberedText):
            return o.get_text(self.lang)
        else:
            return o.__dict__
class Instruction:
    # Top-level container for a versioned instruction document.
    def __init__(self, version):
        self.version = version
        self.products = []

    def create_file(self, language):
        # NOTE(review): this returns the ``json`` module object itself, not a
        # serialized document -- looks unfinished; confirm intended behaviour.
        return json
'''
product = {'type': 'dict',
'schema': {'title': {'type': 'string', 'required': True},
'steps': {'type': 'list', 'schema': step}}}
'''
class Product:
    """A named product together with its ordered list of instruction steps."""

    def __init__(self, title, steps):
        # Bug fix: the original read ``self.title = title,`` -- the stray
        # trailing comma wrapped the title in a 1-tuple.
        self.title = title
        self.steps = steps
class Step:
    # One wizard step: a title plus rows of UI instructions, an optional
    # confirm dialog, and next/previous navigation buttons.
    def __init__(self, title, instructions, confirm=None, nav_next=Next(), nav_previous=Previous(), id=None):
        # NOTE(review): ``Next()``/``Previous()`` default arguments are created
        # once at definition time and shared by every Step instance -- confirm
        # that sharing these button objects is intended.
        for instruction in instructions:
            if not isinstance(instruction, Row):
                raise UserWarning("instruction is not of type Row.")
        self.title = title
        self.instructions = instructions
        self.confirm = confirm
        self.next = nav_next
        self.previous = nav_previous
        self.id = id
'''
instruction = {'type': 'dict', 'schema': {
'col1': col, 'col2': col, 'col3': col, 'col4': col}}
'''
'''
confirm = {'img': {'type': 'string', 'required': True},
'text': {'type': 'string', 'required': True},
'yes': {'type': 'integer'},
'no': {'type': 'integer'}}
'''
class Confirm:
    # Yes/no confirmation dialog data: an image, a prompt text, localized
    # button captions, and the step offsets taken for each answer.
    def __init__(self, img, text, yes_text=tr._yes, no_text=tr._no, yes=1, no=0):
        self.img = img
        self.text = text
        self.yes = yes
        self.no = no
        self.yes_text = yes_text
        self.no_text = no_text
class UiElement:
    """Base UI element: ``width`` is given as an integer percentage and is
    stored as the CSS-style string ``"NN%"``."""

    def __init__(self, width):
        if not isinstance(width, int):
            raise UserWarning("not an integer : {}".format(width))
        self.width = "{}%".format(width)
class NavigationCommand:
    # Wraps a step index/offset to jump to.
    def __init__(self,goto):
        self.goto=goto
# class OkayCommand(Commands):
# def __init__(self,first_command=None, goto=None, *next_commands):
# if first_command is not None:
# Commands.__init__(self,first_command,*next_commands)
# #ApiCommand.__init__(self, commands, delay)
# if goto is not None:
# self.goto = goto
class Spacer(UiElement):
    # Empty cell used purely for horizontal spacing.
    def __init__(self, width):
        UiElement.__init__(self, width)
'''
pv_keypad = {'width': {'type': 'string', 'regex': '\d{1,2}%'},
'type': {'type': 'string', 'required': True, 'allowed': 'pv-keypad'},
'active_buttons': {'type': 'list',
'allowed': ['open', 'close', 'stop', 'tiltup', 'tiltdown', 'okay', 'cancel'],
'required': True},
'confirm': {'type': 'string',
'allowed': ['open', 'close', 'stop', 'tiltup', 'tiltdown', 'okay', 'cancel']},
'okay': {'type': 'dict',
'schema': {'commands': {'type': 'dict', 'schema': api_commands}, 'goto': {'type': 'integer'}}},
'cancel': {'type': 'integer', 'required': True}}
'''
class PvKeypad(UiElement):
    """On-screen keypad cell.

    ``allowed`` lists every button the widget can show; ``active_buttons``
    selects which of them are enabled for a particular step.
    """
    allowed = ['open', 'close', 'tiltup', 'tiltdown', 'stop', 'okay', 'cancel']

    def __init__(self, width, active_buttons, confirm=None, okay=None, cancel=None):
        '''
        :param width: defines the width in percentage of the element.
        :param active_buttons: which buttons to activate
        :param confirm: which button will have an "open confirm dialog" method to it.
        :param okay: what actions should be taken when ok is clicked.
        :param cancel: where should cancel take you ?
        '''
        UiElement.__init__(self, width)
        self.type = 'pv-keypad'
        self.active_buttons = active_buttons
        for button in active_buttons:
            if button not in PvKeypad.allowed:
                raise UserWarning("'{}' not allowed as pvkeypad button".format(button))
        if confirm is not None:
            if confirm not in active_buttons:
                raise UserWarning("'{}' not allowed as it is not an active button".format(confirm))
            self.confirm = confirm
        if okay is not None:
            # Bug fix: the original tested ``'okay' not in PvKeypad.allowed``,
            # which is always false ('okay' is always in ``allowed``), so the
            # guard promised by the message never fired. The message makes the
            # intent clear: check the *active* buttons instead.
            if 'okay' not in active_buttons:
                raise UserWarning("'okay' defined but not defined as an active button.")
            self.okay = okay
        if cancel is not None:
            # Same fix as for 'okay' above.
            if 'cancel' not in active_buttons:
                raise UserWarning("'cancel' defined but not defined as an active button.")
            self.cancel = cancel
'''
text = {'width': {'type': 'string', 'regex': '\d{1,2}%'},
'type': {'type': 'string', 'required': True, 'allowed': ['text']},
'content': {'type': 'string', 'required': True}}
'''
class Text(UiElement):
    # A text cell: a width (integer percentage) and its content string.
    def __init__(self, width_percentage, content):
        UiElement.__init__(self, width_percentage)
        self.type = 'text'
        self.content = content
'''
image = {'width': {'type': 'string', 'regex': '\d{1,2}%'},
'type': {'type': 'string', 'required': True, 'allowed': ['image']},
'src': {'type': 'string', 'required': True}}
'''
class Image(UiElement):
    # An image cell: a width (integer percentage) and an image source path.
    def __init__(self, width, src):
        UiElement.__init__(self, width)
        self.type = "image"
        self.src = src
'''
next_prev_buttons = [{'type': 'boolean'},
{'type': 'dict',
'schema': {'caption': {'type': 'string', 'required': True},
'goto': {'type': 'integer'}}}]
'''
class Row:
    """One layout row of up to four columns; each column must be one of the
    element types listed in ``allowed``."""

    allowed = [PvKeypad, Text, Image, Spacer]

    def __init__(self, col1, col2=None, col3=None, col4=None):
        self._check(col1)
        self.col1 = col1
        if col2 is not None:
            self._check(col2)
            self.col2 = col2
        if col3 is not None:
            self._check(col3)
            self.col3 = col3
        if col4 is not None:
            self._check(col4)
            self.col4 = col4

    def _check(self, instance):
        # isinstance accepts a tuple of types, so no manual loop is needed.
        # The original loop's error message also misleadingly printed only the
        # last type in ``allowed``; list all accepted types instead.
        if not isinstance(instance, tuple(Row.allowed)):
            raise UserWarning(
                "not allowed: {} (expected one of {})".format(repr(instance), Row.allowed))
| sander76/nordic | static/app/instructions/components.py | components.py | py | 8,161 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "server.nordic.COMMANDS.keys",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "server.nordic.COMMANDS",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "server.nordic.COMMANDS.keys",
"line_number": 49,
"usage_type": "call"
},
{
"ap... |
10251263267 | import collections
import random
import sys
from os import path as osp
import json
import pandas as pd
from fol import beta_query_v2
from fol.foq_v2 import parse_formula
from utils.util import read_indexing, load_graph, load_data_with_indexing
sys.path.append(osp.dirname(osp.dirname(__file__)))
stanford_data_path = 'data/FB15k-237-betae'
def random_e_ground(foq_formula):
    """Ground the first 'e' placeholder with a random entity id in braces."""
    pos = foq_formula.find('e')
    if pos == -1:
        raise ValueError("Nothing to gound")
    entity = random.randint(0, 99)
    return "{}{{{}}}{}".format(foq_formula[:pos], entity, foq_formula[pos + 1:])
def random_p_ground(foq_formula):
    """Ground the first 'p' placeholder with a random relation id in brackets."""
    pos = foq_formula.find('p')
    if pos == -1:
        raise ValueError("Nothing to gound")
    relation = random.randint(0, 99)
    return "{}[{}]{}".format(foq_formula[:pos], relation, foq_formula[pos + 1:])
def complete_ground(foq_formula):
    """Ground every 'e' and 'p' placeholder in the formula with random ids.

    Repeatedly applies random_e_ground / random_p_ground until each raises
    ValueError, which signals that no placeholder of that kind remains.
    """
    # Bug fix: the original used bare ``except:``, which also swallowed
    # KeyboardInterrupt/SystemExit and any unrelated error. Only the
    # "nothing left to ground" ValueError should end each loop.
    while True:
        try:
            foq_formula = random_e_ground(foq_formula)
        except ValueError:
            break
    while True:
        try:
            foq_formula = random_p_ground(foq_formula)
        except ValueError:
            break
    return foq_formula
def test_parse_formula():
    # Round-trip every beta query template through the parser and make sure
    # serialization is stable (parse -> formula -> parse is an identity).
    for k, v in beta_query_v2.items():
        obj = parse_formula(v)
        # On failure the message expression prints both formulas (its value,
        # None, is what actually ends up in the AssertionError).
        assert obj.formula == v, print(obj.formula, v)
        oobj = parse_formula(obj.formula)
        assert oobj.formula == obj.formula
        print(k, obj, obj.formula)
# we don't need this any more
def test_parse_grounded_formula():
    # Ground each template, parse both the grounded and raw forms, and check
    # they serialize to the same (ungrounded) formula.
    for k, v in beta_query_v2.items():
        gv = random_p_ground(random_e_ground(v))
        obj = parse_formula(v)
        gobj = parse_formula(gv)
        oobj = parse_formula(obj.formula)
        assert gobj.formula == oobj.formula
        '''
        ogobj = parse_formula(gobj.ground_formula)
        assert gobj.ground_formula == ogobj.ground_formula
        '''
def test_additive_ground():
    # Repeatedly ground the same parsed object; additive grounding should not
    # corrupt the object's (ungrounded) formula.
    for k, v in beta_query_v2.items():
        obj = parse_formula(v)
        for _ in range(10):
            gv = random_p_ground(random_e_ground(obj.dumps))
            obj.additive_ground(json.loads(gv))
        # NOTE(review): this compares the formula with itself and can never
        # fail; it presumably meant ``obj.formula == v`` -- confirm.
        assert obj.formula == obj.formula
'''
def test_embedding_estimation():
for k, v in beta_query_v2.items():
cg_formula = complete_ground(v)
obj = parse_formula(cg_formula)
for _ in range(10):
cg_formula = complete_ground(v)
obj.additive_ground(cg_formula)
print(f"multi-instantiation for formula {obj.ground_formula}")
obj.embedding_estimation(estimator=TransEEstimator())
'''
def test_sample():
    # Forward-sample a random query per beta template and cross-check the
    # sampled answers against a deterministic evaluation, both on the original
    # object and on a re-parsed copy of its serialized form.
    ent2id, rel2id, proj_train, reverse_train, proj_valid, reverse_valid, proj_test, reverse_test = \
        load_data_with_indexing(stanford_data_path)
    for name in beta_query_v2:
        query_structure = beta_query_v2[name]
        ansclass = parse_formula(query_structure)
        ans_sample = ansclass.random_query(proj_train, cumulative=True)
        ans_check_sample = ansclass.deterministic_query(proj_train)
        assert ans_sample == ans_check_sample
        # Serialize, re-parse, re-ground, and check the answers still agree.
        query_dumps = ansclass.dumps
        brand_new_instance = parse_formula(query_structure)
        brand_new_instance.additive_ground(json.loads(query_dumps))
        ans_another = brand_new_instance.deterministic_query(proj_train)
        assert ans_another == ans_sample
        print(ansclass.dumps)
def test_backward_sample():
    """Backward-sample every beta query and verify the sampled answers match a
    deterministic forward evaluation, both without and with meaningful
    difference (the original duplicated the whole loop for the two modes)."""
    ent2id, rel2id, proj_train, reverse_train, proj_valid, reverse_valid, proj_test, reverse_test = \
        load_data_with_indexing(stanford_data_path)
    # Same order as the original: all templates with False first, then True.
    for meaningful_difference in (False, True):
        for name in beta_query_v2:
            query_structure = beta_query_v2[name]
            ansclass = parse_formula(query_structure)
            ans_back_sample = ansclass.backward_sample(
                proj_train, reverse_train, requirement=None,
                cumulative=True, meaningful_difference=meaningful_difference)
            ans_check_back_sample = ansclass.deterministic_query(proj_train)
            assert ans_check_back_sample == ans_back_sample
            # Serialize, re-parse, re-ground, and check the answers still agree.
            query_dumps = ansclass.dumps
            check_instance = parse_formula(query_structure)
            check_instance.additive_ground(json.loads(query_dumps))
            ans_another = check_instance.deterministic_query(proj_train)
            assert ans_another == ans_check_back_sample
            print(name, ansclass.dumps)
def test_benchmark_backward_sample():
    # Same check as test_backward_sample, but over the generated benchmark
    # formulas read from CSV instead of the hand-written beta templates.
    ent2id, rel2id, proj_train, reverse_train, proj_valid, reverse_valid, proj_test, reverse_test = \
        load_data_with_indexing(stanford_data_path)
    formula_file = "outputs/test_generated_formula_anchor_node=3.csv"
    df = pd.read_csv(formula_file)
    for i, query_structure in enumerate(df['original']):
        ansclass = parse_formula(query_structure)
        ans_back_sample = ansclass.backward_sample(proj_train, reverse_train, requirement=None,
                                                   cumulative=True, meaningful_difference=True)
        ans_check_back_sample = ansclass.deterministic_query(proj_train)
        assert ans_check_back_sample == ans_back_sample
        # Serialize, re-parse, re-ground, and check the answers still agree.
        query_dumps = ansclass.dumps
        check_instance = parse_formula(query_structure)
        check_instance.additive_ground(json.loads(query_dumps))
        ans_another = check_instance.deterministic_query(proj_train)
        assert ans_another == ans_check_back_sample
        print(i, ansclass.dumps)
if __name__ == "__main__":
    # Smoke-run the enabled checks; the commented calls are kept for
    # reference but currently disabled.
    test_parse_formula()
    test_sample()
    test_backward_sample()
    test_benchmark_backward_sample()
    # test_additive_ground()
    # test_embedding_estimation()
    # test_parse_grounded_formula()
    # test_gen_foq_meta_formula()
| HKUST-KnowComp/EFO-1-QA-benchmark | fol/test_foq_v2.py | test_foq_v2.py | py | 6,319 | python | en | code | 17 | github-code | 6 | [
{
"api_name": "sys.path.append",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_num... |
4552159327 | import random
import sys
sys.path.insert(1, '../')
from utils import read_instance, objetive_function, corrent_solution_size
import config
# Constructive Heuristic 02
# Pick a test at random, then compute the best desk to assign it to.
def constructive_heuristic_02(corrent_size):
    # Builds a solution by repeatedly choosing a random test and greedily
    # placing it on the empty desk that minimizes the objective function.
    def validade_best_desk(solution, idx):
        # idx = test number / index
        iteration_value = 100000
        iteration_solution = solution.copy()
        for i in range(0, desk_count):
            current_solution = solution.copy()
            if solution[i] == 0: # do not overwrite desks already allocated
                current_solution[i] = idx
                current_value = objetive_function(current_solution)
                if current_value < iteration_value:
                    iteration_value = current_value
                    iteration_solution = current_solution.copy()
        return iteration_solution, iteration_value
    desks, tests, empty = config.desks, config.tests, config.empty
    desk_count = len(desks)
    test_count = len(tests)
    # 0 marks an empty desk; one test is assigned per iteration below.
    s = [0 for _ in range(desk_count)]
    best_value = 0
    desk_with_test = 0
    while desk_with_test < (desk_count):
        # NOTE(review): range(1, test_count) never draws index test_count;
        # confirm whether test ids are meant to be 1-based here.
        sort_list = [i for i in range(1, test_count)]
        idx = random.choice(sort_list)
        s, best_value = validade_best_desk(s, idx)
        desk_with_test += 1
    if corrent_size:
        s, best_value = corrent_solution_size(s, empty)
    return s, best_value
if __name__ == '__main__':
    # CLI usage: python constructive_heuristic_02.py <instance_file>
    file_name = sys.argv[1]
    read_instance(file_name)
    response_solution, objetive = constructive_heuristic_02(True)
    print()
    print(response_solution)
    print(objetive)
| guilhermelange/Test-Assignment-Problem | stage_01/constructive_heuristic_02.py | constructive_heuristic_02.py | py | 1,701 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.path.insert",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "utils.objetive_function",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "config.desks",
... |
25814086176 | from selenium.webdriver import Firefox
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
import pytest
import time
@pytest.mark.needs_server
class TestMaxlifeFeature:
    """
    Checks if the maxlife feature is working

    Drives a bepasty instance at localhost:5000 through Firefox.
    NOTE(review): uses the legacy Selenium 3 ``find_element_by_*`` API.
    """

    def setup_class(self):
        """
        Setup: Open a mozilla browser, login
        """
        self.browser = Firefox()
        self.browser.get('http://localhost:5000/')
        token = self.browser.find_element_by_name("token")
        password = "foo"
        # login
        token.send_keys(password)
        token.send_keys(Keys.ENTER)
        time.sleep(.1)  # give the page a moment to process the login
        try:
            # The Logout button only exists when the login succeeded.
            self.browser.find_element_by_xpath("//input[@value='Logout']")
        except NoSuchElementException:
            raise ValueError("Can't login!!! Create a user 'foo' with the permissions"
                             "'read' and 'create' in your PERMISSIONS in the config")

    def teardown_class(self):
        """
        Tear down: Close the browser
        """
        self.browser.quit()

    @property
    def page_body_lowercase(self):
        # Lower-cased page text, used for case-insensitive content checks.
        return self.browser.find_element_by_tag_name("body").text.lower()

    def test_unit_input_exists(self):
        # Both maxlife form fields must be present on the upload page.
        unit_input = self.browser.find_element_by_name("maxlife-unit")
        assert unit_input is not None
        value_input = self.browser.find_element_by_name("maxlife-value")
        assert value_input is not None

    def fill_form(self):
        """
        Fills test values to the form and submits it
        :return: tuple(filename, pasted_text)
        """
        filename = "test.txt"
        text_to_paste = "This is test"
        paste_input = self.browser.find_element_by_id("formupload")
        paste_input.send_keys(text_to_paste)
        filename_input = self.browser.find_element_by_id("filename")
        filename_input.send_keys(filename)
        contenttype_input = self.browser.find_element_by_id("contenttype")
        contenttype_input.send_keys("text/plain")
        contenttype_input.send_keys(Keys.ENTER)
        time.sleep(.2)  # give some time to render next view
        return filename, text_to_paste

    def delete_current_file(self):
        # Delete the file shown on the current page, accepting the dialog.
        self.browser.find_element_by_id("del-btn").click()
        time.sleep(.2)
        self.browser.find_element_by_class_name("bootbox-accept").click()

    def test_paste_keep_forever(self):
        # With the 'forever' unit, the page must advertise an infinite lifetime.
        self.browser.find_element_by_xpath("//select[@name='maxlife-unit']/option[@value='forever']").click()
        value_input = self.browser.find_element_by_name("maxlife-value")
        value_input.clear()
        value_input.send_keys(1)
        self.fill_form()
        assert "max lifetime: forever" in self.page_body_lowercase
        self.delete_current_file()

    def test_paste_keep_minutes(self):
        # A finite lifetime must not be displayed as 'forever'.
        self.browser.find_element_by_xpath("//select[@name='maxlife-unit']/option[@value='minutes']").click()
        value_input = self.browser.find_element_by_name("maxlife-value")
        value_input.clear()
        value_input.send_keys(1)
        self.fill_form()
        assert "max lifetime: forever" not in self.page_body_lowercase
        self.delete_current_file()

    def test_filename_gets_displayed(self):
        filename, _ = self.fill_form()
        assert filename.lower() in self.page_body_lowercase
        self.delete_current_file()

    def test_pasted_text_gets_displayed(self):
        _, pasted_text = self.fill_form()
        self.browser.find_element_by_id("inline-btn").click()
        assert pasted_text.lower() in self.page_body_lowercase
        self.browser.back()
        self.delete_current_file()

    @pytest.mark.slow
    def test_file_gets_deleted_after_expiry_time(self):
        # Upload with a 1-minute lifetime, wait past expiry, then expect 404.
        self.browser.find_element_by_xpath("//select[@name='maxlife-unit']/option[@value='minutes']").click()
        value_input = self.browser.find_element_by_name("maxlife-value")
        value_input.clear()
        value_input.send_keys(1)
        self.fill_form()
        time.sleep(61)
        self.browser.find_element_by_id("inline-btn").click()
        assert "not found" in self.page_body_lowercase
| bepasty/bepasty-server | src/bepasty/tests/test_website.py | test_website.py | py | 4,149 | python | en | code | 162 | github-code | 6 | [
{
"api_name": "selenium.webdriver.Firefox",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.common.keys.Keys.ENTER",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.keys.Keys",
"line_number": 26,
"usage... |
32043962015 | # -*- coding: utf-8 -*-
"""
Created on Sat Jan 12 02:46:22 2019
@author: Michael
"""
#importing the necessary libraries
import os
import shutil
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
import numpy as np
from imutils import paths
from utilities.preprocessing import AspectAwarePreprocessor
from utilities.datasets import SimpleDatasetLoader
from utilities.nn.cnn import MiniVGGNet
#importing the dataset
# NOTE(review): ``segment`` is defined but unused -- the literal 80 is
# hard-coded in the loop below instead.
segment = 80
path = r'C:\Users\Michael\Desktop\Data Science\My directory set-up for Computer-Vision\Deep learning for computer vision - Practitioneers bundle\datasets\Flowers17'
pl = os.listdir(path)
flower_className = ['Daffodil', 'Snowdrop', 'Lily_Valley', 'Bluebell',
                    'Crocus', 'Iris', 'Tigerlily', 'Tulip',
                    'Fritillary', 'Sunflower', 'Daisy', 'Colts\'s_Foot',
                    'Dandelion', 'Cowslip', 'Buttercup', 'Windflower', 'Pansy']
# Sort the flat image dump into one sub-folder per class: image NNNN belongs
# to class (NNNN - 1) // 80 because classes occupy contiguous runs of 80.
for p in pl:
    if '.jpg' in p:
        # NOTE(review): str.strip removes the *characters* '.', 'j', 'p', 'g'
        # from both ends, not the suffix -- works here only because the
        # remaining stem is purely digits.
        index = int(p.split("_")[-1].strip(".jpg")) - 1
        classname = index // 80
        classname = flower_className[classname]
        os.makedirs(path + '/' + classname, exist_ok=True)
        shutil.move(path + '/' + p, path + '/' + classname + '/' + p)
print("[INFO]")
imagePaths = list(paths.list_images(r'C:\Users\Michael\Desktop\Data Science\My directory set-up for Computer-Vision\Deep learning for computer vision - Practitioneers bundle\datasets\Flowers17'))
# Aspect-aware resize to 64x64 and load images together with their labels.
aap = AspectAwarePreprocessor(64,64)
sdl = SimpleDatasetLoader(preprocessors=[aap])
(data, labels) = sdl.load(imagePaths, verbose=500)
#preprocessing the data: scale pixels to [0, 1] and one-hot encode labels
data = data.astype("float")/255.0
lb = LabelBinarizer()
labels = lb.fit_transform(labels)
trainX, testX, trainY, testY = train_test_split(data, labels, random_state=42, test_size=0.25)
#building the netwok and applying data augmentaion
opt = SGD(lr = 0.05, nesterov=True, momentum = 0.9)
aug = ImageDataGenerator(rotation_range = 30, width_shift_range = 0.1, zoom_range = 0.2,
                         height_shift_range = 0.1, shear_range = 0.2, horizontal_flip = True,
                         fill_mode = "nearest")
model = MiniVGGNet.build(width = 64, height = 64, depth = 3, classes = len(flower_className))
model.compile(optimizer = opt, loss = "categorical_crossentropy", metrics = ["accuracy"])
H = model.fit_generator(aug.flow(trainX, trainY, batch_size = 32), steps_per_epoch = len(trainX)//32,
                        validation_data = (testX, testY), epochs = 100, verbose = 1)
#saving the model
model.save("MiniVGGNet on flowers 17 dataset with data augmentation.hdf5")
#plotting and evaluating the dataset progress reports
plt.style.use("ggplot")
plt.figure("MiniVGGNet on flowers 17 with data aumentation")
plt.plot(np.arange(0, 100), H.history["acc"], label = "Training accuracy")
plt.plot(np.arange(0, 100), H.history["val_acc"], label = "Validation accuracy")
plt.title("Training loss and accuracy")
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.legend()
plt.savefig("MiniVGGNet on flowers 17 with data aumentation")
| Monarene/CV-Deep-learning-Pracittioner | minivggnet_flower17_data_aug.py | minivggnet_flower17_data_aug.py | py | 3,346 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.listdir",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "shutil.move",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "imutils.paths.list_images",
"l... |
12894651014 | #!/usr/bin/env python
# coding: utf-8
#
# # Task 1- Prediction using Supervised ML
#
# ### Task: Predict the percentage of a student based on the no. of study hours.
# ## The Sparks Foundation(GRIP), July 2021
# #### By: Rishi Raj Dhar
# In[11]:
#importing the required libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# In[12]:
#Reading the data
# NOTE(review): this is a Jupyter-notebook export; bare expressions such as
# ``data.head(5)`` below only display output in a notebook -- when run as a
# plain script their values are silently discarded.
url="https://raw.githubusercontent.com/AdiPersonalWorks/Random/master/student_scores%20-%20student_scores.csv"
# In[13]:
data=pd.read_csv(url)
# In[14]:
print(data)
# In[15]:
#See the first 5 rows of the data
data.head(5)
# In[16]:
#See the last 5 rows of the data
data.tail(5)
# In[17]:
data.shape
# In[18]:
data.info()
# In[19]:
data.describe()
# In[20]:
#Check for the null values if any.
data.isnull().sum()
# ### As there is no null values, we can now visualize our data.
# In[21]:
# Plotting the distribution of scores
sns.scatterplot(y=data['Scores'], x=data['Hours'])
plt.title('Marks vs Study hours', size=18)
plt.ylabel('Marks Percentage', size=15)
plt.xlabel('Hours Studied', size=15)
plt.show()
# #### From the above scatterplot, we can clearly see that there is a positive linear relation between the "Number of hours studied" and "Percentage of score". Now plotting a regression line to confirm the correlation.
#
# In[22]:
#plotting the regression line
sns.regplot(x=data['Hours'],y=data['Scores'])
plt.title('Regression Plot', size=20)
plt.ylabel('Marks Percentage', size=12)
plt.xlabel('Hours Studied', size=12)
plt.show()
#Correlation
print(data.corr())
# ### From the above output it is confirmed that the variables are postively correlated.
# # Preparing the data
# # The next step is to divide the data into "attributes"(inputs) and "labels"(outputs)
# In[23]:
#x- attributes, y- labels
x= data.iloc[:,:-1].values
y= data.iloc[:, 1].values
# ### Doing this by using Scikit-Learn's built-in train_test_split() method.
# In[24]:
#Splitting the data(Training & Test datasets)
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x,y, test_size=0.25, random_state=0)
# In[25]:
#We have split the dataset as 75% training data and 25% test data.
# #### Training the model
# ##### We will be using the Linear Regression which is a supervised machine learning algortithm
# In[26]:
from sklearn.linear_model import LinearRegression
lr= LinearRegression()
lr.fit(x_train, y_train)
print("Training complete.")
# # Making Predictions
# In[27]:
# Predicting the scores
y_pred=lr.predict(x_test)
y_pred
# In[28]:
df=pd.DataFrame({'Hours': [i[0] for i in x_test], 'Predicted Marks' : [k for k in y_pred]})
df
# In[29]:
# Comparing the Actual marks and the predicted marks
compare_scores = pd.DataFrame({'Actual Marks': y_test, 'Predicted Marks': y_pred})
compare_scores
# In[30]:
plt.scatter(x=x_test, y=y_test, color='blue')
plt.plot(x_test, y_pred, color='Black')
plt.title('Actual vs Predicted', size=20)
plt.ylabel('Actual Marks', size=15)
plt.xlabel('Predicted Marks', size=15)
plt.show()
# # Evaluating the model
# In[31]:
from sklearn import metrics as m
print('Accuracy of Actual and Predicted Scores R-Squared is:', m.r2_score(y_test,y_pred))
MSE= m.mean_squared_error(y_test, y_pred)
RMSE= np.sqrt(MSE)
MAE= m.mean_absolute_error(y_test,y_pred)
print('Mean Squared Error:', MSE)
print('Root Mean Squared Error:', RMSE)
print('Mean Absolute Error:', MAE)
# In[32]:
# NOTE(review): predict expects a 2-D array, so [hours] wraps the single
# sample list into the required [[9.5]] shape.
hours = [9.5]
answer = lr.predict([hours])
print('Score: {}'.format(round(answer[0],3)))
# ###### The accuracy is around 94% and the small value of error metrics indicates that the chances of error or wrong forecasting through the model are very less.
# ## ................................. END OF TASK 1..................................................
# In[ ]:
| Rishirajdhar/griptask1 | GRIP_TASK_1_Student_Scores.py | GRIP_TASK_1_Student_Scores.py | py | 4,112 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "seaborn.scatterplot",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "matplotli... |
2615744068 | # topics = ["Таѕ"]
from typing import List
class Solution:
    def simplifyPath(self, path: str) -> str:
        """Collapse a Unix-style path: drop empty and '.' components and
        resolve '..' against the preceding component (never above root)."""
        stack: List[str] = []
        for part in path.split('/'):
            if part == '..':
                if stack:
                    stack.pop()
            elif part and part != '.':
                stack.append(part)
        return '/' + '/'.join(stack)
| show-me-code/signInHelper-using-face- | algorithms/[71]简化路径/solution.py | solution.py | py | 389 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.List",
"line_number": 8,
"usage_type": "name"
}
] |
20299042800 | """crud URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from .views import create_view,list_view,create_view_curso,delete_view_estudiante,delete_view_curso,update_view_estudiante,update_view_curso
# URL routes: admin site plus CRUD endpoints for students and courses.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('estudiante/', create_view,name = 'estudiante'),
    path('curso/', create_view_curso,name = 'curso'),
    path('lista/', list_view,name = 'lista'),
    # Delete/update routes take the record's primary key as <int:int>.
    path('delete_estudiante/<int:int>', delete_view_estudiante,name = 'delete_view_estudiante' ),
    path('delete_curso/<int:int>', delete_view_curso,name = 'delete_view_curso' ),
    path('update_estudiante/<int:int>', update_view_estudiante,name = 'update_view_estudiante' ),
    path('update_curso/<int:int>', update_view_curso,name = 'update_view_curso' )
]
| JairoObregon/django | crud/urls.py | urls.py | py | 1,405 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "... |
27127443756 | """
基于Memoization的递归可以大大提升性能,此时可以自定义一个memorize修饰器
author:Andy
"""
import functools
def memorize(fn):
    """Memoization decorator: cache results of *fn* keyed by its positional args.

    ``functools.wraps`` keeps the wrapped function's ``__name__`` and
    ``__doc__`` intact so introspection still works.
    """
    cache = dict()

    @functools.wraps(fn)
    def wrapper(*args):
        # Compute at most once per distinct argument tuple.
        if args not in cache:
            cache[args] = fn(*args)
        return cache[args]

    return wrapper
@memorize
def nsum(n):
    """Return the sum of the first n non-negative integers (0 + 1 + ... + n)."""
    # The description used to live in a comment between the decorator and the
    # def, which left __doc__ as None even though the __main__ driver prints
    # func.__doc__ — a docstring fixes that.
    assert (n >= 0), "n must be >=0"
    return n if n == 0 else n + nsum(n - 1)
@memorize
def fib(n):
    """Return the n-th Fibonacci number (fib(0) == 0, fib(1) == 1)."""
    # Moved from a comment into a docstring so the __main__ driver's
    # func.__doc__ printout shows it instead of None.
    assert (n >= 0), "n must be >=0"
    return n if n in (0, 1) else fib(n - 1) + fib(n - 2)
if __name__ == '__main__':
    from timeit import Timer
    # Benchmark both memoized functions; thanks to the cache, only the first
    # of the Timer's many iterations actually recurses.
    measures = [
        {"exec": "fib(100)", "import": "fib", "func": fib},
        {"exec": "nsum(100)", "import": "nsum", "func": nsum},
    ]
    for item in measures:
        # Timer(statement, setup): the setup imports the function under test.
        t = Timer(
            item["exec"],
            "from __main__ import {}".format(item["import"])
        )
        # t.timeit() runs the statement 1,000,000 times (timeit default).
        print("name: {}, doc: {}, executing: {}, time:{}". \
            format(item["func"].__name__, item["func"].__doc__, item["exec"], t.timeit()))
| LiUzHiAn/pythonDesignPatterns | decorate_pattern/my_math.py | my_math.py | py | 1,213 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "functools.wraps",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "timeit.Timer",
"line_number": 48,
"usage_type": "call"
}
] |
38905374725 | from torch import Tensor, LongTensor, max
from typing import Dict
from sklearn.metrics import accuracy_score
def compute_metrics(
    outputs: Tensor,
    labels: LongTensor,
) -> Dict[str, float]:
    """Compute classification metrics for a batch of model outputs.

    Args:
        outputs: raw model scores/logits, assumed (batch, num_classes) —
            argmax is taken over dim 1.
        labels: ground-truth class indices, shape (batch,).

    Returns:
        Mapping of metric name to value; currently only "accuracy".
    """
    # NOTE: the original signature ended with a stray "\" after the colon,
    # which glued the first statement onto the def line and made the indented
    # body a syntax error.
    metrics: Dict[str, float] = {}
    # Move to CPU so metric math does not depend on the model's device.
    outputs = outputs.cpu()
    labels = labels.cpu()
    # torch.max over dim 1 returns (values, indices); indices are predictions.
    _, pred = max(outputs.data, 1)
    # Accuracy computed with torch directly — same value as
    # sklearn.metrics.accuracy_score on 1-D label tensors, one fewer dependency.
    accuracy = (pred == labels).float().mean().item()
    # Optional: add further metrics here.
    metrics["accuracy"] = accuracy
    return metrics
if __name__ == '__main__':
pass | Agiratex/histological-image-classification | utils/compute_metrics.py | compute_metrics.py | py | 570 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.Tensor",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "torch.LongTensor",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "torch.max",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.accuracy_score"... |
5975605110 | from . import sql, chunk, rarity_info, BASE_PATH, Page
from datetime import datetime as dt, timedelta as td
from io import BytesIO
from pandas import DataFrame
from PIL import Image
from random import choice, choices, random
import json
import logging
import requests as r
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler()
stream_format = logging.Formatter('[%(asctime)s - %(name)s - %(levelname)s] %(message)s')
stream_handler.setFormatter(stream_format)
log.addHandler(stream_handler)
#############
# Constants #
#############
# Bulk-update statement: copies amounts from the staging table tmp_roulette
# into roulette for every (user_id, poke_id, shiny) triple present in staging.
UPDATE_ROULETTE = '''update roulette
set amount = (select tr.amount from tmp_roulette tr where tr.user_id = roulette.user_id and tr.poke_id = roulette.poke_id and tr.shiny = roulette.shiny)
where roulette.user_id in (select tr.user_id from tmp_roulette tr where tr.user_id = roulette.user_id and tr.poke_id = roulette.poke_id and tr.shiny = roulette.shiny)
and roulette.poke_id in (select tr.poke_id from tmp_roulette tr where tr.user_id = roulette.user_id and tr.poke_id = roulette.poke_id and tr.shiny = roulette.shiny)
and roulette.shiny in (select tr.shiny from tmp_roulette tr where tr.user_id = roulette.user_id and tr.poke_id = roulette.poke_id and tr.shiny = roulette.shiny);'''
# Base probability of any rolled pokemon being shiny (before user upgrades).
shiny_chance = 1/8192
# Fresh reward counters for a pokemon newly placed in the daycare.
default_rewards = {
    'levels': 0,
    'cash': 0,
    'rolls': 0,
    'rewards': 0
}
#############
# Functions #
#############
# Pokemon #
def get_all_pokemon():
    """Load every row of the `pokemon` table as Pokemon objects."""
    records = sql('select * from pokemon').to_dict('records')
    return [Pokemon(**record) for record in records]
def get_pokemon(args):
    """Resolve a pokemon from raw user input.

    *args* may be a tuple of words (joined with spaces) or a single string.
    A trailing capital 'S' marks the shiny variant. The remaining text is
    tried as a numeric id first, then as a name. Returns None when no
    pokemon matches.
    """
    shiny = 0
    name = ' '.join(args) if isinstance(args, tuple) else args
    if isinstance(name, str):
        # endswith() is safe on the empty string, unlike the old name[-1]
        # which raised IndexError for empty input.
        if name.endswith('S'):
            name = name[:-1]
            shiny = 1
    try:
        # Local renamed from `id` to avoid shadowing the builtin.
        poke_id = float(name)
        pkmn = get_pokemon_by_id(poke_id)
    except ValueError:
        pkmn = get_pokemon_by_name(name)
    if not pkmn:
        return None
    pkmn.shiny = shiny
    return pkmn
def get_pokemon_by_id(id):
    """Look a pokemon up by numeric id; returns None when no row matches."""
    result = sql('select * from pokemon where id = ?', (id,))
    if result.empty:
        return None
    record = result.to_dict('records')[0]
    return Pokemon(**record)
def get_pokemon_by_name(name):
    """Case-insensitive lookup by pokemon name; returns None when not found."""
    result = sql('select * from pokemon where lower(name) = ?', (name.lower(),))
    if result.empty:
        return None
    record = result.to_dict('records')[0]
    return Pokemon(**record)
def get_all_pokemon_by_rarity(rarity):
    """All pokemon of the given star rarity, as Pokemon objects."""
    records = sql('select * from pokemon where rarity = ?', (rarity,)).to_dict('records')
    return [Pokemon(**record) for record in records]
def get_random_pokemon_by_rarity(rarity):
    """Pick a uniformly random pokemon of the given rarity (fresh object per call)."""
    return choice(get_all_pokemon_by_rarity(rarity))
# Pokedex #
def get_user_pokedex(user):
    """Every pokedex entry the user owns, joined with its pokemon data."""
    result = sql('select r.user_id, r.amount, r.shiny, p.* from roulette r left join pokemon p on p.id = r.poke_id where r.user_id = ?', (user.id,))
    if result.empty:
        return []
    return [PokedexEntry(**record) for record in result.to_dict('records')]
def get_user_pokedex_entry(user, pkmn):
    """Return the user's pokedex entry for this exact pokemon (id + shiny), or None."""
    df = sql('select r.user_id, r.amount, r.shiny, p.* from roulette r left join pokemon p on p.id = r.poke_id where r.user_id = ? and r.poke_id = ? and r.shiny = ?', (user.id, pkmn.id, pkmn.shiny))
    if df.empty:
        return None
    return PokedexEntry(**df.to_dict('records')[0])
def get_duplicate_pokedex_extries(user, rarity):
    """Entries the user owns more than once, at or below the given rarity.

    NOTE(review): "extries" is a typo for "entries", but the name is part of
    the public interface — renaming would break callers elsewhere.
    """
    return [pkdx for pkdx in get_user_pokedex(user) if pkdx.amount > 1 and pkdx.rarity <= rarity]
def add_or_update_user_pokedex_entry_from_pokemon(user, pkmn_list, pkmn_counter):
    """Persist a batch of caught pokemon into the user's pokedex (`roulette` table).

    Splits the distinct pokemon into brand-new rows (bulk INSERT) and existing
    rows (staged into tmp_roulette, then applied via UPDATE_ROULETTE).
    *pkmn_counter* maps each pokemon to how many were caught this batch.
    """
    user_pkdx = get_user_pokedex(user)
    # Dedupe; membership relies on Pokemon.__eq__/__hash__ (id-based).
    unique = list(set(pkmn_list))
    new = [p for p in unique if p not in user_pkdx]
    updating = [p for p in unique if p not in new]
    if new:
        # 249 rows x 4 params per chunk — presumably to stay under SQLite's
        # bound-parameter limit; confirm against the sql() backend.
        new_chunks = chunk(new, 249)
        for nc in new_chunks:
            vals = []
            sql_str = 'insert into roulette values '
            for p in nc:
                sql_str += ' (?,?,?,?),'
                vals.extend((user.id, p.id, pkmn_counter[p], p.shiny))
            # [:-1] drops the trailing comma from the VALUES list.
            sql(sql_str[:-1], vals)
    if updating:
        # Rebuild the staging table with the new totals, then apply in bulk.
        sql('drop table if exists tmp_roulette')
        sql('create table tmp_roulette (user_id INTEGER, poke_id INTEGER, amount INTEGER, shiny INTEGER)')
        pkdx_map = {pkdx.id: pkdx for pkdx in user_pkdx}
        updating_chunks = chunk(updating, 249)
        for uc in updating_chunks:
            vals = []
            sql_str = 'insert into tmp_roulette values '
            for p in uc:
                sql_str += ' (?,?,?,?),'
                # New total = previously owned amount + newly caught count.
                amt = pkdx_map.get(p.id).amount + pkmn_counter[p]
                vals.extend((user.id, p.id, amt, p.shiny))
            sql(sql_str[:-1], vals)
        sql(UPDATE_ROULETTE)
def add_or_update_user_pokedex_entry_from_pokedex_entries(user, pokedex_entries):
    """Persist already-built PokedexEntry objects.

    Convention: amount == -1 marks a brand-new entry (inserted with amount 1);
    amount >= 0 updates the stored count; entries reaching 0 are deleted from
    `roulette` and their `battle` rows removed as well.
    """
    new = [pkdx for pkdx in pokedex_entries if pkdx.amount == -1]
    updating = [pkdx for pkdx in pokedex_entries if pkdx.amount >= 0]
    deleting = []
    if new:
        # Chunked bulk insert (4 bound params per row).
        new_chunks = chunk(new, 249)
        for nc in new_chunks:
            vals = []
            sql_str = 'insert into roulette values '
            for p in nc:
                sql_str += ' (?,?,?,?),'
                # -1 sentinel entries are stored with an initial amount of 1.
                vals.extend([p.user_id, p.id, 1, p.shiny])
            sql(sql_str[:-1], vals)
    if updating:
        # Stage updated amounts, then apply them in one bulk statement.
        sql('drop table if exists tmp_roulette')
        sql('create table tmp_roulette (user_id INTEGER, poke_id INTEGER, amount INTEGER, shiny INTEGER)')
        updating_chunks = chunk(updating, 249)
        for uc in updating_chunks:
            vals = []
            sql_str = 'insert into tmp_roulette values '
            for p in uc:
                sql_str += ' (?,?,?,?),'
                vals.extend(p.to_row)
                # Remember zeroed entries for the cleanup pass below.
                if p.amount == 0:
                    deleting.append(p)
            sql(sql_str[:-1], vals)
        sql(UPDATE_ROULETTE)
    if deleting:
        # Drop zeroed pokedex rows, then their battle rows in chunks.
        sql('delete from roulette where amount = 0')
        deleting_chunks = chunk(deleting, 249)
        for dc in deleting_chunks:
            vals = [user.id]
            sql_str = 'delete from battle where user_id = ? and poke_id in ('
            for p in dc:
                sql_str += '?,'
                vals.append(p.id)
            sql_str = f'{sql_str[:-1]})'
            sql(sql_str, vals)
# PokeBattle #
def get_pokemon_info(id):
    """Return the poke_info row (base hp/attack/defense) for a pokemon id.

    Raises IndexError if the id has no poke_info row (record [0] of an
    empty result) — callers pass ids that came from the pokemon table.
    """
    df = sql('select * from poke_info where poke_id = ?', (id,))
    return df.to_dict('records')[0]
def get_all_user_poke_battle(user, level=1):
    """All of the user's battle pokemon at or above *level*, with base stats joined in."""
    df = sql('select b.user_id, b.level, b.exp, pi.hp, pi.attack, pi.defense, p.* from battle b left join poke_info pi on b.poke_id = pi.poke_id left join pokemon p on b.poke_id = p.id where b.user_id = ? and level >= ?', (user.id, level))
    if df.empty:
        return []
    return [PokeBattle(**d) for d in df.to_dict('records')]
def get_user_poke_battle(user, poke_id):
    """The user's battle record for one pokemon id (stats joined in), or None."""
    df = sql('select b.user_id, b.level, b.exp, pi.hp, pi.attack, pi.defense, p.* from battle b left join poke_info pi on b.poke_id = pi.poke_id left join pokemon p on b.poke_id = p.id where b.user_id = ? and b.poke_id = ?', (user.id, poke_id))
    if df.empty:
        return None
    return PokeBattle(**df.to_dict('records')[0])
def get_user_total_level(user):
    """Sum of levels across the user's battle pokemon (0 when they have none)."""
    # sum() of an empty generator is 0, matching the old explicit fallback.
    return sum(pkbt.level for pkbt in get_all_user_poke_battle(user))
def create_user_poke_battle(user, pkmn):
    """Create (and persist) a level-1 battle record for this pokemon."""
    pkin = get_pokemon_info(pkmn.id)
    # level=1, exp=0; stats derive from the poke_info base values.
    pkbt = PokeBattle(user.id, 1, 0, pkin['hp'], pkin['attack'], pkin['defense'], id=pkmn.id, name=pkmn.name, rarity=pkmn.rarity, shiny=pkmn.shiny)
    sql('insert into battle values (?,?,?,?)', pkbt.pokebattle_creation_row)
    return pkbt
# Daycare #
def get_user_daycare(user):
    """The user's current daycare occupant (with pokemon data), or None."""
    df = sql('select d.user_id, d.enter_time, d.rewards, p.* from daycare d left join pokemon p on d.poke_id = p.id where user_id = ?', (user.id,))
    if df.empty:
        return None
    return Daycare(**df.to_dict('records')[0])
def create_user_daycare(user, pkmn):
    """Place *pkmn* in the daycare now, with zeroed reward counters, and persist it."""
    pkdc = Daycare(user.id, dt.now(), default_rewards, **pkmn.to_dict())
    sql('insert into daycare values (?,?,?,?)', pkdc.daycare_creation_row)
    return pkdc
def delete_user_daycare(user):
    """Remove the user's daycare row (no-op if they have none)."""
    sql('delete from daycare where user_id = ?', (user.id,))
# Badge #
def get_badges(user):
    """All badges the user has earned, joined with pokemon data."""
    result = sql('select b.user_id, b.level, b.amount, p.* from badges b left join pokemon p on b.poke_id = p.id where user_id = ?', (user.id,))
    if result.empty:
        return []
    return [Badge(**record) for record in result.to_dict('records')]
def get_badge(user, poke_id):
    """The user's badge for one pokemon, or an empty tuple when absent.

    NOTE(review): returns () here but the other lookups return None on a
    miss; callers in this file only truth-test the result, so both work —
    confirm no caller compares against None before unifying.
    """
    df = sql('select b.user_id, b.level, b.amount, p.* from badges b left join pokemon p on b.poke_id = p.id where user_id = ? and poke_id = ?', (user.id, poke_id))
    if df.empty:
        return ()
    return Badge(**df.to_dict('records')[0])
def add_badge(user, poke_id):
    """Insert a fresh badge row (level 1, amount 1) for this user/pokemon."""
    sql('insert into badges values (?,?,?,?)', (user.id, poke_id, 1, 1))
# General #
def roll_pokemon(user):
    """Perform one roll: six rarity slots, each yielding a Pokemon or None.

    Slot i+1 hits with the rarity's base chance boosted 10% per
    'baserarity' upgrade level (capped at 99%). If the user is hunting a
    pokemon of that rarity, the hunted species gets a weight boost that
    grows with consecutive catches. Each hit also rolls for shiny.
    """
    ret = []
    hunting = get_pokemon_by_id(user._hunting['poke_id']) if user._hunting['poke_id'] else None
    for i in range(6):
        chance = min(rarity_info.get(i+1).get('chance') * (1.1 ** user.get_upgrade('baserarity')), .99)
        if random() <= chance:
            if hunting and hunting.rarity == i+1:
                all_pkmn = get_all_pokemon_by_rarity(i+1)
                # Hunted species weight: grows with the hunt streak, capped at 3x.
                pkmn_weights = [(1 if hunting != p else min(3, 1 * ((1.02 + .02 * user.get_upgrade('huntchance')) ** max(user._hunting['caught'], 1)))) for p in all_pkmn]
                pkmn = choices(all_pkmn, weights=pkmn_weights)[0]
            else:
                pkmn = get_random_pokemon_by_rarity(i+1)
            # Shiny odds scale 10% per 'shinyrarity' upgrade level.
            shiny = shiny_chance * (1.1 ** user.get_upgrade('shinyrarity'))
            if random() <= shiny:
                pkmn.shiny = 1
            ret.append(pkmn)
        else:
            # Miss: slot stays empty.
            ret.append(None)
    return ret
def roll_all_pokemon(user):
    """Roll all of the user's stored rolls at once.

    Returns a DataFrame with one row per rarity slot (6 per stored roll):
    column 'rarity' is the slot's rarity, column 'caught' is the caught
    Pokemon or None. Pokemon are cached per rarity up front so only one
    query per species list is needed.
    """
    tmp = []
    for _ in range(int(user.stored_rolls)):
        tmp.extend([1, 2, 3, 4, 5, 6])
    df = DataFrame(tmp, columns=['rarity'])
    all_pkmn = {1: [], 2: [], 3: [], 4: [], 5: [], 6: [], 7: [], 8: [], 9: []}
    for pkmn in get_all_pokemon():
        all_pkmn[pkmn.rarity].append(pkmn)
    caught = []
    hunting = get_pokemon_by_id(user._hunting['poke_id']) if user._hunting['poke_id'] else None
    # Hoist upgrade multipliers out of the roll loop (same for every slot).
    user_chance = 1.1 ** user.get_upgrade('baserarity')
    user_shiny = shiny_chance * 1.1 ** user.get_upgrade('shinyrarity')
    for row in df.values.tolist():
        chance = min(rarity_info.get(row[0]).get('chance') * user_chance, .99)
        if random() <= chance:
            if hunting and hunting.rarity == row[0]:
                # Hunted species weight grows with the hunt streak, capped at 3x.
                pkmn_weights = [(1 if hunting != p else min(3, 1 * ((1.02 + .02 * user.get_upgrade('huntchance')) ** max(user._hunting['caught'], 1)))) for p in all_pkmn[row[0]]]
                picked = choices(all_pkmn[row[0]], weights=pkmn_weights)[0]
            else:
                picked = choice(all_pkmn[row[0]])
            # BUGFIX: copy before mutating. The cached species object is
            # shared by every roll; setting .shiny on it directly marked the
            # species shiny for all later (and earlier appended) rolls too.
            pkmn = Pokemon(**picked.to_dict())
            if random() <= user_shiny:
                pkmn.shiny = 1
            caught.append(pkmn)
        else:
            caught.append(None)
    df['caught'] = caught
    return df
def gen_result_pic(pkmn_rolls):
    """Compose the six roll sprites side by side and save to rsc/tmp.png.

    Empty slots (None) use the 'x' placeholder sprite. Sprites are
    bottom-aligned within the tallest sprite's height.
    """
    ids = [(p.id if p else 'x') for p in pkmn_rolls]
    imgs = [Image.open(f'{BASE_PATH}/rsc/{i}.png') for i in ids]
    # Canvas: total width of all sprites, height of the tallest.
    w = sum([i.size[0] for i in imgs])
    h = max([i.size[1] for i in imgs])
    bg = Image.new('RGBA', (w, h), color=(255,255,255,0))
    x = 0
    for img in imgs:
        img = img.convert('RGBA')
        # Paste bottom-aligned; the image doubles as its own alpha mask.
        bg.paste(img, (x, h-img.size[1]), img)
        x += img.size[0]
    bg.save(f'{BASE_PATH}/rsc/tmp.png')
def get_pkmn_colour(url):
    """Fetch an image and return its average colour as a single RGBA pixel.

    Shrinking to 1x1 with thumbnail() makes Pillow average the image down
    to one pixel. Network errors from requests propagate to the caller.
    """
    resp = r.get(url)
    im = Image.open(BytesIO(resp.content))
    im.thumbnail((1, 1))
    return im.getpixel((0, 0))
###########
# Classes #
###########
class Pokemon:
    """A pokedex species: numeric id, display name, star rarity, shiny flag."""

    def __init__(self, id, name, rarity, shiny=0):
        self.id = id
        self.name = name
        self.rarity = rarity
        # 1 when this instance represents the shiny variant.
        self.shiny = shiny

    def _sprite_slug(self, mega_x, mega_y):
        """Build the lower-case sprite file name shared by url and icon.

        *mega_x*/*mega_y* are the suffixes the two sprite sites use for
        Mega X / Mega Y forms (they differ between the sites).
        """
        url_name = self.name
        # Species whose sprite names don't follow the generic rules.
        if url_name == 'NidoranF':
            url_name = 'nidoran-f'
        if url_name == 'NidoranM':
            url_name = 'nidoran-m'
        if url_name == 'Meowstic':
            url_name = 'meowstic-m'
        if 'Mega' in url_name:
            if url_name[-1] == 'X':
                suffix = mega_x
            elif url_name[-1] == 'Y':
                suffix = mega_y
            else:
                suffix = 'mega'
            # 'Mega Charizard X' -> 'Charizard-<suffix>'
            url_name = f'{self.name.split(" ")[1]}-{suffix}'
        return url_name.lower().replace(':','').replace('.','').replace("'",'').replace(' ','-')

    @property
    def url(self):
        """Animated sprite URL on projectpokemon.org (shiny-aware)."""
        slug = self._sprite_slug('megax', 'megay')
        if self.shiny:
            return f'https://projectpokemon.org/images/shiny-sprite/{slug}.gif'
        return f'https://projectpokemon.org/images/normal-sprite/{slug}.gif'

    @property
    def icon(self):
        """Static sprite URL on pokemondb.net (shiny-aware)."""
        slug = self._sprite_slug('mega-x', 'mega-y')
        if self.shiny:
            return f'https://img.pokemondb.net/sprites/home/shiny/{slug}.png'
        return f'https://img.pokemondb.net/sprites/home/normal/{slug}.png'

    def embed(self, user):
        """Build the Discord embed Page describing this pokemon for *user*."""
        pkin = get_pokemon_info(self.id)
        owned = get_user_pokedex_entry(user, self)
        desc = ':star:' * self.rarity + '\n'
        desc += f'**PokeCash value** - {rarity_info[self.rarity]["cash"]}\n**Roll refund value** - {rarity_info[self.rarity]["rolls"]}\n'
        desc += f'**HP:** {pkin["hp"]} | **ATK:** {pkin["attack"]} | **DEF:** {pkin["defense"]}\n'
        if owned:
            desc += f'**Owned:** Yes - **{owned.amount}**\n'
            if owned in user._party:
                desc += '**In Party:** Yes\n'
            else:
                desc += '**In Party:** No\n'
        else:
            desc += f'**Owned:** No\n'
        user_badge = get_badge(user, self.id)
        if user_badge:
            desc += f'**Badge:** {user_badge.display.replace(self.name, "")}\n'
        else:
            desc += '**Badge:** N\n'
        # Tint the embed with the sprite's average colour.
        r, g, b, _ = get_pkmn_colour(self.icon)
        return Page(f'{int(self.id)} - {self.name}', desc, colour=(r, g, b), icon=self.icon, image=self.url)

    def to_dict(self):
        return {
            'id': self.id,
            'name': self.name,
            'rarity': self.rarity,
            'shiny': self.shiny
        }

    def __eq__(self, p):
        if p is None:
            return False
        try:
            # Subclasses carrying ownership also compare user_id.
            return self.user_id == p.user_id and self.id == p.id
        except AttributeError:
            return self.id == p.id

    def __bool__(self):
        return self.id > 0

    def __repr__(self):
        return f'Pokemon({self.id}, {self.name})'

    def __str__(self):
        return f'{self.id} - {self.name}'

    def __hash__(self):
        # BUGFIX: the old implementation only returned a value when
        # str(self.id) contained a '.', so integer ids made hash() raise
        # TypeError (implicit None). Hashing the id directly is consistent
        # with __eq__ (equal ids hash equal; hash(1.0) == hash(1)).
        return hash(self.id)
class PokedexEntry(Pokemon):
    """A pokemon as owned by one user: species data plus owner id and count."""

    def __init__(self, user_id, amount, **kwargs):
        super().__init__(**kwargs)
        self.user_id = user_id
        # Copies owned; -1 is used upstream as a "new row" sentinel.
        self.amount = amount

    @property
    def to_row(self):
        """Column tuple matching the `roulette` table layout."""
        return (self.user_id, self.id, self.amount, self.shiny)

    def to_dict(self):
        return {
            'user_id': self.user_id,
            'poke_id': self.id,
            'amount': self.amount,
            'shiny': self.shiny
        }
class PokeBattle(Pokemon):
    """A user's battle pokemon: species data plus level/exp and derived stats."""

    def __init__(self, user_id, level, exp, hp, attack, defense, **kwargs):
        super().__init__(**kwargs)
        self.user_id = user_id
        self.level = level
        self.exp = exp
        # Cubic experience curve: total exp needed for the next level.
        self.next_lvl_exp = (self.level + 1) ** 3
        # Derived stats scale the poke_info base values with level.
        self.hp = int((((2 * hp * self.level) // 100) + self.level + 10) // 1)
        self.current_hp = self.hp
        self.attack = int((((2 * attack * self.level) // 100) + 5) // 1)
        self.defense = int((((2 * defense * self.level) // 100) + 5) // 1)
        # Snapshot of persisted columns; update() diffs against this.
        self.loaded = self.to_dict().copy()

    @property
    def pokebattle_creation_row(self):
        """Column tuple for inserting a new row into `battle`."""
        return (
            self.user_id,
            self.id,
            self.level,
            self.exp
        )

    @classmethod
    def from_id(cls, id, level=1):
        """Alternate constructor: build a PokeBattle straight from a pokemon id.

        Note the hard-coded user_id of 1 — presumably a system/NPC owner;
        confirm before using for real users.
        """
        pkmn = get_pokemon_by_id(id)
        pkin = get_pokemon_info(pkmn.id)
        return cls(1, level, 0, pkin['hp'], pkin['attack'], pkin['defense'], id=pkmn.id, name=pkmn.name, rarity=pkmn.rarity)

    def embed(self, user):
        """Build the Discord embed Page including battle stats at this level."""
        pkin = get_pokemon_info(self.id)
        owned = get_user_pokedex_entry(user, self)
        desc = ':star:' * self.rarity + '\n'
        desc += f'**PokeCash value** - {rarity_info[self.rarity]["cash"]}\n**Roll refund value** - {rarity_info[self.rarity]["rolls"]}\n'
        desc += f'At lvl **{self.level}** | **HP:** {self.hp} | **ATK:** {self.attack} | **DEF:** {self.defense}\n'
        if owned:
            desc += f'**Owned:** Yes - **{owned.amount}**\n'
            if owned in user._party:
                desc += '**In Party:** Yes\n'
            else:
                desc += '**In Party:** No\n'
        else:
            desc += f'**Owned:** No\n'
        user_badge = get_badge(user, self.id)
        if user_badge:
            desc += f'**Badge:** {user_badge.display.replace(self.name, "")}\n'
        else:
            desc += '**Badge:** N\n'
        r, g, b, _ = get_pkmn_colour(self.icon)
        return Page(f'{self.id} - {self.name}', desc, colour=(r, g, b), icon=self.icon, image=self.url)

    def add_exp(self, exp):
        """Grant *exp*, levelling up as thresholds are crossed (cap 100).

        Returns True if at least one level was gained.
        """
        if self.level >= 100:
            self.level = 100
            return False
        starting_level = self.level
        while exp > 0 and self.level < 100:
            # Exp still needed to reach the next level.
            exp_to_lvl = max(self.next_lvl_exp - self.exp, 0)
            if exp >= exp_to_lvl:
                self.exp += exp_to_lvl
                self.level_up()
                exp -= exp_to_lvl
            else:
                self.exp += exp
                exp = 0
        return self.level > starting_level

    def level_up(self):
        """Advance one level (no-op at the level-100 cap) and refresh the threshold."""
        if self.level >= 100:
            return
        self.level += 1
        self.next_lvl_exp = (self.level + 1) ** 3

    def full_health(self):
        """Restore current HP to the level-derived maximum."""
        self.current_hp = self.hp

    def to_dict(self):
        return {
            'user_id': self.user_id,
            'poke_id': self.id,
            'level': self.level,
            'exp': self.exp
        }

    def update(self):
        """Persist level/exp changes, writing only columns that differ from load time."""
        current = self.to_dict()
        sql_str = 'update battle set '
        col_val = []
        for k in ['level', 'exp']:
            if current[k] != self.loaded[k]:
                col_val.append((k, current[k]))
        sql_str += ', '.join([f'{col} = ?' for col, _ in col_val])
        sql_str += ' where user_id = ? and poke_id = ?'
        vals = [v for _, v in col_val]
        vals.extend([self.user_id, self.id])
        # Nothing changed: skip the round trip.
        if not col_val:
            return
        return sql(sql_str, vals)
class Daycare(Pokemon):
    """A pokemon sitting in the daycare, accruing rewards over time."""

    def __init__(self, user_id, enter_time, rewards, **kwargs):
        super().__init__(**kwargs)
        self.user_id = user_id
        # DB stores these as strings; accept either the string or the live value.
        self._enter_time = dt.strptime(enter_time, '%Y-%m-%d %H:%M:%S') if isinstance(enter_time, str) else enter_time
        self._rewards = json.loads(rewards) if isinstance(rewards, str) else rewards
        # Snapshot of persisted columns; update() diffs against this.
        self.loaded = self.to_dict().copy()

    @property
    def enter_time(self):
        """Entry timestamp serialized for storage."""
        return dt.strftime(self._enter_time, '%Y-%m-%d %H:%M:%S')

    @property
    def rewards(self):
        """Reward counters serialized to JSON for storage."""
        return json.dumps(self._rewards)

    @property
    def daycare_creation_row(self):
        """Column tuple for inserting a new row into `daycare`."""
        return (
            self.user_id,
            self.id,
            self.enter_time,
            self.rewards
        )

    def generate_rewards(self, upgrade_level):
        """Roll any rewards earned since entry and persist the new counters.

        One reward is earned every (43200 - 3600 * upgrade_level) seconds,
        i.e. 12 hours minus 1 hour per upgrade level. Each roll is a d6:
        1 = level, 2 = cash, 3 = rolls, 4-6 = nothing (the counter still
        advances).
        """
        total_seconds = (dt.now() - self._enter_time).total_seconds()
        rewards = total_seconds // (43200 - 3600 * upgrade_level)
        while rewards > self._rewards['rewards']:
            rw = choice([1, 2, 3, 4, 5, 6])
            if rw == 1:
                self._rewards['levels'] += 1
            elif rw == 2:
                self._rewards['cash'] += rarity_info[self.rarity]['cash']
            elif rw == 3:
                self._rewards['rolls'] += rarity_info[self.rarity]['rolls']
            self._rewards['rewards'] += 1
        self.update()

    def embed(self, user):
        """Build the Discord embed Page summarizing accrued rewards."""
        desc = 'Welcome to the Daycare!\nTo claim your pokemon use **.daycare claim**\n\n'
        desc += f'**{self.name}** {":star:" * self.rarity}\n\n'
        for reward, value in self._rewards.items():
            # 'rewards' is the roll counter, not a claimable reward.
            if reward == 'rewards':
                pass
            else:
                desc += f'**{reward.capitalize()}:** {value}\n'
        r, g, b, _ = get_pkmn_colour(self.icon)
        return Page(f'{self.id} - {self.name}', desc, colour=(r, g, b), icon=self.icon, image=self.url, footer=f'Rewards rolled: {self._rewards["rewards"]}')

    def to_dict(self):
        return {
            'user_id': self.user_id,
            'poke_id': self.id,
            'enter_time': self.enter_time,
            'rewards': self.rewards
        }

    def update(self):
        """Persist the rewards column if it changed since load time."""
        current = self.to_dict()
        sql_str = 'update daycare set '
        col_val = []
        if current['rewards'] != self.loaded['rewards']:
            col_val.append(('rewards', current['rewards']))
        sql_str += ', '.join([f'{col} = ?' for col, _ in col_val])
        sql_str += ' where user_id = ? and poke_id = ?'
        vals = [v for _, v in col_val]
        vals.extend([self.user_id, self.id])
        if not col_val:
            return
        return sql(sql_str, vals)
class Badge(Pokemon):
    """A badge a user earned for a pokemon, with a medal level and a count."""

    def __init__(self, user_id, level, amount, **kwargs):
        super().__init__(**kwargs)
        self.user_id = user_id
        self.level = level
        self.amount = amount
        # Snapshot of persisted columns; update() diffs against this.
        self.loaded = self.to_dict().copy()

    @property
    def display(self):
        """Discord-markup string: medal emoji by level, count shown past level 3."""
        if self.level == 1:
            return f':third_place: {self.name}'
        if self.level == 2:
            return f':second_place: {self.name}'
        if self.level == 3:
            return f':first_place: {self.name}'
        return f':military_medal: {self.name} x{self.amount}'

    def to_dict(self):
        return {
            'user_id': self.user_id,
            'poke_id': self.id,
            'level': self.level,
            'amount': self.amount
        }

    def update(self):
        """Persist level/amount changes, writing only columns that differ from load time."""
        current = self.to_dict()
        sql_str = 'update badges set '
        col_val = []
        for k in ['level', 'amount']:
            if current[k] != self.loaded[k]:
                col_val.append((k, current[k]))
        sql_str += ', '.join([f'{col} = ?' for col, _ in col_val])
        sql_str += ' where user_id = ? and poke_id = ?'
        vals = [v for _, v in col_val]
        vals.extend([self.user_id, self.id])
        if not col_val:
            return
        return sql(sql_str, vals)
| austinmh12/DiscordBots | TestBot/test_cogs/pokerouletteFunctions/pokemon.py | pokemon.py | py | 20,054 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "logging.StreamHandler",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "logging.Fo... |
232662350 | import os
import sys
import glob
import subprocess
import glob
from pefile import PE
name = "ReBarDxe"
version = "1.0"
GUID = "a8ee1777-a4f5-4345-9da4-13742084d31e"
shell = sys.platform == "win32"
buildtype = "RELEASE"
def filesub(filep, f, r):
    """Replace every occurrence of substring *f* with *r* in file *filep*, in place."""
    # Explicit UTF-8 so the substitution does not depend on the platform's
    # default encoding (the old open() calls used the locale default).
    with open(filep, 'r', encoding='utf-8') as file:
        filedata = file.read()
    # Replace the target string
    filedata = filedata.replace(f, r)
    # Write the file out again
    with open(filep, 'w', encoding='utf-8') as file:
        file.write(filedata)
def set_bit(data, bit):
    """Return *data* with the bit at index *bit* turned on (idempotent)."""
    mask = 1 << bit
    return data | mask
def set_nx_compat_flag(pe):
    """Sets the nx_compat flag to 1 in the PE/COFF file.

    Mutates the pefile.PE object in place (and returns it for convenience);
    the caller still has to write() it back to disk.
    """
    dllchar = pe.OPTIONAL_HEADER.DllCharacteristics
    dllchar = set_bit(dllchar, 8)  # 8th bit is the nx_compat_flag
    pe.OPTIONAL_HEADER.DllCharacteristics = dllchar
    # Recompute section data so the modified header is reflected on write().
    pe.merge_modified_section_data()
    return pe
# --- Build driver (top-level script) ---
# First CLI argument overrides the build type (DEBUG/RELEASE).
if len(sys.argv) > 1:
    buildtype = sys.argv[1].upper()
# 3 arguments = Github Actions
if len(sys.argv) == 3:
    print("TARGET: ", os.environ['TARGET'])
    print("TARGET_ARCH: ", os.environ['TARGET_ARCH'])
    print("TOOL_CHAIN_TAG: ", os.environ['TOOL_CHAIN_TAG'])
    # setup Conf/target.txt by substituting the EDK2 defaults in place
    filesub("./Conf/target.txt", "DEBUG", os.environ['TARGET'])
    filesub("./Conf/target.txt", "IA32", os.environ['TARGET_ARCH'])
    filesub("./Conf/target.txt", "VS2015x86", os.environ['TOOL_CHAIN_TAG'])
else:
    # Local build: assume we're two levels inside the EDK2 tree.
    os.chdir("../..")
# Run the EDK2 build for the ReBar DXE platform description.
subprocess.run(["build", "--platform=ReBarUEFI/ReBarDxe/ReBar.dsc"], shell=shell, env=os.environ, stderr=sys.stderr, stdout=sys.stdout)
# Exactly one output EFI binary is expected.
ReBarDXE = glob.glob(f"./Build/ReBarUEFI/{buildtype}_*/X64/ReBarDxe.efi")
if len(ReBarDXE) != 1:
    print("Build failed")
    sys.exit(1)
# set NX_COMPAT, then rewrite the binary in place
pe = PE(ReBarDXE[0])
set_nx_compat_flag(pe)
os.remove(ReBarDXE[0])
pe.write(ReBarDXE[0])
print(ReBarDXE[0])
print("Building FFS")
os.chdir(os.path.dirname(ReBarDXE[0]))
# Remove stale intermediate sections from a previous run, if any.
try:
    os.remove("pe32.sec")
    os.remove("name.sec")
    os.remove("ReBarDxe.ffs")
except FileNotFoundError:
    pass
# Wrap the EFI binary into a PE32 section, add a UI-name section, then
# assemble both into a driver FFS file with the fixed GUID.
subprocess.run(["GenSec", "-o", "pe32.sec", "ReBarDxe.efi", "-S", "EFI_SECTION_PE32"], shell=shell, env=os.environ, stderr=sys.stderr, stdout=sys.stdout)
subprocess.run(["GenSec", "-o", "name.sec", "-S", "EFI_SECTION_USER_INTERFACE", "-n", name], shell=shell, env=os.environ, stderr=sys.stderr, stdout=sys.stdout)
subprocess.run(["GenFfs", "-g", GUID, "-o", "ReBarDxe.ffs", "-i", "pe32.sec", "-i" ,"name.sec", "-t", "EFI_FV_FILETYPE_DRIVER", "--checksum"], shell=shell, env=os.environ, stderr=sys.stderr, stdout=sys.stdout)
# Clean up intermediates; keep the final .ffs.
try:
    os.remove("pe32.sec")
    os.remove("name.sec")
except FileNotFoundError:
    pass
print("Finished")
{
"api_name": "sys.platform",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_nu... |
23944471661 | import json
import pytest
from deepdiff import DeepDiff
from eth_keys.datatypes import PrivateKey
from hexbytes import HexBytes
from jsonschema import ValidationError
from web3 import Web3
from polyswarmtransaction.exceptions import InvalidKeyError, InvalidSignatureError, WrongSignatureError, \
UnsupportedTransactionError
from polyswarmtransaction.transaction import Transaction, SignedTransaction, CustomTransaction
def test_recover_when_computed(ethereum_accounts):
    """sign() matches hand-signing the keccak of the canonical JSON payload."""
    # Must be a string exact match
    data = {
        'name': 'polyswarmtransaction.transaction:Transaction',
        'from': '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5',
        'data': {}
    }
    signed = Transaction().sign(ethereum_accounts[0].key)
    assert signed.signature == PrivateKey(ethereum_accounts[0].key).sign_msg_hash(Web3.keccak(text=json.dumps(data)))
def test_sign_transaction(ethereum_accounts):
    """Signing a bare Transaction yields the expected JSON body and signature hex."""
    expected = '0xed2e8602439eec57a84bb372c6de718d88d2c27f265d7c01fe59a940f9c44eb25f849639669897e376dca6b3e745f4d9667' \
               '32f731b6ec20d908673ad882aeed301'
    data = {
        'name': 'polyswarmtransaction.transaction:Transaction',
        'from': '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5',
        'data': {}
    }
    transaction = Transaction()
    signed = transaction.sign(ethereum_accounts[0].key)
    assert json.loads(signed.raw_transaction) == data
    assert signed.signature.hex() == expected
def test_sign_customtransaction_data_body(ethereum_accounts):
    """CustomTransaction embeds the caller-supplied data body and round-trips to its class."""
    expected = '0xbd112f273df4e3a7d1b97525513c41f42e737c513bad190d74eb92947869747415a857110b02a17cc37f1a0e80514efd94c' \
               'e807196a90cbc88a09377faf202e200'
    custom_data = {'spam': 'eggs', 'pi': 3, 'it_moves': True}
    data = {
        'name': 'polyswarmtransaction.transaction:CustomTransaction',
        'from': '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5',
        'data': custom_data,
    }
    transaction = CustomTransaction(data_body=json.dumps(custom_data))
    signed = transaction.sign(ethereum_accounts[0].key)
    assert json.loads(signed.raw_transaction) == data
    assert signed.signature.hex() == expected
    # transaction() should reconstruct the original transaction type
    assert isinstance(signed.transaction(), CustomTransaction)
def test_recover_signed_transaction(ethereum_accounts):
    """ecrecover() on a freshly signed transaction returns the signer's address."""
    transaction = Transaction()
    signed = transaction.sign(ethereum_accounts[0].key)
    assert signed.ecrecover() == '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5'
def test_recover_signed_transaction_from_parts():
    """A SignedTransaction rebuilt from raw JSON + signature hex still recovers the signer."""
    signature = ('0xed2e8602439eec57a84bb372c6de718d88d2c27f265d7c01fe59a940f9c44eb25f849639669897e376dca6b3e745f4d966'
                 '732f731b6ec20d908673ad882aeed301')
    # Must be a string exact match
    data = {
        'name': 'polyswarmtransaction.transaction:Transaction',
        'from': '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5',
        'data': {}
    }
    signed = SignedTransaction(json.dumps(data), signature)
    assert signed.ecrecover() == '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5'
def test_recover_signed_transaction_from_signed_output(ethereum_accounts):
    """A SignedTransaction rebuilt from another SignedTransaction's fields recovers the signer."""
    transaction = Transaction()
    signed = transaction.sign(ethereum_accounts[0].key)
    signed = SignedTransaction(signed.raw_transaction, signed.signature)
    assert signed.ecrecover() == '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5'
def test_recover_signed_transaction_from_payload(ethereum_accounts):
    """A SignedTransaction rebuilt from the .payload dict recovers the signer."""
    transaction = Transaction()
    signed = transaction.sign(ethereum_accounts[0].key)
    signed = SignedTransaction(**signed.payload)
    assert signed.ecrecover() == '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5'
def test_sign_none():
    """Signing with a None key raises InvalidKeyError."""
    transaction = Transaction()
    with pytest.raises(InvalidKeyError):
        transaction.sign(None)
def test_recover_empty_signature():
    """ecrecover() with an empty signature raises InvalidSignatureError."""
    signed = SignedTransaction('', '')
    with pytest.raises(InvalidSignatureError):
        signed.ecrecover()
def test_recover_invalid_signature():
    """ecrecover() with a malformed (too-short) signature raises InvalidSignatureError."""
    signed = SignedTransaction('', '0xaa')
    with pytest.raises(InvalidSignatureError):
        signed.ecrecover()
def test_recover_changed_body(ethereum_accounts):
    """Tampering with the transaction body after signing raises WrongSignatureError."""
    signature = Transaction().sign(ethereum_accounts[0].key).signature
    # Body differs from what was signed ('data' no longer empty).
    data = {
        'name': 'polyswarmtransaction.transaction:Transaction',
        'from': '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5',
        'data': {
            'different': 'asdf'
        }
    }
    signed = SignedTransaction(json.dumps(data), signature)
    with pytest.raises(WrongSignatureError):
        signed.ecrecover()
def test_recover_changed_signature(ethereum_accounts):
    """Pairing a body with a signature from a different key raises WrongSignatureError."""
    transaction = Transaction().sign(HexBytes(ethereum_accounts[0].key)).raw_transaction
    signature = Transaction().sign(ethereum_accounts[1].key).signature
    signed = SignedTransaction(transaction, signature)
    with pytest.raises(WrongSignatureError):
        signed.ecrecover()
def test_load_transaction_string():
    """transaction() on a non-JSON body raises json.JSONDecodeError."""
    signed = SignedTransaction('this is not json', bytes([0] * 65))
    with pytest.raises(json.JSONDecodeError):
        signed.transaction()
def test_load_transaction_schema_mismatch():
    """A name without the 'module:Class' form fails schema validation."""
    transaction = {
        'name': 'polyswarmtransaction.transaction',
        'from': '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5',
        'data': {}
    }
    signed = SignedTransaction(json.dumps(transaction), bytes([0] * 65))
    with pytest.raises(ValidationError):
        signed.transaction()
def test_load_transaction_missing_module():
    """A name referencing a nonexistent module raises UnsupportedTransactionError."""
    transaction = {
        'name': 'polyswarmtransaction.no:Transaction',
        'from': '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5',
        'data': {}
    }
    signed = SignedTransaction(json.dumps(transaction), bytes([0] * 65))
    with pytest.raises(UnsupportedTransactionError):
        signed.transaction()
def test_load_transaction_missing_class():
    """A name referencing a nonexistent class raises UnsupportedTransactionError."""
    transaction = {
        'name': 'polyswarmtransaction.transaction:NoTransaction',
        'from': '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5',
        'data': {}
    }
    signed = SignedTransaction(json.dumps(transaction), bytes([0] * 65))
    with pytest.raises(UnsupportedTransactionError):
        signed.transaction()
def test_load_transaction_non_transaction():
    """A name pointing at a class that is not a Transaction raises UnsupportedTransactionError."""
    transaction = {
        'name': 'polyswarmtransaction.transaction:SignedTransaction',
        'from': '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5',
        'data': {}
    }
    signed = SignedTransaction(json.dumps(transaction), bytes([0] * 65))
    with pytest.raises(UnsupportedTransactionError):
        signed.transaction()
def test_load_transaction():
    """A well-formed body loads back into an equivalent Transaction instance."""
    transaction = {
        'name': 'polyswarmtransaction.transaction:Transaction',
        'from': '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5',
        'data': {}
    }
    signed = SignedTransaction(json.dumps(transaction), bytes([0] * 65))
    assert isinstance(signed.transaction(), Transaction)
    # Round-tripped data matches a freshly built Transaction's data.
    assert not DeepDiff(signed.transaction().data, Transaction().data, ignore_order=True)
| polyswarm/polyswarm-transaction | tests/test_transaction.py | test_transaction.py | py | 6,888 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "polyswarmtransaction.transaction.Transaction",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "eth_keys.datatypes.PrivateKey",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "web3.Web3.keccak",
"line_number": 23,
"usage_type": "call"
}... |
25844066272 | """Test Role"""
import unittest
import json
from flask import url_for
from app.test import BaseTest
class RolePermissionTests(BaseTest):
    """ Role Permission Test api class """
    def test_insert_update_delete(self):
        """ insert, update, delete roles permission"""
        # --- fixtures: create a role via the API ---
        role_url = url_for('auth.role_role_list')
        prm = {
            'name': 'role_test',
            'active': True,
        }
        role_data = json.dumps(prm)
        response = self.client.post(
            role_url,
            data=role_data,
            content_type='application/json'
        )
        role_id = response.json['data']['id']
        # --- fixtures: create a permission via the API ---
        permission_url = url_for('auth.permission_permission_list')
        prm = {
            'code': 'permission_test',
            'name': 'permission_test',
            'active': True
        }
        permission_data = json.dumps(prm)
        response = self.client.post(
            permission_url,
            data=permission_data,
            content_type='application/json'
        )
        permission_id = response.json['data']['id']
        # insert role permission
        params = {"role_id": role_id,
                  "permission_id": permission_id,
                  "status": "false", }
        role_permission = json.dumps(params)
        url = url_for('auth.role-permission_role_permission_list')
        response = self.client.post(
            url,
            data=role_permission,
            content_type='application/json'
        )
        self.assertEqual(response.status_code, 201)
        self.assertEqual(response.json['data']['permission_id'], permission_id)
        # update role permission
        params = {
            "status": "true",
        }
        role_permission = json.dumps(params)
        url = url_for('auth.role-permission_role_permission_detail',
                      uuid=response.json['data']['id'])
        response = self.client.put(
            url,
            data=role_permission,
            content_type='application/json'
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json['data']['status'], True)
        # check readonly fields value changing
        created_at = response.json['data']['created_at']
        updated_at = response.json['data']['updated_at']
        self.assertIsNotNone(updated_at)
        self.assertNotEqual(created_at, updated_at)
        # list endpoint should now return at least the entry we created
        url = url_for('auth.role-permission_role_permission_list')
        response = self.client.get(url,
                                   content_type='application/json'
                                   )
        self.assertEqual(response.status_code, 200)
        self.assertGreaterEqual(len(response.json['data']), 1)
if __name__ == "__main__":
unittest.main()
| ekramulmostafa/ms-auth | app/test/test_role_permission.py | test_role_permission.py | py | 2,795 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "app.test.BaseTest",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "flask.url_for",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_n... |
41957074771 | import json
"""Collect, per FHIR resource type, the names of all search parameters
that apply to it, and pretty-print the mapping.

Reads the FHIR search-parameters Bundle JSON shipped with fhir_parser;
each entry's ``base`` lists the resource types the parameter applies to.
"""
import json
import os
import pprint

# Resolve the bundle path relative to this script's directory.
path = os.path.dirname(os.path.abspath(__file__))
params_file = os.path.join(path, 'fhir_parser/downloads/search-parameters.json')

with open(params_file, 'r') as f:
    # json.load reads straight from the file object (was: json.loads(f.read())).
    bundle = json.load(f)

searchables = {}
for entry in bundle['entry']:
    resource = entry['resource']
    for base in resource['base']:
        # setdefault+append is O(1) per item (was: list concat, O(n) per item).
        searchables.setdefault(base, []).append(resource['name'])

pprint.pprint(searchables)
| zensoup/fhirbug | tools/get_searchables.py | get_searchables.py | py | 420 | python | en | code | 14 | github-code | 6 | [
{
"api_name": "os.path.dirname",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_nu... |
23229945677 | from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
# Admin site branding.
# Fix: "Адмнистрирование" was missing a letter; corrected to "Администрирование".
admin.site.site_header = "Администрирование TBO Dashboard"
admin.site.site_title = "Администрирование TBO Dashboard"
admin.site.index_title = "TBO Dashboard"

# URL routing: admin, API docs, djoser auth, dashboards API, plus media/static
# serving (the static() helpers are no-ops when DEBUG is off).
urlpatterns = [
    path('admin/', admin.site.urls),
    path('api/docs/', include('tbo_dash.docs.urls')),
    path('api/', include('djoser.urls')),
    path('api/', include('djoser.urls.authtoken')),
    path('api/', include('tbo_dash.dashboards.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) \
    + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)

if settings.DEBUG:
    # Silk profiler — mounted first so its routes take precedence in DEBUG.
    urlpatterns = [
        path('silk/', include('silk.urls', namespace='silk')),
    ] + urlpatterns
| alldevic/tbo_dash_old | tbo_dash/urls.py | urls.py | py | 875 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.contrib.admin.site",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 7,
"usage_type": "attribute"
},
{
"a... |
40341366552 | import os
import re
import selenium
from selenium import webdriver
from time import sleep
from openpyxl import load_workbook
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait # Required for explicit wait
from selenium.webdriver.support import expected_conditions as ec # Required for explicit wait
from selenium.webdriver.common.by import By # Required for explicit wait
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
# --- Configuration -------------------------------------------------------
# NOTE(review): find_element_by_* and switch_to_default_content are Selenium 3
# APIs removed in Selenium 4 — this script requires Selenium 3.x to run.
excel_file = 'token_generation_automation.xlsx'
driver_exe = 'chromedriver.exe'
# Workbook holding meter numbers (column 1) and receiving results (cols 3-5).
wb = load_workbook(filename = os.path.join(os.getcwd(),excel_file), read_only = False)
sheet = wb.sheetnames
ws1 = wb[sheet[2]]  # third sheet holds the consumer/meter list
max_consumers = ws1.max_row
########################################################
########################################################
indent = 0 #Last valid iteration; Must check before each run
########################################################
########################################################
print(max_consumers-indent)
# --- Log in to the prepaid portal ----------------------------------------
# NOTE(review): credentials are hard-coded below — move to config/env vars.
browser = webdriver.Chrome(executable_path = os.path.join(os.getcwd(), driver_exe))
browser.get("http://172.16.15.18/prepay/login!init.do")
browser.implicitly_wait(100) #implicit wait
browser.maximize_window()
x1 = browser.find_element_by_id("czyId")
x1.send_keys("ChandpurAE1")
x2 = browser.find_element_by_id("pwd")
x2.send_keys("C6_029_Prepaid")
x3 = browser.find_element_by_xpath("//input[@type='button']")
x3.click()
print('Hello')
sleep(5)
# --- Generate a "Clear Tamper Status" token per meter ---------------------
for x in range(max_consumers-indent):
    browser.implicitly_wait(100)
    # Open the Clear Tamper Status page.
    browser.get('http://172.16.15.18/prepay/prepay/mgtCode/codeMgt!ctc.do?timestamp=NaN&menuid=63100&menupath=Clear%20Tamper%20Status&curTabId=63100')
    browser.implicitly_wait(100)
    # Grab page controls before entering the iframe.
    generateBtn = browser.find_elements_by_class_name('ext_btn')[0]
    selectBtn = browser.find_element_by_xpath('/html/body/table/tbody/tr/td[2]/form/table/tbody/tr[2]/td[2]/select')
    selectOptn = browser.find_element_by_xpath('/html/body/table/tbody/tr/td[2]/form/table/tbody/tr[2]/td[2]/select/option[2]')
    # Search for the meter inside the account-query iframe.
    browser.switch_to.frame(browser.find_element_by_id('accountQueryIframe'))
    browser.implicitly_wait(100)
    meterNo = ws1.cell(row = indent+1+x, column = 1).value
    print("Meter No: ", meterNo)
    browser.find_element(By.ID, "metNo").send_keys(meterNo)
    # print('1')
    browser.find_elements_by_class_name('ext_btn')[0].click()
    browser.implicitly_wait(100)
    # print('2')
    # Back to the top-level document to drive the token form.
    browser.switch_to_default_content()
    sleep(2)
    selectOptn.click()
    browser.implicitly_wait(100)
    selectBtn.click()
    browser.implicitly_wait(100)
    generateBtn.click()
    browser.implicitly_wait(100)
    # Confirm the generation dialog.
    browser.find_element_by_xpath('/html/body/div[7]/div[2]/div[2]/div/div/div/div[1]/table/tbody/tr/td[1]/table/tbody/tr/td[1]/table/tbody/tr[2]/td[2]/em/button').click()
    sleep(2)
    # Read token and sequence from the popup iframe.
    browser.switch_to.frame(browser.find_element_by_id('openwin'))
    serial = browser.find_element_by_xpath('/html/body/table/tbody/tr[1]/td/table/tbody/tr[14]').text
    print("Token: ", serial)
    sequence = browser.find_element_by_xpath('/html/body/table/tbody/tr[1]/td/table/tbody/tr[11]').text
    print("Sequence: ", sequence[10:len(sequence)])
    # Persist results next to the meter row and save after every iteration
    # so progress survives a crash (resume via `indent`).
    ws1.cell(row = indent+1+x, column = 3).value = sequence[10:len(sequence)]
    ws1.cell(row = indent+1+x, column = 4).value = serial
    ws1.cell(row = indent+1+x, column = 5).value = 'Done'
    wb.save(os.path.join(os.getcwd(),excel_file))
    print('Ends : ', x+1)
browser.close()
{
"api_name": "openpyxl.load_workbook",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"lin... |
25399032626 | import re
from csv import reader
from colorama import init, Fore
# Insert the actual exploits in searchsploit in the database
def update_database(exploit_database, mycursor):
    """Import /usr/share/exploitdb/files_exploits.csv into the Exploits table.

    Parameters:
        exploit_database: open MySQL connection (used for the final commit()).
        mycursor: cursor on that connection used to run the INSERTs.

    Each CSV row supplies the first eight columns; search_content() scrapes
    the exploit file itself for the remaining five (seller link, software
    link, version, tested platform, CVE). INSERT IGNORE skips duplicates.
    """
    print(Fore.BLUE + "Updating database...")
    # Read the CSV to get the basic information
    with open('/usr/share/exploitdb/files_exploits.csv','r') as read_obj:
        # Read the CSV and skip the first row (headers)
        csv_reader = reader(read_obj)
        next(csv_reader)
        # Insert each row in the table
        for row in csv_reader:
            query = """INSERT IGNORE INTO Exploits (ID, File, Description, Date, Author, Type, Platform, Port, SellerLink, SoftwareLink, Version, Tested, CVE)
                        VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
                    """
            values = (row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7])
            # To get more information about the exploit
            values = search_content(row[1], values)
            mycursor.execute(query, values)
        # Single commit once every row has been inserted.
        exploit_database.commit()
    print(Fore.GREEN + "Database update")
# Search the exploit content to find more information about it.
# For each database field: the keyword variants tried in priority order, and
# whether the keyword is matched literally (str containment / str.partition)
# or as a regular expression (re.search / re.split) — matching the original
# per-field behavior exactly.
_FIELD_KEYWORDS = [
    (('[Vv]endor [Hh]omepage', '[Vv]endor'), False),                # seller link
    (('[Ss]oftware [Ll]ink', '[Pp]roduct [Ww]eb [Pp]age'), False),  # software link
    (('[Vv]ersion',), False),                                       # affected version
    (('[Tt]ested [Oo]n',), False),                                  # tested platform
    (('CVE ID', 'CVE'), True),                                      # CVE identifier
]


def _keyword_in_line(line, word, literal):
    """Return True when *word* occurs in *line* (literal or regex match)."""
    return word in line if literal else re.search(word, line) is not None


def _extract_value(line, word, literal):
    """Pull the value that follows *word* in *line*.

    A ':' or '-' separator delegates to clean_characters(); a plain leading
    space delegates to clean_white(); anything else yields ''. (The field is
    still considered consumed by the caller, as in the original code.)
    """
    remainder = line.partition(word)[2] if literal else re.split(word, line)[1]
    if remainder.strip().startswith(':'):
        return clean_characters(line, word, ':')
    if remainder.strip().startswith('-'):
        return clean_characters(line, word, '-')
    if remainder.startswith(' '):
        return clean_white(line, word)
    return ''


def search_content(exploit_path, values):
    """Scan an exploit source file for extra metadata and append it to *values*.

    Parameters:
        exploit_path: exploit path relative to /usr/share/exploitdb/.
        values: tuple of column values collected so far.

    Returns:
        *values* extended with (seller_link, software_link, version, tested,
        CVE); fields never found come back as empty strings. Only the first
        line mentioning each field's keyword is used.
    """
    # Add the root path to the exploit path
    exploit_path = "/usr/share/exploitdb/" + exploit_path
    results = ['', '', '', '', '']
    found = [False] * len(results)
    # Open the file and walk it line by line looking for the key words.
    with open(exploit_path, 'r') as exploit:
        for line in exploit.read().splitlines():
            for idx, (words, literal) in enumerate(_FIELD_KEYWORDS):
                if found[idx]:
                    continue  # only the first occurrence of each field counts
                for word in words:
                    # Variants are tried in priority order (e.g. "Vendor
                    # Homepage" before the bare "Vendor").
                    if _keyword_in_line(line, word, literal):
                        results[idx] = _extract_value(line, word, literal)
                        found[idx] = True
                        break
            if all(found):
                break  # nothing left to look for
    return values + tuple(results)
# Clean with characters
def clean_characters(line, word, character):
    """Return the text after *word* and the separator *character* in *line*,
    with square brackets removed and surrounding whitespace stripped.

    CVE keywords are located literally (str.partition); any other *word* is
    treated as a regular expression (re.split).
    """
    if word in ('CVE', 'CVE ID'):
        remainder = line.partition(word)[2]
    else:
        remainder = re.split(word, line)[1]
    value = remainder.split(character, 1)[1]
    return value.translate(str.maketrans('', '', '[]')).strip()
# Clean with white space
def clean_white(line, word):
    """Return the text after *word* in *line* (no explicit separator),
    with square brackets removed and surrounding whitespace stripped.

    CVE keywords are located literally; other words are regex patterns.
    """
    remainder = line.partition(word)[2] if word in ('CVE', 'CVE ID') else re.split(word, line)[1]
    return remainder.translate(str.maketrans('', '', '[]')).strip()
| alvaroreinaa/Can-You-EXPLOIT-It | update_database.py | update_database.py | py | 7,870 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "colorama.Fore.BLUE",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "csv.reader",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "colorama.Fore.GREEN",
... |
20880620842 | #@title Установка модуля РЈРР
from PIL import Image
from pathlib import Path
from tensorflow.keras.preprocessing import image
from tensorflow.keras.layers import Input, Dense, Conv2D, Flatten
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from IPython import display as ipd
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import seaborn as sns
import gdown
import zipfile
import os
import random
import time
import gc
# Global plotting style and reproducibility setup.
sns.set(style='darkgrid')
seed_value = 12
# Seed the Python, NumPy and TensorFlow RNGs so training runs are repeatable.
random.seed(seed_value)
np.random.seed(seed_value)
tf.random.set_seed(seed_value)
class bcolors:
    """ANSI terminal escape sequences used to colorize console output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKCYAN = '\033[96m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
class AccuracyCallback(tf.keras.callbacks.Callback):
    """Keras callback that records per-epoch accuracy/loss/time, prints a
    colored progress line per epoch, and replots the summary when training
    ends (highlighting the best validation-accuracy epoch).

    NOTE: several printed strings in this copy appear mojibake-encoded;
    they are runtime output and are preserved byte-for-byte.
    """
    def __init__(self):
        # Per-epoch histories accumulated across the whole fit() call.
        self.train_acc = []
        self.val_acc = []
        self.train_loss = []
        self.val_loss = []
        self.times = []  # wall-clock seconds per epoch
    def plot_graph(self):
        # Plot train/validation accuracy curves (only subplot 1 of the 2x2 grid is used).
        plt.figure(figsize=(20, 14))
        plt.subplot(2, 2, 1)
        plt.title('Точность', fontweight='bold')
        plt.plot(self.train_acc, label='Точность на обучащей выборке')
        plt.plot(self.val_acc, label='Точность на проверочной выборке')
        plt.xlabel('РРїРѕС…Р° обучения')
        plt.ylabel('Доля верных ответов')
        plt.legend()
        plt.show()
    def on_epoch_begin(self, epoch, logs=None):
        # Start the per-epoch timer.
        self.start_time = time.time()
    def on_epoch_end(self, epoch, logs=None):
        # Record metrics and print one colored status line for the epoch.
        # Relies on Keras having called on_train_begin first (initializes
        # accuracymax/idxmax/cntepochs).
        self.train_acc.append(logs['accuracy'])
        self.val_acc.append(logs['val_accuracy'])
        self.train_loss.append(logs['loss'])
        self.val_loss.append(logs['val_loss'])
        t = round(time.time() - self.start_time, 1)
        self.times.append(t)
        # Track the best validation accuracy seen so far.
        if logs['val_accuracy'] > self.accuracymax:
            self.accuracymax = logs['val_accuracy']
            self.idxmax = epoch
        print(f'РРїРѕС…Р° {epoch+1}'.ljust(10)+ f'Время обучения: {t}c'.ljust(25) + f'Точность РЅР° обучающей выборке: {bcolors.OKBLUE}{round(logs["accuracy"]*100,1)}%{bcolors.ENDC}'.ljust(50) +f'Точность РЅР° проверочной выборке: {bcolors.OKBLUE}{round(logs["val_accuracy"]*100,1)}%{bcolors.ENDC}')
        self.cntepochs += 1
    def on_train_begin(self, logs):
        # Reset best-epoch tracking for a fresh training run.
        self.idxmax = 0
        self.accuracymax = 0
        self.cntepochs = 0
    def on_train_end(self, logs):
        # Re-print all epochs (best one on a green background), then plot.
        ipd.clear_output(wait=True)
        for i in range(self.cntepochs):
            if i == self.idxmax:
                print('\33[102m' + f'РРїРѕС…Р° {i+1}'.ljust(10)+ f'Время обучения: {self.times[i]}c'.ljust(25) + f'Точность РЅР° обучающей выборке: {round(self.train_acc[i]*100,1)}%'.ljust(41) +f'Точность РЅР° проверочной выборке: {round(self.val_acc[i]*100,1)}%'+ '\033[0m')
            else:
                print(f'РРїРѕС…Р° {i+1}'.ljust(10)+ f'Время обучения: {self.times[i]}c'.ljust(25) + f'Точность РЅР° обучающей выборке: {bcolors.OKBLUE}{round(self.train_acc[i]*100,1)}%{bcolors.ENDC}'.ljust(50) +f'Точность РЅР° проверочной выборке: {bcolors.OKBLUE}{round(self.val_acc[i]*100,1)}%{bcolors.ENDC}' )
        self.plot_graph()
class TerraDataset:
    """Downloadable demo image-classification dataset: download/unpack,
    preview samples and build train/validation numpy arrays."""
    # Registry of available datasets: download URL, user-facing description,
    # extraction directory, task type and target image size (h, w).
    bases = {
        'Молочная_продукция' : {
            'url': 'https://storage.yandexcloud.net/terraai/sources/milk.zip',
            'info': 'Вы скачали базу с изображениями бутылок молока. База содержит 1500 изображений трех категорий: «Parmalat», «Кубанская буренка», «Семейный формат»',
            'dir_name': 'milk_ds',
            'task_type': 'img_classification',
            'size': (96, 53),
        },
        'Пассажиры_автобуса' : {
            'url': 'https://storage.yandexcloud.net/terraai/sources/bus.zip',
            'info': 'Вы скачали базу с изображениями пассажиров автобуса. База содержит 9081 изображение двух категорий: «Входящие пассажиры», «Выходящие пасажиры»',
            'dir_name': 'passengers',
            'task_type': 'img_classification',
            'size': (128, 64),
        },
        'Возгорания' : {
            'url': 'https://storage.yandexcloud.net/terraai/sources/fire.zip',
            'info': 'Вы скачали базу с изображениями возгораний. База содержит 6438 изображение двух категорий: «Есть возгорание», «Нет возгорания»',
            'dir_name': 'fire',
            'task_type': 'img_classification',
            'size': (96, 76),
        },
        'авто' : {
            'url': 'https://storage.yandexcloud.net/aiueducation/Intensive/cars.zip',
            'info': 'Вы скачали базу с изображениями марок авто. База содержит 3427 изображений трех категорий: «Феррари», «Мерседес», «Рено»',
            'dir_name': 'car',
            'task_type': 'img_classification',
            'size': (54, 96),
        },
        'майонез' : {
            'url': 'https://storage.yandexcloud.net/aiueducation/Intensive/mayonnaise.zip',
            'info': 'Вы скачали базу с изображениями брендов майонеза. База содержит 150 изображений трех категорий: «ЕЖК», «Махеев», «Ряба»',
            'dir_name': 'mayonesse',
            'task_type': 'img_classification',
            'size': (96, 76),
        },
    }
    def __init__(self, name):
        '''
        parameters:
            name - dataset name (a key of TerraDataset.bases)
        '''
        self.base = self.bases[name]
        self.sets = None     # ((x_train, y_train), (x_test, y_test)) after create_sets()
        self.classes = None  # sorted class-folder names, filled by samples()/create_sets()
    def load(self):
        '''
        Download the dataset archive, unpack it and print the info block.
        Returns the dataset's task-type string.
        '''
        print(f'{bcolors.BOLD}Загрузка датасета{bcolors.ENDC}',end=' ')
        # Download the dataset archive from cloud storage
        fname = gdown.download(self.base['url'], None, quiet=True)
        if Path(fname).suffix == '.zip':
            # Unpack the archive into the dataset directory
            with zipfile.ZipFile(fname, 'r') as zip_ref:
                zip_ref.extractall(self.base['dir_name'])
            # Remove the downloaded archive
            os.remove(fname)
        # Print the info block
        print(f'{bcolors.OKGREEN}Ok{bcolors.ENDC}')
        print(f'{bcolors.OKBLUE}Рнфо:{bcolors.ENDC}')
        print(f'  {self.base["info"]}')
        return self.base['task_type']
    def samples(self):
        '''
        Show five random sample images for every class.
        '''
        # Visualization for image-classification datasets
        if self.base['task_type'] == 'img_classification':
            # Class list = folder names inside the dataset directory
            self.classes = sorted(os.listdir(self.base['dir_name']))
            # Build the plotting grid: one row per class, five columns
            f, ax = plt.subplots(len(self.classes), 5, figsize=(24, len(self.classes) * 4))
            for i, class_ in enumerate(self.classes):
                # Pick a random image for each cell
                for j in range(5):
                    random_image = random.choice(
                        os.listdir(os.path.join(
                            self.base['dir_name'],
                            class_)))
                    img = Image.open(os.path.join(
                        self.base['dir_name'],
                        class_,
                        random_image))
                    ax[i, j].imshow(img)
                    ax[i, j].axis('off')
                    ax[i, j].set_title(class_)
            plt.show()
    def create_sets(self):
        '''
        Build the train/validation arrays (per-class 90/10 split, scaled to [0,1]).
        '''
        x_train = []
        y_train = []
        x_test = []
        y_test = []
        print(f'{bcolors.BOLD}Создание наборов данных для обучения модели{bcolors.ENDC}', end=' ')
        # Build sets for the image-classification task
        if self.base['task_type'] == 'img_classification':
            # List of class directories
            self.classes = sorted(os.listdir(self.base['dir_name']))
            counts = []
            # Walk every class folder
            for j, d in enumerate(self.classes):
                # All images of the current class
                files = sorted(os.listdir(os.path.join(self.base['dir_name'], d)))
                # Split threshold: first 90% of files go to the train set
                counts.append(len(files))
                count = counts[-1] * .9
                # Load every image of the class
                for i in range(len(files)):
                    # Read and resize the image to the dataset's target size
                    sample = np.array(image.load_img(os.path.join(
                        self.base['dir_name'],
                        d,
                        files[i]), target_size=self.base['size']))
                    # Route the sample to train or test
                    if i<count:
                        x_train.append(sample)
                        y_train.append(j)
                    else:
                        x_test.append(sample)
                        y_test.append(j)
            self.sets = (np.array(x_train)/255., np.array(y_train)), (np.array(x_test)/255., np.array(y_test))
            # Final summary output
            print(f'{bcolors.OKGREEN}Ok{bcolors.ENDC}')
            print()
            print(f'Размер созданных выборок:')
            print(f'   Обучающая выборка: {self.sets[0][0].shape}')
            print(f'   Метки обучающей выборки: {self.sets[0][1].shape}')
            print(f'   Проверочная выборка: {self.sets[1][0].shape}')
            print(f'   Метки проверочной выборки: {self.sets[1][1].shape}')
            print()
            print(f'Распределение по классам:')
            f, ax =plt.subplots(1,2, figsize=(16, 5))
            ax[0].bar(self.classes, np.array(counts)*0.9)
            ax[0].set_title('Обучающая выборка')
            ax[1].bar(self.classes, np.array(counts)*0.1, color='g')
            ax[1].set_title('Проверочная выборка')
            plt.show()
class TerraModel:
    """Builds, trains and demos a Keras model from a textual layer description."""
    def __init__(self, task_type, trds):
        self.model = None          # compiled keras Model after create_model()
        self.task_type = task_type # task type string from the dataset loader
        self.trds = trds           # TerraDataset providing .sets and .classes
    @staticmethod
    def create_layer(params):
        '''
        Create a single Keras layer from a "name-arg[-activation]" string.
        Returns None when the layer name matches no known keyword.
        NOTE(review): the input-layer keyword appears mojibake-encoded in
        this copy (both occurrences match each other); eval() on the layer
        arguments is unsafe for untrusted input.
        '''
        activation = 'relu'
        params = params.split('-')
        # Input layer
        if params[0].lower() == 'РІС…РѕРґРЅРѕР№':
            return Input(shape=eval(params[1]))
        # Dense (fully-connected) layer, optional activation as third token
        if params[0].lower() == 'полносвязный':
            if len(params)>2:
                activation = params[2]
            return Dense(eval(params[1]), activation=activation)
        # Flatten layer
        if params[0].lower() == 'выравнивающий':
            return Flatten()
        # Conv2D layer, optional activation as fourth token
        if params[0].lower() == 'сверточный2д':
            if len(params)>3:
                activation = params[3]
            return Conv2D(eval(params[1]), eval(params[2]), activation=activation, padding='same')
    def create_model(self, layers):
        '''
        Build a Model from a whitespace-separated list of layer descriptions.
        parameters:
            layers - layer descriptions (text); for classification a final
                     softmax activation is appended to the last layer token.
        '''
        if self.task_type=='img_classification':
            layers += '-softmax'
        layers = layers.split()
        # Input layer shaped after the training data
        inp = self.create_layer(f'РІС…РѕРґРЅРѕР№-{self.trds.sets[0][0].shape[1:]}')
        # First layer
        x = self.create_layer(layers[0]) (inp)
        # Remaining layers
        for layer in layers[1:]:
            x = self.create_layer(layer) (x)
        self.model = Model(inp, x)
    def train_model(self, epochs, use_callback=True):
        '''
        Compile and fit the model on the dataset's train/validation sets.
        parameters:
            epochs - number of epochs
            use_callback - attach the AccuracyCallback progress printer
        Returns the Keras History object.
        '''
        # Training for the image-classification task
        if self.task_type=='img_classification':
            self.model.compile(loss='sparse_categorical_crossentropy', optimizer = Adam(0.0001), metrics=['accuracy'])
            accuracy_callback = AccuracyCallback()
            callbacks = []
            if use_callback:
                callbacks = [accuracy_callback]
            # batch_size is ~1/25 of the training set (25 batches per epoch)
            history = self.model.fit(self.trds.sets[0][0], self.trds.sets[0][1],
                        batch_size = self.trds.sets[0][0].shape[0]//25,
                        validation_data=(self.trds.sets[1][0], self.trds.sets[1][1]),
                        epochs=epochs,
                        callbacks=callbacks,
                        verbose = 0)
            return history
    def test_model(self):
        '''
        Show ten random validation images with the model's class probabilities
        and the ground-truth label (green when correct, red otherwise).
        '''
        # Demo for the image-classification task
        if self.task_type=='img_classification':
            for i in range(10):
                number = np.random.randint(self.trds.sets[1][0].shape[0])
                sample = self.trds.sets[1][0][number]
                print('Тестовое изображение:')
                plt.imshow(sample) # show the chosen validation image
                plt.axis('off') # hide the axes
                plt.show()
                pred = self.model.predict(sample[None, ...])[0]
                max_idx = np.argmax(pred)
                print()
                print('Результат предсказания модели:')
                # NOTE(review): the inner loop variable shadows the outer `i`.
                for i in range(len(self.trds.classes)):
                    if i == max_idx:
                        print(bcolors.BOLD, end='')
                    print(f'Модель распознала класс «{self.trds.classes[i]}» на {round(100*pred[i],1)}%{bcolors.ENDC}')
                print('---------------------------')
                print('Правильный ответ: ',end='')
                if max_idx == self.trds.sets[1][1][number]:
                    print(bcolors.OKGREEN, end='')
                else:
                    print(bcolors.FAIL, end='')
                print(self.trds.classes[self.trds.sets[1][1][number]],end=f'{bcolors.ENDC}\n')
                print('---------------------------')
                print()
                print()
print()
class TerraIntensive:
    """Facade tying together dataset loading, model creation and training."""
    def __init__(self):
        self.trds = None       # current TerraDataset
        self.trmodel = None    # current TerraModel
        self.task_type = None  # task-type string returned by the dataset loader
    def load_dataset(self, ds_name):
        """Download and unpack the named dataset."""
        self.trds = TerraDataset(ds_name)
        self.task_type = self.trds.load()
    def samples(self):
        """Show random sample images from the loaded dataset."""
        self.trds.samples()
    def create_sets(self):
        """Build train/validation arrays from the downloaded dataset."""
        self.trds.create_sets()
    def create_model(self, layers):
        """Create a neural network from a textual layer description."""
        print(f'{bcolors.BOLD}Создание модели нейронной сети{bcolors.ENDC}', end=' ')
        self.trmodel = TerraModel(self.task_type, self.trds)
        self.trmodel.create_model(layers)
        print(f'{bcolors.OKGREEN}Ok{bcolors.ENDC}')
    def train_model(self, epochs):
        """Train the current model for the given number of epochs."""
        self.trmodel.train_model(epochs)
    def test_model(self):
        """Run the interactive prediction demo on the validation set."""
        self.trmodel.test_model()
    def train_model_average(self, layers, cnt=10):
        """Train the model *cnt* times from scratch and report per-run plus
        aggregate accuracy (best run highlighted in green).

        Bug fix: the summary lines previously indexed the history lists with
        the stale loop variable (``average_accuracy[i]``), so the reported
        "mean"/"max" were just the last run's value; they now aggregate over
        all runs. Two summary lines also never reset BOLD (missing ENDC).
        """
        if self.task_type == 'img_classification':
            print(f'{bcolors.BOLD}Определение среднего показателя точности модели на {cnt} запусках{bcolors.ENDC}')
            print()
            average_accuracy = []
            average_val_accuracy = []
            times=[]
            for i in range(cnt):
                start_time = time.time()
                # Fresh model each run so runs are independent.
                self.trmodel.create_model(layers)
                history = self.trmodel.train_model(20, False).history
                average_accuracy.append(np.max(history['accuracy']))
                average_val_accuracy.append(np.max(history['val_accuracy']))
                t = round(time.time() - start_time, 1)
                times.append(t)
                print(f'Запуск {i+1}'.ljust(10)+ f'Время обучения: {t}c'.ljust(25) + f'Точность на обучающей выборке: {bcolors.OKBLUE}{round(average_accuracy[-1]*100,1)}%{bcolors.ENDC}'.ljust(50) +f'Точность на проверочной выборке: {bcolors.OKBLUE}{round(average_val_accuracy[-1]*100,1)}%{bcolors.ENDC}')
                gc.collect()  # free model memory between runs
            ipd.clear_output(wait=True)
            print(f'{bcolors.BOLD}Определение среднего показателя точности модели на {cnt} запусках{bcolors.ENDC}')
            print()
            argmax_idx = np.argmax(average_val_accuracy)
            for i in range(cnt):
                if i == argmax_idx:
                    # Best run printed on a green background.
                    print('\33[102m' + f'Запуск {i+1}'.ljust(10)+ f'Время обучения: {times[i]}c'.ljust(25) + f'Точность на обучающей выборке: {round(average_accuracy[i]*100,1)}%'.ljust(41) +f'Точность на проверочной выборке: {round(average_val_accuracy[i]*100,1)}%'+ '\033[0m')
                else:
                    print(f'Запуск {i+1}'.ljust(10)+ f'Время обучения: {times[i]}c'.ljust(25) + f'Точность на обучающей выборке: {bcolors.OKBLUE}{round(average_accuracy[i]*100,1)}%{bcolors.ENDC}'.ljust(50) +f'Точность на проверочной выборке: {bcolors.OKBLUE}{round(average_val_accuracy[i]*100,1)}%{bcolors.ENDC}' )
            print()
            # Aggregate over ALL runs (was: indexed with the stale loop var `i`).
            print(f'{bcolors.BOLD}Средняя точность на обучающей выборке: {bcolors.ENDC}{round(np.mean(average_accuracy)*100,1)}%')
            print(f'{bcolors.BOLD}Максимальная точность на обучающей выборке: {bcolors.ENDC}{round(np.max(average_accuracy)*100,1)}%')
            print(f'{bcolors.BOLD}Средняя точность на проверочной выборке: {bcolors.ENDC}{round(np.mean(average_val_accuracy)*100,1)}%')
            print(f'{bcolors.BOLD}Максимальная точность на проверочной выборке: {bcolors.ENDC}{round(np.max(average_val_accuracy)*100,1)}%')


terra_ai = TerraIntensive()
{
"api_name": "seaborn.set",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "random.seed",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_nu... |
26632895276 | import pygame
from setting import *
from bullet import Bullet
class Player(pygame.sprite.Sprite):
    # Player sprite: handles keyboard input, movement, shooting and enemy collisions.
    # Constructor args: parent sprite groups, initial x, initial y, enemy group.
    def __init__(self, groups, x, y, enemy_group):
        super().__init__(groups)
        # Enemy group (used for collision detection)
        self.enemy_group = enemy_group
        # Current display surface
        self.screen = pygame.display.get_surface()
        # Load the animation frames (indices selected via player_image_index_* settings)
        self.image_list = []
        for i in range(3):
            #tmp_image = pygame.image.load(f'assets/img/player/{i}.png')
            tmp_image = pygame.image.load(player_image_path + str(i) + image_extension)
            self.image_list.append(tmp_image)
        # Image setup
        #self.image = pygame.Surface((50,50))
        #self.image.fill(COLOR_RED)
        self.image_index = player_image_index_straight
        self.update_image()
        # Rect that positions the player on screen
        self.rect = self.image.get_rect(center = (x,y))
        # Movement direction vector
        self.direction = pygame.math.Vector2()
        # Movement speed (from settings)
        self.speed = player_speed
        # Health / alive state
        self.health = player_health
        self.alive = True
        # Shot sound effect
        self.shot_sound = pygame.mixer.Sound(shot_se_path)
        self.shot_sound.set_volume(se_volume)
        # Bullet sprite group
        self.bullet_group = pygame.sprite.Group()
        # Firing state (True while the shot cooldown is running)
        self.fire = False
        self.cooldown_timer = 0
    ## Per-frame update
    def update(self):
        self.input()
        self.move()
        self.update_image()
        #print(str(self.direction) + "-" + str(self.rect))
        # Bullets: cooldown, draw and update
        self.bullet_cooldown()
        self.bullet_group.draw(self.screen)
        self.bullet_group.update()
        # Health / enemy collisions
        self.collision_enemy()
        self.check_alive()
        # For debugging
        #print('b:' + str(self.bullet_group))
    # Read keyboard input
    def input(self):
        key = pygame.key.get_pressed()
        # Up/down keys
        if key[pygame.K_UP]:
            self.direction.y = -1
        elif key[pygame.K_DOWN]:
            self.direction.y = 1
        else:
            self.direction.y = 0
        # Left/right keys (also select the matching sprite frame)
        if key[pygame.K_LEFT]:
            self.direction.x = -1
            self.image_index = player_image_index_left
        elif key[pygame.K_RIGHT]:
            self.direction.x = 1
            self.image_index = player_image_index_right
        else:
            self.direction.x = 0
            self.image_index = player_image_index_straight
        # Z key (fire a bullet, unless the cooldown is still running)
        if key[pygame.K_z] and not self.fire:
            bullet = Bullet(self.bullet_group, self.rect.centerx, self.rect.top)
            self.fire = True
            self.shot_sound.play()
    # Movement
    def move(self):
        # Screen-edge check (cancel movement components pushing past an edge)
        self.check_edge_screen()
        # Normalize so diagonal movement is not faster than straight movement
        if self.direction.magnitude() != 0:
            self.direction = self.direction.normalize()
        # Move along x
        self.rect.x += self.direction.x * self.speed
        self.check_off_screen("x")
        # Move along y
        self.rect.y += self.direction.y * self.speed
        self.check_off_screen("y")
    # Screen-edge check:
    # zero out a movement component when a key pushes toward an edge already touched
    def check_edge_screen(self):
        if self.rect.left <= 0 and self.direction.x < 0:
            self.direction.x = 0
        if self.rect.right >= screen_width and self.direction.x > 0:
            self.direction.x = 0
        if self.rect.top <= 0 and self.direction.y < 0:
            self.direction.y = 0
        if self.rect.bottom >= screen_height and self.direction.y > 0:
            self.direction.y = 0
    # Off-screen check:
    # clamp coordinates if the sprite would leave the screen (kept in addition to
    # check_edge_screen, which alone still lets the rect overshoot slightly)
    def check_off_screen(self, vector):
        if vector == "x":
            if self.rect.left < 0:
                self.rect.left = 0
            if self.rect.right > screen_width:
                self.rect.right = screen_width
        if vector == "y":
            if self.rect.top < 0:
                self.rect.top = 0
            if self.rect.bottom > screen_height:
                self.rect.bottom = screen_height
    # TODO: when moving diagonally at a screen edge, the x/y components keep the
    # normalized diagonal magnitude, so sliding along the edge is slower than straight movement.
    # Update the displayed image from the current frame index
    def update_image(self):
        self.pre_image = self.image_list[int(self.image_index)]
        self.image = pygame.transform.scale(self.pre_image, player_image_size)
    # Bullet cooldown handling
    def bullet_cooldown(self):
        if self.fire:
            self.cooldown_timer += 1
            if self.cooldown_timer > bullet_cooldown_time:
                self.fire = False
                self.cooldown_timer = 0
    # Collision detection with enemies
    def collision_enemy(self):
        for enemy in self.enemy_group:
            if self.rect.colliderect(enemy.rect) and enemy.alive:
                self.health -= enemy_power
                self.check_health()
    def check_health(self):
        # Mark the player dead once health is depleted
        if self.health <= 0:
            self.alive = False
    # Alive check: remove the sprite once dead
    def check_alive(self):
        if not self.alive:
            self.kill()
| shu0411/training | python/trainingEnv/shooting/player.py | player.py | py | 5,639 | python | ja | code | 0 | github-code | 6 | [
{
"api_name": "pygame.sprite",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.get_surface",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "pyga... |
42060446091 | import pygame as pg
from settings import *
class Bullet(pg.sprite.Sprite):
def __init__(self, groups):
self.groups = groups
pg.sprite.Sprite.__init__(self, self.groups)
class PlayerBullet(Bullet):
def __init__(self, game, x, y):
self.game = game
self.groups = self.game.sprites, self.game.bullets, self.game.player_bullets
Bullet.__init__(self, self.groups)
self.image = self.game.spritesheet.get_image(60, 0, 4, 16, GREEN)
self.rect = self.image.get_rect(center=(x, y + PLAYER_BULLET_HEIGHT))
self.mask = pg.mask.from_surface(self.image)
def update(self):
if self.rect.top - 4 < TOP_SPACING:
self.kill()
self.rect.y -= PLAYER_BULLET_SPEED
class MobBullet(Bullet):
def __init__(self, game, x, y):
self.game = game
self.groups = self.game.sprites, self.game.bullets, self.game.mob_bullets
Bullet.__init__(self, self.groups)
self.load_images()
self.last_updated = pg.time.get_ticks()
self.frame = 0
self.image = self.frames[0]
self.rect = self.image.get_rect(midbottom=(x, y + MOB_BULLET_HEIGHT))
def load_images(self):
self.frames = [
self.game.spritesheet.get_image(64, 224, 12, 28, WHITE),
self.game.spritesheet.get_image(76, 224, 12, 28, WHITE),
pg.transform.flip(self.game.spritesheet.get_image(64, 224, 12, 28, WHITE), True, False),
pg.transform.flip(self.game.spritesheet.get_image(76, 224, 12, 28, WHITE), True, False)
]
def animate(self):
now = pg.time.get_ticks()
if now - self.last_updated > MOB_BULLET_FRAME_RATE:
self.frame += 1
if self.frame == len(self.frames):
self.frame = 0
self.image = self.frames[self.frame]
self.last_updated = now
self.mask = pg.mask.from_surface(self.image)
def update(self):
if self.rect.bottom > HEIGHT - BOT_SPACING:
self.kill()
self.rect.y += MOB_BULLET_SPEED
self.animate()
| soupss/space-invaders | sprites/bullet.py | bullet.py | py | 2,099 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pygame.sprite",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Sprite.__init__",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pyga... |
21077625640 | import json
import os
import requests
from get_token import GetToken
from log_setup import Logging
from program_data import PDApi
"""
NetApp / SolidFire
CPE
mnode support utility
"""
"""
Package service api calls
https://[mnodeip]/package-repository/1
"""
# set up logging
logmsg = Logging.logmsg()
# disable ssl warnings so the log doesn't fill up
requests.packages.urllib3.disable_warnings()
class Package:
def list_packages(repo):
""" List available packages """
url = f'{repo.base_url}/package-repository/1/packages/'
json_return = PDApi.send_get_return_json(repo, url, debug=repo.debug)
if json_return:
return json_return
def delete_package(repo, package_id):
""" Delete a package """
url = f'{repo.base_url}/package-repository/1/packages/{package_id}'
logmsg.debug(f'Sending DELETE {url}')
json_return = PDApi.send_delete_return_status(repo, url)
if json_return:
logmsg.info(f'{json_return["version"]}: {json_return["message"]}')
def upload_element_image(repo, updatefile):
""" upload a package
requires some special treatment with the api call. So it does not use PDApi.send_put
"""
token = GetToken(repo, True)
logmsg.info('Add upgrade image to package repository')
if os.path.exists(updatefile) != True:
logmsg.info(f'{updatefile} not found')
exit(1)
header = {"Accept": "application/json", "Prefer": "respond-async", "Content-Type": "application/octet-stream", "Authorization":f'Bearer {token.token}'}
url = f'{repo.base_url}/package-repository/1/packages'
session = requests.Session()
with open(updatefile, 'rb') as f:
try:
logmsg.debug(f'Sending PUT {url} {updatefile}')
logmsg.info(f'Loading {updatefile} into the package repository. This will take a few minutes')
response = session.post(url, headers=header, data=f, verify=False)
if response.status_code == 200 or response.status_code == 202:
logmsg.info('Upload successful')
logmsg.info(response.text)
response_json = json.loads(response.text)
else:
logmsg.info(f'Package upload fail with status {response.status_code}\n\t{response.text}')
except requests.exceptions.RequestException as exception:
logmsg.info("An exception occured. See /var/log/mnode-support-util.log for details")
logmsg.debug(exception)
logmsg.debug(f'{response}')
session.close()
return response_json
| solidfire/mnode-support-util | api_package_service.py | api_package_service.py | py | 2,719 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "log_setup.Logging.logmsg",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "log_setup.Logging",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "requests.packages.urllib3.disable_warnings",
"line_number": 26,
"usage_type": "call"
},
{
... |
39434540766 | # from sklearn.naive_bayes import MultinomialNB
# from sklearn.naive_bayes import GaussianNB
# from sklearn.cluster import KMeans
import pandas as pd
# from random import shuffle
import numpy as np
import os
# from sklearn.feature_extraction.text import CountVectorizer
# from sklearn.feature_extraction.text import TfidfTransformer
from nltk.corpus import stopwords
# from nltk.corpus import stopwords
# from nltk.tokenize import word_tokenize
# import nltk
import re
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import XMLParser
from lxml import etree
def obtener_text(path = str(), file = str()):
"""
Función que regresa el texto del archivo que se le pasa, preferentemente pasar la ruta relativa de donde se encuentra el archivo.
Funciona con los archivos #.review.post del corpus del mismo directorio.
Retorna el texto unicamente.
path = ruta relativa o completa del archivo seleccionado.
"""
with open(path + file, encoding='latin1', mode = 'r') as f:
text = f.read()
listas = text.split('\n')
test = []
for linea in listas:
aux = linea.split(' ')
try:
test.append(aux[1])
except:
pass
cad = ' '.join(test)
return cad
def normalizar(text = str()):
# nltk.download('stopwords')
'''
Funcion para normalizar el texto y eliminar stopwords, así como signos de puntuación, guiones bajos y demás caracteres que no sean texto, retorna la cadena limpia.
text : texto para normalizar
'''
stop_words = set(stopwords.words('spanish'))
lower_string = text.lower()
no_number_string = re.sub(r'\d+','',lower_string)
no_sub_ = re.sub('[\_]',' ', no_number_string)
no_punc_string = re.sub(r'[^\w\s]','', no_sub_)
no_wspace_string = no_punc_string.strip()
# no_wspace_string
lst_string = [no_wspace_string][0].split()
# print(lst_string)
no_stpwords_string=""
for i in lst_string:
if not i in stop_words:
no_stpwords_string += i+' '
no_stpwords_string = no_stpwords_string[:-1]
return no_stpwords_string
def get_rank (path = str(), file = str(), llave = 'rank'):
"""
En la función solo se tiene que pasar el path, más el
archivo del cual se quiera obtener el rank, o mejor dicho
la valoración que se obtuvo en la pelicula, el archivo a pasar tiene que
ser en formato .xml para que la función funcione de forma correcta,
retorna el valor entero que se puso en la pelicula.
path : ruta donde se encuentran los archivos xml
file : nombre del archivo el cual se va a obtener el valor
llave : atributo que se quiere, valor por defecto rank
"""
with open(path + file, mode = 'r', encoding= 'latin1') as f:
parser = etree.XMLParser(recover=True)
tree = ET.parse(f, parser=parser)
root = tree.getroot()
att = root.attrib
return int(att[llave])
def obtener_y (path = str(), file_pos = list(), file_xml = list()):
"""
Funcion hecha para obtener el mismo número de archivos xml y de review.pos, regresa el valor del archivo xml.
Retorna una lista.
path : Dirección donde se encuentra el corpus
file_pos : lista con los nombres del archivo review.pos
xml_file : lista con los nombres del archivo xml contenidas en el corpus
"""
file_of_x = list()
value_of_y = list()
for file in file_pos:
aux = file.split('.')
num = aux[0]
comp = str(num) + '.xml'
if comp in file_xml:
file_of_x.append(obtener_text(path,file))
value_of_y.append(get_rank(path, comp))
return file_of_x, value_of_y | svroo/PNL-Escom | Joder es cine/Modulo/text_proc.py | text_proc.py | py | 3,744 | python | es | code | 0 | github-code | 6 | [
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "re.sub",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "re.sub",
... |
72283436667 | import requests
import json
match = {
"Date": "21-01-2023",
"Form": "decent",
"Opposition": "tough",
"season": "middle",
"venue": "home",
"Previous match": "0",
"uEFa": "active"
}
#url = 'http://localhost:9696/predict'
url = 'https://klopp-s-liverp-prod-klopp-s-liverpool-hql7qt.mo4.mogenius.io/predict'
response = requests.post(url, json=match)
result = response.json()
print(result)
| Blaqadonis/klopps_liverpool | predict_test.py | predict_test.py | py | 416 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.post",
"line_number": 16,
"usage_type": "call"
}
] |
13876656972 | from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.encoders import jsonable_encoder
from pydantic import BaseModel
import boto3
import json
app = FastAPI()
origins = [
"https://ai.galaxychain.zone",
"https://galaxychain.zone",
"http://localhost:3000",
"https://galaxychain.zone",
"https://ai-api.galaxychain.zone",
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# Amazon SageMaker Runtime Client
session = boto3.Session()
sagemaker_runtime = session.client('sagemaker-runtime')
# Sagemaker endpoint name.
endpoint_name = 'sm-endpoint-gpt-j-6b'
class DataIn(BaseModel):
text: str
datas: dict
@app.get("/elb")
async def server_status():
response_body = {
'status': 200,
'data': "Healthy"
}
return jsonable_encoder(response_body)
@app.post('/gen-text')
async def text_generate(ai_params: DataIn):
response_body = {
'status': 100,
'data': ""
}
payload = {
'inputs': "",
'parameters': {
'max_length': 100,
'do_sample': True,
'no_repeat_ngram_size': 2,
'temperature': 0.75,
'top_k': 10,
'top_p': 0.95,
'early_stopping': True,
}
}
user_text = ai_params.dict()['text']
if user_text == "":
response_body['status'] = 400
response_body['data'] = "Please enter text."
return jsonable_encoder(response_body)
tempr_param = float(ai_params.dict()['datas']['randomness'])
if tempr_param < 0.01:
tempr_param = 0.01
top_k_param = int(ai_params.dict()['datas']['fluency'])
if top_k_param < 1:
top_k_param = 1
payload['inputs'] = user_text
payload['parameters']['temperature'] = tempr_param
payload['parameters']['top_k'] = top_k_param
try:
response = sagemaker_runtime.invoke_endpoint(
EndpointName=endpoint_name,
ContentType='application/json',
Body=json.dumps(payload)
)
result = json.loads(response['Body'].read().decode())
raw_text = result[0]['generated_text']
res_text = str(raw_text).replace(user_text, "").replace("\n", " ").replace('"', "")
response_body['status'] = 200
response_body['data'] = res_text
return jsonable_encoder(response_body)
except:
response_body['status'] = 503
response_body['data'] = "AI server is overloaded"
return jsonable_encoder(response_body) | galaxynetwork/story-ai-supporter | app.py | app.py | py | 2,629 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "fastapi.FastAPI",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "fastapi.middleware.cors.CORSMiddleware",
"line_number": 19,
"usage_type": "argument"
},
{
"api_name": "boto3.Session",
"line_number": 27,
"usage_type": "call"
},
{
"api_name"... |
43392192504 | import cv2
import numpy as np
from matplotlib import pyplot as plt
# 模版匹配
img = cv2.imread("fb.png", 0)
img2 = img.copy()
template = cv2.imread("zdbz.png", 0)
w,h = template.shape[::-1]
method = eval("cv2.TM_CCOEFF")
res = cv2.matchTemplate(img2, template ,method)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
print(min_val,max_val,min_loc,max_loc)
topLeft = max_loc
bottomRight = (topLeft[0]+w,topLeft[1]+h)
print(bottomRight)
| frebudd/python | 阴阳师副本自动化/副本自动化2.py | 副本自动化2.py | py | 450 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "cv2.imread",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.matchTemplate",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.minMaxLoc",
"line_numbe... |
35426911515 | #!/usr/bin/python3
import numpy as np
import scipy.integrate
import matplotlib.pyplot as plt
def vdp(t,y):
"""calculate Van Der Pol Derivatives"""
# y is a tuple (y0,y1)
y0dot = y[1]
y1dot = (1-y[0]**2)*y[1]-y[0]
dydt = ( y0dot, y1dot )
return dydt
solution = scipy.integrate.solve_ivp(vdp, t_span=(0,20), y0=(0,2), method='RK45', rtol=1e-6)
t = solution.t
y0 = solution.y[0]
y1 = solution.y[1]
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(t, y0, color='tab:blue', label='y1')
ax.plot(t, y1, color='tab:orange', label='y2')
ax.set_title('Solution of the van der Pol equation, mu=1')
ax.set_xlabel('time')
ax.set_ylabel('solution')
ax.legend()
plt.show()
| martinaoliver/GTA | ssb/m1a/numeric/Practical_full_solutions_jupyter/python_script_solutions/vanderpol_20191001.py | vanderpol_20191001.py | py | 695 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "scipy.integrate.integrate.solve_ivp",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "scipy.integrate.integrate",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "scipy.integrate",
"line_number": 15,
"usage_type": "name"
},
{
... |
130536507 | import numpy as np
import matplotlib.pyplot as plt
import pyRaven as rav
import emcee
import corner
from scipy.stats import norm
import scipy
from statistics import mode
def fitdata(param,DataPacket,guess):
'''
This function fits a set of LSD profiles using scipy's curve fit function.
Inputs:
param - input parameter dictionary
DataPacket - input DataPacket with real data
guess - array of guess values for kappa, vsini, and vmac. Ex: np.array([1.3,250,30])
Outputs:
parameters - array of fit parameters
covariance - covariance matrix of the fit
modelout - the best fit model
'''
def model(v,kappa,vsini,vmac):
'''
This function creates the line profile model that will be fit to the observed profile
Inputs:
kappa - value of kappa that the walker is on in parameter space
vsini - value of vsini that the walker is on in parameter space
v - velocity array of the actual data
Outputs:
f - line profile model using the weak field method at the walker's position in parameter space
'''
param['general']['vsini']=vsini
param['general']['vmac']=vmac
param['general']['logkappa']=np.log(kappa)
#pyRaven's weakfield disk integration function
model=rav.diskint2.analytical(param,False)
#interpolating the model to be size MCMC wants
f=np.interp(v,model['vel'],model['flux'])
return(f)
x=DataPacket.scaled.lsds[0].vel#+DataPacket.vrad[0]
y=DataPacket.scaled.lsds[0].specI
if DataPacket.nobs!=1:
for i in range(1,DataPacket.nobs):
x=np.append(x,DataPacket.scaled.lsds[i].vel)#+DataPacket.vrad[i])
y=np.append(y,DataPacket.scaled.lsds[i].specI)
parameters,covariance = scipy.optimize.curve_fit(model,x,y,guess)
modelout=model(x,parameters[0],parameters[1],parameters[2])
modelout=modelout[:DataPacket.scaled.lsds[0].vel.size]
return parameters,covariance,modelout
def fitdataMCMC(param,DataPacket,nsteps,guess):
'''
This function fits the LSD profile using MCMC
Inputs:
param - input parameter dictionary
DataPacket - input DataPacket with real data
nsteps - number of steps to run MCMC
guess - array of guess values for kappa, vsini, and vmac. Ex: np.array([1.3,250,30])
Outputs:
kappa - The average fitted value of kappa
vsini - The average fitted value of vsini
vmac - The average fitted value of vmac
'''
#def model(v,c,a,b,Ic):
# return(-c*np.exp(-0.5*np.power(v-a,2)/b**2)+Ic)
def model(v,kappa,vsini,vmac):
'''
This function creates the line profile model that will be fit to the observed profile
Inputs:
kappa - value of kappa that the walker is on in parameter space
vsini - value of vsini that the walker is on in parameter space
vmac - value of the vmac that the walker is on in parameter space
v - velocity array of the actual data
Outputs:
f - line profile model using the weak field method at the walker's position in parameter space
'''
param['general']['vsini']=vsini
param['general']['vmac']=vmac
param['general']['logkappa']=np.log(kappa)
#pyRaven's weakfield disk integration function
model=rav.diskint2.analytical(param,False)
#interpolating the model to be size MCMC wants
f=np.interp(v,model['vel'],model['flux'])
return(f)
def lnprior(params):
'''
This function is used to set constraints on the parameter space the walkers are allowed in. I did this to try and save time, could probably use some tweaking.
Inputs:
params - list of walker parameters, in this code that is [kappa, vsini]
Outputs:
-infinity - if kappa and/or vsini are out of the specified ranges
0 - otherwise
'''
kappa,vsini,vmac=params
if kappa<=0.0 or kappa>=10.0 or vsini >= 500.0 or vsini<=0.0 or vmac<=0.0 or vmac>=100.0:
return(-np.inf)
else:
return(0.0)
def lnlike(params,v,I,Ierr):
'''
Inputs:
params -list of walker parameters, in this code that is [kappa, vsini]
v - velocity array of the data
I - stokes I of the actual data
Ierr - uncertainty in the stokes I of the actual data
Outputs:
The log likelihood using a gaussian function
'''
kappa,vsini,vmac= params
m = model(v,kappa,vsini,vmac)
sigma2 = Ierr**2 #+m**2*np.exp(2*log_f)
return(-0.5 * np.sum((I - m) ** 2 / sigma2 + np.log(sigma2)))
def lnprob(params,v,I,Ierr):
'''
Inputs:
params - list of walker parameters, in this code that is [kappa, vsini]
v - velocity array of the data
I - stokes I of the actual data
Ierr - uncertainty in the stokes I of the actual data
Outputs:
log probability. Used to determine how good a fit the model is
'''
prior=lnprior(params)
if not np.isfinite(prior):
return(-np.inf)
else:
return(prior+lnlike(params,v,I,Ierr))
# Set up the convergence diagonstic plots. At the final step we want all the walkers to be very close together, i.e a straight line at the end.
fig, ax = plt.subplots(3,1,figsize=(15,5))
ax[0].set_title('Convergence Diagnostic Plots')
fig1, ax1 = plt.subplots(1,1,figsize=(5,5)) #sets up the send set of plots
#for i in range(1):
kappa=np.array([])
#log_f=np.array([])
vsini=np.array([])
vmac=np.array([])
Ic=1.0 #defining the continuum value of the real data
vsiniin=DataPacket.vsini #defining the vsini listed in the data packet
v=DataPacket.scaled.lsds[0].vel#+DataPacket.vrad[0] #defining the velocity array of the data
I=DataPacket.scaled.lsds[0].specI #defining the stokes I array of the data
Ierr=DataPacket.scaled.lsds[0].specSigI #defining the stokes I error of the data
for i in range(1,DataPacket.nobs):
v=np.append(v,DataPacket.scaled.lsds[i].vel)#+DataPacket.vrad[i] #defining the velocity array of the data
I=np.append(I,DataPacket.scaled.lsds[i].specI) #defining the stokes I array of the data
Ierr=np.append(Ierr,DataPacket.scaled.lsds[i].specSigI) #defining the stokes I error of the data
ndim = 3 #number of parameters to fit
nwalkers= 10 * ndim #number of walkers (10/parameter)
pguess = guess #initial guess for kappa and vsini
positions = np.zeros((nwalkers,ndim)) #set up walker position array
positions[:,0] = np.abs(np.random.randn(nwalkers)*pguess[0]*0.1+pguess[0]) #set the inital positions of the kappa walkers to be a random distribution around the guess
positions[:,1] = np.random.randn(nwalkers)*pguess[1]*0.1+pguess[1] #set the initial positions of the vsini walkers
positions[:,2] = np.random.randn(nwalkers)*pguess[2]*2
sampler = emcee.EnsembleSampler(nwalkers,ndim,lnprob,args=(v,I,Ierr)) #set up MCMC. Note that the args keyword contains the real data arrays
pos,prob,state = sampler.run_mcmc(positions, nsteps,progress=True) #runs MCMC for the specified number of steps
#make the first set of plots
res = [ax[j].plot(sampler.chain[:,:,j].T, '-', color='k', alpha=0.3) for j in range(3)]
res = [ax[j].axhline(pguess[j]) for j in range(3)]
#save the walker positions at each step (for diagnostics)
#kappa=np.append(kappa,np.mean(sampler.flatchain[int(2*nsteps/3):], axis=0)[0])
#vsini=np.append(vsini,np.mean(sampler.flatchain[int(2*nsteps/3):], axis=0)[1])
#vmac=np.append(vmac,np.mean(sampler.flatchain[int(2*nsteps/3):], axis=0)[2])
kappa=sampler.flatchain[int(2*100/3):][:,0]
vsini=sampler.flatchain[int(2*100/3):][:,1]
vmac=sampler.flatchain[int(2*100/3):][:,2]
bins=20
bin_means = (np.histogram(kappa, bins, weights=kappa)[0]/np.histogram(kappa, bins)[0])
kappa=bin_means[np.histogram(kappa, bins)[0]==np.histogram(kappa, bins)[0].max()][0]
bin_means = (np.histogram(vsini, bins, weights=vsini)[0]/np.histogram(vsini, bins)[0])
vsini=bin_means[np.histogram(vsini, bins)[0]==np.histogram(vsini, bins)[0].max()][0]
bin_means = (np.histogram(vmac, bins, weights=vmac)[0]/np.histogram(vmac, bins)[0])
vmac=bin_means[np.histogram(vmac, bins)[0]==np.histogram(vmac, bins)[0].max()][0]
#log_f=np.append(log_f,np.mean(sampler.flatchain, axis=0)[1])
#make the second set of plots
ax1.plot(DataPacket.scaled.lsds[0].vel,model(DataPacket.scaled.lsds[0].vel, kappa,vsini,vmac))
ax1.plot(v,I)
print('kappa: {} | vsini: {} | vmac: {}'.format(kappa,vsini,vmac))
#make the corner plots
flat_samples = sampler.get_chain(discard=0, thin=5, flat=True)
labels = ["kappa","vsini",'vmac']
corner.corner(
flat_samples, labels=labels)
return(kappa,vsini,vmac,sampler.flatchain)
def fitdata_novsini(param,DataPacket,guess):
'''
This function fits a set of LSD profiles using scipy's curve fit function.
Inputs:
param - input parameter dictionary
DataPacket - input DataPacket with real data
guess - array of guess values for kappa and vmac. Ex: np.array([1.3,30])
Outputs:
parameters - array of fit parameters
covariance - covariance matrix of the fit
modelout - the best fit model
'''
def model(v,kappa,vmac):
'''
This function creates the line profile model that will be fit to the observed profile
Inputs:
kappa - value of kappa that the walker is on in parameter space
vmac - value of vmac that the walker is on in parameter space
v - velocity array of the actual data
Outputs:
f - line profile model using the weak field method at the walker's position in parameter space
'''
param['general']['vmac']=vmac
param['general']['logkappa']=np.log(kappa)
#pyRaven's weakfield disk integration function
model=rav.diskint2.analytical(param,False)
#interpolating the model to be size MCMC wants
f=np.interp(v,model['vel'],model['flux'])
return(f)
param['general']['vsini']=DataPacket.vsini
x=DataPacket.scaled.lsds[0].vel#+DataPacket.vrad[0]
y=DataPacket.scaled.lsds[0].specI
if DataPacket.nobs!=1:
for i in range(1,DataPacket.nobs):
x=np.append(x,DataPacket.scaled.lsds[i].vel)#+DataPacket.vrad[i])
y=np.append(y,DataPacket.scaled.lsds[i].specI)
parameters,covariance = scipy.optimize.curve_fit(model,x,y,guess)
modelout=model(x,parameters[0],parameters[1])
modelout=modelout[:DataPacket.scaled.lsds[0].vel.size]
return parameters,covariance,modelout | veropetit/pyRaven | fitparams.py | fitparams.py | py | 10,372 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.log",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pyRaven.diskint2.analytical",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "pyRaven.diskint2",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "numpy.int... |
24347584300 | # # Categorize all issues
#
# To use: open with jupyter notebook/lab using jupytext and run all cells
# +
from getpass import getpass
from textwrap import dedent
from ipywidgets import Button, ToggleButtons, Output, VBox
from IPython.display import display, Markdown
import gitlab
# -
gl = gitlab.Gitlab(url="https://gitlab.kwant-project.org", private_token=getpass("Gitlab API token: "))
repo = gl.projects.get("zesje/zesje")
labels = repo.labels.list()
# +
label_dict = {tuple(label.name.split(": ")): label for label in labels if ": " in label.name}
categories = ["impact", "effort", "maintainability"]
degrees = ["low", "medium", "high"]
# +
description = Output()
selector = {
category: ToggleButtons(
options=[(degree, label_dict[category, degree]) for degree in degrees],
description=category,
label="medium",
style={"button_color": label_dict},
)
for category in categories
}
submit = Button(description="submit & next", icon="check")
current_issue = None
def submit_labels(issue):
other_labels = [i for i in issue.labels if ": " not in i]
issue.labels = [i.value.name for i in selector.values()] + other_labels
issue.save()
def render_issue(issue):
return Markdown(
dedent(
f"""### [{issue.title}]({issue.web_url})
{issue.description}
"""
)
)
def next_issue():
issues = repo.issues.list(all=True, state="opened")
for issue in issues:
issue_categories = {label.split(": ")[0]: label.split(": ")[1] for label in issue.labels if ": " in label}
already_labeled = len(issue_categories) == 3 and len(set(issue_categories)) == 3
if not already_labeled:
break
else:
submit.disabled = True
submit.text = "Done!"
for button in selector.values():
button.disabled = True
return None
description.clear_output(wait=True)
with description:
display(render_issue(issue))
for category, degree in issue_categories.items():
selector[category].label = degree
return issue
def submit_and_next(event):
global current_issue
if current_issue is not None:
submit_labels(current_issue)
current_issue = next_issue()
submit.on_click(submit_and_next)
VBox(children=[description] + list(selector.values()) + [submit])
| zesje/zesje | label_issues.py | label_issues.py | py | 2,370 | python | en | code | 9 | github-code | 6 | [
{
"api_name": "gitlab.Gitlab",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "getpass.getpass",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "ipywidgets.Output",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "ipywidgets.ToggleButt... |
508734253 | from itertools import permutations
import cProfile
#
#make permutation of array
#
list = [1,2,3,4]
listPermutations = permutations(list)
for permutation in listPermutations:
print(permutation)
#
#count number of permutations
#
listPermutations = permutations(list)
count = 0
for permutation in listPermutations:
count += 1
print(len(list), count)
#
#check the performance and undestand
# how fast the space of permutations grows
#
def faculty(n):
if n <= 1:
return n
else:
return faculty(n-1)+n
def counter(n):
count = 0
for i in range(n):
count += 1
return count
cProfile.run("counter(faculty(10))") | sleevs/JSNSecurity | Permutations.py | Permutations.py | py | 695 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "itertools.permutations",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "itertools.permutations",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "cProfile.run",
"line_number": 44,
"usage_type": "call"
}
] |
24794059633 | #!/usr/bin/env python3
#
import sys, argparse
import ROOT
ROOT.PyConfig.IgnoreCommandLineOptions = True
def h2pgf(h):
""" Convert TH1 into pgfplot data with error bars
input: xmin xmax y ey
output: x ex y ey
ex = (xmax+xmin)/2
"""
nbins = h.GetNbinsX()
# print("# \\begin{axis}")
# print("# \\addplot[const plot mark mid, black, solid, no markers, error bars/.cd, y dir=both, y explicit, error mark=none]")
# print(" coordinates {")
print("x ex y ey")
for b in range(1, nbins+1):
x = h.GetBinCenter(b)
ex = h.GetBinWidth(b)/2.0
y = h.GetBinContent(b)
ey = h.GetBinError(b)
# print(x,ex,y,ey)
if y>0.0:
print(x, ex, y, ey)
def g2pgf(h):
""" Convert TGraph into pgfplot data """
N = h.GetN()
print("\\begin{axis}")
print("\\addplot[ultra thick]")
print(" coordinates {")
print("x ex y ey")
for b in range(N):
x = h.GetX()[b]
y = h.GetY()[b]
print(x, y)
print("};")
print("\\addlegendentry{TGraph};")
def main():
""" A script to convert TH1 and TGraph into a pgfplot format """
parser = argparse.ArgumentParser(description=main.__doc__, epilog='Homepage: https://github.com/kbat/mc-tools')
parser.add_argument('root', type=str, help='ROOT file')
parser.add_argument('hist', type=str, help='histogram name')
args = parser.parse_args()
f = ROOT.TFile(args.root)
f.ls()
h = f.Get(args.hist)
if h.InheritsFrom("TH1"):
h2pgf(h)
elif h.InheritsFrom("TGraph"):
g2pgf(h)
if __name__ == "__main__":
sys.exit(main())
| kbat/mc-tools | mctools/common/root2pgf.py | root2pgf.py | py | 1,629 | python | en | code | 38 | github-code | 6 | [
{
"api_name": "ROOT.PyConfig",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "ROOT.TFile",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"... |
74514085946 | #!/usr/bin/env python3
from __future__ import print_function
from __future__ import division
# System level imports
import sys
import os
import argparse
import logging
import time
import math
import numpy as np
import matplotlib.pyplot as plt
from numpy.core.defchararray import index
import controller2d
#import controller2d_AR as controller2d
import configparser
import local_planner
import behavioural_planner
import cv2
from math import sin, cos, pi, tan, sqrt
from utils import compute_middle_point
from agents import Agent
from sidewalk import point_in_sidewalks
from converter import Converter
import os
# Script level imports
sys.path.append(os.path.abspath(sys.path[0] + '/..'))
import live_plotter as lv # Custom live plotting library
from carla import sensor
from carla.client import make_carla_client, VehicleControl
from carla.settings import CarlaSettings
from carla.tcp import TCPConnectionError
from carla.controller import utils
from carla.sensor import Camera
from carla.image_converter import labels_to_array, depth_to_array, to_bgra_array
from carla.planner.city_track import CityTrack
from AVD_BP.carla_object_detector.carla_yolo_model.OD import load_model,predict,postprocess
from AVD_BP.carla_object_detector.carla_yolo_model.config import VEHICLE_TAG,PERSON_TAG
# Server / client connection endpoints.
SERVER_HOST = "193.205.163.183"   # remote CARLA server address
SERVER_PORT = 6018                # remote CARLA server port
LOCAL_HOST = "localhost"          # local CARLA server
LOCAL_PORT = 2000                 # default CARLA port
# When True, ground-truth simulator data is trusted directly; when False,
# association also fills the vehicle dict (see association_vehicle_pedestrian).
SIMULATION_PERFECT = False
###############################################################################
# CONFIGURABLE PARAMETERS DURING EXAM
###############################################################################
PLAYER_START_INDEX = 15      #20 #89 #148 #91 # spawn index for player
DESTINATION_INDEX = 139      #40# 133 #61 #142 # Setting a Destination HERE
NUM_PEDESTRIANS = 500        # total number of pedestrians to spawn
NUM_VEHICLES = 500           # total number of vehicles to spawn
SEED_PEDESTRIANS = 0         # seed for pedestrian spawn randomizer
SEED_VEHICLES = 1            # seed for vehicle spawn randomizer
###############################################################################
ITER_FOR_SIM_TIMESTEP = 10   # no. iterations to compute approx sim timestep
WAIT_TIME_BEFORE_START = 1.00   # game seconds (time before controller start)
TOTAL_RUN_TIME = 5000.00     # game seconds (total runtime before sim end)
TOTAL_FRAME_BUFFER = 300     # number of frames to buffer after total runtime
CLIENT_WAIT_TIME = 3         # wait time for client before starting episode
                             # used to make sure the server loads
                             # consistently
# Target cruise speed written into each waypoint (presumably m/s, matching
# the other m/s constants below — TODO confirm).
DESIRED_SPEED = 5.0
WINDOWS_OS = os.name == 'nt'  # True when running under Windows
WEATHER = "DEFAULT"           # key into WEATHERID below
WEATHERID = {
    "DEFAULT": 0,
    "CLEARNOON": 1,
    "CLOUDYNOON": 2,
    "WETNOON": 3,
    "WETCLOUDYNOON": 4,
    "MIDRAINYNOON": 5,
    "HARDRAINNOON": 6,
    "SOFTRAINNOON": 7,
    "CLEARSUNSET": 8,
    "CLOUDYSUNSET": 9,
    "WETSUNSET": 10,
    "WETCLOUDYSUNSET": 11,
    "MIDRAINSUNSET": 12,
    "HARDRAINSUNSET": 13,
    "SOFTRAINSUNSET": 14,
}
#SIMWEATHER = WEATHERID[WEATHER]      # set simulation weather
FIGSIZE_X_INCHES = 8      # x figure size of feedback in inches
FIGSIZE_Y_INCHES = 8      # y figure size of feedback in inches
PLOT_LEFT = 0.1           # in fractions of figure width and height
PLOT_BOT = 0.1
PLOT_WIDTH = 0.8
PLOT_HEIGHT = 0.8
DIST_THRESHOLD_TO_LAST_WAYPOINT = 2.0  # some distance from last position before
                                       # simulation ends
# Planning Constants
NUM_PATHS = 7
BP_LOOKAHEAD_BASE = 16.0          # m
BP_LOOKAHEAD_TIME = 1.0           # s
PATH_OFFSET = 1.5                 # m
CIRCLE_OFFSETS = [-1.0, 1.0, 3.0]  # m
CIRCLE_RADII = [1.5, 1.5, 1.5]    # m
TIME_GAP = 1.0                    # s
PATH_SELECT_WEIGHT = 10
A_MAX = 2.5                       # m/s^2
SLOW_SPEED = 2.0                  # m/s
STOP_LINE_BUFFER = 3.5            # m
LEAD_VEHICLE_LOOKAHEAD = 20.0     # m
LP_FREQUENCY_DIVISOR = 2          # Frequency divisor to make the
                                  # local planner operate at a lower
                                  # frequency than the controller
                                  # (which operates at the simulation
                                  # frequency). Must be a natural
                                  # number.
# Path interpolation parameters
INTERP_MAX_POINTS_PLOT = 10       # number of points used for displaying
                                  # selected path
INTERP_DISTANCE_RES = 0.01        # distance between interpolated points
# controller output directory
CONTROLLER_OUTPUT_FOLDER = os.path.dirname(os.path.realpath(__file__)) +\
    '/controller_output/'
# Radius for filtering nearby agents (usage not visible in this chunk —
# presumably meters; TODO confirm against the caller).
AGENTS_CHECK_RADIUS = 30
# Camera parameters
# Forward camera rig (RGB / semantic segmentation / depth share these).
camera_parameters = {
    'x': 1.8,
    'y': 0.0,
    'z': 1.3,
    'pitch': 0.0,
    'roll': 0.0,
    'yaw': 0.0,
    'width': 224,   # 200
    'height': 224,  # 200
    'fov': 90,
}
# Second rig: same mounting point, wider field of view.
camera_parameters_bis = {
    'x': 1.8,
    'y': 0.0,
    'z': 1.3,
    'pitch': 0.0,
    'roll': 0.0,
    'yaw': 0.0,
    'width': 224,   # 200
    'height': 224,  # 200
    'fov': 120,
}
# Third-person spectator camera, behind and above the car, tilted down.
camera_parameters_view = {
    'x': -5.0,
    'y': 0.0,
    'z': 2.5,
    'pitch': -15.0,
    'roll': 0.0,
    'yaw': 0.0,
    'width': 500,
    'height': 500,
    'fov': 90,
}
def rotate_x(angle):
    """Return the 3x3 rotation matrix about the x-axis (angle in radians)."""
    c, s = cos(angle), sin(angle)
    return np.mat([[1.0, 0.0, 0.0],
                   [0.0,   c,  -s],
                   [0.0,   s,   c]])
def rotate_y(angle):
    """Return the 3x3 rotation matrix about the y-axis (angle in radians)."""
    c, s = cos(angle), sin(angle)
    return np.mat([[  c, 0.0,   s],
                   [0.0, 1.0, 0.0],
                   [ -s, 0.0,   c]])
def rotate_z(angle):
    """Return the 3x3 rotation matrix about the z-axis (angle in radians)."""
    c, s = cos(angle), sin(angle)
    return np.mat([[  c,  -s, 0.0],
                   [  s,   c, 0.0],
                   [0.0, 0.0, 1.0]])
def find_pedestrians_and_vehicles_from_camera(net, camera_data, seg_data, depth_data, current_x, current_y, current_z, current_yaw, camera_parameters, bis=False):
    """Detect pedestrians/vehicles in a camera frame and project them to world coordinates.

    Args:
        net: trained object-detection network (used via predict/postprocess)
        camera_data: RGB camera frame
        seg_data: semantic-segmentation camera frame
        depth_data: depth camera frame; values are multiplied by 1000 below
            (presumably a km -> m conversion — TODO confirm against depth_to_array)
        current_x, current_y, current_z: ego vehicle world position
        current_yaw: ego vehicle heading
        camera_parameters: intrinsics/extrinsics dict for THIS camera
            (NOTE: shadows the module-level dict of the same name)
        bis (bool): True for the second (wide-FOV) camera; only changes the
            debug-window title
    Returns:
        world_frame_vehicles: list of world-frame positions of detected vehicles
        world_frame_pedestrians: same, for pedestrians
        sidewalk: dict detection-index -> bool, True when that pedestrian's
            bounding-box base point lies on a sidewalk
    """
    converter = Converter(camera_parameters)
    ###################################
    # GET BBs
    bb = predict(net,camera_data)
    camera_data, bb_dict= postprocess(camera_data,bb)
    # Show the annotated frame in a per-camera debug window.
    if bis:
        cv2.imshow("Detection box bis",camera_data)
    else:
        cv2.imshow("Detection box",camera_data)
    cv2.waitKey(10)
    #bbs vehicle and pedestrian
    ## bb_p and bb_v are lists like [[(x,y),width,height]]
    # NOTE to access to a specific pixel from bb x,y -> camera_data[y,x]
    # list of pedestrian bounding boxes
    bb_p = bb_dict[PERSON_TAG]
    # list of vehicle bounding boxes
    bb_v = bb_dict[VEHICLE_TAG]
    ###################################
    # MARK PEDESTRIAN BB ON SIDEWAYS
    # only pedestrian bb
    # found point in the middle of bb vertex like X, x1 refer to (x,y) from one bb in bb_p
    # x1--------x2
    # |          |
    # x3---X----x4
    #
    # if X is on sidewalk (or x3 or x4) mark this pedestrian as on sidewalk
    # in this section for each pedestrian bb check if point X is on sidewalk
    # USE FUNCTION : point_in_sidewalks(semSeg_data, point)  NOTE: point must be provided as (y,x)
    count=0
    sidewalk= {}  # detection index -> True if X is on sidewalk, else False
    for bb in bb_p:
        middle_point = compute_middle_point(bb[0][0], bb[0][1], bb[1], bb[2])
        on_sidewalk = point_in_sidewalks(seg_data, middle_point)
        sidewalk[count] = on_sidewalk
        count+=1
    ###################################
    # FOR EACH BB WE CAN CHOOSE X POINT DESCRIBED IN PREVIOUS SECTION TO GET VEHICLES POSITION
    # IN 3D WORLD COORDINATE FRAME
    # USING DEPTH CAMERA GET PEDESTRIAN BB AND VEHICLE BB IN WORLD COORDINATES FRAME
    # USE this to convert a pixel in 3D: pixel should be [x,y,1], pixel_depth = depth_data[y1][x1]
    # converter.convert_to_3D(pixel,pixel_depth,current_x,current_y,current_yaw)
    #
    # NOTE(review): middle_point[0] is clamped with 'height' and middle_point[1]
    # with 'width', yet depth_data is indexed as [middle_point[1], middle_point[0]]
    # (row, col) — the clamp axes look swapped relative to that indexing.
    # Harmless while width == height; confirm compute_middle_point's (x, y) order.
    world_frame_vehicles = []  # list of world-frame positions of detected vehicles
    for vehicle in bb_v:
        middle_point = compute_middle_point(vehicle[0][0], vehicle[0][1], vehicle[1], vehicle[2])
        middle_point = (min(middle_point[0],camera_parameters['height']-1), min(middle_point[1], camera_parameters['width']-1))
        pixel = [middle_point[0], middle_point[1]]
        pixel_depth = depth_data[middle_point[1], middle_point[0]]*1000
        world_frame_point= converter.convert_to_3D(pixel, pixel_depth, current_x, current_y,current_z,current_yaw)
        world_frame_vehicles.append(world_frame_point)
    world_frame_pedestrians = []  # list of world-frame positions of detected pedestrians
    for pedestrian in bb_p:
        middle_point = compute_middle_point(pedestrian[0][0], pedestrian[0][1], pedestrian[1], pedestrian[2])
        middle_point = (min(middle_point[0],camera_parameters['height']-1), min(middle_point[1], camera_parameters['width']-1))
        pixel = [middle_point[0], middle_point[1]]
        pixel_depth = depth_data[middle_point[1], middle_point[0]]*1000
        world_frame_point= converter.convert_to_3D(pixel, pixel_depth, current_x, current_y,current_z,current_yaw)
        world_frame_pedestrians.append(world_frame_point)
    return world_frame_vehicles, world_frame_pedestrians, sidewalk
# Transform the obstacle with its boundary point in the global frame
# bounding_box.transform.location, bounding_box.extent ,bounding_box.transform.rotation
def obstacle_to_world(location, dimensions, orientation):
    """Project an obstacle's 8 boundary points into the world frame.

    Args:
        location: object with .x/.y (world position of the obstacle center)
        dimensions: object with .x/.y half-extents of the bounding box
        orientation: object with .yaw in degrees

    Returns:
        list of 8 [x, y] world-frame points (corners plus edge midpoints).
    """
    yaw = orientation.yaw * pi / 180
    xrad = dimensions.x
    yrad = dimensions.y
    # Boundary points in the obstacle frame (corners + edge midpoints),
    # one column per point.
    cpos = np.array([
        [-xrad, -xrad, -xrad, 0, xrad, xrad, xrad, 0],
        [-yrad, 0, yrad, yrad, yrad, 0, -yrad, -yrad]])
    # Rotation of the obstacle about its center.
    rotyaw = np.array([
        [np.cos(yaw), np.sin(yaw)],
        [-np.sin(yaw), np.cos(yaw)]])
    # Translation to the obstacle's world position.
    cpos_shift = np.array([
        [location.x] * 8,
        [location.y] * 8])
    cpos = np.add(np.matmul(rotyaw, cpos), cpos_shift)
    return [[cpos[0, j], cpos[1, j]] for j in range(cpos.shape[1])]
def make_carla_settings(args):
    """Build the CarlaSettings (simulation options + sensor suite) for this run.

    Args:
        args: parsed CLI namespace; uses .weather, .quality_level and .local

    Returns:
        a fully configured CarlaSettings instance
    """
    settings = CarlaSettings()
    # Non-player agent info is only needed when something else is on the road.
    settings.set(
        SynchronousMode=True,
        SendNonPlayerAgentsInfo=(NUM_PEDESTRIANS > 0 or NUM_VEHICLES > 0),
        NumberOfVehicles=NUM_VEHICLES,
        NumberOfPedestrians=NUM_PEDESTRIANS,
        SeedVehicles=SEED_VEHICLES,
        SeedPedestrians=SEED_PEDESTRIANS,
        WeatherId=WEATHERID[args.weather],
        QualityLevel=args.quality_level)

    def _add_camera(name, params, post_processing=None):
        # Create one camera sensor from a parameter dict and register it.
        if post_processing is None:
            camera = Camera(name)
        else:
            camera = Camera(name, PostProcessing=post_processing)
        camera.set_image_size(params['width'], params['height'])
        camera.set(FOV=params['fov'])
        camera.set_position(params['x'], params['y'], params['z'])
        camera.set_rotation(params['pitch'], params['roll'], params['yaw'])
        settings.add_sensor(camera)

    # Same registration order as the sensor data is consumed downstream.
    _add_camera("CameraRGB", camera_parameters)
    _add_camera("CameraRGBbis", camera_parameters_bis)
    _add_camera("CameraSemSeg", camera_parameters, "SemanticSegmentation")
    _add_camera("CameraSemSegbis", camera_parameters_bis, "SemanticSegmentation")
    _add_camera("CameraDepth", camera_parameters, "Depth")
    _add_camera("CameraDepthbis", camera_parameters_bis, "Depth")
    # The third-person spectator camera is only added for non-local runs.
    if not args.local:
        _add_camera("CameraRGBView", camera_parameters_view)
    return settings
class Timer(object):
    """Frame timer.

    `step` counts ticks (used to derive FPS); lap() starts a new measurement
    window, and the *_lap helpers report progress within that window.
    """

    def __init__(self, period):
        self.step = 0
        self._lap_step = 0
        self._lap_time = time.time()
        self._period_for_lap = period

    def tick(self):
        # One frame elapsed.
        self.step += 1

    def has_exceeded_lap_period(self):
        # True once the current window is at least `period` seconds old.
        return self.elapsed_seconds_since_lap() >= self._period_for_lap

    def lap(self):
        # Start a new measurement window from "now".
        self._lap_step = self.step
        self._lap_time = time.time()

    def ticks_per_second(self):
        # Average tick rate over the current window.
        ticks = float(self.step - self._lap_step)
        return ticks / self.elapsed_seconds_since_lap()

    def elapsed_seconds_since_lap(self):
        return time.time() - self._lap_time
def get_current_pose(measurement):
    """Obtain the current full player pose from the client measurements.

    Args:
        measurement: The CARLA client measurements (from read_data())

    Returns: (x, y, z, pitch, roll, yaw)
        x: X position in meters
        y: Y position in meters
        z: Z position in meters
        pitch: Pitch rotation in radians
        roll: Roll rotation in radians
        yaw: Yaw rotation in radians
    """
    # The original docstring claimed a 3-tuple (x, y, yaw); the function has
    # always returned the full 6-DOF pose.
    transform = measurement.player_measurements.transform
    x = transform.location.x
    y = transform.location.y
    z = transform.location.z
    pitch = math.radians(transform.rotation.pitch)
    roll = math.radians(transform.rotation.roll)
    yaw = math.radians(transform.rotation.yaw)
    return (x, y, z, pitch, roll, yaw)
def get_start_pos(scene):
    """Obtain the player start pose from the CARLA scene.

    Args:
        scene: The CARLA scene object

    Returns: (x, y, yaw)
        x: X position in meters
        y: Y position in meters
        yaw: Yaw position in radians
    """
    spawn = scene.player_start_spots[0]
    return (spawn.location.x,
            spawn.location.y,
            math.radians(spawn.rotation.yaw))
def get_player_collided_flag(measurement,
                             prev_collision_vehicles,
                             prev_collision_pedestrians,
                             prev_collision_other):
    """Detect whether the player collided since the previous frame.

    Compares the player's cumulative collision intensities (vehicles,
    pedestrians, other) against the values carried over from the previous
    frame; any increase means a new collision happened.

    Note: From the CARLA documentation:
    "Collisions are not annotated if the vehicle is not moving (<1km/h) to
    avoid annotating undesired collision due to mistakes in the AI of
    non-player agents."

    Returns:
        (collided_flag, collision_vehicles, collision_pedestrians,
        collision_other) — the last three are the current cumulative values
        to pass back in on the next frame.
    """
    meas = measurement.player_measurements
    curr_vehicles = meas.collision_vehicles
    curr_pedestrians = meas.collision_pedestrians
    curr_other = meas.collision_other
    collided = (curr_vehicles > prev_collision_vehicles
                or curr_pedestrians > prev_collision_pedestrians
                or curr_other > prev_collision_other)
    return (collided, curr_vehicles, curr_pedestrians, curr_other)
def send_control_command(client, throttle, steer, brake,
                         hand_brake=False, reverse=False):
    """Clamp and send a VehicleControl command to the CARLA client.

    Args:
        client: The CARLA client object
        throttle: Throttle command for the sim car, clamped to [0, 1]
        steer: Steer command for the sim car, clamped to [-1, 1]
        brake: Brake command for the sim car, clamped to [0, 1]
        hand_brake: Whether the hand brake is engaged
        reverse: Whether the sim car is in the reverse gear
    """
    def _clamp(value, low, high):
        # fmin/fmax prefer the non-NaN operand, so a NaN input is replaced
        # by the nearest bound rather than propagated.
        return np.fmax(np.fmin(value, high), low)

    control = VehicleControl()
    control.steer = _clamp(steer, -1.0, 1.0)
    control.throttle = _clamp(throttle, 0, 1.0)
    control.brake = _clamp(brake, 0, 1.0)
    control.hand_brake = hand_brake
    control.reverse = reverse
    client.send_control(control)
def create_controller_output_dir(output_folder):
    """Create `output_folder` (and parents) if it does not exist yet."""
    if os.path.exists(output_folder):
        return
    os.makedirs(output_folder)
def store_trajectory_plot(graph, fname):
    """Save the plot `graph` as `fname` inside CONTROLLER_OUTPUT_FOLDER."""
    create_controller_output_dir(CONTROLLER_OUTPUT_FOLDER)
    graph.savefig(os.path.join(CONTROLLER_OUTPUT_FOLDER, fname))
def write_trajectory_file(x_list, y_list, v_list, t_list, collided_list):
    """Dump the driven trajectory to trajectory.txt, one frame per row.

    Row format: x, y, speed, time, collided-flag.
    """
    create_controller_output_dir(CONTROLLER_OUTPUT_FOLDER)
    out_path = os.path.join(CONTROLLER_OUTPUT_FOLDER, 'trajectory.txt')
    with open(out_path, 'w') as trajectory_file:
        # Indexing (rather than zip) keeps the original behavior of failing
        # loudly if the lists are of unequal length.
        for idx in range(len(x_list)):
            row = '%3.3f, %3.3f, %2.3f, %6.3f %r\n' % (
                x_list[idx], y_list[idx], v_list[idx], t_list[idx],
                collided_list[idx])
            trajectory_file.write(row)
def write_collisioncount_file(collided_list):
    """Write the total number of collided frames to collision_count.txt."""
    create_controller_output_dir(CONTROLLER_OUTPUT_FOLDER)
    out_path = os.path.join(CONTROLLER_OUTPUT_FOLDER, 'collision_count.txt')
    with open(out_path, 'w') as collision_file:
        collision_file.write(str(sum(collided_list)))
def make_correction(waypoint, previuos_waypoint, desired_speed):
    """Shift `waypoint` sideways onto the driving lane, IN PLACE.

    The 1.5 m lateral offset is chosen from the travel direction between the
    previous and current waypoint, and `desired_speed` is written into the
    waypoint's speed slot.

    NOTE: mutates and returns the same `waypoint` object — callers rely on
    the in-place update.
    """
    dx = waypoint[0] - previuos_waypoint[0]
    dy = waypoint[1] - previuos_waypoint[1]
    # Heading east/west shifts the point in y; north/south shifts it in x.
    if dx > 0:
        move_y = 1.5
    elif dx < 0:
        move_y = -1.5
    else:
        move_y = 0
    if dy > 0:
        move_x = -1.5
    elif dy < 0:
        move_x = 1.5
    else:
        move_x = 0
    waypoint[0] += move_x
    waypoint[1] += move_y
    waypoint[2] = desired_speed
    return waypoint
def found_nearest_object(position, objects_position, objects_just_assoicated):
    """Return the index of the detection closest to `position`, or None.

    Only detections within THRESHOLD_DISTANCE are eligible, and indices
    already listed in `objects_just_assoicated` are skipped.
    """
    THRESHOLD_DISTANCE = 3
    best_index = None
    best_dist = math.inf
    for idx, object_position in enumerate(objects_position):
        if idx in objects_just_assoicated:
            continue
        # Detections are stored as column vectors: [[x], [y], ...].
        candidate = [object_position[0][0], object_position[1][0]]
        distance = np.linalg.norm(np.subtract(position, candidate))
        if distance < best_dist and distance < THRESHOLD_DISTANCE:
            best_dist = distance
            best_index = idx
    return best_index
def association_vehicle_pedestrian(perfect_data, real_data, real_data_bis, sidewalk=None, sidewalk_bis = None, pedestrian=False):
    """Associate camera detections with ground-truth agents.

    The detections come from two different cameras; if a ground-truth agent
    could match a detection from both, the first camera (real_data) wins.

    Args:
        perfect_data (list): ground-truth agents; each exposes get_position,
            get_orientation, get_bounding_box, get_speed and get_id
        real_data (list): detections from camera 0, each a column vector
            [[x], [y], ...]
        real_data_bis (list): detections from the second camera, same layout
        sidewalk (dict): index -> bool; True when the pedestrian detection at
            that index of real_data is on a sidewalk
        sidewalk_bis (dict): same as `sidewalk` but for real_data_bis
        pedestrian (bool): True when the data are pedestrians, else vehicles

    Returns:
        (data_to_consider, vehicle_dict): the Agent objects to consider this
        frame and — for vehicles when SIMULATION_PERFECT is off — an
        id -> Agent map.
    """
    # THRESHOLD_DISTANCE = 2.5
    THRESHOLD_SPEED = 0.15
    indices_associated = []
    data_to_consider = []
    indices_associated_bis = []
    vehicle_dict = {}
    # For each ground-truth agent, find the nearest unassigned detection
    # from each camera.
    for d in perfect_data:
        x, y = d.get_position()
        min_index= found_nearest_object([x,y],real_data,indices_associated)
        min_index_bis = found_nearest_object([x,y],real_data_bis,indices_associated_bis)
        # Index of the associated detection (None when no match).
        association_index = None
        # Sidewalk map matching the camera the association came from.
        sidewalk_to_consider = None
        pose = None
        # If a perfect object is associated to both real_data and
        # real_data_bis we decide to associate it to the real_data object.
        if min_index is None and min_index_bis != None:
            association_index = min_index_bis
            pose = real_data_bis[association_index]
            sidewalk_to_consider = sidewalk_bis
            indices_associated_bis.append(min_index_bis)
        elif min_index != None:
            association_index = min_index
            pose = real_data[association_index]
            sidewalk_to_consider = sidewalk
            indices_associated.append(min_index)
        # If an association is found, build an Agent from the detected
        # position plus the ground-truth attributes.
        if association_index is not None:
            # pose = real_data[association_index]
            position = (pose[0][0],pose[1][0])
            #position = d.get_position()
            yaw = d.get_orientation()
            bb = d.get_bounding_box()
            speed = d.get_speed()
            id = d.get_id()
            if not pedestrian:
                vehicle = Agent(id,position,bb,yaw,speed,"Vehicle")
                data_to_consider.append(vehicle)
                if not SIMULATION_PERFECT:
                    vehicle_dict[id] = vehicle
            else:
                # A pedestrian standing on the sidewalk and moving slower
                # than THRESHOLD_SPEED is ignored (not a road hazard).
                if sidewalk_to_consider is not None and not(sidewalk_to_consider[association_index] and speed<THRESHOLD_SPEED):
                    data_to_consider.append(Agent(id,position,bb,yaw,speed,"Pedestrian"))
    return data_to_consider, vehicle_dict
def agent_entering_management(current_agents, last_agents, entering, vehicles_dict=None):
    """Debounce newly-detected agents before trusting them.

    An agent seen in the current frame but absent from the last one is held
    in `entering` until it has been re-detected MIN_ENTERING_FRAME times;
    only then is it promoted to a real agent. Candidates that vanish before
    promotion are dropped as false positives.

    Args:
        current_agents (list): agents detected in the current frame
        last_agents (list): agents that were considered in the last frame
        entering (dict): id -> [frames_seen, agent]; updated in place
        vehicles_dict (dict): optional id -> agent map kept in sync

    Returns:
        list of agents to consider in this frame
    """
    MIN_ENTERING_FRAME = 2
    agents_to_consider = []
    known_ids = {agent.get_id() for agent in last_agents}

    # STEP 1: agents already seen last frame pass straight through; the
    # rest are tracked (or refreshed) as entering candidates.
    for agent in current_agents:
        agent_id = agent.get_id()
        if agent_id in known_ids:
            agents_to_consider.append(agent)
            if vehicles_dict is not None:
                vehicles_dict[agent_id] = agent
        elif agent_id in entering:
            entering[agent_id][0] += 1
            entering[agent_id][1] = agent  # refresh location and speed
        else:
            entering[agent_id] = [1, agent]

    # STEP 2: candidates observed for enough frames become real agents.
    for agent_id in list(entering.keys()):
        frames_seen, candidate = entering[agent_id]
        if frames_seen == MIN_ENTERING_FRAME:
            agents_to_consider.append(candidate)
            if vehicles_dict is not None:
                vehicles_dict[agent_id] = candidate
            # No longer an entering candidate: it is a real agent now.
            del entering[agent_id]

    # STEP 3: candidates missing from the current frame were false positives.
    current_ids = {agent.get_id() for agent in current_agents}
    for agent_id in list(entering.keys()):
        if agent_id not in current_ids:
            del entering[agent_id]
    return agents_to_consider
def agents_outgoing_managements(current_agents, last_agents, outgoing, vehicle_dict=None):
    """Keep briefly-lost ("ghost") agents alive for a few frames.

    An agent that was considered last frame but is missing from the current
    detections is tracked in `outgoing` and still reported for up to
    MAX_GHOST_FRAME frames, after which it is dropped for good.

    Args:
        current_agents (list): agents detected in the current frame
        last_agents (list): agents that were considered in the last frame
        outgoing (dict): id -> [frames_missing, agent]; updated in place
        vehicle_dict (dict): optional id -> agent map kept in sync

    Returns:
        list of ghost agents to keep considering in this frame
    """
    MAX_GHOST_FRAME = 5
    agents_to_consider = []
    current_ids = {agent.get_id() for agent in current_agents}

    # STEP 1: update ghost bookkeeping for every agent from the last frame.
    for agent in last_agents:
        agent_id = agent.get_id()
        if agent_id in current_ids:
            # Re-detected: it is not (or no longer) a ghost.
            outgoing.pop(agent_id, None)
        elif agent_id in outgoing:
            outgoing[agent_id][0] += 1
        else:
            outgoing[agent_id] = [1, agent]

    # STEP 2: report ghosts still within the grace period; drop the rest.
    for agent_id in list(outgoing.keys()):
        frames_missing, ghost = outgoing[agent_id]
        if frames_missing < MAX_GHOST_FRAME:
            agents_to_consider.append(ghost)
            if vehicle_dict is not None:
                vehicle_dict[agent_id] = ghost
        else:
            del outgoing[agent_id]  # MAX_GHOST_FRAME exceeded
    return agents_to_consider
def exec_waypoint_nav_demo(args, host, port):
""" Executes waypoint navigation demo.
"""
with make_carla_client(host, port) as client:
print('Carla client connected.')
settings = make_carla_settings(args)
# Now we load these settings into the server. The server replies
# with a scene description containing the available start spots for
# the player. Here we can provide a CarlaSettings object or a
# CarlaSettings.ini file as string.
scene = client.load_settings(settings)
# Refer to the player start folder in the WorldOutliner to see the
# player start information
player_start = args.start
# Notify the server that we want to start the episode at the
# player_start index. This function blocks until the server is ready
# to start the episode.
print('Starting new episode at %r...' % scene.map_name)
client.start_episode(player_start)
#############################################
# Load Configurations
#############################################
# Load configuration file (options.cfg) and then parses for the various
# options. Here we have two main options:
# live_plotting and live_plotting_period, which controls whether
# live plotting is enabled or how often the live plotter updates
# during the simulation run.
config = configparser.ConfigParser()
config.read(os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'options.cfg'))
demo_opt = config['Demo Parameters']
# Get options
enable_live_plot = demo_opt.get('live_plotting', 'true').capitalize()
enable_live_plot = enable_live_plot == 'True'
live_plot_period = float(demo_opt.get('live_plotting_period', 0))
# Set options
live_plot_timer = Timer(live_plot_period)
# Settings Mission Planner
mission_planner = CityTrack("Town01")
#############################################
# Determine simulation average timestep (and total frames)
#############################################
# Ensure at least one frame is used to compute average timestep
num_iterations = ITER_FOR_SIM_TIMESTEP
if (ITER_FOR_SIM_TIMESTEP < 1):
num_iterations = 1
# Gather current data from the CARLA server. This is used to get the
# simulator starting game time. Note that we also need to
# send a command back to the CARLA server because synchronous mode
# is enabled.
measurement_data, sensor_data = client.read_data()
car_extent_x = measurement_data.player_measurements.bounding_box.extent.x
car_extent_y = measurement_data.player_measurements.bounding_box.extent.y
# get traffic light information
traffic_lights = [] #[id, [x,y],yaw]
for agent in measurement_data.non_player_agents:
if agent.HasField("traffic_light"):
traffic_lights.append([agent.id,
agent.traffic_light.transform.location.x,agent.traffic_light. transform.location.y,
agent.traffic_light.transform.rotation.yaw,agent.traffic_light.state])
sim_start_stamp = measurement_data.game_timestamp / 1000.0
# Send a control command to proceed to next iteration.
# This mainly applies for simulations that are in synchronous mode.
send_control_command(client, throttle=0.0, steer=0, brake=1.0)
# Computes the average timestep based on several initial iterations
sim_duration = 0
for i in range(num_iterations):
# Gather current data
measurement_data, sensor_data = client.read_data()
# Send a control command to proceed to next iteration
send_control_command(client, throttle=0.0, steer=0, brake=1.0)
# Last stamp
if i == num_iterations - 1:
sim_duration = measurement_data.game_timestamp / 1000.0 -\
sim_start_stamp
# Outputs average simulation timestep and computes how many frames
# will elapse before the simulation should end based on various
# parameters that we set in the beginning.
SIMULATION_TIME_STEP = sim_duration / float(num_iterations)
print("SERVER SIMULATION STEP APPROXIMATION: " + \
str(SIMULATION_TIME_STEP))
TOTAL_EPISODE_FRAMES = int((TOTAL_RUN_TIME + WAIT_TIME_BEFORE_START) /\
SIMULATION_TIME_STEP) + TOTAL_FRAME_BUFFER
#############################################
# Frame-by-Frame Iteration and Initialization
#############################################
# Store pose history starting from the start position
measurement_data, sensor_data = client.read_data()
start_timestamp = measurement_data.game_timestamp / 1000.0
start_x, start_y, start_z, start_pitch, start_roll, start_yaw = get_current_pose(measurement_data)
send_control_command(client, throttle=0.0, steer=0, brake=1.0)
x_history = [start_x]
y_history = [start_y]
yaw_history = [start_yaw]
time_history = [0]
speed_history = [0]
collided_flag_history = [False] # assume player starts off non-collided
#############################################
# Settings Waypoints
#############################################
starting = scene.player_start_spots[args.start]
destination = scene.player_start_spots[args.dest]
# Starting position is the current position
# (x, y, z, pitch, roll, yaw)
source_pos = [starting.location.x, starting.location.y, starting.location.z]
source_ori = [starting.orientation.x, starting.orientation.y]
source = mission_planner.project_node(source_pos)
# Destination position
destination_pos = [destination.location.x, destination.location.y, destination.location.z]
destination_ori = [destination.orientation.x, destination.orientation.y]
destination = mission_planner.project_node(destination_pos)
waypoints = []
waypoints_route = mission_planner.compute_route(source, source_ori, destination, destination_ori)
desired_speed = DESIRED_SPEED
turn_speed = 2.5
intersection_nodes = mission_planner.get_intersection_nodes()
intersection_pair = []
turn_cooldown = 0
prev_x = False
prev_y = False
# Put waypoints in the lane
previuos_waypoint = mission_planner._map.convert_to_world(waypoints_route[0])
for i in range(1,len(waypoints_route)):
point = waypoints_route[i]
waypoint = mission_planner._map.convert_to_world(point)
current_waypoint = make_correction(waypoint,previuos_waypoint,desired_speed)
dx = current_waypoint[0] - previuos_waypoint[0]
dy = current_waypoint[1] - previuos_waypoint[1]
is_turn = ((prev_x and abs(dy) > 0.1) or (prev_y and abs(dx) > 0.1)) and not(abs(dx) > 0.1 and abs(dy) > 0.1)
prev_x = abs(dx) > 0.1
prev_y = abs(dy) > 0.1
if point in intersection_nodes:
prev_start_intersection = mission_planner._map.convert_to_world(waypoints_route[i-2])
center_intersection = mission_planner._map.convert_to_world(waypoints_route[i])
start_intersection = mission_planner._map.convert_to_world(waypoints_route[i-1])
end_intersection = mission_planner._map.convert_to_world(waypoints_route[i+1])
start_intersection = make_correction(start_intersection,prev_start_intersection,turn_speed)
end_intersection = make_correction(end_intersection,center_intersection,turn_speed)
dx = start_intersection[0] - end_intersection[0]
dy = start_intersection[1] - end_intersection[1]
if abs(dx) > 0 and abs(dy) > 0:
intersection_pair.append((center_intersection,len(waypoints)))
waypoints[-1][2] = turn_speed
middle_point = [(start_intersection[0] + end_intersection[0]) /2, (start_intersection[1] + end_intersection[1]) /2]
centering = 0.75
middle_intersection = [(centering*middle_point[0] + (1-centering)*center_intersection[0]), (centering*middle_point[1] + (1-centering)*center_intersection[1])]
# Point at intersection:
A = [[start_intersection[0], start_intersection[1], 1],
[end_intersection[0], end_intersection[1], 1],
[middle_intersection[0], middle_intersection[1], 1]]
b = [-start_intersection[0]**2 - start_intersection[1]**2,
-end_intersection[0]**2 - end_intersection[1]**2,
-middle_intersection[0]**2 - middle_intersection[1]**2]
coeffs = np.matmul(np.linalg.inv(A), b)
x = start_intersection[0]
center_x = -coeffs[0]/2
center_y = -coeffs[1]/2
r = sqrt(center_x**2 + center_y**2 - coeffs[2])
theta_start = math.atan2((start_intersection[1] - center_y),(start_intersection[0] - center_x))
theta_end = math.atan2((end_intersection[1] - center_y),(end_intersection[0] - center_x))
theta = theta_start
start_to_end = 1 if theta_start < theta_end else -1
while (start_to_end==1 and theta < theta_end) or (start_to_end==-1 and theta > theta_end):
waypoint_on_lane = [0,0,0]
waypoint_on_lane[0] = center_x + r * cos(theta)
waypoint_on_lane[1] = center_y + r * sin(theta)
waypoint_on_lane[2] = turn_speed
waypoints.append(waypoint_on_lane)
theta += (abs(theta_end - theta_start) * start_to_end) / 10
turn_cooldown = 4
else:
waypoint = mission_planner._map.convert_to_world(point)
if turn_cooldown > 0:
target_speed = turn_speed
turn_cooldown -= 1
else:
target_speed = desired_speed
waypoint_on_lane = make_correction(waypoint,previuos_waypoint,target_speed)
waypoints.append(waypoint_on_lane)
previuos_waypoint = waypoint
waypoints = np.array(waypoints)
print("[MAIN] n waypoints -> ", len(waypoints))
with open("waypoints.txt","w") as f:
for x,y,v in waypoints:
f.writelines(f"{x}, {y}, {v}\n")
#############################################
# Controller 2D Class Declaration
#############################################
# This is where we take the controller2d.py class
# and apply it to the simulator
controller = controller2d.Controller2D(waypoints)
#############################################
# Vehicle Trajectory Live Plotting Setup
#############################################
# Uses the live plotter to generate live feedback during the simulation
# The two feedback includes the trajectory feedback and
# the controller feedback (which includes the speed tracking).
lp_traj = lv.LivePlotter(tk_title="Trajectory Trace")
lp_1d = lv.LivePlotter(tk_title="Controls Feedback")
###
# Add 2D position / trajectory plot
###
trajectory_fig = lp_traj.plot_new_dynamic_2d_figure(
title='Vehicle Trajectory',
figsize=(FIGSIZE_X_INCHES, FIGSIZE_Y_INCHES),
edgecolor="black",
rect=[PLOT_LEFT, PLOT_BOT, PLOT_WIDTH, PLOT_HEIGHT])
trajectory_fig.set_invert_x_axis() # Because UE4 uses left-handed
# coordinate system the X
# axis in the graph is flipped
trajectory_fig.set_axis_equal() # X-Y spacing should be equal in size
# Add waypoint markers
trajectory_fig.add_graph("waypoints", window_size=len(waypoints),
x0=waypoints[:,0], y0=waypoints[:,1],
linestyle="-", marker="", color='g')
# Add trajectory markers
trajectory_fig.add_graph("trajectory", window_size=TOTAL_EPISODE_FRAMES,
x0=[start_x]*TOTAL_EPISODE_FRAMES,
y0=[start_y]*TOTAL_EPISODE_FRAMES,
color=[1, 0.5, 0])
# Add starting position marker
trajectory_fig.add_graph("start_pos", window_size=1,
x0=[start_x], y0=[start_y],
marker=11, color=[1, 0.5, 0],
markertext="Start", marker_text_offset=1)
trajectory_fig.add_graph("obstacles_points",
window_size=8 * (NUM_PEDESTRIANS + NUM_VEHICLES) ,
x0=[0]* (8 * (NUM_PEDESTRIANS + NUM_VEHICLES)),
y0=[0]* (8 * (NUM_PEDESTRIANS + NUM_VEHICLES)),
linestyle="", marker="+", color='b')
nearest_tl = []
tl_dict = {}
# we compute here traffic lights filter because they are stationary objects.
for i,tl in enumerate(traffic_lights):
# compute distances vector between waypoints and current traffic light
temp = waypoints[:,:2] - tl[1:3]
# compute module fpr each distances vector
dist = np.linalg.norm(temp,axis=1)
# verify if there is at least one traffic_light
# along waypoints trajectory and plot it.
# For each i-th waypoint we consider a circle of
# radius 5 and centered in i-th waypoint. If traffic lights
# point is in almost a circle we considered it.
TRAFFIC_LIGHT_DISTANCE = 10 # sperimentaly computed
if len(np.where(dist<TRAFFIC_LIGHT_DISTANCE)[0]>0):
nearest_tl.append(tl[:-1]) # not interested to store status information here
#get id and status
tl_dict[tl[0]]=tl[-1]
if enable_live_plot:
trajectory_fig.add_graph(f"{tl[0]}",
window_size=1,
x0=[tl[1]], y0=[tl[2]],
marker=11, color=[1, 0.5, 0],
markertext=f"{i}", marker_text_offset=1)
nearest_tl = np.array(nearest_tl)
print("SHAPE:")
print(nearest_tl.shape)
# Add end position marker
trajectory_fig.add_graph("end_pos", window_size=1,
x0=[waypoints[-1, 0]],
y0=[waypoints[-1, 1]],
marker="D", color='r',
markertext="End", marker_text_offset=1)
# Add car marker
trajectory_fig.add_graph("car", window_size=1,
marker="s", color='b', markertext="Car",
marker_text_offset=1)
# Add lead car information
trajectory_fig.add_graph("leadcar", window_size=1,
marker="s", color='g', markertext="Lead Car",
marker_text_offset=1)
# Add lookahead path
trajectory_fig.add_graph("selected_path",
window_size=INTERP_MAX_POINTS_PLOT,
x0=[start_x]*INTERP_MAX_POINTS_PLOT,
y0=[start_y]*INTERP_MAX_POINTS_PLOT,
color=[1, 0.5, 0.0],
linewidth=3)
# Add local path proposals
for i in range(NUM_PATHS):
trajectory_fig.add_graph("local_path " + str(i), window_size=200,
x0=None, y0=None, color=[0.0, 0.0, 1.0])
###
# Add 1D speed profile updater
###
forward_speed_fig =\
lp_1d.plot_new_dynamic_figure(title="Forward Speed (m/s)")
forward_speed_fig.add_graph("forward_speed",
label="forward_speed",
window_size=TOTAL_EPISODE_FRAMES)
forward_speed_fig.add_graph("reference_signal",
label="reference_Signal",
window_size=TOTAL_EPISODE_FRAMES)
# Add throttle signals graph
throttle_fig = lp_1d.plot_new_dynamic_figure(title="Throttle")
throttle_fig.add_graph("throttle",
label="throttle",
window_size=TOTAL_EPISODE_FRAMES)
# Add brake signals graph
brake_fig = lp_1d.plot_new_dynamic_figure(title="Brake")
brake_fig.add_graph("brake",
label="brake",
window_size=TOTAL_EPISODE_FRAMES)
# Add steering signals graph
steer_fig = lp_1d.plot_new_dynamic_figure(title="Steer")
steer_fig.add_graph("steer",
label="steer",
window_size=TOTAL_EPISODE_FRAMES)
# live plotter is disabled, hide windows
if not enable_live_plot:
lp_traj._root.withdraw()
lp_1d._root.withdraw()
#############################################
# Local Planner Variables
#############################################
wp_goal_index = 0
local_waypoints = None
path_validity = np.zeros((NUM_PATHS, 1), dtype=bool)
lp = local_planner.LocalPlanner(NUM_PATHS,
PATH_OFFSET,
CIRCLE_OFFSETS,
CIRCLE_RADII,
PATH_SELECT_WEIGHT,
TIME_GAP,
A_MAX,
SLOW_SPEED,
STOP_LINE_BUFFER)
bp = behavioural_planner.BehaviouralPlanner(BP_LOOKAHEAD_BASE,
LEAD_VEHICLE_LOOKAHEAD,
nearest_tl,
tl_dict)
#############################################
# Scenario Execution Loop
#############################################
# Iterate the frames until the end of the waypoints is reached or
# the TOTAL_EPISODE_FRAMES is reached. The controller simulation then
# ouptuts the results to the controller output directory.
reached_the_end = False
skip_first_frame = True
# Initialize the current timestamp.
current_timestamp = start_timestamp
# Initialize collision history
prev_collision_vehicles = 0
prev_collision_pedestrians = 0
prev_collision_other = 0
# vehicles_dict = {}
####################################
vehicles_entering = {}
pedestrians_entering = {}
pedestrians_outgoing= {}
vehicles_outgoing = {}
# the aboves data structure are structured in this way:
# entering = {
# id1: [counter, agent_object],
# id2: [counter, agent_object],
# ....
# }
#
# list of last frame ids
pedestrians_last_frame = []
vehicles_last_frame = []
###################################
# DETECTOR
net = load_model()
for frame in range(TOTAL_EPISODE_FRAMES):
# Gather current data from the CARLA server
measurement_data, sensor_data = client.read_data()
# UPDATE HERE the obstacles list
obstacles = []
_vehicles_dict = {}
# Update pose and timestamp
prev_timestamp = current_timestamp
current_x, current_y, current_z, current_pitch, current_roll, current_yaw = \
get_current_pose(measurement_data)
current_speed = measurement_data.player_measurements.forward_speed
current_timestamp = float(measurement_data.game_timestamp) / 1000.0
# Wait for some initial time before starting the demo
if current_timestamp <= WAIT_TIME_BEFORE_START:
send_control_command(client, throttle=0.0, steer=0, brake=1.0)
continue
else:
current_timestamp = current_timestamp - WAIT_TIME_BEFORE_START
# Store history
x_history.append(current_x)
y_history.append(current_y)
yaw_history.append(current_yaw)
speed_history.append(current_speed)
time_history.append(current_timestamp)
# Store collision history
collided_flag,\
prev_collision_vehicles,\
prev_collision_pedestrians,\
prev_collision_other = get_player_collided_flag(measurement_data,
prev_collision_vehicles,
prev_collision_pedestrians,
prev_collision_other)
collided_flag_history.append(collided_flag)
if frame % (LP_FREQUENCY_DIVISOR) == 0:
# update traffic_lights status
###################################
# GET BGR
camera_data = sensor_data.get('CameraRGB', None)
if camera_data is not None:
# to_bgra_array returns an image with 4 channels with last channel all zeros
camera_data = to_bgra_array(camera_data)[:,:,:3]
camera_data = np.copy(camera_data)
camera_data_bis = sensor_data.get("CameraRGBbis", None)
if camera_data_bis is not None:
camera_data_bis = to_bgra_array(camera_data_bis)[:,:,:3]
camera_data_bis = np.copy(camera_data_bis)
#output segmentation
seg_data = sensor_data.get('CameraSemSeg', None)
if seg_data is not None:
seg_data = seg_data.data
seg_data_bis = sensor_data.get('CameraSemSegbis', None)
if seg_data_bis is not None:
seg_data_bis = seg_data_bis.data
#depth camera
depth_data = sensor_data.get('CameraDepth', None)
if depth_data is not None:
depth_data = depth_data.data
depth_data_bis = sensor_data.get('CameraDepthbis', None)
if depth_data_bis is not None:
depth_data_bis = depth_data_bis.data
# print("-"*50)
world_frame_vehicles, world_frame_pedestrians,sidewalk = find_pedestrians_and_vehicles_from_camera(net, camera_data, seg_data, depth_data, current_x, current_y, current_z, current_yaw, camera_parameters)
wfv_bis, wfp_bis, sidewalk_bis = find_pedestrians_and_vehicles_from_camera(net, camera_data_bis, seg_data_bis, depth_data_bis, current_x, current_y, current_z,current_yaw, camera_parameters_bis, True)
# world_frame_vehicles, world_frame_pedestrians,sidewalk = find_pedestrians_and_vehicles_from_camera(net, camera_data, seg_data, depth_data, current_x, current_y, current_yaw, camera_parameters)
# wfv_bis, wfp_bis, sidewalk_bis = find_pedestrians_and_vehicles_from_camera(net, camera_data_bis, seg_data_bis, depth_data_bis, current_x, current_y, current_yaw, camera_parameters_bis, True)
# world_frame_vehicles += wfv_bis
# world_frame_pedestrians += wfp_bis
# for p in world_frame_vehicles:
# print("CAMERA 0 vehicles ", p)
# print()
# for p in wfv_bis:
# print("CAMERA BIS vehicles ", p)
# print()
###############################################
# BELOW CARLA PERFECT DATA
pedestrians = []
vehicles = []
for agent in measurement_data.non_player_agents:
if agent.HasField("traffic_light"):
if agent.id in tl_dict:
tl_dict[agent.id] = agent.traffic_light.state
if agent.HasField("pedestrian"):
location = agent.pedestrian.transform.location
dimensions = agent.pedestrian.bounding_box.extent
orientation = agent.pedestrian.transform.rotation
dist = np.subtract([current_x,current_y], [location.x,location.y])
norm = np.linalg.norm(dist)
# filter only pedestrian that are in a radiud of 30 metres
if norm < AGENTS_CHECK_RADIUS:
bb = obstacle_to_world(location, dimensions, orientation)
#takes only verteces of pedestrians bb
bb = bb[0:-1:2]
orientation = orientation.yaw*math.pi/180
speed = agent.pedestrian.forward_speed
# print("REAL PED: ", location.x,location.y)
pedestrian = Agent(agent.id,[location.x,location.y],bb,orientation,speed,"Pedestrian")
pedestrians.append(pedestrian)
if id in pedestrians_outgoing:
# print(f"[MAIN] Update position of ghost {id}, {pedestrian}")
# update its data because in the current frame this object can be still occludeed
pedestrians_outgoing[id][1] = pedestrian
if agent.HasField("vehicle"):
location = agent.vehicle.transform.location
dimensions = agent.vehicle.bounding_box.extent
orientation = agent.vehicle.transform.rotation
dist = np.subtract([current_x,current_y], [location.x,location.y])
norm = np.linalg.norm(dist)
# filter only vehicle that are in a radiud of AGENTS_CHECK_RADIUS metres
if norm < AGENTS_CHECK_RADIUS:
id = agent.id
speed = agent.vehicle.forward_speed
bb = obstacle_to_world(location, dimensions, orientation)
#takes only verteces of pedestrians bb
bb = bb[0:-1:2]
# print("REAL VEHICLE: ", location.x,location.y)
vehicle = Agent(id,[location.x,location.y],bb,orientation.yaw,speed,"Vehicle")
vehicles.append(vehicle)
if id in vehicles_outgoing:
# update its data because in the current frame this object can be still occludeed
# print(f"[MAIN] Update position of ghost {id}, {vehicle}")
vehicles_outgoing[id][1] = vehicle
if SIMULATION_PERFECT:
_vehicles_dict[id] = vehicle
#########################################
# here make data association (remember to valuate it only on x and y)
# input-> world_frame_vehicles, world_frame_pedestrians, sidewalk
# output-> np array di pedoni
pedestrian_associated,_ = association_vehicle_pedestrian(pedestrians,
world_frame_pedestrians,wfp_bis,sidewalk,sidewalk_bis,True)
vehicles_associated, vehicles_dict = association_vehicle_pedestrian(vehicles,
world_frame_vehicles,wfv_bis)
# pedestrians_to_consider = pedestrian_associated
# vehicles_to_consider = vehicles_associated
pedestrians_to_consider = []
vehicles_to_consider = []
######## entering management
output_p = agent_entering_management(pedestrian_associated,pedestrians_last_frame,pedestrians_entering)
output_v = agent_entering_management(vehicles_associated,vehicles_last_frame,vehicles_entering,vehicles_dict)
pedestrians_to_consider += output_p
vehicles_to_consider += output_v
output_p = agents_outgoing_managements(pedestrians_to_consider,pedestrians_last_frame,pedestrians_outgoing)
output_v = agents_outgoing_managements(vehicles_to_consider,vehicles_last_frame,vehicles_outgoing,vehicles_dict)
pedestrians_to_consider += output_p
vehicles_to_consider += output_v
pedestrians_last_frame = pedestrians_to_consider
vehicles_last_frame = vehicles_to_consider
# last_frame_agents = vehicles_associated + pedestrian_associated
#######
if SIMULATION_PERFECT:
vehicles_dict = _vehicles_dict
if not SIMULATION_PERFECT:
pedestrians = np.array(pedestrians_to_consider)
vehicles = np.array(vehicles_to_consider)
else:
pedestrians = np.array(pedestrians,dtype=object)
vehicles = np.array(vehicles)
# set current info about traffic light (status), pedestrian and vehicle
bp.set_tl_dict(tl_dict)
bp.set_pedestrians(pedestrians)
bp.set_vehicles(vehicles)
bp.set_vehicles_dict(vehicles_dict)
camera_data = sensor_data.get('CameraRGBView', None)
if camera_data is not None:
camera_data = to_bgra_array(camera_data)[:,:,:3]
cv2.imshow("CameraRGB", camera_data)
cv2.waitKey(10)
# Execute the behaviour and local planning in the current instance
# Note that updating the local path during every controller update
# produces issues with the tracking performance (imagine everytime
# the controller tried to follow the path, a new path appears). For
# this reason, the local planner (LP) will update every X frame,
# stored in the variable LP_FREQUENCY_DIVISOR, as it is analogous
# to be operating at a frequency that is a division to the
# simulation frequency.
if frame % LP_FREQUENCY_DIVISOR == 0:
# Compute open loop speed estimate.
open_loop_speed = lp._velocity_planner.get_open_loop_speed(current_timestamp - prev_timestamp)
# Calculate the goal state set in the local frame for the local planner.
# Current speed should be open loop for the velocity profile generation.
ego_state = [current_x, current_y, current_yaw, open_loop_speed]
# Set lookahead based on current speed.
bp.set_lookahead(BP_LOOKAHEAD_BASE + BP_LOOKAHEAD_TIME * open_loop_speed)
if True:
if WINDOWS_OS:
os.system("cls")
else:
os.system("clear")
print(f"[LOGINFO]: from {args.start} to {args.dest}\t[DESIRED_SPEED]: {DESIRED_SPEED} m/s")
print(f"[WEATHER]: {args.weather}")
print(f"[PEDESTRIANS]: {NUM_PEDESTRIANS}, {SEED_PEDESTRIANS}\t[VEHICLES]: {NUM_VEHICLES}, {SEED_VEHICLES}\n")
# Perform a state transition in the behavioural planner.
bp.transition_state(waypoints, ego_state, current_speed)
states = ["FOLLOW_LANE", "DECELERATE_TO_STOP", "STAY_STOPPED"]
print(f"[CURRENT_STATE]: {states[bp._state]}", end="\t")
print(f"[COLLISION]: {'Yes' if collided_flag else 'No'}")
print(f"[EGO_POS]: ({round(current_x, 2)}, {round(current_y, 2)})", end='\t')
print(f"[EGO_YAW]: {round(current_yaw*180/math.pi, 2)} deg", end='\t')
print(f"[EGO_SPEED]: {round(current_speed,2)} m/s")
print(f"[PEDESTRIAN_COLLISION_PREDICTED]: {'Yes' if bp._pedestrian_detected else 'No'}")
print(f"[VEHICLE_COLLISION_PREDICTED]: {'Yes' if bp._car_collision_predicted else 'No'}")
# print(f"[PED_POS]: (XXX.XX, XXX.XX)", end='\t')
# print(f"[PED_YAW]: X.XX deg", end='\t')
# print(f"[PED_SPEED]: X.XX m/s")
leader = bp._lead_vehicle
if leader is None:
print(f"[LEAD_POS]: (XXX.XX, XXX.XX)", end='\t')
print(f"[LEAD_YAW]: X.XX deg", end='\t')
print(f"[LEAD_SPEED]: X.XX m/s")
else:
leader_pos = leader.get_position()
print(f"[LEAD_POS]: ({round(leader_pos[0], 2)}, {round(leader_pos[1], 2)})", end='\t')
print(f"[LEAD_YAW]: {round(leader.get_orientation(), 2)} deg", end='\t')
print(f"[LEAD_SPEED]: {round(leader.get_speed(), 2)} m/s")
tl = bp._current_traffic_light
if len(tl) != 0:
print(f"[T_LIG_POS]: ({round(tl[1],2)}, {round(tl[2],2)})", end='\t')
print(f"[T_LIG_YAW]: {round(tl[3],2)} deg", end='\t')
statuses = ["GREEN", "YELLOW", "RED"]
print(f"[T_LIG_STATUS]: {statuses[bp._tl_dict[tl[0]]]}")
else:
print(f"[T_LIG_POS]: (XXX.XX, XXX.XX)", end='\t')
print(f"[T_LIG_YAW]: X.XX deg", end='\t')
print(f"[T_LIG_STATUS]: X.XX m/s")
else:
bp.transition_state(waypoints, ego_state, current_speed)
# Compute the goal state set from the behavioural planner's computed goal state.
goal_state_set = lp.get_goal_state_set(bp._goal_index, bp._goal_state, waypoints, ego_state)
# Calculate planned paths in the local frame.
paths, path_validity = lp.plan_paths(goal_state_set)
# Transform those paths back to the global frame.
paths = local_planner.transform_paths(paths, ego_state)
# Perform collision checking.
collision_check_array = lp._collision_checker.collision_check(paths, [])
# Compute the best local path.
best_index = lp._collision_checker.select_best_path_index(paths, collision_check_array, bp._goal_state)
# If no path was feasible, continue to follow the previous best path.
if best_index == None:
best_path = lp._prev_best_path
else:
best_path = paths[best_index]
lp._prev_best_path = best_path
if best_path is not None:
# Compute the velocity profile for the path, and compute the waypoints.
desired_speed = bp._goal_state[2]
decelerate_to_stop = bp._state == behavioural_planner.DECELERATE_TO_STOP
lead_car_state = None
if bp._lead_vehicle is not None:
lead_car_pos = bp._lead_vehicle.get_position()
lead_car_speed = bp._lead_vehicle.get_speed()
lead_car_state = [lead_car_pos[0],lead_car_pos[1],lead_car_speed]
local_waypoints = lp._velocity_planner.compute_velocity_profile(best_path, desired_speed, ego_state, current_speed, decelerate_to_stop, lead_car_state, bp._follow_lead_vehicle)
if local_waypoints != None:
# Update the controller waypoint path with the best local path.
# This controller is similar to that developed in Course 1 of this
# specialization. Linear interpolation computation on the waypoints
# is also used to ensure a fine resolution between points.
wp_distance = [] # distance array
local_waypoints_np = np.array(local_waypoints)
for i in range(1, local_waypoints_np.shape[0]):
wp_distance.append(
np.sqrt((local_waypoints_np[i, 0] - local_waypoints_np[i-1, 0])**2 +
(local_waypoints_np[i, 1] - local_waypoints_np[i-1, 1])**2))
wp_distance.append(0) # last distance is 0 because it is the distance
# from the last waypoint to the last waypoint
# Linearly interpolate between waypoints and store in a list
wp_interp = [] # interpolated values
# (rows = waypoints, columns = [x, y, v])
for i in range(local_waypoints_np.shape[0] - 1):
# Add original waypoint to interpolated waypoints list (and append
# it to the hash table)
wp_interp.append(list(local_waypoints_np[i]))
# Interpolate to the next waypoint. First compute the number of
# points to interpolate based on the desired resolution and
# incrementally add interpolated points until the next waypoint
# is about to be reached.
num_pts_to_interp = int(np.floor(wp_distance[i] /\
float(INTERP_DISTANCE_RES)) - 1)
wp_vector = local_waypoints_np[i+1] - local_waypoints_np[i]
wp_uvector = wp_vector / np.linalg.norm(wp_vector[0:2])
for j in range(num_pts_to_interp):
next_wp_vector = INTERP_DISTANCE_RES * float(j+1) * wp_uvector
wp_interp.append(list(local_waypoints_np[i] + next_wp_vector))
# add last waypoint at the end
wp_interp.append(list(local_waypoints_np[-1]))
# Update the other controller values and controls
controller.update_waypoints(wp_interp)
###
# Controller Update
###
if local_waypoints != None and local_waypoints != []:
controller.update_values(current_x, current_y, current_yaw,
current_speed,
current_timestamp, frame)
controller.update_controls()
cmd_throttle, cmd_steer, cmd_brake = controller.get_commands()
else:
cmd_throttle = 0.0
cmd_steer = 0.0
cmd_brake = 0.0
# Skip the first frame or if there exists no local paths
if skip_first_frame and frame == 0:
pass
elif local_waypoints == None:
pass
else:
# Update live plotter with new feedback
trajectory_fig.roll("trajectory", current_x, current_y)
trajectory_fig.roll("car", current_x, current_y)
if lead_car_state is not None:
current_lead_car_x = lead_car_state[0]
current_lead_car_y = lead_car_state[1]
else:
current_lead_car_x = 0
current_lead_car_y = 0
trajectory_fig.roll("leadcar", current_lead_car_x, current_lead_car_y)
# Load parked car points
obstacles = np.array(obstacles)
if len(obstacles) > 0:
x = obstacles[:,:,0]
y = obstacles[:,:,1]
trajectory_fig.roll("obstacles_points", x, y)
forward_speed_fig.roll("forward_speed",
current_timestamp,
current_speed)
forward_speed_fig.roll("reference_signal",
current_timestamp,
controller._desired_speed)
throttle_fig.roll("throttle", current_timestamp, cmd_throttle)
brake_fig.roll("brake", current_timestamp, cmd_brake)
steer_fig.roll("steer", current_timestamp, cmd_steer)
# Local path plotter update
if frame % LP_FREQUENCY_DIVISOR == 0:
path_counter = 0
for i in range(NUM_PATHS):
# If a path was invalid in the set, there is no path to plot.
if path_validity[i]:
# Colour paths according to collision checking.
if not collision_check_array[path_counter]:
colour = 'r'
elif i == best_index:
colour = 'k'
else:
colour = 'b'
trajectory_fig.update("local_path " + str(i), paths[path_counter][0], paths[path_counter][1], colour)
path_counter += 1
else:
trajectory_fig.update("local_path " + str(i), [ego_state[0]], [ego_state[1]], 'r')
# When plotting lookahead path, only plot a number of points
# (INTERP_MAX_POINTS_PLOT amount of points). This is meant
# to decrease load when live plotting
wp_interp_np = np.array(wp_interp)
path_indices = np.floor(np.linspace(0,
wp_interp_np.shape[0]-1,
INTERP_MAX_POINTS_PLOT))
trajectory_fig.update("selected_path",
wp_interp_np[path_indices.astype(int), 0],
wp_interp_np[path_indices.astype(int), 1],
new_colour=[1, 0.5, 0.0])
# Refresh the live plot based on the refresh rate
# set by the options
if enable_live_plot and \
live_plot_timer.has_exceeded_lap_period():
lp_traj.refresh()
lp_1d.refresh()
live_plot_timer.lap()
# Output controller command to CARLA server
send_control_command(client,
throttle=cmd_throttle,
steer=cmd_steer,
brake=cmd_brake)
# Find if reached the end of waypoint. If the car is within
# DIST_THRESHOLD_TO_LAST_WAYPOINT to the last waypoint,
# the simulation will end.
dist_to_last_waypoint = np.linalg.norm(np.array([
waypoints[-1][0] - current_x,
waypoints[-1][1] - current_y]))
if dist_to_last_waypoint < DIST_THRESHOLD_TO_LAST_WAYPOINT:
reached_the_end = True
if reached_the_end:
break
# End of demo - Stop vehicle and Store outputs to the controller output
# directory.
if reached_the_end:
print("Reached the end of path. Writing to controller_output...")
else:
print("Exceeded assessment time. Writing to controller_output...")
# Stop the car
send_control_command(client, throttle=0.0, steer=0.0, brake=1.0)
# Store the various outputs
store_trajectory_plot(trajectory_fig.fig, 'trajectory.png')
store_trajectory_plot(forward_speed_fig.fig, 'forward_speed.png')
store_trajectory_plot(throttle_fig.fig, 'throttle_output.png')
store_trajectory_plot(brake_fig.fig, 'brake_output.png')
store_trajectory_plot(steer_fig.fig, 'steer_output.png')
write_trajectory_file(x_history, y_history, speed_history, time_history,
collided_flag_history)
write_collisioncount_file(collided_flag_history)
def main():
    """Parse command-line arguments and run the waypoint navigation demo.

    Retries the demo in a loop until the CARLA server connection succeeds,
    then returns after one completed run.

    Args:
        -v, --verbose: print debug information
        -l, --local: use the local server host/port instead of the remote one
        -w, --weather: weather simulation preset
        -s, --start: player start index
        -d, --dest: player destination index
        -a, --autopilot: enable autopilot
        -q, --quality-level: graphics quality level [Low or Epic]
        -c, --carla-settings: Path to CarlaSettings.ini file
    """
    # NOTE(review): an earlier docstring also advertised "-i, --images-to-disk",
    # but no such argument is defined below -- confirm whether it was removed
    # intentionally.
    argparser = argparse.ArgumentParser(description=__doc__)
    argparser.add_argument(
        '-v', '--verbose',
        action='store_true',
        dest='debug',
        help='print debug information')
    argparser.add_argument(
        '--local', '-l',
        action='store_true',
        dest = 'local'
    )
    argparser.add_argument(
        '--weather', '-w',
        metavar='weather',
        type=str,
        default=WEATHER,
        help='Weather simulation'
    )
    argparser.add_argument(
        '-s', '--start',
        metavar='S',
        default = PLAYER_START_INDEX,
        type=int,
        help='Player start index')
    argparser.add_argument(
        '-d', '--dest',
        metavar='D',
        default = DESTINATION_INDEX,
        type=int,
        help='Player destination index')
    argparser.add_argument(
        '-a', '--autopilot',
        action='store_true',
        help='enable autopilot')
    argparser.add_argument(
        '-q', '--quality-level',
        choices=['Low', 'Epic'],
        # Normalize case ("low" -> "Low") before the choices check.
        type=lambda s: s.title(),
        default='Low',
        help='graphics quality level.')
    argparser.add_argument(
        '-c', '--carla-settings',
        metavar='PATH',
        dest='settings_filepath',
        default=None,
        help='Path to a "CarlaSettings.ini" file')
    args = argparser.parse_args()
    # Logging startup info
    log_level = logging.DEBUG if args.debug else logging.INFO
    logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
    # Pick server endpoint: remote by default, local when --local is given.
    if not args.local:
        host = SERVER_HOST; port = SERVER_PORT
    else:
        host = LOCAL_HOST; port = LOCAL_PORT
    #host = "192.168.1.128"; port = 2000
    logging.info('listening to server %s:%s', host, port)
    args.out_filename_format = '_out/episode_{:0>4d}/{:s}/{:0>6d}'
    # Execute when server connection is established
    while True:
        try:
            exec_waypoint_nav_demo(args, host, port)
            print('Done.')
            return
        except TCPConnectionError as error:
            # Server not reachable yet (or dropped); wait a second and retry.
            logging.error(error)
            time.sleep(1)
if __name__ == '__main__':
    # Entry point: run the demo; exit quietly on Ctrl-C.
    try:
        main()
    except KeyboardInterrupt:
        print('\nCancelled by user. Bye!')
| AlfonsoCom/AVD_BP | main.py | main.py | py | 80,582 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.path.append",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_num... |
26401762339 | # %%
import plotly.express as px
import plotly.graph_objects as go
import os
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pdb
def visualize_map():
    """Render the static 3D grid map (nodes + edges) to an HTML file.

    Reads <prefix>_Nodes.csv / <prefix>_Edges.csv with pandas, draws every
    edge as a segment of a single plotly Scatter3d trace (segments separated
    by None breaks), and writes hello_world/templates/<prefix>.html.
    """
    # %%
    map_prefix = "50_10_5_10_5_2"
    # Node table (columns include NodeId, X, Y, Z) and edge table
    # (columns include nodeFrom, nodeTo, bidirectional).
    ndf = pd.read_csv("" + map_prefix + "_Nodes.csv")
    edf = pd.read_csv("" + map_prefix + "_Edges.csv")
    # %%
    ### Plotly
    # NOTE(review): newDf is populated but never used, and its second field
    # repeats row["nodeFrom"] where row["nodeTo"] was presumably intended --
    # confirm before relying on it.
    newDf = []
    traces = []
    xLines, yLines, zLines = [], [], []
    for index, row in edf.iterrows():
        bidir = row['bidirectional']
        # Look up the coordinates of both endpoints of this edge.
        node1 = ndf.loc[ndf['NodeId'] == row['nodeFrom']]
        node2 = ndf.loc[ndf['NodeId'] == row['nodeTo']]
        xline = [node1['X'].iloc[0], node2['X'].iloc[0]]
        yline = [node1['Y'].iloc[0], node2['Y'].iloc[0]]
        zline = [node1['Z'].iloc[0], node2['Z'].iloc[0]]
        # aTrace = go.Scatter3d(x=xline, y=yline, z=zline, mode='lines', line=dict(color="blue"), hoverinfo='skip', showlegend=False)
        # traces.append(aTrace)
        vals = [xline[0], yline[0], zline[0], xline[1], yline[1], zline[1]]
        newDf.append([row["nodeFrom"], row["nodeFrom"], bidir, *vals])
        # None entries break the line between edges so all edges can live
        # in one trace (much faster than one trace per edge).
        xLines.extend([*xline, None])
        yLines.extend([*yline, None])
        zLines.extend([*zline, None])
    aTrace = go.Scatter3d(x=xLines, y=yLines, z=zLines, mode='lines', line=dict(color="blue"), hoverinfo='skip', showlegend=False)
    traces.append(aTrace)
    # fig = go.Figure(data=traces)
    # fig.write_image("testPlotly.png")
    # plt.show()
    # %%
    fig = go.Figure(data=traces)
    # fig.write_image("../figs/maps/" + map_prefix + ".png")
    fig.write_html("hello_world/templates/" + map_prefix + ".html")
def animate_paths():
    """Build an animated 3D plot of the multi-agent paths and save it as HTML.

    Reads paths.csv (one row per agent, one column per timestep holding a
    node id), builds a static line trace per agent plus one animation frame
    per timestep, and writes the interactive figure (play/pause buttons and
    a timestep slider) to hello_world/templates/backAndForth.html.
    """
    map_prefix = "50_10_5_10_5_2"
    ndf = pd.read_csv("" + map_prefix + "_Nodes.csv")
    pdf = pd.read_csv("paths.csv")
    pdf = pdf.iloc[:, :-1]  # Drop last empty column
    # %%
    ### Creating traces of
    pathTraces = []
    for index, row in pdf.iterrows():
        # row[1:] holds the node ids this agent visits over time; look up
        # their coordinates to draw the agent's full path as a line.
        tmpdf = ndf.iloc[row[1:]]
        aTrace = go.Scatter3d(x=tmpdf["X"], y=tmpdf["Y"], z=tmpdf["Z"], mode='lines', hoverinfo="skip", showlegend=False)
        pathTraces.append(aTrace)
    # %%
    # fig = go.Figure(data=pathTraces)
    # fig.write_image("testPlotly.png")
    # %%
    ### Create animations
    numFrames = len(pdf.columns) - 1 # First columns is the string "Timesteps"
    numAgents = pdf.shape[0]
    # One distinct colour index per agent, reused by every frame.
    agentColors = list(range(numAgents))

    def getSingleFrame(curT):
        # One animation frame: marker positions of every agent at time curT.
        curLocs = ndf.iloc[pdf[str(curT)]]
        return go.Frame(name=str(curT),
            data = go.Scatter3d(x=curLocs["X"], y=curLocs["Y"], z=curLocs["Z"],
                    mode="markers", marker=dict(size=6, color=agentColors), showlegend=False, hoverinfo="skip"))

    allFrames = [getSingleFrame(t) for t in range(numFrames)]
    # %%
    ### https://plotly.com/python/visualizing-mri-volume-slices/?_ga=2.213007632.583970308.1664493502-1988171524.1656003349
    def sliderFrameArgs(duration):
        # Shared animate() options for both the slider steps and the buttons.
        return {
            "frame": {"duration": duration},
            "mode": "immediate",
            "fromcurrent": True,
            "transition": {"duration": duration, "easing": "linear"},
        }

    # Slider with one step per frame, labelled by timestep index.
    sliders = [{
        "pad": {"b": 10, "t": 60},
        "len": 0.6,
        "x": 0.22,
        "y": 0,
        "steps": [
            {
                "args": [[f.name], sliderFrameArgs(300)],
                "label": str(k),
                "method": "animate",
            }
            for k, f in enumerate(allFrames)]
    }]
    fig = go.Figure(frames=allFrames,
            # data=traces, ## Show entire grid, significantly slows down animation
            # data=allFrames[0].data, ## First frame, no grid lines
            data=pathTraces, ## Show path traces, animation works fine
            layout=go.Layout(
                title="3D MAPF Animation",
                updatemenus=[dict(
                    type="buttons",
                    buttons=[dict(label="▶", # play symbol
                                  method="animate",
                                  args=[None, sliderFrameArgs(300)]),
                             dict(label="◼", # pause symbol
                                  method="animate",
                                  args=[[None], sliderFrameArgs(0)])
                             ],
                    direction="left",
                    pad={"r": 10, "t": 70},
                    x=0.22,
                    y=0)],
                sliders=sliders)
            )
    # fig.update_layout(sliders=sliders)
    # %%
    fig.write_html("hello_world/templates/backAndForth.html")
# %%
| pwang649/3D_MAPF | hello_world/core/visualization.py | visualization.py | py | 4,687 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.use",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objects.Sc... |
22117461324 | import rospy
from MyStatics.RealTimePlotter import RealTimePlotter
from MyStatics.GaussianPlotter import GaussPlot
from FaultDetection import ChangeDetection
from geometry_msgs.msg import AccelStamped
from dynamic_reconfigure.server import Server
from accelerometer_ros.cfg import accelerometerGaussConfig
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
class AccGaussCUSUM(RealTimePlotter, ChangeDetection, GaussPlot):
    """ROS node applying CUSUM change detection to accelerometer readings.

    Subscribes to an AccelStamped topic, buffers one reading per callback
    until ``cusum_window_size`` samples are collected, then runs the change
    detection and refreshes the per-axis Gaussian plot.

    The sample buffer (``self.samples``), ``addData``, ``changeDetection``,
    ``cum_sum``, ``call``, ``rv`` and ``update`` are provided by the base
    classes -- presumably RealTimePlotter / ChangeDetection / GaussPlot;
    confirm against their definitions.
    """

    def __init__(self, max_samples=500, pace=2, cusum_window_size=10):
        self.data_ = []
        self.data_.append([0, 0, 0])
        self.i = 0  # samples collected towards the current CUSUM window
        self.msg = 0
        self.window_size = cusum_window_size
        RealTimePlotter.__init__(self, max_samples, pace)
        ChangeDetection.__init__(self, 3)
        GaussPlot.__init__(self)
        rospy.init_node("accelerometer_gauss_cusum", anonymous=True)
        rospy.Subscriber("accel", AccelStamped, self.accCB)
        self.dyn_reconfigure_srv = Server(accelerometerGaussConfig, self.dynamic_reconfigureCB)
        plt.legend()
        plt.show()
        # NOTE: blocks inside __init__ until the node shuts down.
        rospy.spin()
        plt.close("all")

    def dynamic_reconfigureCB(self, config, level):
        """dynamic_reconfigure callback: update the CUSUM window size live."""
        self.window_size = config["window_size"]
        return config

    def accCB(self, msg):
        """Accelerometer callback: buffer one sample, process full windows.

        Collects linear x/y and angular z into the sample buffer; once
        ``window_size`` samples have accumulated, runs change detection and
        refreshes the plotted Gaussian PDFs.
        """
        # Note: the unconditional return makes this "while" behave like an
        # "if" -- exactly one sample is added per callback.
        while (self.i < self.window_size):
            self.addData([msg.accel.linear.x, msg.accel.linear.y, msg.accel.angular.z])
            self.i = self.i + 1
            # Keep the buffer a sliding window of at most max_samples.
            # BUG FIX: the original used "is" to compare these ints; identity
            # is not guaranteed for ints > 256, so the pop could never run.
            if len(self.samples) == self.max_samples:
                self.samples.pop(0)
            return
        self.i = 0
        self.changeDetection(len(self.samples))
        cur = np.array(self.cum_sum, dtype=object)
        self.call(np.mean(self.samples, axis=0), np.var(self.samples, axis=0))
        # THIS IS NOT REALLY WORKING (kept from the original for reference):
        # x1 = np.linspace(-140, 140, len(self.s_z))
        # print(len(x1), len(np.sort(self.s_z)))
        # plt.scatter([x1, x1, x1], np.sort(self.s_z))
        x = np.linspace(-140, 140, 200)
        # Evaluate each fitted distribution's PDF over a fixed range and
        # push the curves to the live plot, keyed by the message sequence id.
        y = np.array([i.pdf(x) for i in self.rv])
        self.update(msg.header.seq, x.tolist(), y.T.tolist())
| jcmayoral/collision_detector_observers | collision_observers/accelerometer/accelerometer_ros/src/fault_detection/AccGaussCUSUM.py | AccGaussCUSUM.py | py | 2,101 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.use",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "MyStatics.RealTimePlotter.RealTimePlotter",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "FaultDetection.ChangeDetection",
"line_number": 14,
"usage_type": "name"
},
... |
9401341695 | # coding: utf-8
import re
import requests
response = requests.get('http://ads.fraiburgo.ifc.edu.br')
if response.status_code == 200:
texto = response.content.decode('utf-8')
links = re.findall(r'<a href="(.*?)".*>(.*)</a>', texto)
for url in links:
print(url) | fabricioifc/python_regex_tarefa | exemplos_professor/regex_02.py | regex_02.py | py | 266 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 9,
"usage_type": "call"
}
] |
70879484348 | import collections
import functools
from operator import mul
def tokenize(s: str):
    """Split an expression string into a flat token list.

    Numbers become ints; parentheses and the "+"/"*" operators stay as
    single-character strings. Parentheses may be glued to their operand
    ("(2" or "3)") as in the puzzle input.
    """
    tokens = []
    for word in s.strip().split():
        # Peel any leading "(" off the front of the word.
        start = 0
        while start < len(word) and word[start] == "(":
            tokens.append("(")
            start += 1
        # Count (but don't yet emit) trailing ")" at the end of the word.
        end = len(word)
        closers = 0
        while end > start and word[end - 1] == ")":
            closers += 1
            end -= 1
        core = word[start:end]
        if core in ("+", "*"):
            tokens.append(core)
        elif core:
            tokens.append(int(core))
        tokens.extend(")" * closers)
    return tokens
def weird_math_a_helper(tokens):
    """Evaluate tokens left-to-right with "+" and "*" at equal precedence.

    Consumes tokens from the front of the deque up to (and including) the
    ")" closing the current group, or until the deque is exhausted, and
    returns the accumulated value.
    """
    total = None
    pending_op = None
    while tokens:
        tok = tokens.popleft()
        if tok == ")":
            break
        if tok in ("+", "*"):
            pending_op = tok
            continue
        # A number, or a "(" opening a sub-expression to evaluate recursively.
        value = weird_math_a_helper(tokens) if tok == "(" else tok
        if total is None:
            total = value
        elif pending_op == "+":
            total = total + value
        elif pending_op == "*":
            total = total * value
    return total
def weird_math_a(problem):
    """Evaluate one expression line using the part-A (flat precedence) rules."""
    queue = collections.deque(tokenize(problem))
    return weird_math_a_helper(queue)
def weird_math_b_helper(tokens):
    """Evaluate tokens where "+" binds tighter than "*" (part-B rules).

    Additions are folded into a running sum; each "*" closes the current
    sum off as a factor, and all factors are multiplied at the end.
    Consumes up to (and including) the ")" closing the current group.
    """
    factors = []
    current = None
    add_pending = False
    while tokens:
        tok = tokens.popleft()
        if tok == ")":
            break
        if tok == "*":
            # Finish the current additive run and start a new one.
            factors.append(current)
            current = None
            add_pending = False
        elif tok == "+":
            add_pending = True
        else:
            value = weird_math_b_helper(tokens) if tok == "(" else tok
            if current is None:
                current = value
            elif add_pending:
                current += value
    factors.append(current)
    return functools.reduce(mul, factors, 1)
def weird_math_b(problem):
    """Evaluate one expression line using the part-B (addition-first) rules."""
    queue = collections.deque(tokenize(problem))
    return weird_math_b_helper(queue)
def parta(txt):
    """Sum the part-A value of every expression line in *txt*."""
    total = 0
    for line in txt.splitlines():
        total += weird_math_a(line)
    return total
def partb(txt):
    """Sum the part-B value of every expression line in *txt*."""
    total = 0
    for line in txt.splitlines():
        total += weird_math_b(line)
    return total
if __name__ == "__main__":
    # aocd fetches this user's Advent of Code puzzle input (third-party package).
    from aocd import data
    print(f"parta: {parta(data)}")
    print(f"partb: {partb(data)}")
| cj81499/advent-of-code | src/aoc_cj/aoc2020/day18.py | day18.py | py | 2,065 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "collections.deque",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "functools.reduce",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "operator.mul",
"line_number": 68,
"usage_type": "argument"
},
{
"api_name": "collections.deque... |
10423383033 | from __future__ import annotations
import copy
import dataclasses
import json
from typing import TYPE_CHECKING
import pytest
from randovania.game_description.db.node_identifier import NodeIdentifier
from randovania.games.prime2.layout.echoes_configuration import EchoesConfiguration
from randovania.games.prime2.layout.translator_configuration import LayoutTranslatorRequirement
from randovania.gui import tracker_window
from randovania.layout.lib.teleporters import TeleporterShuffleMode
from randovania.layout.versioned_preset import VersionedPreset
if TYPE_CHECKING:
from pathlib import Path
from unittest.mock import MagicMock
@pytest.fixture(params=[{}, {"teleporters": TeleporterShuffleMode.ONE_WAY_ANYTHING, "translator_configuration": True}])
def layout_config(request, default_echoes_configuration):
    """Parametrized EchoesConfiguration: default, or with shuffled teleporters/translators."""
    if "translator_configuration" in request.param:
        translator_requirement = copy.copy(default_echoes_configuration.translator_configuration.translator_requirement)
        for gate in translator_requirement.keys():
            translator_requirement[gate] = LayoutTranslatorRequirement.RANDOM
            # NOTE(review): only the first gate is made RANDOM — confirm intentional.
            break
        new_gate = dataclasses.replace(
            default_echoes_configuration.translator_configuration, translator_requirement=translator_requirement
        )
        # Replace the boolean placeholder in the param dict with the real object.
        request.param["translator_configuration"] = new_gate
    return dataclasses.replace(default_echoes_configuration, **request.param)
def test_load_previous_state_no_previous_layout(tmp_path: Path, default_echoes_configuration):
    """An empty directory holds no persisted preset, so nothing can be restored."""
    assert tracker_window._load_previous_state(tmp_path, default_echoes_configuration) is None
def test_load_previous_state_previous_layout_not_json(tmp_path: Path, default_echoes_configuration):
    """A preset file that is not valid JSON must be silently ignored."""
    # Write garbage where the versioned preset is expected.
    tmp_path.joinpath("preset.rdvpreset").write_text("this is not a json")
    assert tracker_window._load_previous_state(tmp_path, default_echoes_configuration) is None
def test_load_previous_state_previous_layout_not_layout(tmp_path: Path, default_echoes_configuration):
    """Valid JSON that is not a versioned preset must be ignored too."""
    tmp_path.joinpath("preset.rdvpreset").write_text(json.dumps({"trick_level": "foo"}))
    tmp_path.joinpath("state.json").write_text("[]")
    assert tracker_window._load_previous_state(tmp_path, default_echoes_configuration) is None
def test_load_previous_state_missing_state(tmp_path: Path, default_preset):
    """A valid preset without a state.json yields no restored state."""
    VersionedPreset.with_preset(default_preset).save_to_file(tmp_path.joinpath("preset.rdvpreset"))
    assert tracker_window._load_previous_state(tmp_path, default_preset.configuration) is None
def test_load_previous_state_invalid_state(tmp_path: Path, default_preset):
    """An empty (unparseable) state.json is treated as no previous state."""
    VersionedPreset.with_preset(default_preset).save_to_file(tmp_path.joinpath("preset.rdvpreset"))
    tmp_path.joinpath("state.json").write_text("")
    assert tracker_window._load_previous_state(tmp_path, default_preset.configuration) is None
def test_load_previous_state_success(tmp_path: Path, default_preset):
    """When both preset and state files are valid, the stored state is returned."""
    expected = {"asdf": 5, "zxcv": 123}
    VersionedPreset.with_preset(default_preset).save_to_file(tmp_path.joinpath("preset.rdvpreset"))
    tmp_path.joinpath("state.json").write_text(json.dumps(expected))
    assert tracker_window._load_previous_state(tmp_path, default_preset.configuration) == expected
@pytest.mark.parametrize("shuffle_advanced", [False, True])
async def test_apply_previous_state(
    skip_qtbot, tmp_path: Path, default_echoes_preset, shuffle_advanced, echoes_game_description
):
    """Persist a known tracker state, reload it unchanged, then verify reset() replaces it.

    With shuffle_advanced the preset also shuffles teleporters one-way and
    randomizes a translator gate, and the persisted state gains matching data.
    """
    configuration = default_echoes_preset.configuration
    assert isinstance(configuration, EchoesConfiguration)
    if shuffle_advanced:
        translator_requirement = copy.copy(configuration.translator_configuration.translator_requirement)
        for gate in translator_requirement.keys():
            translator_requirement[gate] = LayoutTranslatorRequirement.RANDOM
            # NOTE(review): only the first gate is made RANDOM — confirm intentional.
            break
        new_gate = dataclasses.replace(
            configuration.translator_configuration, translator_requirement=translator_requirement
        )
        layout_config = dataclasses.replace(
            configuration,
            teleporters=dataclasses.replace(
                configuration.teleporters,
                mode=TeleporterShuffleMode.ONE_WAY_ANYTHING,
            ),
            translator_configuration=new_gate,
        )
        preset = dataclasses.replace(default_echoes_preset.fork(), configuration=layout_config)
    else:
        preset = default_echoes_preset
    # Tracker state as persisted on disk: visited actions, pickup counts,
    # teleporter connections, translator gate assignments and starting location.
    state: dict = {
        "actions": ["Temple Grounds/Landing Site/Save Station"],
        "collected_pickups": {
            "Amber Translator": 0,
            "Annihilator Beam": 0,
            "Boost Ball": 0,
            "Cobalt Translator": 0,
            "Dark Agon Key 1": 0,
            "Dark Agon Key 2": 0,
            "Dark Agon Key 3": 0,
            "Dark Ammo Expansion": 0,
            "Dark Beam": 0,
            "Dark Torvus Key 1": 0,
            "Dark Torvus Key 2": 0,
            "Dark Torvus Key 3": 0,
            "Dark Visor": 0,
            "Darkburst": 0,
            "Echo Visor": 0,
            "Emerald Translator": 0,
            "Energy Tank": 0,
            "Grapple Beam": 0,
            "Gravity Boost": 0,
            "Ing Hive Key 1": 0,
            "Ing Hive Key 2": 0,
            "Ing Hive Key 3": 0,
            "Light Ammo Expansion": 0,
            "Light Beam": 0,
            "Missile Expansion": 0,
            "Missile Launcher": 0,
            "Morph Ball Bomb": 0,
            "Power Bomb": 0,
            "Power Bomb Expansion": 0,
            "Progressive Suit": 0,
            "Screw Attack": 0,
            "Seeker Launcher": 0,
            "Sky Temple Key 1": 0,
            "Sky Temple Key 2": 0,
            "Sky Temple Key 3": 0,
            "Sky Temple Key 4": 0,
            "Sky Temple Key 5": 0,
            "Sky Temple Key 6": 0,
            "Sky Temple Key 7": 0,
            "Sky Temple Key 8": 0,
            "Sky Temple Key 9": 0,
            "Sonic Boom": 0,
            "Space Jump Boots": 1,
            "Spider Ball": 0,
            "Sunburst": 0,
            "Super Missile": 0,
            "Violet Translator": 0,
        },
        "teleporters": [
            {
                "data": None,
                "teleporter": {
                    "area": "Transport to Temple Grounds",
                    "node": "Elevator to Temple Grounds",
                    "region": "Agon Wastes",
                },
            },
            {
                "data": None,
                "teleporter": {
                    "area": "Transport to Torvus Bog",
                    "node": "Elevator to Torvus Bog",
                    "region": "Agon Wastes",
                },
            },
            {
                "data": None,
                "teleporter": {
                    "area": "Transport to Sanctuary Fortress",
                    "node": "Elevator to Sanctuary Fortress",
                    "region": "Agon Wastes",
                },
            },
            {
                "data": None,
                "teleporter": {
                    "area": "Temple Transport C",
                    "node": "Elevator to Temple Grounds",
                    "region": "Great Temple",
                },
            },
            {
                "data": None,
                "teleporter": {
                    "area": "Temple Transport A",
                    "node": "Elevator to Temple Grounds",
                    "region": "Great Temple",
                },
            },
            {
                "data": None,
                "teleporter": {
                    "area": "Temple Transport B",
                    "node": "Elevator to Temple Grounds",
                    "region": "Great Temple",
                },
            },
            {
                "data": None,
                "teleporter": {
                    "area": "Aerie",
                    "node": "Elevator to Aerie Transport Station",
                    "region": "Sanctuary Fortress",
                },
            },
            {
                "data": None,
                "teleporter": {
                    "area": "Aerie Transport Station",
                    "node": "Elevator to Aerie",
                    "region": "Sanctuary Fortress",
                },
            },
            {
                "data": None,
                "teleporter": {
                    "area": "Transport to Temple Grounds",
                    "node": "Elevator to Temple Grounds",
                    "region": "Sanctuary Fortress",
                },
            },
            {
                "data": None,
                "teleporter": {
                    "area": "Transport to Agon Wastes",
                    "node": "Elevator to Agon Wastes",
                    "region": "Sanctuary Fortress",
                },
            },
            {
                "data": None,
                "teleporter": {
                    "area": "Transport to Torvus Bog",
                    "node": "Elevator to Torvus Bog",
                    "region": "Sanctuary Fortress",
                },
            },
            {
                "data": None,
                "teleporter": {
                    "area": "Sky Temple Energy Controller",
                    "node": "Elevator to Temple Grounds",
                    "region": "Great Temple",
                },
            },
            {
                "data": None,
                "teleporter": {
                    "area": "Sky Temple Gateway",
                    "node": "Elevator to Great Temple",
                    "region": "Temple Grounds",
                },
            },
            {
                "data": None,
                "teleporter": {
                    "area": "Transport to Agon Wastes",
                    "node": "Elevator to Agon Wastes",
                    "region": "Temple Grounds",
                },
            },
            {
                "data": None,
                "teleporter": {
                    "area": "Temple Transport B",
                    "node": "Elevator to Great Temple",
                    "region": "Temple Grounds",
                },
            },
            {
                "data": None,
                "teleporter": {
                    "area": "Transport to Sanctuary Fortress",
                    "node": "Elevator to Sanctuary Fortress",
                    "region": "Temple Grounds",
                },
            },
            {
                "data": None,
                "teleporter": {
                    "area": "Temple Transport A",
                    "node": "Elevator to Great Temple",
                    "region": "Temple Grounds",
                },
            },
            {
                "data": None,
                "teleporter": {
                    "area": "Transport to Torvus Bog",
                    "node": "Elevator to Torvus Bog",
                    "region": "Temple Grounds",
                },
            },
            {
                "data": None,
                "teleporter": {
                    "area": "Temple Transport C",
                    "node": "Elevator to Great Temple",
                    "region": "Temple Grounds",
                },
            },
            {
                "data": None,
                "teleporter": {
                    "area": "Transport to Sanctuary Fortress",
                    "node": "Elevator to Sanctuary Fortress",
                    "region": "Torvus Bog",
                },
            },
            {
                "data": None,
                "teleporter": {
                    "area": "Transport to Temple Grounds",
                    "node": "Elevator to Temple Grounds",
                    "region": "Torvus Bog",
                },
            },
            {
                "data": None,
                "teleporter": {
                    "area": "Transport to Agon Wastes",
                    "node": "Elevator to Agon Wastes",
                    "region": "Torvus Bog",
                },
            },
        ],
        "configurable_nodes": {
            "Agon Wastes/Mining Plaza/Translator Gate": None,
            "Agon Wastes/Mining Station A/Translator Gate": None,
            "Great Temple/Temple Sanctuary/Transport A Translator Gate": None,
            "Great Temple/Temple Sanctuary/Transport B Translator Gate": None,
            "Great Temple/Temple Sanctuary/Transport C Translator Gate": None,
            "Sanctuary Fortress/Reactor Core/Translator Gate": None,
            "Sanctuary Fortress/Sanctuary Temple/Translator Gate": None,
            "Temple Grounds/GFMC Compound/Translator Gate": None,
            "Temple Grounds/Hive Access Tunnel/Translator Gate": None,
            "Temple Grounds/Hive Transport Area/Translator Gate": None,
            "Temple Grounds/Industrial Site/Translator Gate": None,
            "Temple Grounds/Meeting Grounds/Translator Gate": None,
            "Temple Grounds/Path of Eyes/Translator Gate": None,
            "Temple Grounds/Temple Assembly Site/Translator Gate": None,
            "Torvus Bog/Great Bridge/Translator Gate": None,
            "Torvus Bog/Torvus Temple/Elevator Translator Scan": None,
            "Torvus Bog/Torvus Temple/Translator Gate": None,
        },
        "starting_location": {"region": "Temple Grounds", "area": "Landing Site", "node": "Save Station"},
    }
    if shuffle_advanced:
        # Mirror the shuffled preset in the persisted state: connect one Agon
        # teleporter somewhere concrete and assign one translator gate.
        for teleporter in state["teleporters"]:
            if (
                teleporter["teleporter"]["region"] == "Agon Wastes"
                and teleporter["teleporter"]["node"] == "Elevator to Sanctuary Fortress"
                and teleporter["teleporter"]["area"] == "Transport to Sanctuary Fortress"
            ):
                teleporter["data"] = {
                    "area": "Agon Energy Controller",
                    "region": "Agon Wastes",
                    "node": "Door to Controller Access",
                }
        state["configurable_nodes"]["Temple Grounds/Hive Access Tunnel/Translator Gate"] = "violet"
    VersionedPreset.with_preset(preset).save_to_file(tmp_path.joinpath("preset.rdvpreset"))
    tmp_path.joinpath("state.json").write_text(json.dumps(state), "utf-8")
    # Run
    window = await tracker_window.TrackerWindow.create_new(tmp_path, preset)
    skip_qtbot.add_widget(window)
    # Assert: loading the window must not rewrite the persisted state...
    assert window.state_for_current_configuration() is not None
    persisted_data = json.loads(tmp_path.joinpath("state.json").read_text("utf-8"))
    assert persisted_data == state
    # ...but resetting and re-persisting must change it.
    window.reset()
    window.persist_current_state()
    persisted_data = json.loads(tmp_path.joinpath("state.json").read_text("utf-8"))
    assert persisted_data != state
async def test_load_multi_starting_location(
    skip_qtbot, tmp_path: Path, default_echoes_configuration, default_echoes_preset, mocker
):
    """With several starting locations, the user is prompted to pick one."""
    start_nodes = (
        NodeIdentifier.create("Temple Grounds", "Landing Site", "Save Station"),
        NodeIdentifier.create("Temple Grounds", "Temple Transport C", "Elevator to Great Temple"),
    )
    config = dataclasses.replace(
        default_echoes_configuration,
        starting_location=dataclasses.replace(default_echoes_configuration.starting_location, locations=start_nodes),
    )
    preset = dataclasses.replace(default_echoes_preset.fork(), configuration=config)
    # Simulate the user picking the second location in the dialog.
    picked = ("Temple Grounds/Temple Transport C/Elevator to Great Temple", True)
    mock_get_item: MagicMock = mocker.patch("PySide6.QtWidgets.QInputDialog.getItem", return_value=picked)
    window = await tracker_window.TrackerWindow.create_new(tmp_path, preset)
    skip_qtbot.add_widget(window)
    mock_get_item.assert_called_once()
    state = window.state_for_current_configuration()
    assert state is not None
    assert state.node.identifier == start_nodes[1]
async def test_load_single_starting_location(
    skip_qtbot, tmp_path: Path, default_echoes_configuration, default_echoes_preset
):
    """A single starting location is used directly, with no prompt."""
    start_nodes = (NodeIdentifier.create("Temple Grounds", "Temple Transport C", "Elevator to Great Temple"),)
    config = dataclasses.replace(
        default_echoes_configuration,
        starting_location=dataclasses.replace(default_echoes_configuration.starting_location, locations=start_nodes),
    )
    preset = dataclasses.replace(default_echoes_preset.fork(), configuration=config)
    window = await tracker_window.TrackerWindow.create_new(tmp_path, preset)
    skip_qtbot.add_widget(window)
    state = window.state_for_current_configuration()
    assert state is not None
    assert state.node.identifier == start_nodes[0]
async def test_preset_without_starting_location(
    skip_qtbot, tmp_path: Path, default_echoes_configuration, default_echoes_preset
):
    """A preset with no starting locations at all must be rejected."""
    config = dataclasses.replace(
        default_echoes_configuration,
        starting_location=dataclasses.replace(default_echoes_configuration.starting_location, locations=()),
    )
    preset = dataclasses.replace(default_echoes_preset.fork(), configuration=config)
    with pytest.raises(ValueError, match="Preset without a starting location"):
        await tracker_window.TrackerWindow.create_new(tmp_path, preset)
| randovania/randovania | test/gui/test_tracker_window.py | test_tracker_window.py | py | 17,755 | python | en | code | 165 | github-code | 6 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "copy.copy",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "randovania.games.prime2.layout.translator_configuration.LayoutTranslatorRequirement.RANDOM",
"line_number": 27... |
74548599546 | import boto3
from botocore.exceptions import NoCredentialsError
def upload_to_aws(local_file, bucket, s3_file):
    """Upload local_file to the given S3 bucket under key s3_file.

    Returns True on success, False when the file is missing or AWS
    credentials are unavailable.
    """
    client = boto3.client('s3')
    try:
        client.upload_file(local_file, bucket, s3_file)
    except FileNotFoundError:
        print("The file was not found")
        return False
    except NoCredentialsError:
        print("Credentials not available")
        return False
    print("Upload Successful")
    return True
if __name__ == "__main__":
uploaded = upload_to_aws('Screenshot (288).png', 'group-6-marxel-pictures', 'test.png') | HULKMARXEL/Group_6_AWS_project | localhost/S3.py | S3.py | py | 562 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "boto3.client",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "botocore.exceptions.NoCredentialsError",
"line_number": 13,
"usage_type": "name"
}
] |
19399824149 | from typing import List
# 438. 找到字符串中所有字母异位词
# https://leetcode-cn.com/problems/find-all-anagrams-in-a-string/
class Solution:
    """LeetCode 438: find all start indices of p's anagrams in s (sliding window)."""

    def findAnagrams(self, s: str, p: str) -> List[int]:
        target = self.to_vector(p)
        window = self.to_vector(s[0: len(p) - 1])
        result = []
        start = 0
        # Slide a window of len(p) across s, keeping its letter counts updated.
        for end in range(len(p) - 1, len(s)):
            window[ord(s[end]) - ord('a')] += 1
            if window == target:
                result.append(start)
            window[ord(s[start]) - ord('a')] -= 1
            start += 1
        return result

    # Unused helper kept for reference (original note: "not needed").
    def compare(self, p_vec, word):
        counts = dict()
        for idx, ch in enumerate(word):
            counts[ch] = counts.get(ch, 0) + 1
            if ch not in p_vec:
                return False, idx + 1
            if counts[ch] > p_vec[ch]:
                return False, 1
        return p_vec == counts, 1

    def to_vector(self, word):
        """Return a 26-slot letter-frequency vector for a lowercase word."""
        freq = [0] * 26
        for ch in word:
            freq[ord(ch) - ord('a')] += 1
        return freq
# Quick demo run of the sliding-window anagram search.
s = "abab"
p = "ab"
print(s)
r = Solution().findAnagrams(s, p)
print(r)
| Yigang0622/LeetCode | findAnagrams.py | findAnagrams.py | py | 1,375 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "typing.List",
"line_number": 7,
"usage_type": "name"
}
] |
71643512507 | import pygame
from pygame.sprite import Sprite
class Button(Sprite):
    """A clickable rectangular button rendered with pygame.

    When clicked it writes its function_num into ai_settings.mouse_state.
    """

    def __init__(self, ai_settings, screen, msg, position, function_num):
        super(Button, self).__init__()
        self.screen = screen
        self.screen_rect = screen.get_rect()
        self.ai_settings = ai_settings
        self.function_num = function_num
        # Button geometry and colors.
        self.width, self.height = 160, 50
        self.button_color = (230, 230, 230)
        self.text_color = self.ai_settings.BLACK
        self.font = pygame.font.SysFont(None, 48)
        self.rect = pygame.Rect(position, (self.width, self.height))
        # The label image only needs to be rendered once.
        self.prep_msg(msg)

    def prep_msg(self, msg):
        """Render msg into an image centered on the button."""
        self.msg_image = self.font.render(msg, True, self.text_color, self.button_color)
        self.msg_image_rect = self.msg_image.get_rect(center=self.rect.center)

    def is_over(self):
        """Return True if the mouse cursor is inside the button."""
        mouse_x, mouse_y = pygame.mouse.get_pos()
        left, top = self.rect.x, self.rect.y
        return (left < mouse_x < left + self.width) and (top < mouse_y < top + self.height)

    def update(self):
        """Draw the button, highlighting on hover and reacting to clicks."""
        fill_color = self.button_color
        if self.is_over():
            if self.ai_settings.pressed[0] and self.ai_settings.is_upped:
                # Pressed: darken, record which function was chosen, and
                # swallow the click until the mouse button is released again.
                fill_color = (190, 190, 190)
                self.ai_settings.mouse_state = self.function_num
                self.ai_settings.is_upped = False
            else:
                # Hover only: lighten slightly.
                fill_color = (220, 220, 220)
        self.screen.fill(fill_color, self.rect)
        self.screen.blit(self.msg_image, self.msg_image_rect)
# r1rect = pygame.draw.rect(screen, BLACK, (20, 52.25, 160, 50), 1)
# r2rect = pygame.draw.rect(screen, BLACK, (20, 206.75, 160, 50), 1)
# r3rect = pygame.draw.rect(screen, BLACK, (20, 361.25, 160, 50), 1)
# r4rect = pygame.draw.rect(screen, BLACK, (20, 515.75, 160, 50), 1)
# r5rect = pygame.draw.rect(screen, BLACK, (205, 52.25, 160, 50), 1)
# r6rect = pygame.draw.rect(screen, BLACK, (205, 206.75, 160, 50), 1)
# r7rect = pygame.draw.rect(screen, BLACK, (205, 361.25, 160, 50), 1)
# r8rect = pygame.draw.rect(screen, BLACK, (205, 515.75, 160, 50), 1)
#
# f1 = pygame.freetype.Font('德彪钢笔行书字库.TTF', 36)
# f1rect = f1.render_to(screen, (40, 58), "上一层", fgcolor=BLACK, size=40)
# f2rect = f1.render_to(screen, (40, 212.5), "下一层", fgcolor=BLACK, size=40)
# f3rect = f1.render_to(screen, (57, 367), "删除", fgcolor=BLACK, size=40)
# f4rect = f1.render_to(screen, (57, 521.5), "完成", fgcolor=BLACK, size=40)
# f5rect = f1.render_to(screen, (225, 58), "长砖块", fgcolor=BLACK, size=40)
# f6rect = f1.render_to(screen, (225, 212.5), "短砖块", fgcolor=BLACK, size=40)
# f7rect = f1.render_to(screen, (225, 367), "厚砖块", fgcolor=BLACK, size=40)
# f8rect = f1.render_to(screen, (225, 521.5), "其他块", fgcolor=BLACK, size=40)
| Karllzy/Architect | button.py | button.py | py | 3,292 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "pygame.sprite.Sprite",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "pygame.font.SysFont",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "pygame.Rect"... |
41554384385 | import sys
import nimfa
import numpy as np
import scipy.sparse as sp
import pandas as pd
import gc
import os
import math
import mysql.connector
import random
import collections
from scipy.sparse.linalg import svds
from sklearn.model_selection import KFold
from multiprocessing import Pool
# import module
import machine_learning as ml
import evaluation as ev
import parsonal_value as pv
#np.set_printoptions(threshold=np.inf)
# Default values
CPU = 1
dataset = "movie.review"
eta0 = 0.45
repeate = 10
sepalate = 1
attribute = 5
def learning(method, train_matrix, train_index, data, user_list, item_list):
    """Train a model on train_matrix and return the user-factor matrix.

    method is "SVD" (truncated SVD with `attribute` factors) or "ML3_liner"
    (personal-value model via pv.rmrate + ml.pv_ml3). Returns u for either.
    """
    if method == "SVD":
        u, s, vt = svds(train_matrix, k=attribute)
        # Persist the decomposition for later inspection.
        np.savetxt("u.csv", u, delimiter=",")
        np.savetxt("s.csv", s, delimiter=",")
        np.savetxt("vt.csv", vt, delimiter=",")
        s_diag_matrix = np.diag(s)  # NOTE(review): computed but unused in this branch
        return u
    elif method == "ML3_liner":
        u, v = pv.rmrate(train_index, data, user_list, item_list, attribute)
        R = ml.pv_ml3(train_matrix, eta0, u, v, attribute)  # NOTE(review): R is discarded; only u is returned
        return u
def learning2(method, train_matrix, train_index, data, user_list, item_list, u2):
    """Predict ratings by combining user factors u2 (from another domain)
    with item-side factors learned on train_matrix.

    Returns the full predicted rating matrix for the users represented by u2.
    """
    if method == "SVD":
        u, s, vt = svds(train_matrix, k=attribute)
        s_diag_matrix = np.diag(s)
        # Cross-domain prediction: u2 comes from the source domain, s/vt from here.
        return np.dot(np.dot(u2, s_diag_matrix), vt)
    elif method == "ML3_liner":
        u, v = pv.rmrate(train_index, data, user_list, item_list, attribute)
        R = ml.pv_ml3(train_matrix, eta0, u, v, attribute)
        return np.dot(np.dot(u2, R), v.T)
#=======================================================================================================================
# Name : makeMatrix
# Argument : ALL ... All data from numpy
# Purpose ... porpese index
# Role : make user-item matrix from evaluation format
#=======================================================================================================================
def makeMatrix(data, index, user_list, item_list):
    """Build a user x item rating matrix (CSR) from the rows of data selected by index."""
    # lil_matrix supports cheap incremental assignment of sparse entries.
    ratings = sp.lil_matrix((len(user_list), len(item_list)))
    selected = pd.DataFrame(assign_index(data, index))
    for row in selected.itertuples():
        # row[1] = user ID, row[2] = item ID, row[3] = rating value
        ratings[row[1], row[2]] = row[3]
    return ratings.tocsr()
#=======================================================================================================================
# Name : assign_index
# Argument : ALL ... All data from numpy
# Purpose ... purpose index
# Role : assign separated index data into numpy format
#=======================================================================================================================
def assign_index(ALL, Purpose):
    """Copy the rows of ALL selected by the index list Purpose into a new int array."""
    # Row width: user_ID, item_ID, time plus the `attribute` rating columns.
    picked = np.zeros((len(Purpose), attribute + 3)).astype(np.int64)
    for dst, src in enumerate(Purpose):
        picked[dst] = ALL[src]
    return picked
#=======================================================================================================================
# Name : users_in_testdata
# Argument : n ... top-N recommendation count
# test_matrix ... matrix witch elements include only in the test data
# user_list ... user's ID
# Role : make users list (the number of evaluations in test data is more than n)
#=======================================================================================================================
def users_in_testdata(n, test_matrix, user_list):
    """Return a 0/1 vector flagging users whose test row holds at least n ratings."""
    flags = np.zeros(len(user_list)).astype(np.int64)
    dense = test_matrix.todense()
    for idx, row in enumerate(dense):
        if row[row.nonzero()].size >= n:
            flags[idx] = 1
    return flags
def nDCG(n, pred, test, user_list, item_list, count_dict):
    """Compute per-user nDCG@n plus distribution statistics.

    pred/test are dense user x item arrays. Returns the per-user nDCG vector,
    its mean/std/max/min, histogram counts of nDCG value ranges (D02..D10),
    and average nDCG bucketed by how many test ratings each user has.
    NOTE(review): count_dict is unused here; count/count_c may divide 0/0
    (numpy yields nan) for empty buckets.
    """
    # initialization
    nDCG = np.zeros(len(user_list))
    D02 = 0
    D04 = 0
    D06 = 0
    D08 = 0
    D10 = 0
    l = 0  # NOTE(review): unused
    count = np.array([0.,0.,0.,0.,0.,0.,0.])
    count_c = np.array([0,0,0,0,0,0,0])
    for i,p in enumerate(pred):
        # user : i
        # predicted score : p
        # initialize DCG and iDCG
        DCG = 0.
        iDCG = 0.
        # extract test data for user i
        t = test[i]
        # ground_truth : non zero list for test data
        ground_truth = t.nonzero()
        # predicted score corresponding to test data
        p_test = p[ground_truth]
        # ranking of predicted score
        ranking_p_arg = np.argsort(p_test)[::-1]
        # item ID of test data
        test_item = item_list[ground_truth]
        # test data rating
        truth = t[ground_truth]
        # ranking of test data's ratings
        ranking_t = np.sort(truth)[::-1]
        # only score users with at least n evaluations in the test data
        if len(ranking_p_arg) >= n:
            # j : recommendation result of top-N
            # k : item ID in test data
            for j in range(n):
                for k in range(len(test_item)):
                    # calculate DCG
                    if k == ranking_p_arg[j]:
                        if j == 0:
                            DCG = truth[k]
                        else:
                            DCG = DCG + (truth[k] / math.log(j + 1, 2))
                # calculate iDCG (ideal ranking of the true ratings)
                if j == 0:
                    iDCG = ranking_t[j]
                else:
                    iDCG = iDCG + (ranking_t[j] / math.log(j + 1, 2))
            # calculate nDCG
            nDCG[i] = DCG / iDCG
            # histogram of nDCG values
            if nDCG[i] <= 0.2:
                D02 = D02 + 1
            elif nDCG[i] <= 0.4:
                D04 = D04 + 1
            elif nDCG[i] <= 0.6:
                D06 = D06 + 1
            elif nDCG[i] <= 0.8:
                D08 = D08 + 1
            else:
                D10 = D10 + 1
            # bucket nDCG by the user's number of test ratings
            if len(ranking_p_arg) <= 3:
                count[0] = count[0] + nDCG[i]
                count_c[0] = count_c[0] + 1
            elif len(ranking_p_arg) <= 6:
                count[1] = count[1] + nDCG[i]
                count_c[1] = count_c[1] + 1
            elif len(ranking_p_arg) <= 10:
                count[2] = count[2] + nDCG[i]
                count_c[2] = count_c[2] + 1
            elif len(ranking_p_arg) <= 20:
                count[3] = count[3] + nDCG[i]
                count_c[3] = count_c[3] + 1
            elif len(ranking_p_arg) <= 30:
                count[4] = count[4] + nDCG[i]
                count_c[4] = count_c[4] + 1
            elif len(ranking_p_arg) <= 40:
                count[5] = count[5] + nDCG[i]
                count_c[5] = count_c[5] + 1
            else:
                count[6] = count[6] + nDCG[i]
                count_c[6] = count_c[6] + 1
    count = count / count_c
    return nDCG, np.mean(nDCG), np.std(nDCG), np.max(nDCG), np.min(nDCG), D02, D04, D06, D08, D10, count, count_c
def precision(n, pred, test, user_list, item_list, count_dict):
    """Compute per-user precision@n and recall@n plus distribution statistics.

    A test rating >= 4.0 counts as a relevant item. Also tallies, per item
    popularity bucket (via count_dict), how often items are recommended
    (count_recom) and how many distinct items are recommended (count_recom2).
    NOTE(review): bucket averages may divide 0/0 (numpy yields nan) for
    empty buckets; the print below is debug output.
    """
    # initialization
    precision = np.zeros(len(user_list))
    recall = np.zeros(len(user_list))
    p00 = 0
    p033 = 0
    p066 = 0
    p100 = 0
    r02 = 0
    r04 = 0
    r06 = 0
    r08 = 0
    r10 = 0
    count_pre = np.array([0.,0.,0.,0.,0.,0.,0.])
    count_c_pre = np.array([0,0,0,0,0,0,0])
    count_rec = np.array([0.,0.,0.,0.,0.,0.,0.])
    count_c_rec = np.array([0,0,0,0,0,0,0])
    count_recom = np.array([0.,0.,0.,0.,0.,0.,0.])
    count_recom2 = np.array([0.,0.,0.,0.,0.,0.,0.])
    x = np.array( [] )  # indices of items already counted as recommended once
    for i, p in enumerate(pred):
        #initialization
        tp = 0
        fp = 0
        truth_all = 0
        t = test[i]
        ground_truth = t.nonzero()
        ground_truth2 = p.nonzero()
        # rankings over test-only predictions and over all nonzero predictions
        ranking_p_arg = np.argsort(p[ground_truth])[::-1]
        ranking_p_arg2 = np.argsort(p[ground_truth2])[::-1]
        test_item = item_list[ground_truth]
        if len(ranking_p_arg2) >= 3:
            print(i,item_list[ranking_p_arg2[0:3]])
            if i == 0:
                x = np.append( x, ranking_p_arg2 )
            else:
                # bucket the top-n recommended items by their popularity
                for v in range(n):
                    if count_dict[item_list[ranking_p_arg2[v]]] <= 3:
                        count_recom[0] += 1
                        if ranking_p_arg2[v] not in x:
                            x = np.append( x, ranking_p_arg2[v] )
                            count_recom2[0] += 1
                    elif count_dict[item_list[ranking_p_arg2[v]]] <= 6:
                        count_recom[1] += 1
                        if ranking_p_arg2[v] not in x:
                            x = np.append( x, ranking_p_arg2[v] )
                            count_recom2[1] += 1
                    elif count_dict[item_list[ranking_p_arg2[v]]] <= 10:
                        count_recom[2] += 1
                        if ranking_p_arg2[v] not in x:
                            x = np.append( x, ranking_p_arg2[v] )
                            count_recom2[2] += 1
                    elif count_dict[item_list[ranking_p_arg2[v]]] <= 20:
                        count_recom[3] += 1
                        if ranking_p_arg2[v] not in x:
                            x = np.append( x, ranking_p_arg2[v] )
                            count_recom2[3] += 1
                    elif count_dict[item_list[ranking_p_arg2[v]]] <= 30:
                        count_recom[4] += 1
                        if ranking_p_arg2[v] not in x:
                            x = np.append( x, ranking_p_arg2[v] )
                            count_recom2[4] += 1
                    elif count_dict[item_list[ranking_p_arg2[v]]] <= 40:
                        count_recom[5] += 1
                        if ranking_p_arg2[v] not in x:
                            x = np.append( x, ranking_p_arg2[v] )
                            count_recom2[5] += 1
                    else:
                        count_recom[6] += 1
                        if ranking_p_arg2[v] not in x:
                            x = np.append( x, ranking_p_arg2[v] )
                            count_recom2[6] += 1
        # only score users with at least 3 evaluations in the test data
        if len(ranking_p_arg) >= 3:
            # true ratings
            truth = t[ground_truth]
            for j in range(n):
                for k in range(len(test_item)):
                    if k == ranking_p_arg[j]:
                        # good impression for items
                        if truth[k] >= 4.:
                            tp = tp + 1.0
                        # bad impression
                        else:
                            fp = fp + 1.0
            # all items having good impression
            for j in range(len(truth)):
                if truth[j] >= 4.0:
                    truth_all += 1
            # calculate precision
            precision[i] = tp / (tp + fp)
            # calculate recall
            if truth_all > 0:
                recall[i] = tp / truth_all
            # histogram of precision values
            if precision[i] == 0:
                p00 = p00 + 1
            elif precision[i] < 0.4:
                p033 = p033 + 1
            elif precision[i] < 0.7:
                p066 = p066 + 1
            else:
                p100 = p100 + 1
            # histogram of recall values
            if recall[i] <= 0.2:
                r02 = r02 + 1
            elif recall[i] <= 0.4:
                r04 = r04 + 1
            elif recall[i] <= 0.6:
                r06 = r06 + 1
            elif recall[i] <= 0.8:
                r08 = r08 + 1
            else:
                r10 = r10 + 1
            # bucket precision by the user's number of test ratings
            if len(ranking_p_arg) <= 3:
                count_pre[0] = count_pre[0] + precision[i]
                count_c_pre[0] = count_c_pre[0] + 1
            elif len(ranking_p_arg) <= 6:
                count_pre[1] = count_pre[1] + precision[i]
                count_c_pre[1] = count_c_pre[1] + 1
            elif len(ranking_p_arg) <= 10:
                count_pre[2] = count_pre[2] + precision[i]
                count_c_pre[2] = count_c_pre[2] + 1
            elif len(ranking_p_arg) <= 20:
                count_pre[3] = count_pre[3] + precision[i]
                count_c_pre[3] = count_c_pre[3] + 1
            elif len(ranking_p_arg) <= 30:
                count_pre[4] = count_pre[4] + precision[i]
                count_c_pre[4] = count_c_pre[4] + 1
            elif len(ranking_p_arg) <= 40:
                count_pre[5] = count_pre[5] + precision[i]
                count_c_pre[5] = count_c_pre[5] + 1
            else:
                count_pre[6] = count_pre[6] + precision[i]
                count_c_pre[6] = count_c_pre[6] + 1
            # bucket recall by the user's number of test ratings
            if len(ranking_p_arg) <= 3:
                count_rec[0] = count_rec[0] + recall[i]
                count_c_rec[0] = count_c_rec[0] + 1
            elif len(ranking_p_arg) <= 6:
                count_rec[1] = count_rec[1] + recall[i]
                count_c_rec[1] = count_c_rec[1] + 1
            elif len(ranking_p_arg) <= 10:
                count_rec[2] = count_rec[2] + recall[i]
                count_c_rec[2] = count_c_rec[2] + 1
            elif len(ranking_p_arg) <= 20:
                count_rec[3] = count_rec[3] + recall[i]
                count_c_rec[3] = count_c_rec[3] + 1
            elif len(ranking_p_arg) <= 30:
                count_rec[4] = count_rec[4] + recall[i]
                count_c_rec[4] = count_c_rec[4] + 1
            elif len(ranking_p_arg) <= 40:
                count_rec[5] = count_rec[5] + recall[i]
                count_c_rec[5] = count_c_rec[5] + 1
            else:
                count_rec[6] = count_rec[6] + recall[i]
                count_c_rec[6] = count_c_rec[6] + 1
    count_pre = count_pre / count_c_pre
    count_rec = count_rec / count_c_rec
    return precision,recall,precision.mean(), precision.std(), precision.max(), precision.min(), recall.mean(), recall.std(), recall.max(), recall.min(), p00, p033, p066, p100, r02, r04, r06, r08, r10, count_pre, count_c_pre, count_rec, count_c_rec, count_recom , count_recom2
def search_lt_n(n, test_data):
    """Count rows of test_data that hold fewer than n nonzero entries.

    NOTE(review): expects rows whose fancy indexing returns a 1-D array
    (e.g. iterating a 2-D ndarray); sparse-matrix rows behave differently.
    """
    short_rows = 0
    for row in test_data:
        if row[row.nonzero()].shape[0] < n:
            short_rows += 1
    return short_rows
def calculate(method):
a=0.0
b=0.0
c=0.0
eta0 = 0.45
set_data = sys.argv[1]
setting = sys.argv[2]
Pat3_ave = np.zeros((10, 3))
Pat3_std = np.zeros((10, 3))
Pat3_max = np.zeros((10, 3))
Pat3_min = np.zeros((10, 3))
Rat3_ave = np.zeros((10,3))
Rat3_std = np.zeros((10,3))
Rat3_max = np.zeros((10,3))
Rat3_min = np.zeros((10,3))
nDCGat3_ave = np.zeros((10,3))
nDCGat3_std = np.zeros((10,3))
nDCGat3_max = np.zeros((10,3))
nDCGat3_min = np.zeros((10,3))
P0 = np.zeros((10,3)).astype(np.int64)
P03 = np.zeros((10,3)).astype(np.int64)
P06 = np.zeros((10,3)).astype(np.int64)
P10 = np.zeros((10,3)).astype(np.int64)
D02 = np.zeros((10,3)).astype(np.int64)
D04 = np.zeros((10,3)).astype(np.int64)
D06 = np.zeros((10,3)).astype(np.int64)
D08 = np.zeros((10,3)).astype(np.int64)
D10 = np.zeros((10,3)).astype(np.int64)
R02 = np.zeros((10,3)).astype(np.int64)
R04 = np.zeros((10,3)).astype(np.int64)
R06 = np.zeros((10,3)).astype(np.int64)
R08 = np.zeros((10,3)).astype(np.int64)
R10 = np.zeros((10,3)).astype(np.int64)
lt_3 = np.zeros((10,3)).astype(np.int64)
c_pre = np.array([0.,0.,0.,0.,0.,0.,0.])
c_rec = np.array([0.,0.,0.,0.,0.,0.,0.])
c_dcg = np.array([0.,0.,0.,0.,0.,0.,0.])
pre_count = np.array([0,0,0,0,0,0,0])
rec_count = np.array([0,0,0,0,0,0,0])
dcg_count = np.array([0,0,0,0,0,0,0])
if setting == '4':
#setting 4
user_Mu = np.loadtxt("./genre"+ set_data +"/data/d11/user.csv",delimiter=",").astype(np.int64)
user_Mv = np.loadtxt("./genre"+ set_data +"/data/d22/user.csv",delimiter=",").astype(np.int64)
test_user = np.loadtxt("./genre"+ set_data +"/data/d12/user.csv",delimiter=",").astype(np.int64)
item_Mu = np.loadtxt("./genre"+ set_data +"/data/d11/item.csv",delimiter=",").astype(np.int64)
item_Mv = np.loadtxt("./genre"+ set_data +"/data/d22/item.csv",delimiter=",").astype(np.int64)
test_item = np.loadtxt("./genre"+ set_data +"/data/d12/item.csv",delimiter=",").astype(np.int64)
data_Mu = np.loadtxt("./genre"+ set_data +"/data/d11/data.csv",delimiter=",").astype(np.int64)
data_Mv = np.loadtxt("./genre"+ set_data +"/data/d22/data.csv",delimiter=",").astype(np.int64)
test_data = np.loadtxt("./genre"+ set_data +"/data/d12/data.csv",delimiter=",").astype(np.int64)
train_index = np.loadtxt("./genre"+ set_data +"/data/d11/index.csv",delimiter=",").astype(np.int64)
train_index2 = np.loadtxt("./genre"+ set_data +"/data/d22/index.csv",delimiter=",").astype(np.int64)
train_index3 = np.loadtxt("./genre"+ set_data +"/data/d12/index.csv",delimiter=",").astype(np.int64)
elif setting == '5':
#setting 5
user_Mu = np.loadtxt("./genre"+ set_data +"/data/d22/user.csv",delimiter=",").astype(np.int64)
user_Mv = np.loadtxt("./genre"+ set_data +"/data/d11/user.csv",delimiter=",").astype(np.int64)
test_user = np.loadtxt("./genre"+ set_data +"/data/d21/user.csv",delimiter=",").astype(np.int64)
item_Mu = np.loadtxt("./genre"+ set_data +"/data/d22/item.csv",delimiter=",").astype(np.int64)
item_Mv = np.loadtxt("./genre"+ set_data +"/data/d11/item.csv",delimiter=",").astype(np.int64)
test_item = np.loadtxt("./genre"+ set_data +"/data/d21/item.csv",delimiter=",").astype(np.int64)
data_Mu = np.loadtxt("./genre"+ set_data +"/data/d22/data.csv",delimiter=",").astype(np.int64)
data_Mv = np.loadtxt("./genre"+ set_data +"/data/d11/data.csv",delimiter=",").astype(np.int64)
test_data = np.loadtxt("./genre"+ set_data +"/data/d21/data.csv",delimiter=",").astype(np.int64)
train_index = np.loadtxt("./genre"+ set_data +"/data/d22/index.csv",delimiter=",").astype(np.int64)
train_index2 = np.loadtxt("./genre"+ set_data +"/data/d11/index.csv",delimiter=",").astype(np.int64)
train_index3 = np.loadtxt("./genre"+ set_data +"/data/d21/index.csv",delimiter=",").astype(np.int64)
else:
print("Select setting.")
sys.exit()
for i in range(repeate):
j = 0
Mu_matrix = makeMatrix(data_Mu, train_index, user_Mu, item_Mu)
u = learning(method, Mu_matrix, train_index, data_Mu, user_Mu, item_Mu)
Mv_matrix = makeMatrix(data_Mv, train_index2, user_Mv, item_Mv)
pred = learning2(method, Mv_matrix, train_index2, data_Mv, user_Mv, item_Mv, u)
np.savetxt("./result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/pred_temp.csv", pred, delimiter=",")
test_matrix = makeMatrix(test_data, train_index3, test_user, test_item)
test_users = users_in_testdata(3, test_matrix, test_user)
# calculating precision, recall, and nDCG using "pred"
lt_3[i,j] = search_lt_n(3, test_matrix)
count_dict = collections.Counter(test_data[:,1])
pre,rec,Pat3_ave[i,j], Pat3_std[i,j], Pat3_max[i,j], Pat3_min[i,j], Rat3_ave[i,j], Rat3_std[i,j], Rat3_max[i,j], Rat3_min[i,j], P0[i,j], P03[i,j], P06[i,j], P10[i,j], R02[i,j], R04[i,j], R06[i,j], R08[i,j], R10[i,j], new_c_pre, new_pre_count, new_c_rec, new_rec_count, recom, recom2 = precision(3, pred, np.array(test_matrix.todense()), test_user, test_item,count_dict)
dcg, nDCGat3_ave[i,j], nDCGat3_std[i,j], nDCGat3_max[i,j], nDCGat3_min[i,j], D02[i,j], D04[i,j], D06[i,j], D08[i,j], D10[i,j], new_c_dcg, new_dcg_count = nDCG(3, pred, np.array(test_matrix.todense()), test_user, test_item,count_dict)
c_pre = c_pre + new_c_pre
c_rec = c_rec + new_c_rec
c_dcg = c_dcg + new_c_dcg
pre_count = pre_count + new_pre_count
rec_count = rec_count + new_rec_count
dcg_count = dcg_count + new_dcg_count
print("count:" + str(i) + ", precision=" + str(np.mean(pre[test_users.nonzero()])) + ", recall=" + str(np.mean(rec[test_users.nonzero()])) +", nDCG=" + str(np.mean(dcg[test_users.nonzero()])))
a += np.mean(pre[test_users.nonzero()])
b += np.mean(rec[test_users.nonzero()])
c += np.mean(dcg[test_users.nonzero()])
#del pred
#del train_matrix
#del test_matrix
gc.collect()
j = j + 1
c_pre = c_pre / 10
c_rec = c_rec / 10
c_dcg = c_dcg / 10
pre_count = pre_count / 10
rec_count = rec_count / 10
dcg_count = dcg_count / 10
print(c_pre)
print("Precision AVE : " + str(a / 10))
print("Recall AVE : " + str(b / 10))
print("nDCG AVE : " + str(c / 10))
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/Pat3_ave.npy", Pat3_ave)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/Pat3_std.npy", Pat3_std)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/Pat3_max.npy", Pat3_max)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/Pat3_min.npy", Pat3_min)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/Rat3_ave.npy", Rat3_ave)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/Rat3_std.npy", Rat3_std)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/Rat3_max.npy", Rat3_max)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/Rat3_min.npy", Rat3_min)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/nDCGat3_ave.npy", nDCGat3_ave)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/nDCGat3_std.npy", nDCGat3_std)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/nDCGat3_max.npy", nDCGat3_max)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/nDCGat3_min.npy", nDCGat3_min)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/P00.npy", P0)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/P03.npy", P03)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/P06.npy", P06)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/P10.npy", P10)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/D02.npy", D02)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/D04.npy", D04)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/D06.npy", D06)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/D08.npy", D08)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/D10.npy", D10)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/R02.npy", R02)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/R04.npy", R04)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/R06.npy", R06)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/R08.npy", R08)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/R10.npy", R10)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/lt_3.npy", lt_3)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/c_pre.npy", c_pre)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/c_rec.npy", c_rec)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/c_dcg.npy", c_dcg)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/pre_count.npy", pre_count)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/rec_count.npy", rec_count)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/dcg_count.npy", dcg_count)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/recom.npy", recom)
np.save("result/movie.review/" + method + "/genre"+ set_data +"_set"+ setting +"/recom2.npy", recom2)
# Script entry point: fan the per-method experiment runs out over a process pool.
if __name__ == "__main__":
    # Pool : the number of CPU.
    p = Pool(CPU)
    # NOTE(review): the triple-quoted string below is commented-out code kept as
    # a reference list of every method name this script supports.
    '''
    methods = ["SVD", "NMF", "RMrate_liner", "D1_liner", "D2_liner", "D3_liner", "D4_liner", "D5_liner", "RMrate_square",
    "D1_square", "D2_square", "D3_square", "D4_square", "D5_square", "ML1_liner", "ML2_liner", "ML3_liner",
    "ML1_square", "ML2_square", "ML3_square"]
    '''
    # Only these two methods are actually evaluated; one worker per method.
    methods = ["SVD","ML3_liner"]
    p.map(calculate,methods)
print("Program completed...") | Saito2982/CrossDomain | plot_domain.py | plot_domain.py | py | 22,614 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "scipy.sparse.linalg.svds",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.savetxt",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.savetxt",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.savetxt",
... |
12258821097 | import math
import random
import time
import carla
import cv2
import numpy as np
actor_list = []
def pure_pursuit(tar_location, v_transform):
    """Pure-pursuit lateral controller.

    Computes a normalized steering command in [-1, 1] that points the
    vehicle's rear axle toward ``tar_location``.

    :param tar_location: target point with ``x``/``y`` attributes (world frame).
    :param v_transform: vehicle transform exposing ``location`` and
        ``rotation.yaw`` (yaw in degrees).
    :return: steering value ``delta / 90`` clamped to [-1, 1].
    """
    wheelbase = 2.875  # assumed wheelbase of the Model 3 blueprint [m] — confirm
    heading = v_transform.rotation.yaw * (math.pi / 180)
    # Rear-axle reference point: half a wheelbase behind the vehicle origin.
    rear_x = v_transform.location.x - wheelbase / 2 * math.cos(heading)
    rear_y = v_transform.location.y - wheelbase / 2 * math.sin(heading)
    off_x = tar_location.x - rear_x
    off_y = tar_location.y - rear_y
    lookahead = math.sqrt(off_x ** 2 + off_y ** 2)
    # Angle between the vehicle heading and the line to the target.
    alpha = math.atan2(off_y, off_x) - heading
    # Pure-pursuit steering angle, converted back to degrees.
    delta = math.atan(2 * math.sin(alpha) * wheelbase / lookahead) * 180 / math.pi
    # Normalize to [-1, 1] for carla.VehicleControl.steer.
    return max(-1, min(1, delta / 90))
def img_process(data):
    """Camera-sensor callback: dump the latest frame to ``car.png``.

    ``data.raw_data`` is assumed to be a flat BGRA byte buffer from the
    1920x1080 camera configured below — confirm against the blueprint.
    The alpha channel is dropped before writing.
    """
    frame = np.array(data.raw_data).reshape((1080, 1920, 4))
    bgr = frame[:, :, :3]  # keep only the three color planes
    cv2.imwrite('car.png', bgr)
    # Live preview (disabled):
    # cv2.imshow('', bgr)
    # cv2.waitKey(1)
def callback(event):
    """Collision-sensor listener: log every collision event."""
    # "碰撞" = "collision"
    print("碰撞")
def callback2(event):
    """Lane-invasion-sensor listener: log every lane crossing."""
    # "穿越车道" = "crossed the lane"
    print("穿越车道")
# Top-level script: spawn a vehicle with camera / collision / lane-invasion
# sensors, then drive it along lane waypoints with the pure-pursuit controller
# until interrupted; the finally block destroys every spawned actor.
try:
    client = carla.Client('localhost', 2000)
    client.set_timeout(5.0)
    world = client.get_world()
    # NOTE(review): `map` shadows the builtin of the same name in this script.
    map = world.get_map()
    blueprint_library = world.get_blueprint_library()
    v_bp = blueprint_library.filter("model3")[0]
    spawn_point = random.choice(world.get_map().get_spawn_points())
    vehicle = world.spawn_actor(v_bp, spawn_point)
    actor_list.append(vehicle)
    # Find the blueprint of the sensor.
    blueprint = blueprint_library.find('sensor.camera.rgb')
    # Modify the attributes of the blueprint to set image resolution and field of view.
    blueprint.set_attribute('image_size_x', '1920')
    blueprint.set_attribute('image_size_y', '1080')
    blueprint.set_attribute('fov', '110')
    # Set the time in seconds between sensor captures
    blueprint.set_attribute('sensor_tick', '1.0')
    # Mount the camera slightly forward and above the vehicle origin.
    transform = carla.Transform(carla.Location(x=0.8, z=1.7))
    sensor = world.spawn_actor(blueprint, transform, attach_to=vehicle)
    actor_list.append(sensor)
    sensor.listen(lambda data: img_process(data))
    # Collision sensor at the same mounting point.
    blueprint_collision = blueprint_library.find('sensor.other.collision')
    transform = carla.Transform(carla.Location(x=0.8, z=1.7))
    sensor_collision = world.spawn_actor(blueprint_collision, transform, attach_to=vehicle)
    actor_list.append(sensor_collision)
    sensor_collision.listen(callback)
    # Lane-invasion sensor at the same mounting point.
    blueprint_lane_invasion = blueprint_library.find('sensor.other.lane_invasion')
    transform = carla.Transform(carla.Location(x=0.8, z=1.7))
    sensor_lane_invasion = world.spawn_actor(blueprint_lane_invasion, transform, attach_to=vehicle)
    actor_list.append(sensor_lane_invasion)
    sensor_lane_invasion.listen(callback2)
    # Initial kick to get the vehicle moving.
    vehicle.apply_control(carla.VehicleControl(throttle=1.0, steer=0))
    # Control loop: steer toward a waypoint 8 m ahead, roughly every 20 ms.
    while True:
        waypoint01 = map.get_waypoint(vehicle.get_location(), project_to_road=True,
                                      lane_type=(carla.LaneType.Driving | carla.LaneType.Sidewalk))
        v_trans = vehicle.get_transform()
        waypoints = waypoint01.next(8.0)
        waypoint02 = waypoints[0]
        tar_loc = waypoint02.transform.location
        steer = pure_pursuit(tar_loc, v_trans)
        vehicle.apply_control(carla.VehicleControl(throttle=0.6, steer=steer))
        time.sleep(0.02)
finally:
    # Clean up every spawned actor even if the loop is interrupted.
    for actor in actor_list:
        actor.destroy()
print("结束") | DYSfu/Carla_demo | demo3.py | demo3.py | py | 3,290 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "math.pi",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "math.cos",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 18,
... |
74472010426 | import unittest
from car_simulation import Car, Field, main
from unittest.mock import patch
from io import StringIO
class TestCar(unittest.TestCase):
    """Unit tests for Car: turning, movement, commands, status and collisions."""

    def test_change_direction(self):
        # A right turn from North must face East.
        car = Car("TestCar", 0, 0, "N", "F")
        car.change_direction("R")
        self.assertEqual(car.direction, "E")

    def test_move_within_field(self):
        # Forward from (2, 2) facing North lands on (2, 3).
        field = Field(5, 5)
        car = Car("TestCar", 2, 2, "N", "F")
        car.move(field)
        self.assertEqual((car.x, car.y), (2, 3))

    def test_move_out_of_bounds(self):
        field = Field(5, 5)
        car = Car("TestCar", 4, 4, "E", "F")
        car.move(field)
        self.assertEqual((car.x, car.y), (4, 4))  # Should not move out of bounds

    def test_execute_commands(self):
        # A single "F" command: direction unchanged, one step North.
        field = Field(5, 5)
        car = Car("TestCar", 0, 0, "N", "F")
        car.execute_commands(field)
        self.assertEqual(car.direction, "N")
        self.assertEqual((car.x, car.y), (0, 1))

    def test_execute_empty_commands(self):
        field = Field(5, 5)
        car = Car("TestCar", 0, 0, "N", "")
        car.execute_commands(field)
        self.assertEqual((car.x, car.y), (0, 0))  # Should not move with empty commands

    def test_get_status(self):
        # Status string format: "<name>, (<x>, <y>) <direction>, <commands>".
        car = Car("TestCar", 3, 3, "W", "FFL")
        status = car.get_status()
        self.assertEqual(status, "TestCar, (3, 3) W, FFL")

    def test_collide(self):
        # collid() records the collision step and the other car's name.
        car = Car("TestCar", 2, 2, "S", "F")
        car.collid(1, "AnotherCar")
        self.assertTrue(car.collided)
        self.assertEqual(car.step, 1)
        self.assertEqual(car.collided_with, "AnotherCar")
class TestField(unittest.TestCase):
    """Unit tests for Field: car placement, bounds and collision checks."""

    def test_add_car_within_field(self):
        field = Field(5, 5)
        car = Car("TestCar", 1, 1, "N", "F")
        field.add_car(car)
        self.assertIn(car, field.cars)

    def test_add_car_out_of_bounds(self):
        # A car placed outside the 5x5 grid must be rejected.
        field = Field(5, 5)
        car = Car("TestCar", 6, 6, "N", "F")
        field.add_car(car)
        self.assertNotIn(car, field.cars)

    def test_add_car_collision(self):
        # Two cars on the same cell: the second one is rejected.
        field = Field(5, 5)
        car1 = Car("Car1", 2, 2, "N", "F")
        car2 = Car("Car2", 2, 2, "S", "F")
        field.add_car(car1)
        field.add_car(car2)
        self.assertNotIn(car2, field.cars)  # Car2 should not be added due to collision

    def test_is_within_field(self):
        field = Field(5, 5)
        self.assertTrue(field.is_within_field(2, 3))
        self.assertFalse(field.is_within_field(6, 6))
class TestCarSimulation(unittest.TestCase):
    """End-to-end tests of main() driven by scripted console input."""

    # Scripted inputs: field size, add car, name, position, commands, run, exit.
    @patch("builtins.input", side_effect=["10 10", "1", "A", "1 2 N", "FFRFFFFRRL", "2", "2"])
    def test_simulation_with_single_car(self, mock_input):
        with patch("sys.stdout", new_callable=StringIO) as mock_stdout:
            main()
            output = mock_stdout.getvalue().strip()
            self.assertIn("Your current list of cars are:", output)
            self.assertIn("- A, (1, 2) N, FFRFFFFRRL", output)
            self.assertIn("After simulation, the result is:", output)
            self.assertIn("- A, (5, 4) S", output)

    # Two cars whose scripted paths cross at (5, 4) on step 7.
    @patch("builtins.input", side_effect=["10 10", "1", "A", "1 2 N", "FFRFFFFRRL", "1", "B", "7 8 W", "FFLFFFFFFF", "2", "2"])
    def test_simulation_with_multiple_car(self, mock_input):
        with patch("sys.stdout", new_callable=StringIO) as mock_stdout:
            main()
            output = mock_stdout.getvalue().strip()
            self.assertIn("Your current list of cars are:", output)
            self.assertIn("- A, (1, 2) N, FFRFFFFRRL", output)
            self.assertIn("- B, (7, 8) W, FFLFFFFFFF", output)
            self.assertIn("After simulation, the result is:", output)
            self.assertIn("- A, collides with B at (5, 4) at step 7", output)
            self.assertIn("- B, collides with A at (5, 4) at step 7", output)
# Allow running this test module directly: python car_simulation_test.py
if __name__ == "__main__":
    unittest.main()
| LiSheng-Chris/car-simulation | car_simulation_test.py | car_simulation_test.py | py | 3,835 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "unittest.TestCase",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "car_simulation.Car",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "car_simulation.Field",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "car_simu... |
7649624460 | # convolution 계산 함수
import numpy as np
import pycuda.autoinit
from pycuda.compiler import SourceModule
from pycuda import gpuarray, tools
import pycuda.driver as cuda
class padding():
    """Zero-pad a 2-D array on the GPU so a convolution kernel can be applied.

    NOTE(review): the default mode string 'vaild' looks like a typo for
    'valid', but callers may already pass it, so it is kept as-is.
    """
    # CUDA Limit size: threads per block edge (block is cu_lim x cu_lim).
    cu_lim = 32

    def __init__(self,D,K,mode='vaild'):
        # D : Data, K = kernel,
        kw = int(K.shape[0]) # kernel width
        kh = int(K.shape[1]) # kernel height
        # size setting (padding)
        if mode == 'vaild':
            # "valid" convolution: the output shrinks, no padding buffer needed.
            aw = D.shape[0]-kw+1
            ah = D.shape[1]-kh+1
            P = D
        elif mode == 'same':
            D = D.astype(np.float32)
            aw = int(D.shape[0])
            ah = int(D.shape[1])
            # Grid dimensions: ceil(aw / cu_lim) x ceil(ah / cu_lim).
            if (aw % self.cu_lim == 0):
                aw_n = int(aw/self.cu_lim)
            else :
                aw_n = int(aw/self.cu_lim +1)
            if (ah % self.cu_lim == 0):
                ah_n = int(ah/self.cu_lim)
            else :
                ah_n = int(ah/self.cu_lim +1)
            # result size
            P = np.zeros([aw+kw-1,ah+kh-1]).astype(np.float32)
            # Module: compile the padding kernel from its .cu source.
            mod = SourceModule(open("CUDAKernelStudy\\padding.cu", "r", encoding="utf-8").read())
            cu_pad = mod.get_function("padding")
            # allocate memory on device
            d_gpu = cuda.mem_alloc(D.nbytes)
            p_gpu = cuda.mem_alloc(P.nbytes)
            # memory copy (host to device)
            cuda.memcpy_htod(d_gpu, D)
            cuda.memcpy_htod(p_gpu, P)
            kw32 = np.int32(kw)
            kh32 = np.int32(kh)
            cusiz = np.int32(self.cu_lim)
            # padding by CUDA
            cu_pad(d_gpu,kw32,kh32,cusiz,p_gpu,block=(self.cu_lim,self.cu_lim,1),grid=(aw_n,ah_n,1))
            # memory copy (device to host)
            cuda.memcpy_dtoh(P, p_gpu)
            d_gpu.free()
            p_gpu.free()
        # NOTE(review): the source's original indentation was lost in
        # extraction; the GPU section above is assumed to belong to the
        # 'same' branch since it uses aw_n/ah_n defined only there — confirm.
        self.D = D
        self.P = P
self.C = np.zeros([aw,ah]) | JUHYUKo3o/CUDAKernelStudy | padding.py | padding.py | py | 1,964 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "numpy.float32",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "pycuda.compiler.Sou... |
32636741250 | import six
from c7n_azure.actions.tagging import Tag, AutoTagUser, RemoveTag, TagTrim, TagDelayedAction
from c7n_azure.actions.delete import DeleteAction
from c7n_azure.filters import (MetricFilter, TagActionFilter,
DiagnosticSettingsFilter, PolicyCompliantFilter)
from c7n_azure.provider import resources
from c7n_azure.query import QueryResourceManager, QueryMeta, ChildResourceManager, TypeInfo, \
ChildTypeInfo, TypeMeta
from c7n_azure.utils import ResourceIdParser
from c7n.utils import local_session
@six.add_metaclass(TypeMeta)
class ArmTypeInfo(TypeInfo):
    """Type metadata shared by all Azure ARM resource managers."""
    # api client construction information for ARM resources
    id = 'id'
    name = 'name'
    # Enables the diagnostic-settings filter by default for ARM resources.
    diagnostic_settings_enabled = True
    # Columns included in generated reports by default.
    default_report_fields = (
        'name',
        'location',
        'resourceGroup'
    )
@resources.register('armresource')
@six.add_metaclass(QueryMeta)
class ArmResourceManager(QueryResourceManager):
    """Base resource manager for generic Azure ARM resources.

    Enumerates resources through the ARM ResourceManagementClient, and via
    register_arm_specific() wires the shared tag / metric / delete actions
    and filters onto every registered ARM resource type.
    """

    class resource_type(ArmTypeInfo):
        # Generic ARM listing through azure.mgmt.resource.
        service = 'azure.mgmt.resource'
        client = 'ResourceManagementClient'
        enum_spec = ('resources', 'list', None)

    def augment(self, resources):
        """Annotate each resource dict with its parsed resource group name."""
        for resource in resources:
            if 'id' in resource:
                resource['resourceGroup'] = ResourceIdParser.get_resource_group(resource['id'])
        return resources

    def get_resources(self, resource_ids):
        """Fetch resources by id, resolving the API version per resource id."""
        resource_client = self.get_client('azure.mgmt.resource.ResourceManagementClient')
        session = local_session(self.session_factory)
        data = [
            resource_client.resources.get_by_id(rid, session.resource_api_version(rid))
            for rid in resource_ids
        ]
        return [r.serialize(True) for r in data]

    @staticmethod
    def register_arm_specific(registry, _):
        """Registry hook: attach the common actions/filters to every ARM manager."""
        for resource in registry.keys():
            klass = registry.get(resource)
            if issubclass(klass, ArmResourceManager):
                # Tagging actions and filters shared by all ARM resources.
                klass.action_registry.register('tag', Tag)
                klass.action_registry.register('untag', RemoveTag)
                klass.action_registry.register('auto-tag-user', AutoTagUser)
                klass.action_registry.register('tag-trim', TagTrim)
                klass.filter_registry.register('metric', MetricFilter)
                klass.filter_registry.register('marked-for-op', TagActionFilter)
                klass.action_registry.register('mark-for-op', TagDelayedAction)
                klass.filter_registry.register('policy-compliant', PolicyCompliantFilter)
                # Resource groups are excluded from the generic delete action.
                if resource != 'resourcegroup':
                    klass.action_registry.register('delete', DeleteAction)
                if hasattr(klass.resource_type, 'diagnostic_settings_enabled') \
                        and klass.resource_type.diagnostic_settings_enabled:
                    klass.filter_registry.register('diagnostic-settings', DiagnosticSettingsFilter)
@six.add_metaclass(QueryMeta)
class ChildArmResourceManager(ChildResourceManager, ArmResourceManager):
    """ARM manager for child resources enumerated through a parent resource."""

    class resource_type(ChildTypeInfo, ArmTypeInfo):
        pass


# Attach the common ARM actions/filters once all resource types are registered.
resources.subscribe(resources.EVENT_FINAL, ArmResourceManager.register_arm_specific)
| LRuttenCN/cloud-custodian | tools/c7n_azure/c7n_azure/resources/arm.py | arm.py | py | 3,163 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "c7n_azure.query.TypeInfo",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "six.add_metaclass",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "c7n_azure.query.TypeMeta",
"line_number": 14,
"usage_type": "argument"
},
{
"api_name"... |
29579733720 | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 7 15:39:51 2018
@author: Akitaka
"""
import numpy as np
from sklearn.model_selection import cross_val_score
from lwpls import LWPLS
def psi(xlist, M):
    """Build the polynomial design matrix for degree-M regression.

    Row i is [1, x_i, x_i**2, ..., x_i**M], i.e. a Vandermonde matrix in
    increasing-power column order.

    :param xlist: 1-D sequence of sample points.
    :param M: maximum polynomial degree (the matrix has M + 1 columns).
    :return: ndarray of shape (len(xlist), M + 1).
    """
    # np.vander replaces the hand-rolled double loop and also yields the
    # correct (0, M + 1) shape for an empty input.
    return np.vander(np.asarray(xlist), M + 1, increasing=True)
# Reproducible noise for the synthetic data set.
np.random.seed(1)

""" Data """
# N noisy samples of sin(2*pi*x) on [0, 1], expanded to a degree-M
# polynomial design matrix via psi().
N = 10
M = 15
xlist = np.linspace(0, 1, N)
ylist = np.sin(2 * np.pi * xlist) + np.random.normal(0, 0.2, xlist.size)
X = psi(xlist,M)
y = ylist

""" Cross validation"""
# Locally-weighted PLS with 2 components, scored by 5-fold CV (negated MSE).
reg = LWPLS(n_components=2)
reg.fit(X,y)
y_pred = reg.predict(X)
scores = cross_val_score(reg, X, y, cv=5, scoring='neg_mean_squared_error')
print(scores.mean())
| nakanishi-akitaka/python2018_backup | 1207/cross_validation_lwpls.py | cross_validation_lwpls.py | py | 740 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "numpy.array",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "numpy.linspace",
... |
73675812026 | import django
from django.conf import settings
import pandas as pd
import os, sys
# Bootstrap Django so the ORM can be used from this standalone script.
proj_path = "/home/webuser/webapps/tigaserver/"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tigaserver_project.settings")
sys.path.append(proj_path)
django.setup()

# Imported after django.setup(): model modules require a configured Django.
from tigaserver_app.models import Fix

# Output CSV served as a static asset.
FILE = os.path.join(settings.STATIC_ROOT, "geo_user_fixes.csv")

# Getting all fixes and create a new DataFrame.
# Selecting only the desired fields for speed reasons.
df = pd.DataFrame.from_records(
    Fix.objects.all().values('server_upload_time', 'masked_lon', 'masked_lat')
)

# Remove any NaN value
df.dropna(inplace=True)

# Rename the datetime colume to a more readable name
df.rename(
    columns={"server_upload_time": "datetime"},
    inplace=True
)

# Convert datetime column to just date
df['datetime'] = pd.to_datetime(df['datetime']).dt.normalize()

# Round float to 2 decimals (lat and lon)
df = df.round(decimals=2)

##########
# Group by date, lat, lon and count the number of elements
# to make the resulting file smaller.
##########

# If the dataviz is slow, create bins for the latitude and longitue.
# Example: https://stackoverflow.com/questions/39254704/pandas-group-bins-of-data-per-longitude-latitude
# import numpy as np
# degres_step = 0.1
# to_bin = lambda x: np.floor(x / step) * step
# df["latBin"] = to_bin(df.masked_lat)
# df["lonBin"] = to_bin(df.masked_lon)

# See: https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html
df.groupby(
    [
        pd.Grouper(key='datetime', freq='3W-MON'),  # Every 3 weeks.
        df['masked_lon'],
        df['masked_lat']
    ]).size()\
    .reset_index(name='count')\
.to_csv(FILE, index=False) | Mosquito-Alert/mosquito_alert | util_scripts/update_geo_userfixes_static.py | update_geo_userfixes_static.py | py | 1,682 | python | en | code | 6 | github-code | 6 | [
{
"api_name": "os.environ.setdefault",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.path",
"li... |
32731940278 | from collections import deque
# BOJ 1021 "rotating queue": count the minimum number of left/right rotations
# needed to pop the requested elements (deque initialized 1..n) in order.
n, m = map(int, input().split())
number = list(map(int, input().split()))
deq = deque(i for i in range(1, n+1))
count = 0
for i in range(m):
    while True:
        if (deq[0] == number[i]):
            # Target is at the front: pop it (the pop itself is not counted).
            deq.popleft()
            break
        else:
            if(len(deq) / 2 > deq.index(number[i])):
                # Target sits in the front half: rotate left (operation 2).
                deq.append(deq.popleft())
                count += 1
            else:
                # Target sits in the back half: rotate right (operation 3).
                deq.appendleft(deq.pop())
                count += 1
print(count) | woo222/baekjoon | python/큐,스택/s3_1021_회전하는큐.py | s3_1021_회전하는큐.py | py | 505 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "collections.deque",
"line_number": 6,
"usage_type": "call"
}
] |
18028370803 | import json
from logging import info
import boto3
from botocore.exceptions import ClientError
from lgw.lambda_util import get_lambda_info, grant_permission_to_api_resource
def create_rest_api(
api_name,
api_description,
binary_types,
lambda_name,
resource_path,
deploy_stage,
integration_role,
method_response_models,
):
'''
Creates & deploys a REST API that proxies to a Lambda function, returning the URL
pointing to this API.
:param api_name: Name of the REST API
:param api_description: Textual description of the API
:param binary_types: A list of binary types that this API may serve up
:param lambda_name: Name of an existing Lambda function
:param resource_path: The resource path that points to the lambda.
:param deploy_stage: The name of the deployment stage.
:param integration_role
:param method_response_models: Dictionary of content-type => response-model mappings to be applied to child method
:return: URL of API. If error, returns None.
'''
api_client = boto3.client('apigateway')
api_id = create_api_gateway(api_client, api_name, api_description, binary_types)
(lambda_arn, lambda_uri, region, account_id) = get_lambda_info(lambda_name)
root_resource_id = get_root_resource_id(api_client, api_id)
create_method(api_client, api_id, root_resource_id, 'ANY')
create_lambda_integration(api_client, api_id, root_resource_id, lambda_uri, integration_role)
child_resource_id = create_resource(api_client, api_id, root_resource_id, resource_path)
create_method(api_client, api_id, child_resource_id, 'ANY', method_response_models)
create_lambda_integration(api_client, api_id, child_resource_id, lambda_uri, integration_role)
deploy_to_stage(api_client, api_id, deploy_stage)
# grant_permission_to_api_resource(api_id, region, account_id, lambda_arn, resource_path)
return f'https://{api_id}.execute-api.{region}.amazonaws.com/{deploy_stage}'
def delete_rest_api(api_name):
api_client = boto3.client('apigateway')
delete_api_gateway(api_client, api_name)
def deploy_to_stage(api_client, api_id, deploy_stage):
return api_client.create_deployment(restApiId=api_id, stageName=deploy_stage)
def create_lambda_integration(api_client, api_id, root_resource_id, lambda_uri, role_arn=None):
'''
Set the Lambda function as the destination for the ANY method
Extract the Lambda region and AWS account ID from the Lambda ARN
ARN format="arn:aws:lambda:REGION:ACCOUNT_ID:function:FUNCTION_NAME"
'''
api_client.put_integration(
restApiId=api_id,
resourceId=root_resource_id,
httpMethod='ANY',
type='AWS_PROXY',
integrationHttpMethod='POST',
uri=lambda_uri,
credentials=role_arn,
)
def create_method(api_client, api_id, resource_id, http_method, method_response_models={}):
try:
response = api_client.get_method(
restApiId=api_id, resourceId=resource_id, httpMethod=http_method
)
if response and response.get('httpMethod'):
info(f'{http_method} method already exists for resource {resource_id}')
return
except api_client.exceptions.NotFoundException:
info(f'{http_method} method does not exist for resource {resource_id}, adding it.')
api_client.put_method(
resourceId=resource_id, restApiId=api_id, httpMethod=http_method, authorizationType='NONE'
)
# Set the content-type of the method response to JSON
api_client.put_method_response(
restApiId=api_id,
resourceId=resource_id,
httpMethod=http_method,
statusCode='200',
responseModels=method_response_models,
)
def create_resource(api_client, api_id, parent_id, resource_path):
resources = api_client.get_resources(restApiId=api_id)
if 'items' in resources:
for resource in resources['items']:
if resource.get('parentId') == parent_id and resource.get('pathPart') == resource_path:
info('Found existing resource for %s' % resource['parentId'])
return resource['id']
info(f'No existing resource found for {parent_id}/{resource_path}, creating a new one')
result = api_client.create_resource(
restApiId=api_id, parentId=parent_id, pathPart=resource_path
)
return result['id']
def get_root_resource_id(api_client, api_id):
result = api_client.get_resources(restApiId=api_id)
root_id = None
for item in result['items']:
if item['path'] == '/':
root_id = item['id']
if root_id is None:
raise ClientError(
'Could not retrieve the ID of the API root resource using api_id [%s]' % api_id
)
return root_id
def delete_api_gateway(api_client, api_name):
api_id = lookup_api_gateway(api_client, api_name)
if api_id:
info(f'Deleting API with ID: {api_id}')
api_client.delete_rest_api(restApiId=api_id)
def create_api_gateway(api_client, api_name, api_description, binary_types):
api_id = lookup_api_gateway(api_client, api_name)
if api_id:
return api_id
info(f'No existing API account found for {api_name}, creating it.')
result = api_client.create_rest_api(
name=api_name, description=api_description, binaryMediaTypes=binary_types
)
return result['id']
def lookup_api_gateway(api_client, api_name):
apis = api_client.get_rest_apis()
if 'items' in apis:
for api in apis['items']:
if api['name'] == api_name:
info('Found existing API account for %s' % api['name'])
return api['id']
info(f'No API gateway found with name {api_name}')
return None
| ebridges/lgw | lgw/api_gateway.py | api_gateway.py | py | 5,773 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "boto3.client",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "lgw.lambda_util.get_lambda_info",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "boto3.client",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "logging.inf... |
32150136387 | from collections import deque
def solution(queue1, queue2):
    """Minimum number of element moves to equalize the sums of two queues.

    One move pops the front of one queue and appends it to the back of the
    other.  Returns the minimum move count, or -1 if the sums can never be
    made equal.

    :param queue1: list of positive ints (first queue).
    :param queue2: list of positive ints of the same length (second queue).
    :return: minimum number of moves, or -1.
    """
    total = sum(queue1) + sum(queue2)
    # Bug fix: an odd total can never split evenly, but `total // 2` could
    # still coincidentally equal one queue's sum (e.g. [1] / [2]), making the
    # greedy loop report success.  Reject odd totals up front.
    if total % 2 != 0:
        return -1
    target = total // 2

    left = deque(queue1)
    right = deque(queue2)
    left_sum = sum(queue1)
    # Greedy two-pointer sweep; 3 * len(queue1) moves is enough to visit
    # every contiguous window of the combined circular sequence.
    limit = len(queue1) * 3
    for moves in range(limit):
        if left_sum == target:
            return moves
        if left_sum > target:
            shifted = left.popleft()
            right.append(shifted)
            left_sum -= shifted
        else:
            shifted = right.popleft()
            left.append(shifted)
            left_sum += shifted
    # Also accept a solution reached exactly at the move limit (the original
    # loop stopped one equality check short).
    return limit if left_sum == target else -1
queue1 = [3, 2, 7, 2]
queue2 = [4, 6, 5, 1]
print(solution(queue1,queue2)) | HS980924/Algorithm | src/8.Queue,Deque/두큐합.py | 두큐합.py | py | 810 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "collections.deque",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 9,
"usage_type": "call"
}
] |
24362863500 | from odoo import models, fields, api
from ..tools.nawh_error import NAWHError
class NetaddictionWhLocationsLine(models.Model):
_name = 'netaddiction.wh.locations.line'
_description = "Netaddiction WH Locations Line"
_order = 'qty'
product_id = fields.Many2one(
'product.product',
required=True,
string="Prodotto",
)
qty = fields.Integer(
default=1,
required=True,
string="Quantità",
)
wh_location_id = fields.Many2one(
'netaddiction.wh.locations',
required=True,
string="Ripiano",
)
@api.model
def get_products(self, barcode):
"""
dato il barcode di un ripiano ritorna i prodotti allocati
"""
result = self.search([('wh_location_id.barcode', '=', barcode)])
if not result:
return NAWHError(
"Non sono stati trovati prodotti per il barcode"
)
return result
##########################
# INVENTORY APP FUNCTION #
# ritorna un dict simile #
# ad un json per il web #
##########################
@api.model
def get_json_products(self, barcode):
"""
ritorna un json con i dati per la ricerca per ripiano
"""
is_shelf = self.env['netaddiction.wh.locations'].check_barcode(barcode)
if isinstance(is_shelf, NAWHError):
return {'result': 0, 'error': is_shelf.msg}
results = self.get_products(barcode)
if isinstance(results, NAWHError):
return {'result': 0, 'error': results.msg}
return {
'result': 1,
'shelf': is_shelf.name,
'barcode': barcode,
'products': [
{'product_name': res.product_id.display_name,
'qty': res.qty,
'barcode': res.product_id.barcode}
for res in results
]
}
@api.model
def put_json_new_allocation(self, barcode, qty, product_id, now_wh_line):
"""
sposta la quantità qty dal ripiano barcode al new_shelf
"""
is_shelf = self.env['netaddiction.wh.locations'].check_barcode(barcode)
if isinstance(is_shelf, NAWHError):
return {'result': 0, 'error': is_shelf.msg}
new_shelf = is_shelf.id
line = self.search(
[('id', '=', int(now_wh_line)),
('product_id', '=', int(product_id))]
)
if not line:
return {
'result': 0,
'error': 'Prodotto non più presente in questa locazione'
}
if line.wh_location_id.id == new_shelf:
return {
'result': 0,
'error': 'Non puoi spostare un prodotto nella'
' stessa locazione di partenza'
}
dec = line.decrease(qty)
if isinstance(dec, NAWHError):
return {'result': 0, 'error': dec.msg}
self.allocate(product_id, qty, new_shelf)
product = self.env['product.product'].browse(int(product_id))
return {'result': 1, 'product_barcode': product.barcode}
##############################
# END INVENTORY APP FUNCTION #
##############################
##################
# FUNZIONI VARIE #
##################
    def decrease(self, qta):
        """Decrease the allocated quantity by *qta* (coerced to int).

        Returns an NAWHError (instead of raising) when *qta* exceeds the
        allocated quantity; deletes the line when the quantity reaches zero.
        """
        self.ensure_one()
        end_qty = self.qty - int(qta)
        if end_qty < 0:
            return NAWHError(
                "Non puoi scaricare una quantità maggiore di quella allocata"
            )
        elif end_qty > 0:
            self.write({'qty': end_qty})
        else:
            # Nothing left on this shelf: drop the allocation line entirely.
            self.unlink()
    def increase(self, qta):
        """Increase the allocated quantity by *qta* (coerced to int)."""
        self.ensure_one()
        self.qty += int(qta)
@api.model
def allocate(self, product_id, qta, new_location_id):
""" alloca in new_location_id la qta di product_id """
result = self.search(
[('product_id', '=', int(product_id)),
('wh_location_id', '=', int(new_location_id))]
)
if result:
# è già presente una locazione con questo prodotto
# incremento
for res in result:
res.increase(qta)
else:
self.create([{
'product_id': product_id,
'qty': qta,
'wh_location_id': new_location_id
}])
| suningwz/netaddiction_addons | netaddiction_warehouse/models/netaddiction_wh_locations_line.py | netaddiction_wh_locations_line.py | py | 4,531 | python | it | code | 0 | github-code | 6 | [
{
"api_name": "odoo.models.Model",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "odoo.models",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Many2one",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
... |
34390260723 | from pathlib import Path
from datetime import timedelta
import environ
import os
import pymysql
pymysql.install_as_MySQLdb()
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# dj-rest-auth: token-based auth only, no session login.
REST_AUTH = {
    'SESSION_LOGIN': False
}
###### Environment-variable configuration ##################
# DEBUG falls back to True when missing from the .env file.
env = environ.Env(DEBUG=(bool, True))
environ.Env.read_env(
    env_file=os.path.join(BASE_DIR, '.env')
)
SECRET_KEY=env('SECRET_KEY')
DEBUG=env('DEBUG')
# NOTE(review): ENGINE is read from the environment but NAME is a hard-coded
# sqlite path -- confirm this matches the intended backend (pymysql is
# installed above, which suggests MySQL may be expected in production).
DATABASES = {
    'default': {
        'ENGINE': env('DATABASES_ENGINE'),
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
#######################################
# Django contrib apps, the DRF + SimpleJWT stack, CORS support, then the
# project's own apps.
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework.authtoken',
    'rest_framework',
    "corsheaders",
    'rest_framework_simplejwt',
    'rest_framework_simplejwt.token_blacklist',
    # In Folder Installed App
    'ycwg',
    'party_list',
    'account',
]
MIDDLEWARE = [
    # CorsMiddleware must sit as high as possible, in particular BEFORE
    # CommonMiddleware (per django-cors-headers docs); the original list had
    # it last and also listed CommonMiddleware twice.
    "corsheaders.middleware.CorsMiddleware",
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ycwg.urls'
# Standard Django template engine with app-dir template discovery.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'ycwg.wsgi.application'
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
# Password validation
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_URL = 'static/'
# Default primary key field type
# https://docs.djangoproject.com/en/dev/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# CORS_ALLOWED_ORIGINS = [
#     "http://localhost:5173",
#     "http://localhost:8000",
#     "https://port-0-ycwg-backend-1maxx2klgvs8aq4.sel3.cloudtype.app"
# ]
# NOTE(review): browsers reject a wildcard origin when credentials are sent,
# so CORS_ALLOW_ALL_ORIGINS=True together with CORS_ALLOW_CREDENTIALS=True
# likely needs the explicit origin list above instead -- confirm.
CORS_ALLOW_ALL_ORIGINS = True
CORS_ALLOW_CREDENTIALS = True
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTP_ONLY = True
# Origins trusted for CSRF-protected (unsafe-method) requests.
CSRF_TRUSTED_ORIGINS = [
    "http://localhost:8000",
    "http://localhost:5173",
    "https://port-0-ycwg-backend-1maxx2klgvs8aq4.sel3.cloudtype.app"
]
# Expose the CSRF token header to the cross-origin frontend.
CORS_EXPOSE_HEADERS = ["Content-Type", "X-CSRFToken"]
SESSION_COOKIE_SECURE = True
# SameSite=None allows the cookies to travel on cross-site requests
# (requires the Secure flags set above).
CSRF_COOKIE_SAMESITE = "None"
SESSION_COOKIE_SAMESITE = "None"
# User-uploaded media files.
MEDIA_URL = '/media/'  # ex) /media/photo1.png
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# SimpleJWT configuration: long-lived (30 day) access/refresh tokens signed
# with the Django SECRET_KEY, plus custom cookie names used by the project's
# cookie-based authentication.
SIMPLE_JWT = {
    'ACCESS_TOKEN_LIFETIME': timedelta(days=30),
    'REFRESH_TOKEN_LIFETIME': timedelta(days=30),
    'ROTATE_REFRESH_TOKENS': False,
    'BLACKLIST_AFTER_ROTATION': False,
    'UPDATE_LAST_LOGIN': False,
    'ALGORITHM': 'HS256',
    'SIGNING_KEY': SECRET_KEY,
    'VERIFYING_KEY': None,
    'AUDIENCE': None,
    'ISSUER': None,
    'JWK_URL': None,
    'LEEWAY': 0,
    'AUTH_HEADER_TYPES': ('Bearer',),
    'AUTH_HEADER_NAME': 'HTTP_AUTHORIZATION',
    'USER_ID_FIELD': 'id',
    'USER_ID_CLAIM': 'user_id',
    'USER_AUTHENTICATION_RULE': 'rest_framework_simplejwt.authentication.default_user_authentication_rule',
    'AUTH_TOKEN_CLASSES': ('rest_framework_simplejwt.tokens.AccessToken',),
    'TOKEN_TYPE_CLAIM': 'token_type',
    'TOKEN_USER_CLASS': 'rest_framework_simplejwt.models.TokenUser',
    'JTI_CLAIM': 'jti',
    'SLIDING_TOKEN_REFRESH_EXP_CLAIM': 'refresh_exp',
    'SLIDING_TOKEN_LIFETIME': timedelta(days=30),
    'SLIDING_TOKEN_REFRESH_LIFETIME': timedelta(days=30),
    # custom
    'AUTH_COOKIE': 'access',
    # Cookie name. Enables cookies if value is set.
    'AUTH_COOKIE_REFRESH': 'refresh',
    # A string like "example.com", or None for standard domain cookie.
    'AUTH_COOKIE_DOMAIN': 'port-0-ycwg-backend-1maxx2klgvs8aq4.sel3.cloudtype.app',
    # Whether the auth cookies should be secure (https:// only).
    # NOTE(review): False here conflicts with the SameSite=None/Secure cookie
    # settings above -- confirm this is intended for production.
    'AUTH_COOKIE_SECURE': False,
    # Http only cookie flag.It's not fetch by javascript.
    'AUTH_COOKIE_HTTP_ONLY': True,
    'AUTH_COOKIE_PATH': '/',  # The path of the auth cookie.
    # Whether to set the flag restricting cookie leaks on cross-site requests. This can be 'Lax', 'Strict', or None to disable the flag.
    'AUTH_COOKIE_SAMESITE': "Lax",  # TODO: Modify to Lax
}
REST_FRAMEWORK = {
    "DEFAULT_AUTHENTICATION_CLASSES": [
        'rest_framework_simplejwt.authentication.JWTAuthentication',
        'account.authenticate.CustomAuthentication',
    ],
    # Permission classes are AND-ed together by DRF, so listing AllowAny next
    # to IsAuthenticated was redundant, and the authentication classes that
    # were listed here do not belong under permissions at all: DRF calls
    # `.has_permission()` on every entry, which authentication classes do not
    # implement, breaking every permission check at request time.
    "DEFAULT_PERMISSION_CLASSES": [
        'rest_framework.permissions.IsAuthenticated',
    ]
}
# Custom user model for the whole project.
AUTH_USER_MODEL = "account.Account"
ALLOWED_HOSTS = [
'*'
] | YCWG/YCWG-BackEnd | ycwg/settings.py | settings.py | py | 6,231 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pymysql.install_as_MySQLdb",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "environ.Env",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "environ.Env.read_e... |
29098845226 | #!/bin/env python
# This script undoes the "WellFolders.py" script, aka it empties all of the well folders out into the parent folder.
import os
import re
import argparse
# Undo WellFolders: flatten every "<well>_Day<d>" folder back into the parent.
parser = argparse.ArgumentParser(description='Takes a folder formatted by WellFolders and undoes it ',
                                 usage='%(prog)s FOLDERPATH')
parser.add_argument('folderlocation', type=str, help='Absolute path of the folder to modify')
args = parser.parse_args()
# Well folders look like e.g. "B03_Day2": one letter, two digits, "_Day", one digit.
regex = re.compile(r'\w{1}\d{2}_Day\d{1}')
allfolders = [file for file in os.scandir(args.folderlocation) if file.is_dir() and regex.match(file.name)]
for folder in allfolders:
    # The three channel sub-folders created by WellFolders.
    redfolder = folder.path + '/Red/'
    greenfolder = folder.path + '/Green/'
    brightfield = folder.path + '/Brightfield/'
    for foldpath in [redfolder, greenfolder, brightfield]:
        for file in os.scandir(foldpath):
            if file.name.endswith('.txt'):
                # Metadata text files are discarded rather than moved back.
                os.remove(file.path)
            elif file.name.endswith('.tif'):
                # Move the image back up into the parent folder.
                os.rename(file.path, f'{args.folderlocation}/{file.name}')
        # The channel folder must be empty by now, otherwise rmdir raises.
        os.rmdir(foldpath)
    os.rmdir(folder.path)
| jc6213/CRANIUM | UndoWellFolders.py | UndoWellFolders.py | py | 1,123 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.scandir",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.scandir",
"line_n... |
30003290472 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Description:
# @File: test.py
# @Project: ip_nlp
# @Author: Yiheng
# @Email: GuoYiheng89@gmail.com
# @Time: 7/15/2019 10:30
import time
from pymongo import ASCENDING
from common import logger_factory
from mongo.connect import get_collection
from mongo.utils.query_filter_utils import get_clf_query_filter
logger = logger_factory.get_logger('doc_service')
def create_index(db_name, clc_name, field_name, sort=ASCENDING):
    """Create a background single-field index on the given collection.

    :param db_name: database name
    :param clc_name: collection name
    :param field_name: document field to index
    :param sort: index direction, ascending by default
    :return: None
    """
    collection = get_collection(db_name, clc_name)
    collection.create_index([(field_name, sort)], background=True)
def remove_redundant(db_name, clc_name):
    """Remove duplicated docs that share the same ``pubId``, keeping one copy.

    :param db_name: database name
    :param clc_name: collection name
    :return: None
    """
    clc = get_collection(db_name, clc_name)
    # Group by pubId and keep only the groups that occur more than once.
    redundant_docs = clc.aggregate([
        {'$group': {
            '_id': {'pubId': '$pubId'},
            'uniqueIds': {'$addToSet': '$_id'},
            'count': {'$sum': 1}
        }},
        {'$match': {
            'count': {'$gt': 1}
        }}], allowDiskUse=True)
    # Use the module logger instead of the stray debug print the original had.
    logger.debug('redundant_docs %s', type(redundant_docs))
    for doc in redundant_docs:
        logger.info(f'{doc}')
        obj_ids = doc['uniqueIds']
        logger.info(f'obj ids is {obj_ids}')
        # Delete every duplicate except the last one (same behavior as the
        # original index loop with its sentinel break, but idiomatic).
        for obj_id in obj_ids[:-1]:
            clc.remove(obj_id)
def find_some(db_name: str, clc_name: str, limit: int):
    """Yield up to *limit* docs as Bson objects.

    A non-positive *limit* is clamped to 0, which pymongo treats as "no limit".

    :param db_name: database name
    :param clc_name: collection name
    :param limit: maximum number of documents to yield
    """
    logger.info(f'start find_some with limit {limit}')
    collection = get_collection(db_name, clc_name)
    capped_limit = max(limit, 0)
    yield from collection.find({}).limit(capped_limit)
def find_all(db_name: str, clc_name: str):
    """
    find all docs and return a generator, whose item is a Bson obj
    :param db_name: database name
    :param clc_name: collection name
    :return: generator over all documents in the collection
    """
    logger.info('start find_all')
    # Delegates to find_some with limit 0, which pymongo treats as "no limit".
    return find_some(db_name, clc_name, 0)
def find_by_clf(db_name, clc_name, limit=300, **kwargs):
    """Yield docs matching the classification filters, one Bson obj at a time.

    :param db_name: database name
    :param clc_name: collection name
    :param limit: maximum number of documents to yield
    :param kwargs: classification filters (section, mainClass, subClass)
    """
    yield from find_cursor_by_clf(db_name, clc_name, limit, **kwargs)
def find_cursor_by_clf(db_name, clc_name, limit, **kwargs):
    """Return a pymongo cursor over docs matching the classification params.

    :param db_name: database name
    :param clc_name: collection name
    :param limit: maximum number of documents
    :param kwargs: classification filters --
        section (required), mainClass, subClass
    :return: limited pymongo cursor
    """
    logger.info(f'start search tasks with {kwargs}')
    collection = get_collection(db_name, clc_name)
    clf_filter = get_clf_query_filter(kwargs)
    matching_cursor = collection.find(clf_filter).limit(limit)
    logger.info(f'search tasks {kwargs} complete')
    return matching_cursor
if __name__ == '__main__':
    # Ad-hoc manual test: time a classification query against ip_doc.raw.
    db_ip_doc = 'ip_doc'
    clc_raw = 'raw'
    # remove_redundant('ip_doc', 'raw')
    start_time = time.time()
    docs = find_by_clf(db_ip_doc, clc_raw, section='B', mainClass='29', subClass='K')
    # count = len(list(docs))
    # print('count is {}'.format(count))
    for doc in docs:
        logger.info(f'find doc pubId {doc["pubId"]}')
    end_time = time.time()
    logger.info(f'complete...,take time {end_time - start_time}s')
| zhenxun815/ip_nlp | src/mongo/doc_service.py | doc_service.py | py | 3,720 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "common.logger_factory.get_logger",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "common.logger_factory",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "pymongo.ASCENDING",
"line_number": 21,
"usage_type": "name"
},
{
"api_name... |
3477127800 | import logging
from dvc.cli.command import CmdBase
logger = logging.getLogger(__name__)
class CmdQueueWorker(CmdBase):
    """Run the exp queue worker."""

    def run(self):
        """Start the named Celery worker for the experiments queue.

        Always returns 0 (success exit code).
        """
        self.repo.experiments.celery_queue.worker.start(self.args.name)
        return 0
def add_parser(experiments_subparsers, parent_parser):
    """Register the `queue-worker` subcommand on the experiments CLI."""
    help_text = "Run the exp queue worker."
    worker_parser = experiments_subparsers.add_parser(
        "queue-worker",
        parents=[parent_parser],
        description=help_text,
        add_help=False,
    )
    worker_parser.add_argument("name", help="Celery worker name.")
    worker_parser.set_defaults(func=CmdQueueWorker)
| gshanko125298/Prompt-Engineering-In-context-learning-with-GPT-3-and-LLMs | myenve/Lib/site-packages/dvc/commands/experiments/queue_worker.py | queue_worker.py | py | 656 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "dvc.cli.command.CmdBase",
"line_number": 8,
"usage_type": "name"
}
] |
2418871084 | import torch
import numpy as np
from copy import deepcopy
from typing import List, Optional, Tuple
from torch.utils.data import DataLoader
from supervised.utils import ids, keys, typeddicts
from supervised import saving, data, networks
VERBOSE = False # Default: whether the code output should be verbose
NR_EPOCHS = 50 # Default max number of epochs for training in case of no early stopping
EARLY_STOPPING_PATIENCE = 5 # Number of epochs to be patient before early stopping the training
def train_epoch_multi_readout(model: networks.classes.MultitaskLearner, data_loader: DataLoader,
                              optimizer: torch.optim.Optimizer, track_pmdd: Optional[bool] = False,
                              training_tracker: Optional[typeddicts.TrackingOutcome] = None,
                              train_params: Optional[typeddicts.TrainParameters] = None,
                              pmdd_dataset: Optional[data.load.DatasetType] = None,
                              validation_dl: Optional[typeddicts.DL] = None
                              ) -> Tuple[List[float], typeddicts.TrackingOutcome]:
    """Train the multi-readout model for one epoch.

    Returns the per-batch MSE train losses and the (possibly updated)
    training tracker dict.
    """
    # List to store the train loss for each batch
    loss_list = []
    for batch_idx, ((data_batch, _), (task, targets)) in enumerate(data_loader):
        optimizer.zero_grad()
        out = model.forward(data_batch)
        if len(model.rbs) != 1:
            # Select, per sample, the readout output matching its task id
            # (offset by the first task id of this run).
            out = torch.gather(out, 0, torch.add(task.view(1, -1), train_params[keys.K_FIRST_TASK_ID])).reshape(-1)
        loss = networks.evaluate.mse_loss(out, targets)
        loss.backward()
        optimizer.step()
        loss_list += [float(loss.detach())]
        if track_pmdd:
            # Snapshot pmdd loss / validation performance at selected batches
            # of the first epoch.
            if (batch_idx + 1) in networks.evaluate.PMDD_TRACK_BATCHES:
                training_tracker = networks.evaluate.track_network(model=model, pmdd_dataset=pmdd_dataset,
                                                                   training_tracker=training_tracker,
                                                                   train_loss=loss_list[-1],
                                                                   validation_dl=validation_dl,
                                                                   train_params=train_params)
    return loss_list, training_tracker
def train_epoch_context_learner(model: networks.classes.MultitaskLearner, data_loader: DataLoader,
                                optimizer: torch.optim.Optimizer, track_pmdd: Optional[bool] = False,
                                training_tracker: Optional[typeddicts.TrackingOutcome] = None,
                                train_params: Optional[typeddicts.TrainParameters] = None,
                                pmdd_dataset: Optional[data.load.DatasetType] = None,
                                validation_dl: Optional[typeddicts.DL] = None
                                ) -> Tuple[List[float], typeddicts.TrackingOutcome]:
    """Train the context-learner model for one epoch.

    Unlike the multi-readout variant, the task ids are fed directly into the
    forward pass. Returns per-batch MSE losses and the training tracker.
    """
    # List to store the train loss for each batch
    loss_list = []
    # Train one batch at a time
    for batch_idx, ((data_batch, _), (tasks, targets)) in enumerate(data_loader):
        optimizer.zero_grad()
        out = model.forward(data_batch, tasks + train_params[keys.K_FIRST_TASK_ID]).reshape(-1)
        loss = networks.evaluate.mse_loss(out, targets)
        loss_list += [float(loss.detach())]
        loss.backward()
        optimizer.step()
        # Track pmdd loss, validation performance & train loss
        if track_pmdd:
            if (batch_idx + 1) in networks.evaluate.PMDD_TRACK_BATCHES:
                training_tracker = networks.evaluate.track_network(model=model, pmdd_dataset=pmdd_dataset,
                                                                   training_tracker=training_tracker,
                                                                   train_loss=loss_list[-1],
                                                                   validation_dl=validation_dl,
                                                                   train_params=train_params)
    return loss_list, training_tracker
def train_epoch(model: networks.classes.MultitaskLearner, data_loader: DataLoader, optimizer: torch.optim.Optimizer,
                prog_params: typeddicts.ProgramParameters, train_params: typeddicts.TrainParameters,
                save_params: typeddicts.SavingParameters, pmdd_dataset: data.load.DatasetType,
                validation_dl: Optional[typeddicts.DL] = None, track_pmdd: Optional[bool] = False) -> List[float]:
    """Run one training epoch, dispatching on the model type.

    Returns the list of per-batch train losses. When *track_pmdd* is set,
    also initializes a tracker, records pmdd/validation/train metrics at
    selected batches and persists them afterwards.
    """
    # Initialize the tracker for pmdd loss, validation performance & train loss to a database
    if track_pmdd:
        assert validation_dl is not None
        training_tracker = networks.evaluate.init_track_network(model=model, train_params=train_params,
                                                                pmdd_dataset=pmdd_dataset,
                                                                validation_dl=validation_dl)
    else:
        training_tracker = None
    # Train for one epoch
    if prog_params[keys.K_MODEL_ID] == ids.ID_MULTI_READOUT:
        loss_list, training_tracker = train_epoch_multi_readout(
            model=model, data_loader=data_loader, optimizer=optimizer, track_pmdd=track_pmdd, train_params=train_params,
            training_tracker=training_tracker, pmdd_dataset=pmdd_dataset, validation_dl=validation_dl)
    else:
        # NOTE(review): training_tracker is not forwarded to the context
        # learner here (unlike the multi-readout branch), so any tracker
        # initialized above is discarded -- confirm this is intended.
        loss_list, training_tracker = train_epoch_context_learner(
            model=model, data_loader=data_loader, optimizer=optimizer, track_pmdd=track_pmdd, train_params=train_params,
            pmdd_dataset=pmdd_dataset, validation_dl=validation_dl)
    # Save the tracked pmdd loss, validation performance & train loss to a database
    if track_pmdd:
        saving.save.save_first_epoch_batches_pmdd(prog_params=prog_params,
                                                  train_params=train_params,
                                                  save_params=save_params,
                                                  tracked_pmdd_batches=training_tracker)
    return loss_list
def train_model(model: networks.classes.MultitaskLearner, train_data: data.datasets.KTaskNClassMDatasetData,
                validation_data: data.datasets.KTaskNClassMDatasetData, optimizer: torch.optim.Optimizer,
                prog_params: typeddicts.ProgramParameters, train_params: typeddicts.TrainParameters,
                save_params: typeddicts.SavingParameters, verbose: Optional[bool] = VERBOSE
                ) -> Tuple[networks.classes.MultitaskLearner, typeddicts.PerformanceOutcome]:
    """Train *model* with early stopping on validation performance.

    Trains for at most NR_EPOCHS epochs, keeping a deep copy of the best
    model seen so far and stopping after EARLY_STOPPING_PATIENCE epochs
    without improvement. Returns the best model and a performance summary.
    """
    # Prepare to evaluate best time to stop training
    validation_data_loader, nr_samples = data.load.get_dataloader(validation_data)
    validation_dl: typeddicts.DL = {keys.K_NUMBER_SAMPLES: nr_samples,
                                    keys.K_VALIDATION_DATALOADER: validation_data_loader}
    best_model = deepcopy(model)
    best_validation_performance = -1.
    best_validation_loss = -1.
    stagnation_counter = 0
    training_tracker = None
    if save_params[keys.K_SAVE_PMDD_LOSS]:
        # Flatten the raw tracking dataset to (n_samples, n_input_dims).
        if save_params[keys.K_PMDD_LOSS_TRACK_DATASET] in [ids.ID_EMNIST, ids.ID_K49, ids.ID_CIFAR100]:
            track_dataset = data.load.get_dataset(save_params[keys.K_PMDD_LOSS_TRACK_DATASET], False).data.reshape(
                [-1, data.datasets.n_input_dimension(save_params[keys.K_PMDD_LOSS_TRACK_DATASET])])
        else:
            raise NotImplementedError(f"{save_params[keys.K_PMDD_LOSS_TRACK_DATASET]} track pmdd loss")
        # Record the epoch-0 (untrained) pmdd loss of the first-layer weights.
        if save_params[keys.K_SAVE_PMDD_LOSS] is not None:
            training_tracker = {keys.K_TRACKED_TRAIN_LOSS: [], keys.K_TRACKED_VALIDATION_PERFORMANCE: [],
                                keys.K_TRACKED_PMDD_LOSS: []}
            pmdd_loss = networks.evaluate.get_pmdd_loss(dataset=track_dataset,
                                                        weights=np.transpose(model.ws[0].detach().numpy()))
            training_tracker[keys.K_TRACKED_PMDD_LOSS] += [pmdd_loss]
            print(f"Epoch 0 had pmdd L2 {pmdd_loss}")
    else:
        track_dataset = None
    # Train epochs until performance stops to improve
    for epoch in range(1, NR_EPOCHS + 1):
        data_loader = DataLoader(train_data, batch_size=train_params[keys.K_BATCH_SIZE], shuffle=True)
        # Train for one epoch; per-batch pmdd tracking only happens in epoch 1.
        if epoch == 1:
            train_loss = train_epoch(model=model, data_loader=data_loader, optimizer=optimizer, prog_params=prog_params,
                                     train_params=train_params, save_params=save_params, pmdd_dataset=track_dataset,
                                     validation_dl=validation_dl, track_pmdd=save_params[keys.K_SAVE_PMDD_LOSS])
        else:
            train_loss = train_epoch(model=model, data_loader=data_loader, optimizer=optimizer, prog_params=prog_params,
                                     train_params=train_params, save_params=save_params, pmdd_dataset=track_dataset)
        # Evaluate model performance on validation dataset
        validation_performance, validation_loss = networks.evaluate.evaluate_performance(
            model_id=prog_params[keys.K_MODEL_ID],
            model=model,
            dataset=validation_data_loader,
            nr_samples=nr_samples,
            task_id0=train_params[keys.K_FIRST_TASK_ID])
        if verbose:
            print(f"Epoch {epoch} had training loss {train_loss} and validation performance {validation_performance}%")
        if save_params[keys.K_SAVE_PMDD_LOSS]:
            # Append this epoch's metrics to the tracker.
            training_tracker[keys.K_TRACKED_TRAIN_LOSS] += [train_loss]
            training_tracker[keys.K_TRACKED_VALIDATION_PERFORMANCE] += [validation_performance]
            pmdd_loss = networks.evaluate.get_pmdd_loss(dataset=track_dataset,
                                                        weights=np.transpose(model.ws[0].detach().numpy()))
            training_tracker[keys.K_TRACKED_PMDD_LOSS] += [pmdd_loss]
            print(f"Epoch {epoch} had "
                  f"training loss {sum(train_loss)}, "
                  f"validation performance {validation_performance}% and "
                  f"pmdd L2 {pmdd_loss}")
        # Check whether performance is still improving and stop training otherwise
        if validation_performance > best_validation_performance:
            stagnation_counter = 0
            best_validation_performance = validation_performance
            best_validation_loss = validation_loss
            best_model = deepcopy(model)
        else:
            stagnation_counter += 1
            if stagnation_counter >= EARLY_STOPPING_PATIENCE:
                if verbose:
                    print(f"Early stopping training at epoch {epoch} "
                          f"with validation performance of {best_validation_performance}%")
                break
    # Save the pmdd, train and validation losses during training epochs (if the saving parameters require it)
    saving.save.save_train_epochs_pmdd(prog_params=prog_params, train_params=train_params, save_params=save_params,
                                       tracked_pmdd_epochs=training_tracker)
    # Evaluate training performance (on the best model, not the last one).
    train_performance, train_loss = networks.evaluate.evaluate_performance(model_id=prog_params[keys.K_MODEL_ID],
                                                                           model=best_model,
                                                                           dataset=train_data,
                                                                           task_id0=train_params[keys.K_FIRST_TASK_ID])
    outcome: typeddicts.PerformanceOutcome = {keys.K_TRAIN_PERFORMANCE: train_performance,
                                              keys.K_TRAIN_LOSS: train_loss,
                                              keys.K_VALIDATION_PERFORMANCE: best_validation_performance,
                                              keys.K_VALIDATION_LOSS: best_validation_loss}
    return best_model, outcome
def get_optimizer(training_type: str, model: networks.classes.MultitaskLearner, train_params: typeddicts.TrainParameters
                  ) -> torch.optim.Optimizer:
    """Build the Adam optimizer matching *training_type*.

    The training type string is matched by substring against the ids module:
    "train" variants optimize shared and contextual parameters (contextual
    ones with their own learning rate), "transfer" variants optimize only
    contextual parameters, and multi-readout variants handle the readout
    weights separately.
    """
    parameters = None
    optimizer = None
    # Train both shared and contextual parameters
    if ids.ID_TRAIN in training_type:
        lr = train_params[keys.K_SHARED_PARAM_LEARNING_RATE]
        if ids.ID_SINGLE_TASK in training_type:
            optimizer = torch.optim.Adam(model.parameters(), lr)
        elif ids.ID_CONTEXT_B_SHARED_W in training_type:
            optimizer = torch.optim.Adam([{"params": model.ws},
                                          {"params": model.bs, "lr": train_params[keys.K_CONTEXT_PARAM_LEARNING_RATE]}],
                                         lr)
        elif ids.ID_CONTEXT_G_SHARED_BW in training_type:
            optimizer = torch.optim.Adam([{"params": model.ws}, {"params": model.bs},
                                          {"params": model.gs, "lr": train_params[keys.K_CONTEXT_PARAM_LEARNING_RATE]}],
                                         lr)
        elif ids.ID_CONTEXT_G_SHARED_XW in training_type:
            optimizer = torch.optim.Adam([{"params": model.ws}, {"params": model.xs},
                                          {"params": model.gs, "lr": train_params[keys.K_CONTEXT_PARAM_LEARNING_RATE]}],
                                         lr)
        elif ids.ID_CONTEXT_G_SHARED_BXW in training_type:
            optimizer = torch.optim.Adam([{"params": model.ws}, {"params": model.xs}, {"params": model.bs},
                                          {"params": model.gs, "lr": train_params[keys.K_CONTEXT_PARAM_LEARNING_RATE]}],
                                         lr)
        elif ids.ID_CONTEXT_BG_SHARED_W in training_type:
            optimizer = torch.optim.Adam([{"params": model.ws},
                                          {"params": model.bs, "lr": train_params[keys.K_CONTEXT_PARAM_LEARNING_RATE]},
                                          {"params": model.gs, "lr": train_params[keys.K_CONTEXT_PARAM_LEARNING_RATE]}],
                                         lr)
        elif ids.ID_CONTEXT_M_SHARED_BW in training_type:
            optimizer = torch.optim.Adam([{"params": model.ws}, {"params": model.bs},
                                          {"params": model.ms, "lr": train_params[keys.K_CONTEXT_PARAM_LEARNING_RATE]}],
                                         lr)
        elif ids.ID_CONTEXT_M_SHARED_XW in training_type:
            optimizer = torch.optim.Adam([{"params": model.ws}, {"params": model.xs},
                                          {"params": model.ms, "lr": train_params[keys.K_CONTEXT_PARAM_LEARNING_RATE]}],
                                         lr)
        elif ids.ID_CONTEXT_M_SHARED_BXW in training_type:
            optimizer = torch.optim.Adam([{"params": model.ws}, {"params": model.xs}, {"params": model.bs},
                                          {"params": model.ms, "lr": train_params[keys.K_CONTEXT_PARAM_LEARNING_RATE]}],
                                         lr)
        elif ids.ID_CONTEXT_BM_SHARED_W in training_type:
            optimizer = torch.optim.Adam([{"params": model.ws},
                                          {"params": model.bs, "lr": train_params[keys.K_CONTEXT_PARAM_LEARNING_RATE]},
                                          {"params": model.ms, "lr": train_params[keys.K_CONTEXT_PARAM_LEARNING_RATE]}],
                                         lr)
    # Train multiple labels with individual readout neurons each all at the same time
    elif ids.ID_MULTI_READOUT in training_type:
        # NOTE(review): `lr` is only assigned inside the ID_TRAIN branch, so
        # this branch raises NameError when "train" is not part of
        # training_type -- confirm the intended learning rate here.
        optimizer = torch.optim.Adam([{"params": [w for w in model.ws] + [b for b in model.bs]},
                                      {"params": [r for r in model.rs] + [rb for rb in model.rbs],
                                       "lr": train_params[keys.K_CONTEXT_PARAM_LEARNING_RATE]}], lr)
    # Train only contextual parameters
    elif ids.ID_TRANSFER in training_type:
        lr = train_params[keys.K_CONTEXT_PARAM_LEARNING_RATE]
        if "deepen" in training_type:
            if ids.ID_CONTEXT_B_SHARED_W in training_type:
                # Train all layers but the first, plus all biases.
                optimizer = torch.optim.Adam([{"params": model.ws[1:]},
                                              {"params": model.bs,
                                               "lr": train_params[keys.K_CONTEXT_PARAM_LEARNING_RATE]}],
                                             train_params[keys.K_SHARED_PARAM_LEARNING_RATE])
            else:
                raise NotImplementedError
        elif ids.ID_CONTEXT_B_SHARED_W in training_type:
            parameters = model.bs
        elif ids.ID_CONTEXT_BG_SHARED_W in training_type:
            optimizer = torch.optim.Adam([{"params": model.bs, "lr": lr},
                                          {"params": model.gs, "lr": lr}], lr)
        elif ids.ID_CONTEXT_G_SHARED_BW in training_type or ids.ID_CONTEXT_G_SHARED_XW in training_type \
                or ids.ID_CONTEXT_G_SHARED_BXW in training_type:
            parameters = model.gs
        # Train only readout neurons on top of a fixed trunk
        elif ids.ID_MULTI_READOUT in training_type:
            parameters = [r for r in model.rs] + [rb for rb in model.rbs]
    else:
        raise ValueError(training_type)
    # NOTE(review): if an ID_TRAIN/ID_TRANSFER sub-type matched no elif above,
    # `parameters` is still None and this Adam call fails -- confirm all
    # supported training_type combinations are covered.
    if optimizer is None:
        optimizer = torch.optim.Adam(parameters, lr)
    return optimizer
| doggydigit/Biasadaptation-jureca | supervised/simulate/train.py | train.py | py | 17,379 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "supervised.networks.classes",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "supervised.networks",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 15,
"usage_type": "name"
},
{
"... |
40546898692 | import json
from django.http import JsonResponse
from django.shortcuts import render
from admin_manage.models import Company
from admin_manage.views import method_verify, verify_token
from face_machine_client.models import ClientInfo
from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
@verify_token
@method_verify()
def client_register(request):
    """
    Device registration / deletion endpoint.

    Request example::

        {
            'type': 'register',
            'username': '',
            'password': '',
            'company_id': '',
        }

    :param request: POST request with the fields above
    :return:
    {'msg':'ok'}
    """
    req_data = request.POST
    # Dispatch on the message type
    if req_data.get('type') == 'register':
        client_user = req_data.get('client_user')
        client_key = req_data.get('client_key')
        client_company_id = req_data.get('client_company_id')
        # Make sure all required fields are present
        if client_user and client_key and client_company_id:
            client_check = ClientInfo.objects.filter(client_user=client_user).count()
            if client_check == 0:
                try:
                    # Register the device under its company
                    # NOTE(review): client_key appears to be stored as-is --
                    # confirm whether it should be hashed.
                    company_id = Company.objects.filter(company_id=client_company_id).get()
                    ClientInfo.objects.create(client_user=client_user, client_key=client_key, client_info=company_id)
                except Exception as e:
                    print(e)
                    return JsonResponse({'msg': str(e)})
                return JsonResponse({'msg': '设备注册成功,请记录设备号与密码',
                                     'username': client_user,
                                     'password': client_key,
                                     'code': "200",
                                     'error_code': "0"
                                     })
            else:
                return JsonResponse({'msg': '设备已经被注册',
                                     'code': "200",
                                     'error_code': "601"
                                     })
        else:
            return JsonResponse({'msg': '有必填项未填',
                                 'code': "200",
                                 'error_code': "884"
                                 })
    elif req_data.get('type') == 'delete':
        # Delete a device
        client_user = req_data.get('client_user')
        client_company_id = req_data.get('client_company_id')
        # NOTE(review): this company lookup runs before the required-field
        # check below and .get() raises when the company is missing -- confirm.
        company_id = Company.objects.filter(company_id=client_company_id).get()
        # Make sure all required fields are present
        if client_user and client_company_id:
            client_object = ClientInfo.objects.filter(client_user=client_user, client_info=company_id)
            if client_object.count() == 1:
                try:
                    client_object.delete()
                except Exception as e:
                    print(e)
                    return JsonResponse({'msg': '删除失败',
                                         'error': str(e),
                                         'code': "200",
                                         'error_code': "884"
                                         })
                else:
                    return JsonResponse({'msg': '删除成功',
                                         'code': "200",
                                         'error_code': "0"})
            elif client_object.count() == 0:
                return JsonResponse({'msg': '设备不存在',
                                     'code': "200",
                                     'error_code': "606"
                                     })
            else:
                return JsonResponse({'msg': '参数出错',
                                     'code': "200",
                                     'error_code': "607"
                                     })
        else:
            return JsonResponse({'msg': '有必填项未填',
                                 'code': "200",
                                 'error_code': "884"
                                 })
    else:
        return JsonResponse({'msg': '注册参数出错',
                             'code': "200",
                             'error_code': "886"
                             })
@method_verify()
def push_to_client(request):
    """
    Push a message to connected clients over Django Channels.

    Supports two push types: "company" (broadcast to every device of a
    company) and "single_client" (one device).

    NOTE(review): when push_type matches neither branch the view returns
    None, which Django rejects -- confirm a fallback response is needed.
    """
    req_data = request.POST
    # Push type selector
    push_type = req_data.get('push_type')
    push_message = request.POST.get('push_message')
    # Broadcast to every device of a company (group name = company id)
    if push_type == "company":
        company_id = request.POST.get('company_id')
        try:
            channel_layer = get_channel_layer()
            async_to_sync(channel_layer.group_send)(  # ASGI is async; run it synchronously here and send to the group via the channel layer
                str(company_id),  # group name: the device's company id
                {
                    'type': 'get_notification',  # handler name on the consumer
                    'message': push_message,  # notification payload
                }
            )
        except Exception as e:
            print(e)
            return JsonResponse({'msg': '推送出错',
                                 'code': "200",
                                 'error_code': "701"
                                 })
        else:
            return JsonResponse({'msg': '推送成功',
                                 'code': "200",
                                 'error_code': "0"
                                 })
    # Push to a single device (group name = device id)
    elif push_type == "single_client":
        client_user = request.POST.get('client_user')
        try:
            channel_layer = get_channel_layer()
            async_to_sync(channel_layer.group_send)(  # ASGI is async; run it synchronously here and send to the group via the channel layer
                str(client_user),  # group name: the device id
                {
                    'type': 'get_notification',  # handler name on the consumer
                    'message': push_message,  # notification payload
                }
            )
        except Exception as e:
            print(e)
            return JsonResponse({'msg': '推送出错',
                                 'code': "200",
                                 'error_code': "701"
                                 })
        else:
            return JsonResponse({'msg': '推送成功',
                                 'code': "200",
                                 'error_code': "0"
                                 })
| hamster1963/face-all-in-one-machine-backend | face_machine_client/views.py | views.py | py | 6,512 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "face_machine_client.models.ClientInfo.objects.filter",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "face_machine_client.models.ClientInfo.objects",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "face_machine_client.models.ClientInfo",
... |
30299002056 | import datetime
from elasticsearch import Elasticsearch
def insertData():
    """Index one sample product document into the product_list index."""
    client = Elasticsearch('[localhost]:9200')
    # index : product_list, type : _doc
    index_name = "product_list"
    document = {
        "category": "t-shirt",
        "price": 16700,
        "@timestamp": datetime.datetime.now(),
    }
    client.index(index=index_name, doc_type="_doc", body=document)
def searchAPI():
    """Run a match_all search against product_list and print each hit."""
    client = Elasticsearch('[localhost]:9200')
    query = {
        "query": {
            "match_all": {}
        }
    }
    res = client.search(index="product_list", body=query)
    print(type(res))
    hits = res['hits']['hits']
    print(len(hits))
    for hit in hits:
        print(hit['_source'])
#insertData()
searchAPI() | yeahyung/python-flask | study/elastic.py | elastic.py | py | 730 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "elasticsearch.Elasticsearch",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name"... |
72787079228 | import time
import psutil
import scapy.interfaces
from scapy.all import *
from PyQt6.QtCore import QObject, pyqtSignal
class GetInterfaceServer(QObject):
    """Worker that samples per-NIC byte counters once per second and emits
    them as a dict via the ``bytes_flow`` signal."""

    # Loop flag: set to False from outside to stop run().
    isActive = True
    # Emits {interface_name: (bytes_sent, bytes_recv)} once per sample.
    bytes_flow = pyqtSignal(dict)
    # NOTE(review): both lists are captured at class-definition time, so NICs
    # added or removed later are not picked up -- confirm this is acceptable.
    interfaces_scapy = scapy.interfaces.get_working_ifaces()
    interfaces_psutil = psutil.net_io_counters(pernic=True)

    def run(self):
        """Emit per-interface (bytes_sent, bytes_recv) every second."""
        while True:
            if not self.isActive:
                break
            # Take ONE snapshot per cycle: the original called
            # psutil.net_io_counters() twice per interface, which was both
            # wasteful and inconsistent (sent/recv from different instants),
            # and checked membership against the stale class-time snapshot
            # (risking KeyError if a NIC disappeared).
            counters = psutil.net_io_counters(pernic=True)
            res = {}
            for interface in self.interfaces_scapy:
                stats = counters.get(interface.name)
                if stats is not None:
                    res[interface.name] = (stats.bytes_sent, stats.bytes_recv)
                else:
                    res[interface.name] = (0, 0)
            time.sleep(1)
            self.bytes_flow.emit(res)
| VanCoghChan/CCSniffer | models/GetInterfaceModel.py | GetInterfaceModel.py | py | 1,002 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "PyQt6.QtCore.QObject",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "PyQt6.QtCore.pyqtSignal",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "scapy.interfaces.interfaces.get_working_ifaces",
"line_number": 13,
"usage_type": "call"
},... |
8105081811 | # coding=utf-8
import os
import re
import glob
import MeCab
import torch
from torch import nn
import pickle
import linecache
import pandas as pd
from sklearn.model_selection import train_test_split
import torch.optim as optim
import sys
sys.path.append(os.path.join('./', '..', '..'))
from classification.script.models import LSTMClassifier
def make_dataset(data_dir):
    """Build a shuffled (title, category) DataFrame from a livedoor-style tree.

    Each subdirectory of *data_dir* is a category; the third line of every
    ``*.txt`` file in it (the article title, newline included) becomes a row.

    Replaces the deprecated/removed ``DataFrame.append`` (gone in pandas 2.0)
    with a single DataFrame construction from a list of row dicts.
    """
    categories = [dir_name for dir_name in os.listdir(data_dir)
                  if os.path.isdir(os.path.join(data_dir, dir_name))]
    rows = []
    for category in categories:
        for text_file in glob.glob(os.path.join(data_dir, category, '*.txt')):
            # Line 3 of each article file holds the title.
            title = linecache.getline(text_file, 3)
            rows.append({'title': title, 'category': category})
    datasets = pd.DataFrame(rows, columns=['title', 'category'])
    # Shuffle the rows so train/test splits are not category-ordered.
    datasets = datasets.sample(frac=1).reset_index(drop=True)
    return datasets
def make_wakati(sentence):
    """Tokenize a Japanese sentence into a list of words.

    Digits (half- and full-width), ASCII letters and a wide range of
    punctuation/symbols are stripped before splitting, so only Japanese
    word tokens remain.

    Args:
        sentence (string): raw sentence to tokenize.

    Returns:
        list: non-empty Japanese tokens with symbols and Latin text removed.
    """
    tokenizer = MeCab.Tagger('-Owakati')
    parsed = tokenizer.parse(sentence)
    # Remove half-width and full-width digits and ASCII letters.
    parsed = re.sub(r'[0-90-9a-zA-Za-zA-Z]+', " ", parsed)
    # Remove punctuation and symbols.
    parsed = re.sub(r'[\._-―─!@#$%^&\-‐|\\*\“()_■×+α※÷⇒—●★☆〇◎◆▼◇△□(:〜~+=)/*&^%$#@!~`){}[]…\[\]\"\'\”\’'
                    r':;<>?<>〔〕〈〉?、。・,\./『』【】「」→←○《》≪≫\n\u3000]+', "", parsed)
    return [token for token in parsed.split(' ') if token != ""]
def sentence2index(sentence, word2index):
    """Tokenize *sentence* and return a long tensor of vocabulary indices."""
    tokens = make_wakati(sentence)
    indices = [word2index[token] for token in tokens]
    return torch.tensor(indices, dtype=torch.long)
def main():
    """Train an LSTM title classifier on the livedoor news corpus.

    Builds (or loads a cached pickle of) the title/category dataset, indexes
    the vocabulary, then trains for 5 epochs, printing train and eval loss
    plus accuracy per epoch.
    """
    data_dir = '../data/text/'
    dataset_pickle_file = os.path.join('../', 'data', 'text', 'title_category_dataset.pickle')
    # Fixed mapping from livedoor category name to class index.
    category2index = {
        'movie-enter': 0, 'it-life-hack': 1, 'kaden-channel': 2, 'topic-news': 3, 'livedoor-homme': 4, 'peachy': 5,
        'sports-watch': 6, 'dokujo-tsushin': 7, 'smax': 8
    }
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Cache the parsed dataset: building it walks every article file.
    if not os.path.exists(dataset_pickle_file):
        datasets = make_dataset(data_dir)
        with open(dataset_pickle_file, 'wb') as pickle_write_file:
            pickle.dump(datasets, pickle_write_file)
    else:
        with open(dataset_pickle_file, 'rb') as pickle_read_file:
            datasets = pickle.load(pickle_read_file)
    # Assign each distinct token an index in first-seen order.
    word2index = {}
    for title in datasets['title']:
        wakati_title = make_wakati(title)
        for word in wakati_title:
            if word in word2index:
                continue
            word2index[word] = len(word2index)
    print('vocab size:{}'.format(len(word2index)))
    vocab_size = len(word2index)
    embedding_dim = 10
    hidden_dim = 128
    output_size = len(category2index)
    train_data, test_data = train_test_split(datasets, train_size=0.7)
    model = LSTMClassifier(embedding_dim, hidden_dim, vocab_size, output_size).to(device)
    # NLLLoss assumes the model outputs log-probabilities — TODO confirm
    # LSTMClassifier ends with log_softmax.
    criterion = nn.NLLLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.01)
    train_num, test_num = len(train_data), len(test_data)
    train_losses, eval_losses = [], []
    accuracies = []  # [TODO] compare predictions and compute accuracy here
    for epoch in range(5):
        train_loss = 0
        train_correct_num = 0
        # Online (batch size 1) training: one optimizer step per title.
        for title, cat in zip(train_data['title'], train_data['category']):
            model.zero_grad()
            inputs = sentence2index(title, word2index).to(device)
            outputs = model(inputs).to(device)
            gt = torch.tensor([category2index[cat]], dtype=torch.long).to(device)
            _, predict = torch.max(outputs, 1)
            if gt == predict:
                train_correct_num += 1
            loss = criterion(outputs, gt)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
        train_losses.append(train_loss)
        print('epoch:{}\t train loss:{}\t accuracy:{}'.format(
            epoch, train_loss, round(train_correct_num / train_num, 3)))

        # Evaluate on the held-out test data (no gradients).
        test_loss = 0
        test_correct_num = 0
        with torch.no_grad():
            for title, cat in zip(test_data['title'], test_data['category']):
                inputs = sentence2index(title, word2index).to(device)
                outputs = model(inputs).to(device)
                gt = torch.tensor([category2index[cat]], dtype=torch.long).to(device)
                _, predict = torch.max(outputs, 1)
                if gt == predict:
                    test_correct_num += 1
                loss = criterion(outputs, gt)
                test_loss += loss.item()
        eval_losses.append(test_loss)
        print('epoch:{}\t eval loss:{}\t accuracy:{}'.format(
            epoch, test_loss, round(test_correct_num / test_num, 3)))


if __name__ == '__main__':
    main()
| ys201810/lstm_pytorch | classification/script/train.py | train.py | py | 5,357 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.path.append",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number... |
17371125676 | from django.contrib import admin
from django.contrib.auth import get_user_model
# Resolve the active user model (honors AUTH_USER_MODEL).
User = get_user_model()


class UserAdmin(admin.ModelAdmin):
    """Admin list view for users, exposing balance and role columns."""
    list_display = (
        'id',
        'first_name',
        'last_name',
        'username',
        'email',
        'balance',
        'freeze_balance',
        'role',
    )


admin.site.register(User, UserAdmin)
| vavsar/freelance_t | users/admin.py | admin.py | py | 363 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 7,
"usage_type": "name"
},
... |
7938194140 | import pytesseract
from PIL import Image
import cv2
# Path to the Tesseract executable (usually auto-detected on Ubuntu).
# pytesseract.pytesseract.tesseract_cmd = r'/usr/bin/tesseract' # You may need to set the correct path to Tesseract on your system

# Open an image file
image_path = 'image.jpeg'
img = Image.open(image_path)

# Run OCR on the image and print the extracted text.
text = pytesseract.image_to_string(img)
print(text)

# Display the source image until any key is pressed, then close the window.
image = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)
cv2.imshow("", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
| KolKemboi/AiMazing | OCR.py | OCR.py | py | 572 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "PIL.Image.open",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "pytesseract.image_to_string",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
... |
618459417 | from typing import List
import numpy as np
from hakaton.prediction.model import SkyhacksModel
from hakaton.util import model_util
class WagonDetectorSkyhacksModel(SkyhacksModel):
    """Predict per-frame wagon numbers from a binary next-wagon classifier."""

    MODEL_STRUCTURE_FILE = "storedmodel/model-next-wagon-structure.json"
    MODEL_WEIGHTS_FILE = "storedmodel/model-next-wagon-weights.h5"

    def __init__(self, frame_cnt_required=3):
        # frame_cnt_required: consecutive positive frames needed to accept
        # a wagon-boundary detection.
        self._model = model_util.load(self.MODEL_STRUCTURE_FILE, self.MODEL_WEIGHTS_FILE)
        self._frame_cnt_required = frame_cnt_required

    def predict(self, train_images: List[np.ndarray], batch_size=None) -> List[object]:
        """Flatten each frame, run the model, and map frames to wagon numbers."""
        batch = np.asarray(train_images)
        batch = batch.reshape(batch.shape[0], -1)
        raw_predictions = self._model.predict(batch, batch_size)
        wagon_labels = self._parse_next_wagon_prediction(raw_predictions, self._frame_cnt_required)
        return wagon_labels.tolist()

    def _parse_next_wagon_prediction(self, predicted: np.ndarray, frame_cnt_required=2):
        """Turn a binary boundary signal into a running wagon index per frame."""
        labels_out = []
        wagon_idx = 0
        streak = 0
        locomotive_seen = False
        for label in predicted:
            # Count consecutive positive frames; any negative resets the streak.
            streak = streak + 1 if label == 1 else 0
            if streak == frame_cnt_required:
                if locomotive_seen:
                    # New wagon confirmed: retroactively relabel the streak's
                    # earlier frames with the new wagon index.
                    wagon_idx += 1
                    labels_out[-frame_cnt_required + 1:] = [wagon_idx] * (frame_cnt_required - 1)
                else:
                    # The first confirmed detection is the locomotive itself.
                    locomotive_seen = True
            labels_out.append(wagon_idx)
        return np.array(labels_out)
| karynabierz/hakaton | hakaton/prediction/wagondetector_skyhacks_model.py | wagondetector_skyhacks_model.py | py | 1,623 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "hakaton.prediction.model.SkyhacksModel",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "hakaton.util.model_util.load",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "hakaton.util.model_util",
"line_number": 14,
"usage_type": "name"
},... |
45484267256 | from setuptools import setup, find_packages
__version__ = '0.8.6'

# Long description comes straight from the README shipped with the package.
with open('README.rst', 'r', encoding='utf-8') as fh:
    long_description = fh.read()

setup(
    name='LMRt',  # required
    version=__version__,
    description='LMR turbo',
    long_description=long_description,
    long_description_content_type='text/x-rst',
    author='Feng Zhu',
    author_email='fengzhu@usc.edu',
    url='https://github.com/fzhu2e/LMRt',
    packages=find_packages(),
    include_package_data=True,
    license='GPL-3.0 license',
    zip_safe=False,
    scripts=['bin/LMRt'],
    keywords='LMRt',
    classifiers=[
        'Natural Language :: English',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
    ],
    # Runtime dependencies; installed automatically by pip.
    install_requires=[
        'termcolor',
        'pyyaml',
        'pandas',
        'cftime',
        'tqdm',
        'xarray',
        'netCDF4',
        'statsmodels',
        'seaborn',
        'pyleoclim',
        'pyvsl',
        'pyresample',
        'fbm',
    ],
)
| fzhu2e/LMRt | setup.py | setup.py | py | 1,031 | python | en | code | 9 | github-code | 6 | [
{
"api_name": "setuptools.setup",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 16,
"usage_type": "call"
}
] |
40176969885 | from setuptools import setup, find_packages
VERSION = "0.0.6"
DESCRIPTION = "Investopedia simulator trading API"
LONG_DESCRIPTION = (
    "An API that allows trading with stock simulator for from Investopedia"
)

# Runtime dependencies: selenium drives the browser, schedule runs jobs.
install_requires = ["selenium", "schedule"]

setup(
    name="simulatorTradingApi",
    version=VERSION,
    author="Michael Chi",
    author_email="dychi1997@gmail.com",
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    packages=find_packages(),
    install_requires=install_requires,
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.7",
    include_package_data=True,
)
| mchigit/investopedia-simulator-api | setup.py | setup.py | py | 752 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "setuptools.setup",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 18,
"usage_type": "call"
}
] |
37018522643 | import numpy as np
import statsmodels.api as sm
np.random.seed(2021)  # fixed seed so the simulation is reproducible

# Mean and standard deviation of all normal draws below.
mu = 0
sigma = 1

# number of observations
n = 100

# True data-generating parameters: constant intercept vector and slope.
alpha = np.repeat(0.5, n)
beta = 1.5
def MC_estimation_slope(M):
    """Monte-Carlo-estimate the OLS slope over M simulated samples.

    Returns a tuple of (dict of simulated Y samples keyed by trial index,
    flat array of the M estimated slope coefficients).
    """
    betas = []
    samples = {}
    for trial in range(M):
        # Scale X so its variance dominates the variance of the error term.
        X = 9 * np.random.normal(mu, sigma, n)
        # Random error term.
        noise = np.random.normal(mu, sigma, n)
        # Determine Y from the true model.
        Y = (alpha + beta * X + noise)
        samples[trial] = Y
        # Fit OLS (no intercept term; columns reshaped to 2-D).
        ols_result = sm.OLS(Y.reshape((-1, 1)), X.reshape((-1, 1))).fit()
        betas.append(ols_result.params)
    slope_estimates = np.array(betas).flatten()
    return (samples, slope_estimates)
# Run the simulation and average the slope estimates across replications.
MS_samples, MC_beta_hats = MC_estimation_slope(M = 10000)
beta_hat_MC = np.mean(MC_beta_hats)
print(MC_beta_hats)
print(beta_hat_MC)
| TatevKaren/mathematics-statistics-for-data-science | Statistical Sampling/Monte Carlo Simulation OLS estimate.py | Monte Carlo Simulation OLS estimate.py | py | 985 | python | en | code | 88 | github-code | 6 | [
{
"api_name": "numpy.random.seed",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "numpy.repeat",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.random.normal",
... |
12580230297 | from django.shortcuts import render
from django.http import JsonResponse
import openai
# Create your views here.
# NOTE(review): hard-coded API key placeholder — load this from an environment
# variable or Django settings instead of committing a literal key.
openai_api_key='MI-API-KEY'
openai.api_key=openai_api_key
def ask_openai(message):
    """Send *message* to the OpenAI completion endpoint and wrap the reply.

    The response text is decorated with "Miau..." markers before returning.
    """
    completion = openai.Completion.create(
        model="text-davinci-003",
        prompt=message,
        max_tokens=150,
        n=1,
        stop=None,
        temperature=0.7,
    )
    reply = completion.choices[0].text.strip()
    return "Miau... " + reply + " ...Miau"
def chatbot(request):
    """Chat view: POST returns the sarcastic AI reply as JSON, GET renders the page."""
    if request.method=='POST':
        message = request.POST.get('message')
        # Prepend a system-style instruction asking for a sarcastic persona.
        sarcastic_order = "Quiero que actúes como una persona sarcástica. "
        message = sarcastic_order + message
        response = ask_openai(message)
        return JsonResponse({'message':message, 'response':response})
return render(request, 'chatbot.html') | elgualas/MichiAI | chatbot/views.py | views.py | py | 880 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "openai.api_key",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "openai.Completion.create",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "openai.Completion",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "dj... |
4728965077 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 11 10:19:22 2018
@author: psanch
"""
import tensorflow as tf
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import numpy as np
import utils.utils as utils
class BaseVisualize:
    """Shared plotting helpers: image saving, t-SNE reduction, 2-D scatter.

    NOTE(review): the ``fig_fize`` parameter name (typo included) is kept
    for interface compatibility with existing callers.
    """

    def __init__(self, model_name, result_dir, fig_fize):
        self.model_name = model_name
        self.result_dir = result_dir
        self.fig_size = fig_fize
        # Fixed color per class label 0-9 (e.g. digit classes).
        self.colors = {0:'black', 1:'grey', 2:'blue', 3:'cyan', 4:'lime', 5:'green', 6:'yellow', 7:'gold', 8:'red', 9:'maroon'}

    def save_img(self, fig, name):
        """Persist *fig* under ``result_dir`` via the project image helper."""
        utils.save_img(fig, self.model_name, name, self.result_dir)
        return

    def reduce_dimensionality(self, var, perplexity=10):
        """Project *var* to 2-D with t-SNE when it has more than 2 features.

        Bug fix: the original computed ``var_2d`` but returned the untouched
        input ``var``. Also coerces the input to an ndarray up front so list
        inputs work.
        """
        var = np.asarray(var)
        dim = var.shape[-1]
        if(dim>2):
            tsne = TSNE(perplexity=perplexity, n_components=2, init='pca', n_iter=1000)
            var_2d = tsne.fit_transform(var)
        else:
            var_2d = var
        return var_2d

    def scatter_variable(self, var, labels, title, perplexity=10):
        """Scatter-plot *var* in 2-D, colored per class when *labels* given."""
        f, axarr = plt.subplots(1, 1, figsize=self.fig_size)
        # Bug fix: forward the caller's perplexity (it was previously ignored).
        var_2d = self.reduce_dimensionality(var, perplexity)
        if(labels is not None):
            for number, color in self.colors.items():
                axarr.scatter(x=var_2d[labels==number, 0], y=var_2d[labels==number, 1], color=color, label=str(number))
            axarr.legend()
        else:
            # No labels: plot everything in a single color.
            axarr.scatter(x=var_2d[:, 0], y=var_2d[:, 1], color=self.colors[2])
        axarr.grid()
        f.suptitle(title, fontsize=20)
return f | psanch21/VAE-GMVAE | base/base_visualize.py | base_visualize.py | py | 1,607 | python | en | code | 197 | github-code | 6 | [
{
"api_name": "utils.utils.save_img",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "utils.utils",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "sklearn.manifold.TSNE",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.asarray"... |
21816512190 | from flask import Flask, request
import json
app = Flask(__name__)  # module-level WSGI application used by the route below
@app.route("/")
def api():
    """Return all request headers as JSON once the Xxx header is present."""
    if request.headers.get("Xxx") is None:
        return "missing header"
    return json.dumps(list(request.headers))
if __name__ == "__main__":
    # NOTE(review): debug=True on 0.0.0.0 exposes the Werkzeug debugger to the
    # whole network — acceptable only for this local test app.
    app.run(host="0.0.0.0", port=3000, debug=True)
| mrtc0/abusing-hop-by-hop-header | app/app.py | app.py | py | 344 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "flask.request.headers.get",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.request.headers",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "flask.r... |
2734136284 | # -*- coding: utf-8 -*-
"""
@project ensepro
@since 25/02/2018
@author Alencar Rodrigo Hentges <alencarhentges@gmail.com>
"""
import json
import logging
from ensepro.constantes import ConfiguracoesConstantes, StringConstantes, LoggerConstantes
def __init_logger():
    """Configure the root log file and create this module's logger.

    Root handler settings (file name, format, file mode) and the module log
    level are all read from the loaded configuration, so
    __carregar_configuracoes() must run first.
    """
    global logger
    logging.basicConfig(
        filename=ConfiguracoesConstantes.ENSEPRO_PATH + __get_config(LoggerConstantes.NOME_DO_ARQUIVO),
        level=logging.INFO,
        format=__get_config(LoggerConstantes.FORMATO),
        filemode=__get_config(LoggerConstantes.MODO_DO_ARQUIVO),
    )
    logger = logging.getLogger(LoggerConstantes.GET_LOGGER_MODULO.format(modulo=LoggerConstantes.MODULO_CONFIGURACOES))
    logger.setLevel(logging.getLevelName(__get_config(LoggerConstantes.NIVEL_LOG_MODULO.format(modulo=LoggerConstantes.MODULO_CONFIGURACOES))))
def __carregar_configuracoes():
    """Load the JSON configuration file into the module-global ``__configs``.

    Uses a context manager so the file handle is closed deterministically
    (the original opened the file and never closed it).
    """
    global __configs
    with open(
        file=ConfiguracoesConstantes.ARQUIVO_CONFIGURACOES,
        mode=StringConstantes.FILE_READ_ONLY,
        encoding=StringConstantes.UTF_8,
    ) as arquivo:
        __configs = json.loads(arquivo.read())
def __get_config(path):
    """Walk the loaded config dict following the dot-separated *path*."""
    node = __configs
    for key in path.split("."):
        node = node[key]
    return node
def get_config(path: str, path_params=None, config_params=None):
    """
    Fetch a configuration value from the loaded configuration file.

    :param path: dot-separated path of the setting inside the JSON file.
    :param path_params: mapping used to fill placeholders in *path* itself.
    :param config_params: mapping used to fill placeholders in the fetched value.
    :return: the configuration value, with *config_params* applied when given.
    """
    logger.debug("Obtendo configuração: [path=%s, path_params=%s, config_params=%s]", path, path_params, config_params)
    if path_params:
        path = path.format_map(path_params)

    config = __get_config(path)

    if config_params:
        # Formatted values are returned without being logged below.
        return config.format_map(config_params)

    logger.info("Configuração obtida: [path=%s] = %s", path, config)
    return config
# Load the configuration and configure logging at import time;
# order matters: the logger settings come from the loaded configuration.
__carregar_configuracoes()
__init_logger()
| Ensepro/ensepro-core | ensepro/configuracoes/configuracoes.py | configuracoes.py | py | 2,135 | python | pt | code | 1 | github-code | 6 | [
{
"api_name": "logging.basicConfig",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "ensepro.constantes.ConfiguracoesConstantes.ENSEPRO_PATH",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "ensepro.constantes.ConfiguracoesConstantes",
"line_number": ... |
40688712803 | import logging
from feg.protos import s6a_proxy_pb2, s6a_proxy_pb2_grpc
from google.protobuf.json_format import MessageToJson
from magma.common.rpc_utils import print_grpc
from magma.subscriberdb import metrics
from magma.subscriberdb.crypto.utils import CryptoError
from magma.subscriberdb.store.base import SubscriberNotFoundError
from magma.subscriberdb.subscription.utils import ServiceNotActive
class S6aProxyRpcServicer(s6a_proxy_pb2_grpc.S6aProxyServicer):
    """
    gRPC based server for the S6aProxy.
    """

    def __init__(self, lte_processor, print_grpc_payload: bool = False):
        # lte_processor: provides auth-vector generation and subscriber lookup.
        self.lte_processor = lte_processor
        logging.info("starting s6a_proxy servicer")
        # When True, every request/response protobuf is dumped via print_grpc.
        self._print_grpc_payload = print_grpc_payload

    def add_to_server(self, server):
        """
        Add the servicer to a gRPC server
        """
        s6a_proxy_pb2_grpc.add_S6aProxyServicer_to_server(self, server)

    def AuthenticationInformation(self, request, context):
        """Handle an S6a AIR: generate an EUTRAN auth vector for the IMSI.

        Performs an SQN resync first when the request carries resync info.
        Maps crypto/subscriber/service errors to Diameter result codes.
        """
        print_grpc(request, self._print_grpc_payload, "AIR:")
        imsi = request.user_name
        aia = s6a_proxy_pb2.AuthenticationInformationAnswer()
        try:
            plmn = request.visited_plmn

            re_sync_info = request.resync_info
            # resync_info =
            #  rand + auts, rand is of 16 bytes + auts is of 14 bytes
            sizeof_resync_info = 30
            # An all-zero buffer means "no resync requested".
            if re_sync_info and (re_sync_info != b'\x00' * sizeof_resync_info):
                rand = re_sync_info[:16]
                auts = re_sync_info[16:]
                self.lte_processor.resync_lte_auth_seq(imsi, rand, auts)

            rand, xres, autn, kasme = \
                self.lte_processor.generate_lte_auth_vector(imsi, plmn)

            metrics.S6A_AUTH_SUCCESS_TOTAL.inc()

            # Generate and return response message
            aia.error_code = s6a_proxy_pb2.SUCCESS
            eutran_vector = aia.eutran_vectors.add()
            eutran_vector.rand = bytes(rand)
            eutran_vector.xres = xres
            eutran_vector.autn = autn
            eutran_vector.kasme = kasme
            logging.info("Auth success: %s", imsi)
            return aia
        except CryptoError as e:
            logging.error("Auth error for %s: %s", imsi, e)
            metrics.S6A_AUTH_FAILURE_TOTAL.labels(
                code=metrics.DIAMETER_AUTHENTICATION_REJECTED,
            ).inc()
            aia.error_code = metrics.DIAMETER_AUTHENTICATION_REJECTED
            return aia

        except SubscriberNotFoundError as e:
            logging.warning("Subscriber not found: %s", e)
            metrics.S6A_AUTH_FAILURE_TOTAL.labels(
                code=metrics.DIAMETER_ERROR_USER_UNKNOWN,
            ).inc()
            aia.error_code = metrics.DIAMETER_ERROR_USER_UNKNOWN
            return aia
        except ServiceNotActive as e:
            logging.error("Service not active for %s: %s", imsi, e)
            metrics.M5G_AUTH_FAILURE_TOTAL.labels(
                code=metrics.DIAMETER_ERROR_UNAUTHORIZED_SERVICE,
            ).inc()
            aia.error_code = metrics.DIAMETER_ERROR_UNAUTHORIZED_SERVICE
            return aia
        finally:
            # Log the answer for every exit path, success or failure.
            print_grpc(aia, self._print_grpc_payload, "AIA:")

    def UpdateLocation(self, request, context):
        """Handle an S6a ULR: return the subscriber profile and APN configs."""
        print_grpc(request, self._print_grpc_payload, "ULR:")
        imsi = request.user_name
        ula = s6a_proxy_pb2.UpdateLocationAnswer()

        try:
            profile = self.lte_processor.get_sub_profile(imsi)
        except SubscriberNotFoundError as e:
            ula.error_code = s6a_proxy_pb2.USER_UNKNOWN
            logging.warning('Subscriber not found for ULR: %s', e)
            print_grpc(ula, self._print_grpc_payload, "ULA:")
            return ula
        try:
            sub_data = self.lte_processor.get_sub_data(imsi)
        except SubscriberNotFoundError as e:
            ula.error_code = s6a_proxy_pb2.USER_UNKNOWN
            logging.warning("Subscriber not found for ULR: %s", e)
            print_grpc(ula, self._print_grpc_payload, "ULA:")
            return ula
        ula.error_code = s6a_proxy_pb2.SUCCESS
        ula.default_context_id = 0
        ula.total_ambr.max_bandwidth_ul = profile.max_ul_bit_rate
        ula.total_ambr.max_bandwidth_dl = profile.max_dl_bit_rate
        ula.all_apns_included = 0
        ula.msisdn = self.encode_msisdn(sub_data.non_3gpp.msisdn)
        # Copy every configured APN into the answer, numbering contexts from 0.
        context_id = 0
        for apn in sub_data.non_3gpp.apn_config:
            sec_apn = ula.apn.add()
            sec_apn.context_id = context_id
            context_id += 1
            sec_apn.service_selection = apn.service_selection
            sec_apn.qos_profile.class_id = apn.qos_profile.class_id
            sec_apn.qos_profile.priority_level = apn.qos_profile.priority_level
            sec_apn.qos_profile.preemption_capability = (
                apn.qos_profile.preemption_capability
            )
            sec_apn.qos_profile.preemption_vulnerability = (
                apn.qos_profile.preemption_vulnerability
            )

            sec_apn.ambr.max_bandwidth_ul = apn.ambr.max_bandwidth_ul
            sec_apn.ambr.max_bandwidth_dl = apn.ambr.max_bandwidth_dl
            sec_apn.ambr.unit = (
                s6a_proxy_pb2.UpdateLocationAnswer
                .AggregatedMaximumBitrate.BitrateUnitsAMBR.BPS
            )

            # Default to IPv4 when the stored APN has no PDN type set.
            sec_apn.pdn = (
                apn.pdn
                if apn.pdn
                else s6a_proxy_pb2.UpdateLocationAnswer.APNConfiguration.IPV4
            )

        print_grpc(ula, self._print_grpc_payload, "ULA:")
        return ula

    def PurgeUE(self, request, context):
        """Handle an S6a PUR: not implemented, log and return an empty answer."""
        logging.warning(
            "Purge request not implemented: %s %s",
            request.DESCRIPTOR.full_name, MessageToJson(request),
        )
        pur = s6a_proxy_pb2.PurgeUEAnswer()
        print_grpc(pur, self._print_grpc_payload, "PUR:")
        return pur

    @staticmethod
    def encode_msisdn(msisdn: str) -> bytes:
        """Encode an MSISDN digit string as swapped-nibble BCD bytes."""
        # Mimic how the MSISDN is encoded in ULA : 3GPP TS 29.329-f10
        # For odd length MSISDN pad it with an extra 'F'/'1111'
        if len(msisdn) % 2 != 0:
            msisdn = msisdn + "F"
        result = []
        # Treat each 2 characters as a byte and flip the order
        for i in range(len(msisdn) // 2):
            first = int(msisdn[2 * i])
            second = int(msisdn[2 * i + 1], 16)
            flipped = first + (second << 4)
            result.append(flipped)
        return bytes(result)
| magma/magma | lte/gateway/python/magma/subscriberdb/protocols/s6a_proxy_servicer.py | s6a_proxy_servicer.py | py | 6,492 | python | en | code | 1,605 | github-code | 6 | [
{
"api_name": "feg.protos.s6a_proxy_pb2_grpc.S6aProxyServicer",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "feg.protos.s6a_proxy_pb2_grpc",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "logging.info",
"line_number": 19,
"usage_type": "call"
... |
8564353511 | import tkinter as tk
import random
from sql import SqlInject
LARGE_FONT = ("Verdana", 12)
data = SqlInject()
class GuiFood(tk.Tk):
    """Main Tk application: builds every frame and raises one at a time."""

    def __init__(self, data=data, *args, **kwargs):
        # NOTE(review): the default binds the module-level SqlInject instance
        # at class-definition time; pass a fresh one explicitly to override.
        tk.Tk.__init__(self, *args, **kwargs)
        container = tk.Frame(self)
        container.pack(side="top", fill="both", expand=True)
        container.grid_rowconfigure(0, weight=1)
        container.grid_columnconfigure(0, weight=1)
        self.database = data
        self.value1 = tk.StringVar()
        # Current user selections, shared between frames; seeded with defaults.
        self.user_choice = "purée"
        self.user_product_name = "Purée de Pois Cassés "
        self.user_product_data = []
        self.subs_product_data = []
        # Maps each frame class to its instantiated frame.
        self.frames = {}
        self.all_frames = (StartPage, ChooseCate, ChooseFood, FoodInfo, SubsFood, SaveSearch)
        for F in self.all_frames:
            frame = F(container, self)
            self.frames[F] = frame
            # All frames share the same cell; tkraise() picks the visible one.
            frame.grid(row=0, column=0, sticky="nsew")
        self.show_frame(StartPage)

    def show_frame(self, cont):
        """Raise the frame registered for class *cont* to the foreground."""
        frame = self.frames[cont]
        frame.tkraise()
class StartPage(tk.Frame):
    """Landing frame: navigate to substitution or to saved substitutions."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        title = tk.Label(self, text="Start Page", font=LARGE_FONT)
        title.pack(pady=10, padx=10)
        substitute_btn = tk.Button(
            self, text="Substituer un aliment",
            command=lambda: controller.show_frame(ChooseCate))
        substitute_btn.pack()
        saved_btn = tk.Button(
            self, text="Retrouver mes aliments substitués.",
            command=lambda: controller.show_frame(SaveSearch))
        saved_btn.pack()
class ChooseCate(tk.Frame):
    """Frame offering one radio button per food category."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        self.controller = controller
        label = tk.Label(self, text="Choix de l'aliment a substituer", font=LARGE_FONT)
        label.pack(pady=10, padx=10)
        # Same category list is used for both values and labels.
        vals = controller.database.get_category()
        etiqs = controller.database.get_category()
        self.varCatch = tk.StringVar()  # holds the selected category name
        self.choice = None

        def get_value():
            # Persist the chosen category and rebuild the food-list frame
            # so it reflects the new selection.
            controller.user_choice = self.varCatch.get()
            frame = ChooseFood(parent, controller)
            controller.frames[ChooseFood] = frame
            frame.grid(row=0, column=0, sticky="nsew")

        for i in range(len(vals)):
            b = tk.Radiobutton(self, variable=self.varCatch, text=etiqs[i], value=vals[i],
                               command=lambda: [get_value(), controller.show_frame(ChooseFood)])
            b.pack(side="top", expand=1)
        button1 = tk.Button(self, text="Retour au menu",
                            command=lambda: controller.show_frame(StartPage))
        button1.pack()
class ChooseFood(tk.Frame):
    """Frame listing every food of the previously chosen category."""

    def __init__(self, parent, controller):
        self.controller = controller
        self.choice_cate = self.controller.user_choice
        tk.Frame.__init__(self, parent)
        label = tk.Label(self, text=str(self.choice_cate), font=LARGE_FONT)
        label.pack(side="top")
        food_data = controller.database.get_categorized_food(self.choice_cate)
        # Renamed from ``list`` to stop shadowing the ``list`` builtin.
        food_listbox = tk.Listbox(self)
        for i in food_data:
            # i[0] is the row id (listbox position), i[2] the product name.
            food_listbox.insert(i[0], i[2])
        food_listbox.pack()
        button2 = tk.Button(self, text="Afficher les infos",
                            command=lambda: [info_food(), controller.show_frame(FoodInfo)])
        button2.pack()
        button1 = tk.Button(self, text="Retour au Menu",
                            command=lambda: controller.show_frame(StartPage))
        button1.pack()

        def info_food():
            # Remember the selected product, then rebuild the info frame.
            controller.user_product_name = food_listbox.get(food_listbox.curselection())
            frame = FoodInfo(parent, controller)
            controller.frames[FoodInfo] = frame
            frame.grid(row=0, column=0, sticky="nsew")
class FoodInfo(tk.Frame):
    """Frame displaying the stored attributes of the selected product."""

    def __init__(self, parent, controller):
        self.controller = controller
        self.choice_info = self.controller.user_product_name
        tk.Frame.__init__(self, parent)
        food_data = controller.database.get_data_product(column="product_name", search=self.choice_info)
        # Display labels paired positionally with columns 1..-2 of the row.
        column_name = ["nutriscore","nom du produit","Nom générique","Magasin","Marques","url"]
        for col, data in zip(column_name, food_data[0][1:-1]):
            label = tk.Label(self, text=str(col)+" : "+str(data)+"\n", font=LARGE_FONT)
            label.pack(side="top")
        button1 = tk.Button(self, text="Retour au Menu",
                            command=lambda: controller.show_frame(StartPage))
        button1.pack()
        button2 = tk.Button(self, text="Substituer cet aliment",
                            command=lambda:[substitute_food(), controller.show_frame(SubsFood)])
        button2.pack()

        def substitute_food():
            # Keep the full product row so SubsFood/save can reference its id.
            controller.user_product_data = food_data
            frame = SubsFood(parent, controller)
            controller.frames[SubsFood] = frame
            frame.grid(row=0, column=0, sticky="nsew")
class SubsFood(tk.Frame):
    """Frame proposing a (randomly picked) substitute for the chosen food."""

    def __init__(self, parent, controller):
        self.controller = controller
        self.cate = self.controller.user_choice
        tk.Frame.__init__(self, parent)
        # randrange(0, 2) picks index 0 or 1 — assumes get_substitute()
        # returns at least two rows; TODO confirm against the database layer.
        food_data = controller.database.get_substitute(self.cate)[random.randrange(0,2)]
        column_name = ["nutriscore", "nom du produit", "Nom générique", "Magasin", "Marques", "url"]
        for col, data in zip(column_name, food_data[1:-1]):
            label = tk.Label(self, text=str(col)+" : "+str(data)+"\n", font=LARGE_FONT)
            label.pack(side="top")
        button1 = tk.Button(self, text="Retour au Menu",
                            command=lambda: controller.show_frame(StartPage))
        button1.pack()
        button2 = tk.Button(self, text="Sauvegarder?",
                            command= lambda: save_search())
        button2.pack()

        def save_search():
            # Persist the (product, substitute) pair, then reset to the menu.
            prod_id = controller.user_product_data[0][0]
            controller.database.save_substitute(product_id=int(prod_id), substitute_id=int(food_data[0]))
            frame = StartPage(parent, controller)
            controller.frames[StartPage] = frame
            frame.grid(row=0, column=0, sticky="nsew")
class SaveSearch(tk.Frame):
    """Frame listing each saved (product -> substitute) pair by name.

    Removes a leftover debug ``print`` of the raw rows from the original.
    """

    def __init__(self, parent, controller):
        self.controller = controller
        self.save = controller.database.get_save_substitute()
        tk.Frame.__init__(self, parent)
        for i in self.save:
            # i[1] / i[2] are the product and substitute ids; look up names.
            product_name = controller.database.get_data_product(search=i[1])[0][2]
            substitute_name = controller.database.get_data_product(search=i[2])[0][2]
            label = tk.Label(self, text=str(product_name)+" ==> "+str(substitute_name)+"\n", font=LARGE_FONT)
            label.pack(side="top")
if __name__ == '__main__':
    # Launch the Tk event loop when run as a script.
    app = GuiFood()
    app.mainloop()
| Bainard/activite5 | newgui.py | newgui.py | py | 6,917 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sql.SqlInject",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "tkinter.Tk",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "tkinter.Tk.__init__",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "tkinter.Tk",
"lin... |
34862439177 | import datetime
import decimal
import urllib.parse
from typing import Dict, Any
from django import template
from django.conf import settings
from django.template.defaultfilters import date
from django.urls import NoReverseMatch, reverse
from django.utils import timezone
from django.utils.safestring import mark_safe
from django.utils.timezone import make_aware
from pysvg.structure import Svg
from components.models import Component
from incidents.choices import IncidentImpactChoices
from utilities.forms import get_selected_values
from utilities.forms.forms import TableConfigForm
from utilities.pysvg_helpers import create_rect
from utilities.utils import get_viewname
# Shared registry for every template tag/filter defined in this module.
register = template.Library()


#
# Filters
#
@register.filter()
def viewname(model, action):
    """
    Return the view name for the given model and action. Does not perform any validation.
    """
    # Thin pass-through to the shared view-name helper.
    return get_viewname(model, action)
@register.filter()
def validated_viewname(model, action):
    """
    Return the view name for the given model and action if valid, or None if invalid.
    """
    # Local renamed from ``viewname`` to avoid shadowing the sibling filter above.
    resolved_name = get_viewname(model, action)

    # Validate the view name by attempting to reverse it.
    try:
        reverse(resolved_name)
        return resolved_name
    except NoReverseMatch:
        return None
@register.filter()
def humanize_speed(speed):
    """
    Humanize speeds given in Kbps. Examples:

        1544 => "1.544 Mbps"
        100000 => "100 Mbps"
        10000000 => "10 Gbps"
    """
    if not speed:
        return ''
    # The largest unit whose divisor divides the speed exactly wins.
    for divisor, unit in ((1000000000, 'Tbps'), (1000000, 'Gbps'), (1000, 'Mbps')):
        if speed >= divisor and speed % divisor == 0:
            return '{} {}'.format(int(speed / divisor), unit)
    # Inexact Mbps values keep their fractional part.
    if speed >= 1000:
        return '{} Mbps'.format(float(speed) / 1000)
    return '{} Kbps'.format(speed)
@register.filter()
def humanize_megabytes(mb):
    """
    Express a number of megabytes in the most suitable unit (e.g. gigabytes or terabytes).
    """
    if not mb:
        return ''
    # Check terabytes first so exact TB multiples are not reported as GB.
    if mb % 1048576 == 0:  # 1024^2
        return f'{int(mb / 1048576)} TB'
    if mb % 1024 == 0:
        return f'{int(mb / 1024)} GB'
    return f'{mb} MB'
@register.filter()
def simplify_decimal(value):
    """
    Return the simplest expression of a decimal value. Examples:
        1.00 => '1'
        1.20 => '1.2'
        1.23 => '1.23'

    Non-Decimal values are returned unchanged.
    """
    if type(value) is not decimal.Decimal:
        return value
    text = str(value)
    # Only strip trailing zeros when a fractional part exists; the original
    # stripped unconditionally, turning Decimal('100') into '1'.
    if '.' in text:
        text = text.rstrip('0').rstrip('.')
    return text
@register.filter(expects_localtime=True)
def annotated_date(date_value):
    """
    Returns date as HTML span with short date format as the content and the
    (long) date format as the title.
    """
    if not date_value:
        return ''

    # Exact type check is deliberate: datetime.datetime subclasses
    # datetime.date and must take the DATETIME_* formats below instead.
    if type(date_value) == datetime.date:
        long_ts = date(date_value, 'DATE_FORMAT')
        short_ts = date(date_value, 'SHORT_DATE_FORMAT')
    else:
        long_ts = date(date_value, 'DATETIME_FORMAT')
        short_ts = date(date_value, 'SHORT_DATETIME_FORMAT')

    # mark_safe: both values come from Django's date formatting, not raw input.
    return mark_safe(f'<span title="{long_ts}">{short_ts}</span>')
@register.simple_tag
def annotated_now():
    """
    Returns the current date piped through the annotated_date filter.
    """
    if settings.USE_TZ:
        tzinfo = timezone.get_current_timezone()
    else:
        tzinfo = None
    return annotated_date(datetime.datetime.now(tz=tzinfo))
@register.filter()
def divide(x, y):
    """
    Return x/y (rounded), or None if either operand is None or y is zero.
    """
    # Guard y == 0: a ZeroDivisionError raised from a template filter would
    # abort the whole page render.
    if x is None or y is None or y == 0:
        return None
    return round(x / y)
@register.filter()
def percentage(x, y):
    """
    Return x/y as a rounded percentage, or None if either operand is None or
    y is zero.
    """
    # Guard y == 0 for consistency with divide(): avoid ZeroDivisionError
    # crashing the template render.
    if x is None or y is None or y == 0:
        return None
    return round(x / y * 100)
@register.filter()
def has_perms(user, permissions_list):
    """
    Return True if the user has *all* permissions in the list.
    """
    # Delegates to the user model's has_perms(), which requires every entry.
    return user.has_perms(permissions_list)
@register.filter()
def as_range(n):
    """
    Return a range of n items, or an empty list when n is not a valid integer.
    """
    try:
        n = int(n)
    except (TypeError, ValueError):
        # Previously only TypeError was caught, so as_range('foo') raised an
        # uncaught ValueError; and the int() result was discarded, so
        # as_range('5') raised TypeError at range(). Both are fixed here.
        return list()
    return range(n)
@register.filter()
def meters_to_feet(n):
    """
    Convert a length from meters to feet.
    """
    feet_per_meter = 3.28084
    return float(n) * feet_per_meter
@register.filter("startswith")
def startswith(text: str, starts: str) -> bool:
"""
Template implementation of `str.startswith()`.
"""
if isinstance(text, str):
return text.startswith(starts)
return False
@register.filter
def get_key(value: Dict, arg: str) -> Any:
    """
    Template implementation of `dict.get()`, for accessing dict values by
    keys that cannot be written in template syntax, e.g.
    `{"ui.colormode": "dark"}`. Missing keys yield None.
    """
    return value.get(arg)
@register.filter
def get_item(value: object, attr: str) -> Any:
    """
    Template implementation of `__getitem__`, for accessing the `__getitem__` method
    of a class from a template.
    """
    # Plain subscription; raises KeyError/IndexError/TypeError like value[attr] would.
    return value[attr]
@register.filter
def status_from_tag(tag: str = "info") -> str:
    """
    Map Django's Message.level_tag to a theme background-color class.
    Unknown tags fall back to the literal string 'info'.
    """
    return {
        'warning': 'bg-yellow-400',
        'success': 'bg-green-400',
        'error': 'bg-red-400',
        'debug': 'bg-blue-400',
        'info': 'bg-blue-400',
    }.get(tag.lower(), 'info')
@register.filter
def icon_from_status(status: str = "info") -> str:
    """
    Determine the icon class name for a theme status/level; unknown values
    fall back to 'information'.
    """
    return {
        'warning': 'alert',
        'success': 'check-circle',
        'danger': 'alert',
        'info': 'information',
    }.get(status.lower(), 'information')
@register.filter
def get_visible_components(value: Any) -> Any:
    """
    Return only the components whose `visibility` flag is set.
    """
    # Fix: the annotation previously used the builtin function `any` as a
    # type hint; `Any` is the intended typing construct (used by the other
    # filters in this module).
    # `value` is presumably a queryset/manager of components — TODO confirm.
    return value.filter(visibility=True)
@register.filter
def get_historic_status(value: Component) -> Any:
    """
    Render a 90-day status history for a component as an inline SVG.

    Each day is drawn as one rectangle whose fill reflects the most severe
    incident created that day (critical > major > minor > none).
    """
    num_days = 90
    start_date = make_aware(datetime.datetime.today()) + datetime.timedelta(days=1)
    end_date = (start_date - datetime.timedelta(days=num_days)).replace(microsecond=0, second=0, minute=0, hour=0)
    date_list = [end_date + datetime.timedelta(days=x) for x in range(num_days)]
    component_incidents = value.incidents.all()
    status_svg = Svg(width=816, height=34)
    # Severity -> fill color, checked from most to least severe. This replaces
    # four near-identical branches that differed only in the fill value.
    severity_fills = (
        (IncidentImpactChoices.CRITICAL, "rgb(239, 68, 68)"),
        (IncidentImpactChoices.MAJOR, "rgb(249, 115, 22)"),
        (IncidentImpactChoices.MINOR, "rgb(234, 179, 8)"),
    )
    # Loop variable renamed from `date` so it no longer shadows the imported
    # `date` formatter used elsewhere in this module.
    for index, day in enumerate(date_list):
        end = day + datetime.timedelta(days=1)
        incidents = [i for i in component_incidents if day <= i.created <= end]
        fill = "rgb(34, 197, 94)"  # default: operational (green)
        for impact, color in severity_fills:
            if any(i.impact == impact for i in incidents):
                fill = color
                break
        status_svg.addElement(create_rect(index=index,
                                          date=day,
                                          incidents=len(incidents),
                                          fill=fill))
    return mark_safe(status_svg.getXML())
@register.filter
def join_components_with_groups(value: any) -> Any:
    """
    Render components as a comma-separated string, labelling each one as
    "Group — Name" when it belongs to a component group.
    """
    labels = [
        f'{c.component_group.name} — {c.name}' if c.component_group else c.name
        for c in value
    ]
    return mark_safe(", ".join(labels))
@register.filter
def urlencode(value: str) -> Any:
    """Percent-encode *value* for safe inclusion in a URL path (urllib.parse.quote)."""
    return urllib.parse.quote(value)
#
# Tags
#
@register.simple_tag()
def querystring(request, **kwargs):
    """
    Append or update the page number in a querystring.
    """
    params = request.GET.copy()
    for key, value in kwargs.items():
        if value is None:
            # A None value removes the parameter (if present).
            params.pop(key, None)
        else:
            params[key] = str(value)
    encoded = params.urlencode(safe='/')
    return f'?{encoded}' if encoded else ''
@register.inclusion_tag('helpers/utilization_graph.html')
def utilization_graph(utilization, warning_threshold=75, danger_threshold=90):
    """
    Display a horizontal bar graph indicating a percentage of utilization.

    A falsy threshold (0/None) disables that severity level; with both levels
    disabled the bar is rendered neutral grey.
    """
    # Branch order matters: full utilization wins, then danger, then warning.
    if utilization == 100:
        bar_class = 'bg-secondary'
    elif danger_threshold and utilization >= danger_threshold:
        bar_class = 'bg-danger'
    elif warning_threshold and utilization >= warning_threshold:
        bar_class = 'bg-warning'
    elif warning_threshold or danger_threshold:
        bar_class = 'bg-success'
    else:
        bar_class = 'bg-gray'
    return {
        'utilization': utilization,
        'bar_class': bar_class,
    }
@register.inclusion_tag('helpers/table_config_form.html')
def table_config_form(table, table_name=None):
    """Render the column-configuration form for *table*; name defaults to its class name."""
    return {
        'table_name': table_name or table.__class__.__name__,
        'form': TableConfigForm(table=table),
    }
@register.inclusion_tag('helpers/applied_filters.html')
def applied_filters(form, query_params):
    """
    Display the active filters for a given filter form.
    """
    # is_valid() populates form.cleaned_data; the return value is ignored
    # because invalid fields are simply skipped below.
    form.is_valid()
    applied_filters = []
    for filter_name in form.changed_data:
        if filter_name not in form.cleaned_data:
            continue
        querydict = query_params.copy()
        if filter_name not in querydict:
            continue
        bound_field = form.fields[filter_name].get_bound_field(form, filter_name)
        # Drop this filter from the copy so link_url acts as a "remove this
        # filter" link preserving all the other parameters.
        querydict.pop(filter_name)
        display_value = ', '.join([str(v) for v in get_selected_values(form, filter_name)])
        applied_filters.append({
            'name': filter_name,
            'value': form.cleaned_data[filter_name],
            'link_url': f'?{querydict.urlencode()}',
            'link_text': f'{bound_field.label}: {display_value}',
        })
    return {
        'applied_filters': applied_filters,
    }
| Status-Page/Status-Page | statuspage/utilities/templatetags/helpers.py | helpers.py | py | 10,704 | python | en | code | 45 | github-code | 6 | [
{
"api_name": "django.template.Library",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.template",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "utilities.utils.get_viewname",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "... |
39275803820 | ## For random paymentid
import re
import secrets
import sha3
import sys
from binascii import hexlify, unhexlify
import pyed25519
# byte-oriented StringIO was moved to io.BytesIO in py3k
try:
from io import BytesIO
except ImportError:
from StringIO import StringIO as BytesIO
b = pyed25519.b
q = pyed25519.q
l = pyed25519.l
# CN:
def cn_fast_hash(s):
    """Return the CryptoNote 'fast hash' (Keccak-256) of hex string `s`, as a hex digest."""
    return keccak_256(unhexlify(s))
def keccak_256(s):
    """Return the hex digest of Keccak-256 over bytes `s`.

    NOTE(review): CryptoNote coins use the original (pre-NIST) Keccak, not
    standardized SHA3-256 — presumably why the third-party `sha3` module is
    used instead of hashlib; confirm which variant the installed library
    provides.
    """
    # return Keccak().Keccak((len(s)*4, s), 1088, 512, 0x01, 32*8, False).lower()
    k = sha3.keccak_256()
    k.update(s)
    return k.hexdigest()
def sc_reduce(key):
    # Reduce the little-endian hex scalar `key` modulo `l` (taken from the
    # pyed25519 module) and return it as a 32-byte little-endian hex string.
    return intToHexStr(hexStrToInt(key) % l)
def sc_reduce32(key):
    # Reduce modulo `q` (taken from the pyed25519 module).
    # NOTE(review): reference implementations reduce sc_reduce32 modulo the
    # group order `l`, not `q` — confirm the intended modulus before use.
    return intToHexStr(hexStrToInt(key) % q)
def public_from_int(i):
    """Return the hex-encoded ed25519 public key for integer secret scalar `i`."""
    # Bug fix: the module is imported as `pyed25519` (see `b`, `q`, `l`
    # above), but this function referenced a bare `ed25519` name, raising
    # NameError on every call. Assumes pyed25519 exposes the MoneroPy-style
    # encodepoint/scalarmultbase API — confirm against the installed package.
    pubkey = pyed25519.encodepoint(pyed25519.scalarmultbase(i))
    return hexlify(pubkey)
def public_from_secret(sk):
    """Derive the hex public key from a little-endian hex-encoded secret key."""
    return public_from_int(hexStrToInt(sk)).decode('utf-8')
### base58
# MoneroPy - A python toolbox for Monero
# Copyright (C) 2016 The MoneroPy Developers.
#
# MoneroPy is released under the BSD 3-Clause license. Use and redistribution of
# this software is subject to the license terms in the LICENSE file found in the
# top-level directory of this distribution.
__alphabet = [ord(s) for s in '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz']  # base58 digits (0, O, I, l excluded)
__b58base = 58
__UINT64MAX = 2 ** 64
__encodedBlockSizes = [0, 2, 3, 5, 6, 7, 9, 10, 11]  # encoded chars per raw block length 0..8
__fullBlockSize = 8  # raw bytes per full block
__fullEncodedBlockSize = 11  # base58 chars per full block
def _hexToBin(hex):
    """Convert a hex string to a list of byte values (error string on odd length)."""
    if len(hex) % 2 != 0:
        return "Hex string has invalid length!"
    return [int(hex[pos:pos + 2], 16) for pos in range(0, len(hex), 2)]
def _binToHex(bin):
    """Convert a list of byte values to a hex string (two chars per byte)."""
    return "".join(("0" + hex(int(v)).split('x')[1])[-2:] for v in bin)
def _strToBin(a):
    """Convert a text string to the list of its character ordinals."""
    return [ord(ch) for ch in a]
def _binToStr(bin):
    """Convert a list of character ordinals back into a string."""
    return ''.join(chr(v) for v in bin)
def _uint8be_to_64(data):
    """Interpret 1..8 big-endian byte values as an unsigned integer.

    Returns an error string (not an exception) on invalid length, matching
    the error convention used throughout this base58 module.
    """
    l_data = len(data)
    if l_data < 1 or l_data > 8:
        return "Invalid input length"
    # The original emulated a C fall-through switch with eight branches that
    # all performed the identical shift-accumulate step; a plain loop does
    # exactly the same work.
    res = 0
    for byte in data:
        res = res << 8 | byte
    return res
def _uint64_to_8be(num, size):
    """Encode `num` as a list of `size` big-endian byte values (error string on bad size)."""
    if size < 1 or size > 8:
        return "Invalid input length"
    res = [0] * size
    # Fill least-significant byte last (big-endian), peeling 8 bits at a time.
    for pos in range(size - 1, -1, -1):
        res[pos] = num % 256
        num = num // 256
    return res
def encode_block(data, buf, index):
    # Encode one raw block of byte values into base58 digits, writing them
    # into `buf` (a list of ordinals) starting at `index`. Returns the
    # mutated buffer, or an error string for an invalid block length.
    l_data = len(data)
    if l_data < 1 or l_data > __fullEncodedBlockSize:
        return "Invalid block length: " + str(l_data)
    # Fold the bytes into one big-endian integer, then emit base58 digits
    # right-to-left into the slot sized for this block length; leading
    # positions keep the '1' (zero digit) pre-filled by encode().
    num = _uint8be_to_64(data)
    i = __encodedBlockSizes[l_data] - 1
    while num > 0:
        remainder = num % __b58base
        num = num // __b58base
        buf[index + i] = __alphabet[remainder];
        i -= 1
    return buf
def encode(hex):
    '''Encode hexadecimal string as base58 (ex: encoding a Monero address).'''
    data = _hexToBin(hex)
    l_data = len(data)
    if l_data == 0:
        return ""
    # Split into 8-byte blocks: each full block encodes to 11 base58 chars, a
    # trailing partial block to __encodedBlockSizes[len] chars.
    full_block_count = l_data // __fullBlockSize
    last_block_size = l_data % __fullBlockSize
    res_size = full_block_count * __fullEncodedBlockSize + __encodedBlockSizes[last_block_size]
    # Pre-fill with the zero digit ('1'); encode_block only writes non-zero digits.
    res = [0] * res_size
    for i in range(res_size):
        res[i] = __alphabet[0]
    for i in range(full_block_count):
        res = encode_block(data[(i * __fullBlockSize):(i * __fullBlockSize + __fullBlockSize)], res,
                           i * __fullEncodedBlockSize)
    if last_block_size > 0:
        res = encode_block(
            data[(full_block_count * __fullBlockSize):(full_block_count * __fullBlockSize + last_block_size)], res,
            full_block_count * __fullEncodedBlockSize)
    return _binToStr(res)
def decode_block(data, buf, index):
    """Decode one base58 block (a list of char ordinals) into byte values
    written to `buf` at `index`; returns the mutated buffer, or an error
    string on invalid input."""
    l_data = len(data)
    if l_data < 1 or l_data > __fullEncodedBlockSize:
        # Bug fix: the message previously concatenated str + int, raising
        # TypeError instead of returning the error string.
        return "Invalid block length: " + str(l_data)
    # Bug fix: list.index() raises ValueError for encoded sizes not in the
    # table (1, 4 or 8 chars), so "Invalid block size" was unreachable.
    if l_data not in __encodedBlockSizes:
        return "Invalid block size"
    res_size = __encodedBlockSizes.index(l_data)
    if res_size <= 0:
        return "Invalid block size"
    res_num = 0
    order = 1
    # Accumulate digits least-significant first, guarding 64-bit overflow.
    for i in range(l_data - 1, -1, -1):
        # Bug fix: __alphabet.index() raises ValueError on unknown symbols,
        # so the old `digit < 0` guard could never fire.
        if data[i] not in __alphabet:
            return "Invalid symbol"
        digit = __alphabet.index(data[i])
        product = order * digit + res_num
        if product > __UINT64MAX:
            return "Overflow"
        res_num = product
        order = order * __b58base
    if res_size < __fullBlockSize and 2 ** (8 * res_size) <= res_num:
        return "Overflow 2"
    tmp_buf = _uint64_to_8be(res_num, res_size)
    for i in range(len(tmp_buf)):
        buf[i + index] = tmp_buf[i]
    return buf
def decode(enc):
    '''Decode a base58 string (ex: a Monero address) into hexidecimal form.'''
    enc = _strToBin(enc)
    l_enc = len(enc)
    if l_enc == 0:
        return ""
    full_block_count = l_enc // __fullEncodedBlockSize
    last_block_size = l_enc % __fullEncodedBlockSize
    # Bug fix: list.index() raises ValueError for trailing-block lengths not
    # in the size table, so the old `< 0` check could never fire; return the
    # module's error string instead of crashing on malformed input.
    if last_block_size not in __encodedBlockSizes:
        return "Invalid encoded length"
    last_block_decoded_size = __encodedBlockSizes.index(last_block_size)
    data_size = full_block_count * __fullBlockSize + last_block_decoded_size
    data = [0] * data_size
    for i in range(full_block_count):
        data = decode_block(enc[(i * __fullEncodedBlockSize):(i * __fullEncodedBlockSize + __fullEncodedBlockSize)],
                            data, i * __fullBlockSize)
    if last_block_size > 0:
        data = decode_block(enc[(full_block_count * __fullEncodedBlockSize):(
                full_block_count * __fullEncodedBlockSize + last_block_size)], data,
                            full_block_count * __fullBlockSize)
    return _binToHex(data)
"""Varint encoder/decoder
varints are a common encoding for variable length integer data, used in
libraries such as sqlite, protobuf, v8, and more.
Here's a quick and dirty module to help avoid reimplementing the same thing
over and over again.
"""
# Py2/py3 shim: chr() returned a byte string on Python 2; bytes((b,)) is the
# Python 3 equivalent for wrapping a single int 0..255 as one byte.
if sys.version > '3':
    def _byte(b):
        return bytes((b,))
else:
    def _byte(b):
        return chr(b)
def varint_encode(number):
    """Pack non-negative `number` into varint bytes.

    Each output byte carries 7 bits, least-significant group first; the high
    bit marks continuation. Uses bytes((b,)) directly instead of the _byte()
    py2 shim — the rest of this module (int.from_bytes, secrets) is already
    Python-3-only, so the shim is dead weight here.
    """
    buf = b''
    while True:
        towrite = number & 0x7f
        number >>= 7
        if number:
            buf += bytes((towrite | 0x80,))  # more 7-bit groups follow
        else:
            buf += bytes((towrite,))
            break
    return buf
def hexStrToInt(h):
    '''Converts a little-endian hexadecimal string to an integer.'''
    raw = unhexlify(h)
    return int.from_bytes(raw, byteorder="little")
def intToHexStr(i):
    '''Converts an integer to a 32-byte little-endian hexadecimal string.'''
    raw = i.to_bytes(32, byteorder="little")
    return hexlify(raw).decode("latin-1")
# Validate CN address:
def cn_validate_address(wallet_address: str, get_prefix: int, get_addrlen: int, get_prefix_char: str):
    """Validate a CryptoNote base58 address.

    Returns the address unchanged when its length, prefix and embedded
    Keccak checksum all check out; otherwise returns None.
    """
    prefix_hex = varint_encode(get_prefix).hex()
    remain_length = get_addrlen - len(get_prefix_char)
    my_regex = r"" + get_prefix_char + r"[a-zA-Z0-9]" + r"{" + str(remain_length) + ",}"
    if len(wallet_address) != int(get_addrlen):
        return None
    if not re.match(my_regex, wallet_address.strip()):
        return None
    try:
        address_hex = decode(wallet_address)
        if address_hex.startswith(prefix_hex):
            # Layout after the varint prefix (sizes in hex chars): 64-char
            # spend key, 64-char view key, then the first 8 hex chars of the
            # Keccak fast-hash as checksum.
            i = len(prefix_hex) - 1
            address_no_prefix = address_hex[i:]
            spend = address_no_prefix[1:65]
            view = address_no_prefix[65:129]
            checksum = address_no_prefix[129:137]
            expectedChecksum = cn_fast_hash(prefix_hex + spend + view)[0:8]
            if checksum == expectedChecksum:
                return wallet_address
    except Exception as e:
        # decode()/hashing may raise on malformed base58 input; treat as invalid.
        pass
    return None
# Validate address:
def cn_validate_integrated(wallet_address: str, get_prefix_char: str, get_prefix: int, get_intaddrlen: int):
    """Validate a CryptoNote integrated address.

    Returns a dict with 'address' (the plain base58 address rebuilt from the
    embedded keys) and 'integrated_id' on success, the string 'invalid' on a
    checksum mismatch, and None for any other failure.
    """
    prefix_hex = varint_encode(get_prefix).hex()
    remain_length = get_intaddrlen - len(get_prefix_char)
    my_regex = r"" + get_prefix_char + r"[a-zA-Z0-9]" + r"{" + str(remain_length) + ",}"
    if len(wallet_address) != int(get_intaddrlen):
        return None
    if not re.match(my_regex, wallet_address.strip()):
        return None
    # Bug fix: `result` was only bound inside the prefix branch, so an address
    # with a wrong prefix reached `return result` unbound and raised
    # UnboundLocalError (outside the try, hence uncaught). Initialize it so
    # that case returns None like the other failure modes.
    result = None
    try:
        address_hex = decode(wallet_address)
        if address_hex.startswith(prefix_hex):
            # Layout (sizes in hex chars): 128-char payment ID, 64-char spend
            # key, 64-char view key, 8-char Keccak checksum.
            i = len(prefix_hex) - 1
            address_no_prefix = address_hex[i:]
            integrated_id = address_no_prefix[1:129]
            spend = address_no_prefix[(128 + 1):(128 + 65)]
            view = address_no_prefix[(128 + 65):(128 + 129)]
            checksum = address_no_prefix[(128 + 129):(128 + 137)]
            expectedChecksum = cn_fast_hash(prefix_hex + integrated_id + spend + view)[0:8]
            if checksum == expectedChecksum:
                # Rebuild the plain (non-integrated) address from the keys.
                checksum = cn_fast_hash(prefix_hex + spend + view)
                address_b58 = encode(prefix_hex + spend + view + checksum[0:8])
                result = {}
                result['address'] = str(address_b58)
                result['integrated_id'] = str(hextostr(integrated_id))
            else:
                return 'invalid'
    except Exception as e:
        return None
    return result
# make_integrated address:
def cn_make_integrated(wallet_address, get_prefix_char: str, get_prefix: int, get_addrlen: int, integrated_id=None):
    """Build an integrated address from a plain address and a payment ID.

    Returns a dict with 'address', 'paymentid' and 'integrated_address', or
    None when any validation step fails. A random payment ID is generated
    when `integrated_id` is not supplied.
    """
    prefix_hex = varint_encode(get_prefix).hex()
    remain_length = get_addrlen - len(get_prefix_char)
    my_regex = r"" + get_prefix_char + r"[a-zA-Z0-9]" + r"{" + str(remain_length) + ",}"
    if integrated_id is None:
        integrated_id = paymentid()
    if len(wallet_address) != get_addrlen:
        return None
    if not re.match(my_regex, wallet_address.strip()):
        return None
    # Payment IDs must be at least 64 alphanumeric characters.
    if not re.match(r'[a-zA-Z0-9]{64,}', integrated_id.strip()):
        return None
    try:
        address_hex = decode(wallet_address)
        checkPaymentID = integrated_id
        # Embed the payment ID's character bytes (latin-1) as hex.
        integrated_id = integrated_id.encode('latin-1').hex()
        if (address_hex.startswith(prefix_hex)):
            # Extract the spend/view keys, then re-encode them together with
            # the payment ID and a fresh Keccak checksum.
            i = len(prefix_hex) - 1
            address_no_prefix = address_hex[i:]
            spend = address_no_prefix[1:65]
            view = address_no_prefix[65:129]
            expectedChecksum = cn_fast_hash(prefix_hex + integrated_id + spend + view)[0:8]
            address = (prefix_hex + integrated_id + spend + view + expectedChecksum)
            address = str(encode(address))
            result = {}
            result['address'] = wallet_address
            result['paymentid'] = checkPaymentID
            result['integrated_address'] = address
            return result
    except Exception as e:
        # decode()/hashing errors mean the input address is malformed.
        pass
    return None
## make random paymentid:
def paymentid(length=None):
    """Return a random hex payment ID; `length` is bytes of entropy (default 32 -> 64 hex chars)."""
    entropy_bytes = 32 if length is None else length
    return secrets.token_hex(entropy_bytes)
def hextostr(hex):
    """Decode a hex string into the text whose code points equal the byte values.

    Equivalent to the previous chr()-per-byte loop (i.e. latin-1 decoding),
    but without the quadratic string concatenation and without depending on
    _hexToBin's error-string convention.
    """
    return unhexlify(hex).decode("latin-1")
##########
| wrkzcoin/TipBot | wrkzcoin_tipbot/cn_addressvalidation.py | cn_addressvalidation.py | py | 11,230 | python | en | code | 137 | github-code | 6 | [
{
"api_name": "pyed25519.b",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "pyed25519.q",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "pyed25519.l",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "binascii.unhexlify... |
26609155963 | import pyautogui
import time
def click_on_bluestacks(x, y):
    """Click at (x, y) relative to the top-left corner of the Bluestacks window."""
    # Give the user five seconds to bring the right screen into focus.
    time.sleep(5)
    # Locate the Bluestacks window by its title.
    windows = pyautogui.getWindowsWithTitle('Bluestacks')
    if not windows:
        print("La fenêtre Bluestacks n'a pas été trouvée.")
        return
    window = windows[0]
    # Move the cursor to the window-relative coordinates, then click there.
    pyautogui.moveTo(window.left + x, window.top + y)
    pyautogui.click()
# Demo entry point: click at (100, 100) inside the Bluestacks window.
if __name__ == "__main__":
    click_on_bluestacks(100, 100)
| Edgarflc/Summoners-War-Bot | test.py | test.py | py | 941 | python | fr | code | 0 | github-code | 6 | [
{
"api_name": "time.sleep",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pyautogui.getWindowsWithTitle",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pyautogui.moveTo",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pyautogui.cli... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.