seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
17058436164 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class PublicAuditStatus(object):
    """Alipay domain model holding an audit status record (desc / status / type)."""

    def __init__(self):
        self._desc = None
        self._status = None
        self._type = None

    @property
    def desc(self):
        return self._desc

    @desc.setter
    def desc(self, value):
        self._desc = value

    @property
    def status(self):
        return self._status

    @status.setter
    def status(self, value):
        self._status = value

    @property
    def type(self):
        return self._type

    @type.setter
    def type(self, value):
        self._type = value

    def to_alipay_dict(self):
        """Serialize every truthy field into a plain dict for the Alipay gateway.

        Falsy fields (None, '', 0) are omitted, matching the SDK convention;
        nested domain objects serialize themselves via to_alipay_dict().
        """
        params = dict()
        for field in ('desc', 'status', 'type'):
            value = getattr(self, field)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[field] = value.to_alipay_dict()
            else:
                params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a PublicAuditStatus from a response dict; returns None for falsy input."""
        if not d:
            return None
        o = PublicAuditStatus()
        for field in ('desc', 'status', 'type'):
            if field in d:
                setattr(o, field, d[field])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/PublicAuditStatus.py | PublicAuditStatus.py | py | 1,649 | python | en | code | 241 | github-code | 13 |
27045558728 | import numpy as np
def construct_LMS_weights(x, y, lr, order, verbose=False):
    """Fit LMS (least-mean-squares) filter weights over the input signal.

    Slides a window of length `order` over `x`; at each step the prediction
    error against `y` drives a gradient update of the weight column vector.

    :param x: 1-D input signal (numpy array)
    :param y: 1-D desired signal (numpy array)
    :param lr: learning rate of the LMS update
    :param order: filter order (window length / number of weights)
    :param verbose: currently unused -- kept for interface compatibility
    :return: (order, 1) weight column vector
    """
    weights = np.zeros((order, 1))
    n_samples = x.shape[0]
    for start in range(n_samples):
        # Stop once the window reaches `order` samples before the tail,
        # mirroring the original stopping rule.
        if start + order == n_samples - order:
            break
        window = x[start:start + order]
        prediction = window.reshape(1, -1) @ weights
        err = y[start] - prediction
        # Standard LMS gradient step: w <- w + 2 * lr * e * x_window
        weights = weights + (2 * lr * err * window).reshape(-1, 1)
    return weights
17060991664 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class TreeData(object):
    """Alipay domain model for a tree record: cooperation flag, count, alias and type."""

    def __init__(self):
        self._cooperation = None
        self._num = None
        self._tree_alias = None
        self._tree_type = None

    @property
    def cooperation(self):
        return self._cooperation

    @cooperation.setter
    def cooperation(self, value):
        self._cooperation = value

    @property
    def num(self):
        return self._num

    @num.setter
    def num(self, value):
        self._num = value

    @property
    def tree_alias(self):
        return self._tree_alias

    @tree_alias.setter
    def tree_alias(self, value):
        self._tree_alias = value

    @property
    def tree_type(self):
        return self._tree_type

    @tree_type.setter
    def tree_type(self, value):
        self._tree_type = value

    def to_alipay_dict(self):
        """Serialize every truthy field into a plain dict for the Alipay gateway.

        Falsy fields (None, 0, '') are omitted, matching the SDK convention;
        nested domain objects serialize themselves via to_alipay_dict().
        """
        params = dict()
        for field in ('cooperation', 'num', 'tree_alias', 'tree_type'):
            value = getattr(self, field)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[field] = value.to_alipay_dict()
            else:
                params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a TreeData from a response dict; returns None for falsy input."""
        if not d:
            return None
        o = TreeData()
        for field in ('cooperation', 'num', 'tree_alias', 'tree_type'):
            if field in d:
                setattr(o, field, d[field])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/TreeData.py | TreeData.py | py | 2,265 | python | en | code | 241 | github-code | 13 |
25580307742 | import glob
import os
import subprocess
import re
from datetime import datetime
from config import *
class TestRunner:
    """Compiles and runs generated JUnit test cases against a Maven project,
    collecting coverage with either Cobertura or JaCoCo."""

    def __init__(self, test_path, target_path, tool="cobertura"):
        """
        :param tool: coverage tool (Only support cobertura or jacoco)
        :param test_path: test cases directory path e.g.:
        /data/share/TestGPT_ASE/result/scope_test%20230414210243%d3_1/ (all test)
        /data/share/TestGPT_ASE/result/scope_test%20230414210243%d3_1/1460%lang_1_f%ToStringBuilder%append%d3/5 (single test)
        :param target_path: target project path
        """
        self.coverage_tool = tool
        self.test_path = test_path
        self.target_path = target_path
        # Preprocess: resolve the project's runtime jars and build output dirs once.
        self.dependencies = self.make_dependency()
        self.build_dir_name = "target/classes"
        self.build_dir = self.process_single_repo()
        # Error counters accumulate across all runs performed by this instance.
        self.COMPILE_ERROR = 0
        self.TEST_RUN_ERROR = 0

    def start_single_test(self):
        """
        Run a single method test case with a thread.
        tests directory path, e.g.:
        /data/share/TestGPT_ASE/result/scope_test%20230414210243%d3_1/1460%lang_1_f%ToStringBuilder%append%d3/5
        Returns True on success (compiled, ran, report generated), False otherwise.
        """
        temp_dir = os.path.join(self.test_path, "temp")
        compiled_test_dir = os.path.join(self.test_path, "runtemp")
        os.makedirs(compiled_test_dir, exist_ok=True)
        try:
            self.instrument(compiled_test_dir, compiled_test_dir)
            # Expects exactly one .java test file inside temp/ -- IndexError if none.
            test_file = os.path.abspath(glob.glob(temp_dir + '/*.java')[0])
            compiler_output = os.path.join(temp_dir, 'compile_error')
            test_output = os.path.join(temp_dir, 'runtime_error')
            if not self.run_single_test(test_file, compiled_test_dir, compiler_output, test_output):
                return False
            else:
                self.report(compiled_test_dir, temp_dir)
        except Exception as e:
            print(e)
            return False
        return True

    def start_all_test(self):
        """
        Initialize configurations and run all tests.
        Creates a timestamped working directory under the target project.
        """
        date = datetime.now().strftime("%Y%m%d%H%M%S")
        # Directories for the test cases, outputs, and reports
        tests_dir = os.path.join(self.target_path, f"tests%{date}")
        compiler_output_dir = os.path.join(tests_dir, "compiler_output")
        test_output_dir = os.path.join(tests_dir, "test_output")
        report_dir = os.path.join(tests_dir, "report")
        compiler_output = os.path.join(compiler_output_dir, "CompilerOutput")
        test_output = os.path.join(test_output_dir, "TestOutput")
        compiled_test_dir = os.path.join(tests_dir, "tests_ChatGPT")
        self.copy_tests(tests_dir)
        return self.run_all_tests(tests_dir, compiled_test_dir, compiler_output, test_output, report_dir)

    def run_all_tests(self, tests_dir, compiled_test_dir, compiler_output, test_output, report_dir):
        """
        Run all test cases in a project.
        Iterates over `test_number` attempts; a file belongs to attempt t when its
        name ends with '<t>Test.java'. Returns (total_compile, total_test_run).
        """
        tests = os.path.join(tests_dir, "test_cases")
        self.instrument(compiled_test_dir, compiled_test_dir)
        total_compile = 0
        total_test_run = 0
        for t in range(1, 1 + test_number):
            print("Processing attempt: ", str(t))
            for test_case_file in os.listdir(tests):
                # Attempt index is encoded in the trailing '_<t>Test.java' suffix.
                if str(t) != test_case_file.split('_')[-1].replace('Test.java', ''):
                    continue
                total_compile += 1
                try:
                    test_file = os.path.join(tests, test_case_file)
                    self.run_single_test(test_file, compiled_test_dir, compiler_output, test_output)
                except Exception as e:
                    print(e)
            # One coverage report per attempt.
            self.report(compiled_test_dir, os.path.join(report_dir, str(t)))
        total_test_run = total_compile - self.COMPILE_ERROR
        print("COMPILE TOTAL COUNT:", total_compile)
        print("COMPILE ERROR COUNT:", self.COMPILE_ERROR)
        print("TEST RUN TOTAL COUNT:", total_test_run)
        print("TEST RUN ERROR COUNT:", self.TEST_RUN_ERROR)
        print("\n")
        return total_compile, total_test_run

    def run_single_test(self, test_file, compiled_test_dir, compiler_output, test_output):
        """
        Run a test case.
        :return: Whether it is successful or no.
        """
        if not self.compile(test_file, compiled_test_dir, compiler_output):
            return False
        # 'runtime_error' is the single-test mode marker; otherwise name the
        # output file after the test case.
        if os.path.basename(test_output) == 'runtime_error':
            test_output_file = f"{test_output}.txt"
        else:
            test_output_file = f"{test_output}-{os.path.basename(test_file)}.txt"
        cmd = self.java_cmd(compiled_test_dir, test_file)
        try:
            result = subprocess.run(cmd, timeout=TIMEOUT,
                                    stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
            if result.returncode != 0:
                self.TEST_RUN_ERROR += 1
                self.export_runtime_output(result, test_output_file)
                return False
        except subprocess.TimeoutExpired:
            # NOTE(review): a timeout returns False but does NOT increment
            # TEST_RUN_ERROR -- confirm this is intentional.
            # print(Fore.RED + "TIME OUT!", Style.RESET_ALL)
            return False
        return True

    @staticmethod
    def export_runtime_output(result, test_output_file):
        """Write a failed run's stdout/stderr to a file, dropping log4j warnings."""
        with open(test_output_file, "w") as f:
            f.write(result.stdout)
            error_msg = result.stderr
            # Strip noisy 'log4j:WARN ...' lines from stderr before saving.
            error_msg = re.sub(r'log4j:WARN.*\n?', '', error_msg)
            if error_msg != '':
                f.write(error_msg)

    def compile(self, test_file, compiled_test_dir, compiler_output):
        """
        Compile a test case.
        :param test_file:
        :param compiled_test_dir: the directory to store compiled tests
        :param compiler_output:
        :return: True when javac exits cleanly; False (and writes diagnostics) otherwise.
        """
        os.makedirs(compiled_test_dir, exist_ok=True)
        cmd = self.javac_cmd(compiled_test_dir, test_file)
        result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
        if result.returncode != 0:
            self.COMPILE_ERROR += 1
            if os.path.basename(compiler_output) == 'compile_error':
                compiler_output_file = f"{compiler_output}.txt"
            else:
                compiler_output_file = f"{compiler_output}-{os.path.basename(test_file)}.txt"
            with open(compiler_output_file, "w") as f:
                f.write(result.stdout)
                f.write(result.stderr)
            return False
        return True

    def process_single_repo(self):
        """
        Return the all build directories of target repository.
        Multi-module projects yield a ':'-joined classpath of every module's
        target/classes; single-module projects yield one path.
        """
        if self.has_submodule(self.target_path):
            modules = self.get_submodule(self.target_path)
            postfixed_modules = [f'{self.target_path}/{module}/{self.build_dir_name}' for module in modules]
            build_dir = ':'.join(postfixed_modules)
        else:
            build_dir = os.path.join(self.target_path, self.build_dir_name)
        return build_dir

    @staticmethod
    def get_package(test_file):
        """Extract the Java package name, assuming the 'package ...;' line is first."""
        with open(test_file, "r") as f:
            first_line = f.readline()
            package = first_line.strip().replace("package ", "").replace(";", "")
        return package

    @staticmethod
    def is_module(project_path):
        """
        If the path has a pom.xml file and target/classes compiled, a module.
        """
        if not os.path.isdir(project_path):
            return False
        if 'pom.xml' in os.listdir(project_path) and 'target' in os.listdir(project_path):
            return True
        return False

    def get_submodule(self, project_path):
        """
        Get all modules in given project.
        :return: module list
        """
        return [d for d in os.listdir(project_path) if self.is_module(os.path.join(project_path, d))]

    def has_submodule(self, project_path):
        """
        Is a project composed by submodules, e.g., gson
        """
        for dir in os.listdir(project_path):
            if self.is_module(os.path.join(project_path, dir)):
                return True
        return False

    def javac_cmd(self, compiled_test_dir, test_file):
        """Build the javac argv; the (often very long) classpath goes through an @file."""
        classpath = f"{JUNIT_JAR}:{MOCKITO_JAR}:{LOG4J_JAR}:{self.dependencies}:{self.build_dir}:."
        classpath_file = os.path.join(compiled_test_dir, 'classpath.txt')
        self.export_classpath(classpath_file, classpath)
        return ["javac", "-d", compiled_test_dir, f"@{classpath_file}", test_file]

    def java_cmd(self, compiled_test_dir, test_file):
        """Build the java argv that runs one test class via the JUnit console launcher,
        wired up for the configured coverage tool."""
        full_test_name = self.get_full_name(test_file)
        # Instrumented classes must precede the originals on the classpath so
        # Cobertura's versions are loaded.
        classpath = f"{COBERTURA_DIR}/cobertura-2.1.1.jar:{compiled_test_dir}/instrumented:{compiled_test_dir}:" \
                    f"{JUNIT_JAR}:{MOCKITO_JAR}:{LOG4J_JAR}:{self.dependencies}:{self.build_dir}:."
        classpath_file = os.path.join(compiled_test_dir, 'classpath.txt')
        self.export_classpath(classpath_file, classpath)
        if self.coverage_tool == "cobertura":
            return ["java", f"@{classpath_file}",
                    f"-Dnet.sourceforge.cobertura.datafile={compiled_test_dir}/cobertura.ser",
                    "org.junit.platform.console.ConsoleLauncher", "--disable-banner", "--disable-ansi-colors",
                    "--fail-if-no-tests", "--details=none", "--select-class", full_test_name]
        else:  # self.coverage_tool == "jacoco"
            return ["java", f"-javaagent:{JACOCO_AGENT}=destfile={compiled_test_dir}/jacoco.exec",
                    f"@{classpath_file}",
                    "org.junit.platform.console.ConsoleLauncher", "--disable-banner", "--disable-ansi-colors",
                    "--fail-if-no-tests", "--details=none", "--select-class", full_test_name]

    @staticmethod
    def export_classpath(classpath_file, classpath):
        """Write '-cp <classpath>' to an argument file consumed via @file syntax."""
        with open(classpath_file, 'w') as f:
            classpath = "-cp " + classpath
            f.write(classpath)
        return

    def get_full_name(self, test_file):
        """Return the fully-qualified class name (package.ClassName) of a test file."""
        package = self.get_package(test_file)
        test_case = os.path.splitext(os.path.basename(test_file))[0]
        if package != '':
            return f"{package}.{test_case}"
        else:
            return test_case

    def instrument(self, instrument_dir, datafile_dir):
        """
        Use cobertura scripts to instrument compiled class.
        Generate 'instrumented' directory.
        No-op for jacoco (it instruments at runtime via the agent) and when the
        classes were already instrumented.
        """
        if self.coverage_tool == "jacoco":
            return
        os.makedirs(instrument_dir, exist_ok=True)
        os.makedirs(datafile_dir, exist_ok=True)
        if 'instrumented' in os.listdir(instrument_dir):
            return
        if self.has_submodule(self.target_path):
            target_classes = os.path.join(self.target_path, '**/target/classes')
        else:
            target_classes = os.path.join(self.target_path, 'target/classes')
        result = subprocess.run(["bash", os.path.join(COBERTURA_DIR, "cobertura-instrument.sh"),
                                 "--basedir", self.target_path,
                                 "--destination", f"{instrument_dir}/instrumented",
                                 "--datafile", f"{datafile_dir}/cobertura.ser",
                                 target_classes], stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    def report(self, datafile_dir, report_dir):
        """
        Generate coverage report by given coverage tool.
        """
        os.makedirs(report_dir, exist_ok=True)
        if self.coverage_tool == "cobertura":
            result = subprocess.run(["bash", os.path.join(COBERTURA_DIR, "cobertura-report.sh"),
                                     "--format", REPORT_FORMAT, "--datafile", f"{datafile_dir}/cobertura.ser",
                                     "--destination",
                                     report_dir], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        else:
            build_list = self.build_dir.split(":")
            classfiles = ""
            for build in build_list:
                classfiles += " --classfiles " + build
            # NOTE(review): 'classfiles' is passed as ONE argv entry containing
            # spaces; the JaCoCo CLI likely expects separate arguments -- confirm.
            result = subprocess.run(
                ["java", "-jar", JACOCO_CLI, "report", f"{datafile_dir}/jacoco.exec", classfiles,
                 "--csv", os.path.join(report_dir, "coverage.csv")], stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)

    def make_dependency(self):
        """
        Generate runtime dependencies of a given project.
        Runs 'mvn dependency:copy-dependencies' + 'mvn install' once per project,
        then collects every jar under the project tree into a ':'-joined string.
        """
        mvn_dependency_dir = 'target/dependency'
        deps = []
        if not self.has_made():
            # Run mvn command to generate dependencies
            # print("Making dependency for project", self.target_path)
            subprocess.run(
                f"mvn dependency:copy-dependencies -DoutputDirectory={mvn_dependency_dir} -f {self.target_path}/pom.xml",
                shell=True,
                stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
            subprocess.run(f"mvn install -DskipTests -f {self.target_path}/pom.xml", shell=True,
                           stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        dep_jars = glob.glob(self.target_path + "/**/*.jar", recursive=True)
        deps.extend(dep_jars)
        # De-duplicate jar paths (order is not significant for a classpath).
        deps = list(set(deps))
        return ':'.join(deps)

    def has_made(self):
        """
        If the project has made before
        (i.e. some module already contains target/dependency).
        """
        for dirpath, dirnames, filenames in os.walk(self.target_path):
            if 'pom.xml' in filenames and 'target' in dirnames:
                target = os.path.join(dirpath, 'target')
                if 'dependency' in os.listdir(target):
                    return True
        return False

    def copy_tests(self, target_dir):
        """
        Copy test cases of given project to target path for running.
        :param target_dir: path to target directory used to store test cases
        """
        tests = glob.glob(self.test_path + "/**/*Test.java", recursive=True)
        target_project = os.path.basename(self.target_path.rstrip('/'))
        _ = [os.makedirs(os.path.join(target_dir, dir_name), exist_ok=True) for dir_name in
             ("test_cases", "compiler_output", "test_output", "report")]
        print("Copying tests to ", target_project, '...')
        for tc in tests:
            # tc should be 'pathto/project/testcase'.
            # The project name is encoded in the 4th-from-last path segment as
            # '<id>%<project>%...' -- only copy tests belonging to this project.
            tc_project = tc.split('/')[-4].split('%')[1]
            if tc_project != target_project or \
                    not os.path.exists(self.target_path):
                continue
            os.system(f"cp {tc} {os.path.join(target_dir, 'test_cases')}")
| ZJU-ACES-ISE/ChatUniTest | src/test_runner.py | test_runner.py | py | 14,549 | python | en | code | 40 | github-code | 13 |
15036459916 | import itertools
import tkinter as tk
from prettytable import PrettyTable
from tkinter import ttk
class Schedule:
    """A course section with up to three weekly meeting slots.

    Slot 1 (day1 / start_time / end_time) is required; slots 2 and 3 are
    optional and default to None when the course meets fewer times a week.
    """

    def __init__(self, id, name, day1, start_time, end_time, day2=None, start_time2=None, end_time2=None, day3=None, start_time3=None, end_time3=None):
        self.id, self.name = id, name
        self.day1, self.start_time, self.end_time = day1, start_time, end_time
        self.day2, self.start_time2, self.end_time2 = day2, start_time2, end_time2
        self.day3, self.start_time3, self.end_time3 = day3, start_time3, end_time3
class ScheduleGenerator:
    """Generates all conflict-free combinations of course sections."""

    def __init__(self, course_data):
        self.course_data = course_data

    @staticmethod
    def _meetings(course):
        """Return the (day, start, end) meeting slots of a course, skipping unset days."""
        slots = [(course.day1, course.start_time, course.end_time)]
        if course.day2:
            slots.append((course.day2, course.start_time2, course.end_time2))
        if course.day3:
            slots.append((course.day3, course.start_time3, course.end_time3))
        return slots

    def isValidSchedule(self, schedule):
        """Return True when no two courses in the schedule overlap on a shared day.

        Fixes two defects of the previous version: the day1/day2 slots of the
        first course were never checked against the day3 slot of the second
        course (asymmetric comparison), and comparing times of unset (None)
        days raised TypeError in Python 3.
        NOTE: times are compared as strings, so they must be zero-padded
        (e.g. "09:00") for lexicographic ordering to match time ordering.
        """
        for course_a, course_b in itertools.combinations(schedule, 2):
            for day_a, start_a, end_a in self._meetings(course_a):
                for day_b, start_b, end_b in self._meetings(course_b):
                    # Same day and time ranges intersect -> conflict.
                    if day_a == day_b and start_a <= end_b and start_b <= end_a:
                        return False
        return True

    def generateSchedules(self):
        """Build the Cartesian product of each course's sections and keep the
        conflict-free combinations."""
        course_dict = self.groupCourses(self.course_data)
        courses = list(course_dict.values())
        schedules = list(itertools.product(*courses))
        valid_schedules = []
        for schedule in schedules:
            if self.isValidSchedule(schedule):
                valid_schedules.append(schedule)
        print(f"Generated {len(valid_schedules)} valid schedules.")
        return valid_schedules

    def printSchedule(self, schedule):
        """Pretty-print one schedule, one row per course, blank cells for unset slots."""
        table = PrettyTable()
        table.field_names = ["ID", "Course Name", "Day1", "Start Time", "End Time", "Days 2", "Start Time 2", "End Time 2", "Days 3", "Start Time 3", "End Time 3"]
        for course in schedule:
            row = [course.id, course.name, course.day1, course.start_time, course.end_time]
            if course.day2:
                row += [course.day2, course.start_time2, course.end_time2]
            else:
                row += ['', '', '']
            if course.day3:
                row += [course.day3, course.start_time3, course.end_time3]
            else:
                row += ['', '', '']
            table.add_row(row)
        print(table)

    def groupCourses(self, courseList):
        """Group course sections by course name; returns {name: [sections]}."""
        course_dict = {}
        for course in courseList:
            name = course.name
            if name not in course_dict:
                course_dict[name] = []
            course_dict[name].append(course)
        print(f"Successfully grouped {len(course_dict)} courses.")
        return course_dict
class ScheduleGeneratorGUI:
    """Tkinter front-end for entering courses and generating schedules."""

    # (label text, attribute prefix) for every input field, in display order.
    # Attribute names (<prefix>_label / <prefix>_entry) match the original class.
    _FIELDS = [
        ("Course ID:", "id"),
        ("Course Name:", "name"),
        ("Day 1:", "day1"),
        ("Start Time 1:", "start1"),
        ("End Time 1:", "end1"),
        ("Day 2:", "day2"),
        ("Start Time 2:", "start2"),
        ("End Time 2:", "end2"),
        ("Day 3 (optional):", "day3"),
        ("Start Time 3 (optional):", "start3"),
        ("End Time 3 (optional):", "end3"),
    ]

    def __init__(self, master):
        self.master = master
        master.title("Schedule Generator")
        self.label = tk.Label(master, text="Enter course data:")
        self.label.pack()
        # Build one label + entry pair per field.
        for text, prefix in self._FIELDS:
            label = tk.Label(master, text=text)
            label.pack()
            entry = tk.Entry(master)
            entry.pack()
            setattr(self, f"{prefix}_label", label)
            setattr(self, f"{prefix}_entry", entry)
        self.button = tk.Button(master, text="Add Course", command=self.add_course)
        self.button.pack()
        self.textbox = tk.Text(master, height=10, width=50)
        self.textbox.pack()
        self.generate_button = tk.Button(master, text="Generate Schedules", command=self.generate_schedules)
        self.generate_button.pack()
        self.course_list = []

    @staticmethod
    def _is_valid_time(value):
        """Return True for 'H:MM'-style strings: a ':' with digits on both sides.

        Fixes the original validation, which called str.isdigit WITHOUT
        parentheses -- the bound-method object is always truthy, so malformed
        times were silently accepted.
        """
        head, sep, tail = value.partition(":")
        return sep == ":" and head.isdigit() and tail.isdigit()

    def _report_error(self, message):
        """Append an error line to the output textbox."""
        self.textbox.insert(tk.END, message + "\n")

    def add_course(self):
        """Validate the form, build a Schedule, echo it, and reset the entries."""
        course_id = self.id_entry.get()
        course_name = self.name_entry.get()
        day1 = self.day1_entry.get()
        start1 = self.start1_entry.get()
        end1 = self.end1_entry.get()
        day2 = self.day2_entry.get()
        start2 = self.start2_entry.get()
        end2 = self.end2_entry.get()
        day3 = self.day3_entry.get()
        start3 = self.start3_entry.get()
        end3 = self.end3_entry.get()
        # Validate input.
        if not course_id.isdigit():
            self._report_error("Error: Course ID must be a number.")
            return
        if not course_name:
            self._report_error("Error: Course Name cannot be empty.")
            return
        if not day1.isalpha():
            self._report_error("Error: Day 1 cannot be empty.")
            return
        if not (self._is_valid_time(start1) and self._is_valid_time(end1)):
            self._report_error("Error: Start Time and End Time for Day 1 must be numbers.")
            return
        # Day 2 is required (the original validated it unconditionally); the
        # original error text was a copy-paste of the time message -- fixed.
        if not day2.isalpha():
            self._report_error("Error: Day 2 must be a letter.")
            return
        if not (self._is_valid_time(start2) and self._is_valid_time(end2)):
            self._report_error("Error: Start Time and End Time for Day 2 must be numbers.")
            return
        if day3 and not day3.isalpha():
            self._report_error("Error: Day 3 must be a letter.")
            return
        if day3 and not (self._is_valid_time(start3) and self._is_valid_time(end3)):
            self._report_error("Error: Start Time and End Time for Day 3 must be numbers.")
            return
        # Give a message if the course is added successfully.
        self.textbox.delete(1.0, tk.END)
        self.textbox.insert(tk.END, "Course added successfully.\n")
        course = Schedule(int(course_id), course_name, day1, start1, end1, day2, start2, end2, day3, start3, end3)
        # Show the course in the textbox in one line.
        self.textbox.insert(tk.END, f"{course.id} {course.name} {course.day1} {course.start_time} {course.end_time} {course.day2} {course.start_time2} {course.end_time2} {course.day3} {course.start_time3} {course.end_time3}\n")
        # Add the course to the list of courses.
        self.course_list.append(course)
        # Clear the entry boxes for new courses to be added.
        for _, prefix in self._FIELDS:
            getattr(self, f"{prefix}_entry").delete(0, tk.END)

    def generate_schedules(self):
        """Build all valid schedules from the entered courses and print them."""
        generator = ScheduleGenerator(self.course_list)
        schedules = generator.generateSchedules()
        for schedule in schedules:
            generator.printSchedule(schedule)
if __name__ == "__main__":
    # Launch the GUI only when run as a script, not when imported as a module.
    root = tk.Tk()
    gui = ScheduleGeneratorGUI(root)
    root.mainloop()
73127232978 | # coding utf-8
import numpy as np
import random
from sklearn.model_selection import train_test_split
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, BatchNormalization
from keras.layers import Convolution2D, MaxPooling2D, normalization
from keras.optimizers import SGD
from keras.utils import np_utils
from keras.models import load_model
from keras import backend as K
from keras.callbacks import TensorBoard as tb
from load_face_dataset import load_dataset, resize_image, IMAGE_SIZE
import matplotlib.pyplot as plt
class Dataset:
    """Loads the face dataset from disk and splits it into train/validation/test
    sets, reshaped and normalized for the active Keras backend."""

    def __init__(self, path_name):
        # Training set
        self.train_images = None
        self.train_labels = None
        # Validation set
        self.valid_images = None
        self.valid_labels = None
        # Test set
        self.test_images = None
        self.test_labels = None
        # Path the dataset is loaded from
        self.path_name = path_name
        # Input dimension ordering used by the current backend
        self.input_shape = None

    # Load the dataset, split it per cross-validation practice, and preprocess it.
    def load(self, img_rows=IMAGE_SIZE, img_cols=IMAGE_SIZE,
             img_channels=3, nb_classes=2):
        # Load the whole dataset into memory
        images, labels = load_dataset(self.path_name)
        # NOTE(review): the two splits below use independent random states, so
        # the "test" set can overlap the training set -- confirm intent.
        train_images, valid_images, train_labels, valid_labels = train_test_split(images, labels, test_size=0.3,
                                                                                  random_state=random.randint(0, 100))
        _, test_images, _, test_labels = train_test_split(images, labels, test_size=0.5,
                                                          random_state=random.randint(0, 100))
        # Dimension ordering:
        # theano backend: (channels, rows, cols); TensorFlow backend: (rows, cols, channels)
        if K.image_dim_ordering() == 'th':
            train_images = train_images.reshape(train_images.shape[0], img_channels, img_rows, img_cols)
            valid_images = valid_images.reshape(valid_images.shape[0], img_channels, img_rows, img_cols)
            test_images = test_images.reshape(test_images.shape[0], img_channels, img_rows, img_cols)
            self.input_shape = (img_channels, img_rows, img_cols)
        else:
            train_images = train_images.reshape(train_images.shape[0], img_rows, img_cols, img_channels)
            valid_images = valid_images.reshape(valid_images.shape[0], img_rows, img_cols, img_channels)
            test_images = test_images.reshape(test_images.shape[0], img_rows, img_cols, img_channels)
            self.input_shape = (img_rows, img_cols, img_channels)
        # Print the sizes of the training, validation and test sets
        print(train_images.shape[0], '训练数据')
        print(valid_images.shape[0], '验证数据')
        print(test_images.shape[0], '测试数据')
        # One-hot encode the class labels (vectorized into 2-D arrays)
        train_labels = np_utils.to_categorical(train_labels, nb_classes)
        valid_labels = np_utils.to_categorical(valid_labels, nb_classes)
        test_labels = np_utils.to_categorical(test_labels, nb_classes)
        # Convert pixel data to float so it can be normalized
        train_images = train_images.astype('float32')
        valid_images = valid_images.astype('float32')
        test_images = test_images.astype('float32')
        # Normalize pixel values into the 0~1 range
        train_images /= 255
        valid_images /= 255
        test_images /= 255
        self.train_images = train_images
        self.valid_images = valid_images
        self.test_images = test_images
        self.train_labels = train_labels
        self.valid_labels = valid_labels
        self.test_labels = test_labels
# CNN网络模型类
# CNN network model class
class Model:
    """Builds, trains, saves/loads and evaluates the face-recognition CNN."""

    def __init__(self):
        self.model = None

    # Build the model
    def build_model(self, dataset, nb_classes=2):
        # Start from an empty sequential network
        self.model = Sequential()
        # Layer 1: 16 3x3 conv filters + ReLU + 2x2 max-pooling
        self.model.add(Convolution2D(filters=16, kernel_size=(3, 3), padding='same', input_shape=dataset.input_shape, name='Conv_1'))
        # self.model.add(BatchNormalization(input_shape=dataset.input_shape))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same', name='Pool_1'))
        # Layer 2: 32 filters
        self.model.add(Convolution2D(filters=32, kernel_size=(3, 3), padding='same', name='Conv_2'))
        # self.model.add(BatchNormalization())
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same', name='Pool_2'))
        # Layer 3: 64 filters (no pooling)
        self.model.add(Convolution2D(filters=64, kernel_size=(3, 3), padding='same', name='Conv_3'))
        # self.model.add(BatchNormalization())
        self.model.add(Activation('relu'))
        # Layer 4: 64 filters + pooling
        self.model.add(Convolution2D(filters=64, kernel_size=(3, 3), padding='same', name='Conv4'))
        # self.model.add(BatchNormalization())
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same', name='Pool4'))
        # Fully-connected head
        self.model.add(Flatten())
        self.model.add(Dense(512))
        self.model.add(Activation('relu'))
        # Output layer: softmax over the classes
        self.model.add(Dense(nb_classes))
        self.model.add(Activation('softmax'))
        # Print a summary of the model
        self.model.summary()

    # Train the model
    def train(self, dataset, batch_size=20, epochs=10, data_up=True):
        # Create an SGD optimizer for training
        sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
        # Do the actual model configuration
        self.model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
        # Data augmentation
        if not data_up:
            self.model.fit(dataset.train_images,
                           dataset.train_labels,
                           batch_size=batch_size,
                           epochs=epochs,
                           validation_data=(dataset.valid_images, dataset.valid_labels),
                           shuffle=True)
        # Use real-time data augmentation
        else:
            # Define the data generator
            datagen = ImageDataGenerator(
                rotation_range=20,        # random rotation angle for augmentation
                width_shift_range=0.2,    # random horizontal shift (fraction of width)
                height_shift_range=0.2,   # random vertical shift
                horizontal_flip=True,     # random horizontal flips
                vertical_flip=False)      # no vertical flips
            # Fit the generator statistics on the training images
            datagen.fit(dataset.train_images)
            # Train the model using the generator
            self.model.fit_generator(datagen.flow(dataset.train_images, dataset.train_labels,
                                                  batch_size=batch_size),
                                     samples_per_epoch=dataset.train_images.shape[0],
                                     epochs=epochs,
                                     validation_data=(dataset.valid_images, dataset.valid_labels))
        # Build the TensorBoard callback
        tbCallBack = tb(log_dir="/Users/heyiheng/Desktop/biyelunwen/LastDemo/logs",
                        histogram_freq=1,
                        batch_size=batch_size,
                        write_grads=True)
        # NOTE(review): this second fit() runs AFTER the branch above, so the
        # model is trained twice when data_up is True -- confirm intent.
        history = self.model.fit(dataset.train_images, dataset.train_labels,
                                 batch_size=batch_size,
                                 epochs=epochs,
                                 shuffle=True,
                                 verbose=2,
                                 validation_split=0.2,
                                 callbacks=[tbCallBack])
        # NOTE(review): returns the module-level 'model' (not self) -- presumably
        # unintentional; confirm before relying on the first return value.
        return model, history

    # Default path used when saving/loading the trained face model
    MODEL_PATH = './my_face_model.h5'

    def save_model(self, file_path=MODEL_PATH):
        self.model.save(file_path)

    def load_model(self, file_path=MODEL_PATH):
        self.model = load_model(file_path)

    def evaluate(self, dataset):
        # Report the model's accuracy on the held-out test set
        score = self.model.evaluate(dataset.test_images, dataset.test_labels, verbose=1)
        print("%s: %.2f%%" % (self.model.metrics_names[1], score[1] * 100))
if __name__ == '__main__':
    # Load and split the face dataset
    dataset = Dataset(path_name='/Users/heyiheng/Desktop/biyelunwen/LastDemo/data')
    dataset.load()
    # Train the model
    model = Model()
    model.build_model(dataset)
    model.train(dataset)
    model.save_model(file_path='/Users/heyiheng/Desktop/biyelunwen/LastDemo/model/my_face_model.h5')
    # # Evaluate the model
    # model = Model()
    # model.load_model(file_path='/Users/heyiheng/Desktop/biyelunwen/LastDemo/model/my_face_model.h5')
    # model.evaluate(dataset)
| heyiheng1024/SimpleFaceRec | face_train.py | face_train.py | py | 9,146 | python | en | code | 0 | github-code | 13 |
37479877635 | import sys
def isOp(token):
    """Return True when token is one of the four arithmetic operator symbols."""
    return token in ("+", "-", "*", "/")
def searchDoubleNum(list):
    """Return the index of the first pair of adjacent non-operator tokens.

    Returns -1 when no such pair exists.  The original while-loop indexed
    list[i + 1] before checking the bounds, which raised IndexError for
    token lists shorter than 2; this bounded loop returns -1 instead.
    (Note: parameter name kept for compatibility, though it shadows the
    builtin 'list'.)
    """
    for i in range(len(list) - 1):
        if (not isOp(list[i])) and (not isOp(list[i + 1])):
            return i
    return -1
def calc(op, a1, a2):
    """Apply the binary arithmetic operator `op` to a1 and a2.

    Operands are coerced to float, so numeric strings are accepted.
    Any op other than '+', '-' or '*' is treated as division.
    """
    left, right = float(a1), float(a2)
    if op == "+":
        return left + right
    if op == "-":
        return left - right
    if op == "*":
        return left * right
    return left / right
def calcBack(list, pos):
    """Reduce the prefix-notation token list at the number pair starting at `pos`.

    `pos` indexes two adjacent numeric tokens; the operator just before them
    is applied and the triple "<op> <a> <b>" is replaced by its value.  If the
    new value ends up adjacent to another number, the fold recurses backward.
    Returns the reduced prefix of the token list, or [] when no operator
    precedes the pair (malformed input).  Note: shadows the builtin 'list'.
    """
    if pos < 1:
        # No room for an operator before the pair -> cannot reduce.
        return []
    nl = list[:pos-1]
    # Replace "<op> <a> <b>" with the computed value.
    nl.append(calc(list[pos-1], list[pos], list[pos+1]))
    if len(nl) - 2 < 0:
        return nl
    # If the result now sits next to another number, keep folding recursively.
    if (not isOp(nl[len(nl) - 2])):
        return calcBack(nl, len(nl) - 2)
    else:
        return nl
# Evaluate a space-separated prefix (Polish) notation expression given as the
# first CLI argument, e.g.: python Poland.py "+ 1 2"
instr = sys.argv[1]
ins = instr.split(" ")
# Repeatedly reduce the token list until a single value (or an error) remains.
while len(ins) > 1:
    pos = searchDoubleNum(ins)
    if pos == -1:
        # No adjacent pair of numbers left: the expression is malformed.
        print("error!!")
        break
    frontList = calcBack(ins, pos)
    if frontList == []:
        # Reduction failed (no operator before the pair): malformed input.
        print("error!!")
        break
    backList = ins[pos + 2:]
    ins = frontList + backList
# NOTE(review): 'pos' and 'frontList' are unbound when the input is a single
# token (the loop body never runs), which raises NameError here -- confirm.
if (pos != -1) and (frontList != []):
    print(ins[0])
| gurugurugum/Python | Poland.py | Poland.py | py | 1,044 | python | en | code | 0 | github-code | 13 |
13877070004 | '''
Convert List to a String
Ref:
'''
# Python program to convert a list
# to string, Using .join() method
my_list = ['I', 'want', 'four', 'apples', 'and', 'eighteen', 'bananas']  # sample input for list_to_string
def list_to_string(my_list):
    """Join the words in *my_list* into a single space-separated string."""
    separator = " "
    return separator.join(my_list)
#print(list_to_string(my_list))
'''
Convert String to Words
This method uses a regular expression to extract the words, ignoring all
punctuation marks and returning the filtered list of words.
'''
import re
#import string
def string_to_words(inp_string):
    """Split *inp_string* into words, dropping punctuation.

    Uses the regex word-character class, so digits and underscores also
    count as word characters.
    """
    return re.findall(r'\w+', inp_string)
# Example sentences for the tokeniser; only string3 is actually used below.
string1 = "She killed him with a knife when he was sleeping."
string2 = "He was killed by her with a knife when he was sleeping."
string3 = "He was killed by her, with a knife, when he was sleeping."
res = string_to_words(string3)
#print(res)
| roja19p/alignment | ace_parser/hari_transform.py | hari_transform.py | py | 855 | python | en | code | 0 | github-code | 13 |
37340099955 | import urllib.request
import os
import subprocess
from bs4 import BeautifulSoup
# Scrape every Adventure Card page linked from the category index and append
# the card text to ./tacos.txt, one cleaned line per paragraph.
baseURL = "http://www.talismanwiki.com/"
url = "http://www.talismanwiki.com/Category%3AAdventure_Card_(Revised_4th_Edition)"
content = urllib.request.urlopen(url).read()
soup = BeautifulSoup(content, 'html.parser')
# collect the per-card page links from the category listing
routingURLs = list()
urlDivs = soup.findAll("div", { 'class' : 'mw-content-ltr'})
for tag in urlDivs:
    linkTags = tag.findAll({'a' : True})
    for links in linkTags:
        routingURLs.append(links.get('href'))
for link in routingURLs:
    subPageContent = urllib.request.urlopen(baseURL + link).read()
    subPageSoup = BeautifulSoup(subPageContent, 'html.parser')
    # iterate the children of the card's content div, skipping images,
    # headlines and links
    for child in subPageSoup.find("div", {'class' : 'mw-content-ltr'}):
        # Even tho this child.find is wrong, it breaks the rest if you remove it.
        if child.find("class=GameItemImage"):
            continue
        elif child.find(class_='mw-headline'):
            continue
        elif child.find('<a'):
            continue
        else:
            with open("./tacos.txt", "a+") as f:
                text = str(child.getText())
                # boilerplate sections mark the end of the useful card text
                if "Revised 4th Edition" in text:
                    break
                elif "Copies of this Card:" in text:
                    break
                elif "Encounter Number" in text:
                    break
                else:
                    text = text.replace(".", "\n")
                    text = text.replace("\t", "")
                    f.write(text)
                    f.write("\n")
                    # NOTE(review): f.close() is redundant inside the
                    # `with` block, which closes the file on exit anyway.
                    f.close()
                    print("Wrote: " + child.getText() + " to tacos.txt");
| malexanderboyd/TaliTome | cardScraper.py | cardScraper.py | py | 1,630 | python | en | code | 0 | github-code | 13 |
40131209070 | # -*- coding: utf-8 -*-
class Solution(object):
    def reverseWords(self, s):
        """Reverse the characters of each word in *s* while preserving the
        single-space word order (LeetCode 557).

        The original implementation sliced two characters off the result
        (``answer[:len(answer)-2]``), dropping both the trailing space and
        the last letter of the final word; joining the reversed words with
        a single space fixes that off-by-one.
        """
        return ' '.join(word[::-1] for word in s.split(' '))
s = "God Ding"
c = Solution()
c.reverseWords(s) | dlwlstks96/codingtest | LeetCode/557_Reverse Words in a String 3.py | 557_Reverse Words in a String 3.py | py | 398 | python | en | code | 2 | github-code | 13 |
5174301116 | #!/usr/bin/env python
import os
import json
import argparse
import numpy as np
from copy import deepcopy
from itertools import chain
from rbw import shapes, worlds, simulation
from rbw.utils.encoders import NpEncoder
# Physics applied to both the ramp and table surfaces.
surface_phys = {'density' : 0.0,
                'friction': 0.3}
# Canonical object dimensions (XYZ); presumably in meters -- TODO confirm units.
obj_dims = np.array([3.0, 3.0, 1.5]) / 10.0
# Canonical density per material name.
density_map = {"Wood" : 1.0,
               "Brick" : 2.0,
               "Iron" : 8.0}
# Canonical friction coefficient per material name.
friction_map = {"Wood" : 0.263,
                "Brick" : 0.323,
                "Iron" : 0.215}
def canonical_object(material, shape, dims):
    """Build a *shape* instance with the canonical physics for *material*."""
    physics = {
        'density': density_map[material],
        'lateralFriction': friction_map[material],
        'restitution': 0.9,
    }
    return shape(material, dims, physics)
def low_densities(n):
    """Return *n* log-spaced density values between e**-5.5 and e**-5.0."""
    exponents = np.linspace(-5.5, -5.0, num=n)
    return np.exp(exponents)
def high_densities(n):
    """Return *n* log-spaced density values between e**5.0 and e**5.5."""
    exponents = np.linspace(5.0, 5.5, num=n)
    return np.exp(exponents)
def sample_dimensions(base):
    """Randomly scale *base* element-wise by factors in [1/1.6, 1.6]."""
    bound = np.log(1.6)
    scale_factors = np.exp(np.random.uniform(-1 * bound, bound, size=3))
    return base * scale_factors
def interpolate_positions(n):
    """Return *n* evenly spaced initial positions between 1.5 and 1.8."""
    start, stop = 1.5, 1.8
    return np.linspace(start, stop, num=n)
def make_pair(scene, material, shp, density, pos):
    """Return a [congruent, incongruent] pair of scenes.

    Both scenes place object 'A' (same shape and sampled dimensions) at
    *pos*; the incongruent copy has its density overridden by *density*.
    """
    sampled_dims = sample_dimensions(obj_dims)
    normal_obj = canonical_object(material, shp, sampled_dims)
    altered_obj = shapes.shape.change_prop(normal_obj, 'density', density)
    congruent_scene = deepcopy(scene)
    congruent_scene.add_object('A', normal_obj, pos)
    incongruent_scene = deepcopy(scene)
    incongruent_scene.add_object('A', altered_obj, pos)
    return [congruent_scene, incongruent_scene]
def make_control(scene, material, shape, pos):
    """Return a copy of *scene* with one canonical object 'A' placed at *pos*."""
    sampled_dims = sample_dimensions(obj_dims)
    control_obj = canonical_object(material, shape, sampled_dims)
    control_scene = deepcopy(scene)
    control_scene.add_object('A', control_obj, pos)
    return control_scene
def main():
    """Generate the Exp 1 scene files and write them to /scenes/exp1/."""
    parser = argparse.ArgumentParser(
        description = 'Generates an HDF5 for the Exp 1 dataset',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    # `type` was `int`, which contradicted the float defaults and would have
    # truncated user-provided dimensions; float parses integer strings too,
    # so this stays backward compatible.
    parser.add_argument('--table', type = float, nargs = 2, default = (3.5, 1.8),
                        help = 'XY dimensions of table.')
    parser.add_argument('--ramp', type = float, nargs = 2, default = (3.5, 1.8),
                        help = 'XY dimensions of ramp.')
    parser.add_argument('--ramp_angle', type = float, default = 35,
                        help = 'ramp angle in degrees')
    args = parser.parse_args()

    # table and table object (`B`) is held constant
    base = worlds.RampWorld(args.table, args.ramp,
                            ramp_angle = args.ramp_angle * (np.pi/180.),
                            ramp_phys = surface_phys,
                            table_phys = surface_phys)
    table_obj = canonical_object("Brick", shapes.Block, obj_dims)
    base.add_object("B", table_obj, 0.35)

    # materials have the same proportions of heavy/light perturbations
    densities = np.hstack((low_densities(5),
                           high_densities(5)))
    positions = interpolate_positions(5)
    positions = np.repeat(positions, 2)

    # generate the 60 pairs of ramp objects
    pairs = []
    for m in ['Iron', 'Brick', 'Wood']:
        for shp in [shapes.Block, shapes.Puck]:
            for dp in zip(densities, positions):
                pairs.append(make_pair(base, m, shp, *dp))

    # generate the 90 control trials (not paired/matched)
    controls = []
    positions = interpolate_positions(5)
    positions = np.repeat(positions, 3)
    for m in ['Iron', 'Brick', 'Wood']:
        for shp in [shapes.Block, shapes.Puck]:
            # 15 vs 30 since there are 2 (block+puck) per loop
            for p in positions:
                controls.append(make_control(base, m, shp, p))

    # flatten the pairs, then append the unpaired controls
    scenes = list(chain.from_iterable(pairs))
    print(len(scenes))
    scenes += controls
    print(len(scenes))

    # save trials to json
    out_path = '/scenes/exp1/'
    # makedirs with exist_ok replaces the isdir+mkdir pair: it also creates
    # missing parent directories and avoids the check-then-create race.
    os.makedirs(out_path, exist_ok = True)
    for i,s in enumerate(scenes):
        p = os.path.join(out_path, '{0:d}.json'.format(i))
        data = {'scene' : s.serialize()}
        with open(p, 'w') as f:
            json.dump(data, f, indent = 2, cls = NpEncoder)

    # write out metadata
    with open(os.path.join(out_path, 'info'), 'w') as f:
        json.dump({'trials' : len(scenes)}, f)
if __name__ == '__main__':
main()
| CNCLgithub/GalileoEvents | scripts/stimuli/create_exp1_dataset.py | create_exp1_dataset.py | py | 4,592 | python | en | code | 1 | github-code | 13 |
24846846878 | from pathlib import Path
from commonroad.common.solution import VehicleType
from stable_baselines3 import PPO
from stable_baselines3.common.torch_layers import FlattenExtractor
from torch import nn
from torch.optim import Adam
from commonroad_geometric.common.io_extensions.scenario import LaneletAssignmentStrategy
from commonroad_geometric.dataset.extraction.traffic import TrafficExtractorOptions
from commonroad_geometric.dataset.extraction.traffic.edge_drawers.implementations import *
from commonroad_geometric.dataset.extraction.traffic.feature_computers.implementations.lanelet import *
from commonroad_geometric.dataset.extraction.traffic.feature_computers.implementations.lanelet_to_lanelet import *
from commonroad_geometric.dataset.extraction.traffic.feature_computers.implementations.vehicle import *
from commonroad_geometric.dataset.extraction.traffic.feature_computers.implementations.vehicle_to_lanelet import *
from commonroad_geometric.dataset.extraction.traffic.feature_computers.implementations.vehicle_to_vehicle import *
from commonroad_geometric.dataset.extraction.traffic.traffic_extractor import TrafficFeatureComputerOptions
from commonroad_geometric.dataset.postprocessing.implementations import *
from commonroad_geometric.dataset.preprocessing.implementations import *
from commonroad_geometric.learning.reinforcement import RLEnvironmentOptions
from commonroad_geometric.learning.reinforcement.experiment import RLExperiment, RLExperimentConfig
from commonroad_geometric.learning.reinforcement.observer.flattened_graph_observer import FlattenedGraphObserver
from commonroad_geometric.learning.reinforcement.project.base_rl_project import BaseRLProject
from commonroad_geometric.learning.reinforcement.rewarder.reward_aggregator.implementations import SumRewardAggregator
from commonroad_geometric.learning.reinforcement.rewarder.reward_computer.implementations import *
from commonroad_geometric.learning.reinforcement.rewarder.reward_computer.types import RewardLossMetric
from commonroad_geometric.learning.reinforcement.termination_criteria.implementations import *
from commonroad_geometric.learning.reinforcement.training.rl_trainer import RLModelConfig
from commonroad_geometric.simulation.ego_simulation.control_space.implementations.longitudinal_control_space import LongitudinalControlOptions, LongitudinalControlSpace
from commonroad_geometric.simulation.ego_simulation.ego_vehicle import VehicleModel
from commonroad_geometric.simulation.ego_simulation.ego_vehicle_simulation import EgoVehicleSimulationOptions
from commonroad_geometric.simulation.ego_simulation.respawning.implementations import RandomRespawner, RandomRespawnerOptions
from commonroad_geometric.simulation.interfaces.static.scenario_simulation import ScenarioSimulation, ScenarioSimulationOptions
from projects.geometric_models.lane_occupancy.models.occupancy.occupancy_model import DEFAULT_PATH_LENGTH
from projects.graph_rl_agents.lane_occupancy.render_config import RENDERER_OPTIONS
from projects.graph_rl_agents.lane_occupancy.utils.encoding_observer import EncodingObserver
from projects.graph_rl_agents.lane_occupancy.utils.lanelet_ego_sequence_post_processor import LaneletEgoSequencePostProcessor
from projects.graph_rl_agents.lane_occupancy.utils.lanelet_graph_feature_extractor import LaneletGraphFeatureExtractor
from projects.graph_rl_agents.lane_occupancy.utils.occupancy_encoding_post_processor import OccupancyEncodingPostProcessor
from projects.graph_rl_agents.lane_occupancy.utils.occupancy_penalty_reward_computer import OccupancyPenaltyRewardComputer
# Scenario pipeline: preprocessors mutate scenarios before extraction,
# prefilters reject unsuitable scenarios entirely.
SCENARIO_PREPROCESSORS = [
    #VehicleFilterPreprocessor(),
    #RemoveIslandsPreprocessor()
    SegmentLaneletsPreprocessor(25.0)
    #(DepopulateScenarioPreprocessor(1), 1),
]
SCENARIO_PREFILTERS = [
    TrafficFilterer(),
    LaneletNetworkSizeFilterer(10)
]
# Control settings
EGO_VEHICLE_SIMULATION_OPTIONS = EgoVehicleSimulationOptions(
    vehicle_model=VehicleModel.KS,
    vehicle_type=VehicleType.BMW_320i
)
# Reinforcement learning problem configuration
# Reward terms are summed; commented entries are kept as a record of
# previously-tried shaping terms.
REWARDER_COMPUTERS = [
    AccelerationPenaltyRewardComputer(weight=0.0, loss_type=RewardLossMetric.L2),
    CollisionPenaltyRewardComputer(
        penalty=-2.0,
        # not_at_fault_penalty=-0.75,
        speed_multiplier=False,
        max_speed=15.0,
        speed_bias=3.0,
    ),
    #FrictionViolationPenaltyRewardComputer(penalty=-0.01),
    TrajectoryProgressionRewardComputer(
        weight=0.06,
        delta_threshold=0.08
    ),
    #ConstantRewardComputer(reward=-0.001),
    #
    ReachedGoalRewardComputer(reward=2.0),
    #SteeringAnglePenaltyRewardComputer(weight=0.0005, loss_type=RewardLossMetric.L1),
    StillStandingPenaltyRewardComputer(penalty=-0.001, velocity_threshold=2.0),
    #TimeToCollisionPenaltyRewardComputer(weight=0.1), # requires incoming edges
    #YawratePenaltyRewardComputer(weight=0.01)
    VelocityPenaltyRewardComputer(
        reference_velocity=17.0,
        weight=0.002,
        loss_type=RewardLossMetric.L1,
        only_upper=True
    ),
    OccupancyPenaltyRewardComputer(penalty=-0.03)
]
# Conditions under which an episode is terminated.
TERMINATION_CRITERIA = [
    # OffroadCriterion(),
    OffrouteCriterion(),
    CollisionCriterion(),
    ReachedGoalCriterion(),
    TrafficJamCriterion(),
    # FrictionViolationCriterion()
]
# Data extraction
# Feature computers per graph element: v = vehicle nodes, l = lanelet nodes,
# v2v/l2l/v2l = the corresponding edge types.
V_FEATURE_COMPUTERS = [
    ft_veh_state,
    GoalAlignmentComputer(
        include_goal_distance_longitudinal=True,
        include_goal_distance_lateral=False,
        include_goal_distance=True,
        include_lane_changes_required=True,
        logarithmic=False
    ),
    YawRateFeatureComputer(),
    VehicleLaneletPoseFeatureComputer(
        include_longitudinal_abs=True,
        include_longitudinal_rel=True,
        include_lateral_left=False,
        include_lateral_right=False,
        include_lateral_error=True,
        include_heading_error=True,
        update_exact_interval=1
    ),
    VehicleLaneletConnectivityComputer(),
    EgoFramePoseFeatureComputer(),
    NumLaneletAssignmentsFeatureComputer()
]
L_FEATURE_COMPUTERS = [
    LaneletGeometryFeatureComputer(),
]
L2L_FEATURE_COMPUTERS = [
    LaneletConnectionGeometryFeatureComputer(),
]
V2V_FEATURE_COMPUTERS = [
    ClosenessFeatureComputer(),
    TimeToCollisionFeatureComputer(),
    ft_rel_state_ego,
]
V2L_FEATURE_COMPUTERS = [
    VehicleLaneletPoseEdgeFeatureComputer(update_exact_interval=1)
]
class LaneOccupancyRLProject(BaseRLProject):
    """RL project wiring a PPO agent to the lane-occupancy experiment.

    ``cfg`` keys are read directly; ``enable_representations`` switches
    between the pretrained occupancy-encoding observer and a raw flattened
    graph observer.
    """
    def configure_experiment(self, cfg: dict) -> RLExperimentConfig:
        """Build the full experiment configuration from the cfg dict."""
        occ_model_path = Path(cfg["occ_model_path"]).resolve()
        enable_representations: bool = cfg["enable_representations"]
        # With representations enabled the observation is the (longitudinal)
        # occupancy encoding; otherwise the padded graph itself is observed.
        if enable_representations:
            observer = EncodingObserver(
                only_longitudinal_features=True
            )
        else:
            observer = FlattenedGraphObserver(
                data_padding_size=cfg["data_padding_size"],
                global_features_include=[
                    'walks', 'ego_trajectory_sequence', 'ego_trajectory_sequence_mask', 'walk_start_length'
                ]
            )
        # Post-processor that decodes the pretrained occupancy model; higher
        # decoding resolution is only used when rendering HD videos.
        occupancy_encoder_post_processor = OccupancyEncodingPostProcessor(
            occ_model_path,
            decoding_resolution_route=500 if cfg["hd_videos"] else 50,
            include_path_decodings=True,
            include_ego_vehicle_decodings=True,
            ego_length_multiplier=2.0,
            reload_freq=10000,
            deepcopy_data=not enable_representations,
            masking=not enable_representations
        )
        postprocessors = [
            RemoveEgoLaneletConnectionsPostProcessor(),
            LaneletEgoSequencePostProcessor(
                max_distance=occupancy_encoder_post_processor.path_length,
                max_sequence_length=10, flatten=False
            ),
            occupancy_encoder_post_processor
        ]
        experiment_config = RLExperimentConfig(
            simulation_cls=ScenarioSimulation,
            simulation_options=ScenarioSimulationOptions(
                lanelet_assignment_order=LaneletAssignmentStrategy.ONLY_CENTER,
            ),
            control_space_cls=LongitudinalControlSpace,
            control_space_options=LongitudinalControlOptions(
                max_velocity=20.0,
                min_velocity=0.0,
                pid_control=False
            ),
            respawner_cls=RandomRespawner,
            respawner_options=RandomRespawnerOptions(
                random_init_arclength=True,
                random_goal_arclength=True,
                random_start_timestep=True,
                only_intersections=False,
                route_length=(3, 10),
                init_speed=4.0,
                min_goal_distance=100.0,
                max_goal_distance=200.0,
                max_attempts_outer=50,
                min_vehicle_distance=16.0,
                min_vehicle_speed=1.5,
                min_vehicles_route=2,
                max_attempts_inner=5,
                min_remaining_distance=DEFAULT_PATH_LENGTH
            ),
            traffic_extraction_options=TrafficExtractorOptions(
                edge_drawer=NoEdgeDrawer(),
                feature_computers=TrafficFeatureComputerOptions(
                    v=V_FEATURE_COMPUTERS,
                    v2v=V2V_FEATURE_COMPUTERS,
                    l=L_FEATURE_COMPUTERS,
                    l2l=L2L_FEATURE_COMPUTERS,
                    v2l=V2L_FEATURE_COMPUTERS,
                ),
                postprocessors=postprocessors,
                only_ego_inc_edges=False, # set to True to speed up extraction for 1-layer GNNs
                assign_multiple_lanelets=True,
                ego_map_radius=cfg["ego_map_radius"]
            ),
            ego_vehicle_simulation_options=EGO_VEHICLE_SIMULATION_OPTIONS,
            rewarder=SumRewardAggregator(REWARDER_COMPUTERS),
            termination_criteria=TERMINATION_CRITERIA,
            env_options=RLEnvironmentOptions(
                async_resets=True,
                num_respawns_per_scenario=0,
                loop_scenarios=True,
                scenario_preprocessors=SCENARIO_PREPROCESSORS,
                scenario_prefilters=SCENARIO_PREFILTERS,
                render_on_step=cfg["render_on_step"],
                render_debug_overlays=cfg["render_debug_overlays"],
                renderer_options=RENDERER_OPTIONS,
                raise_exceptions=False,
                observer=observer
            )
        )
        return experiment_config
    def configure_model(self, cfg: dict, experiment: RLExperiment) -> RLModelConfig:
        """Build the PPO model configuration; hyperparameters come from cfg."""
        enable_representations: bool = cfg["enable_representations"]
        # A plain flatten extractor suffices for the encoding observation;
        # the raw graph observation needs the lanelet graph extractor.
        if enable_representations:
            feature_extractor_cls = FlattenExtractor
            feature_extractor_kwargs = {}
        else:
            feature_extractor_cls = LaneletGraphFeatureExtractor
            feature_extractor_kwargs = dict(path_length=DEFAULT_PATH_LENGTH)
        return RLModelConfig(
            agent_cls=PPO,
            agent_kwargs=dict(
                gae_lambda=cfg["gae_lambda"],
                gamma=cfg["gamma"],
                n_epochs=cfg["n_epochs"],
                ent_coef=cfg["ent_coef"],
                n_steps=cfg["n_steps"],
                batch_size=cfg["batch_size"],
                vf_coef=cfg["vf_coef"],
                max_grad_norm=cfg["max_grad_norm"],
                learning_rate=cfg["learning_rate"],
                clip_range=cfg["clip_range"],
                clip_range_vf=None,
                policy='MultiInputPolicy',
                policy_kwargs=dict(
                    ortho_init=False,
                    log_std_init=-1,
                    net_arch=[{'vf': [256, 128, 64], 'pi': [256, 128, 64]}],
                    activation_fn=nn.Tanh,
                    features_extractor_class=feature_extractor_cls,
                    features_extractor_kwargs=feature_extractor_kwargs,
                    optimizer_class=Adam,
                    optimizer_kwargs=dict(
                        eps=1e-5
                    )
                ),
            ),
        )
| CommonRoad/crgeo | projects/graph_rl_agents/lane_occupancy/project.py | project.py | py | 12,148 | python | en | code | 25 | github-code | 13 |
24990796850 | class Database:
"""Handles the connections to the viri (sqlite) database,
creating it if necessary."""
def __init__(self, db_filename):
import os
self.db_filename = db_filename
self.new_db = not os.path.isfile(db_filename)
def _connect(self):
import sqlite3
return sqlite3.connect(
self.db_filename,
detect_types=sqlite3.PARSE_DECLTYPES + sqlite3.PARSE_COLNAMES)
def execute(self, sql, params=()):
"""Performs an operation that modifies the content of the database."""
conn = self._connect()
conn.execute(sql, params)
conn.commit()
conn.close()
def query(self, sql, params=()):
"""Performs an operation that gets data from the database,
without modifying its content."""
conn = self._connect()
cur = conn.cursor()
cur.execute(sql, params)
result = cur.fetchall()
conn.close()
return result
class Property:
    """Handler for a database field."""
    def __init__(self, required=True):
        """Store whether the column must be non-NULL."""
        self.required = required
    def field_type(self):
        """Subclasses must return the SQL type of the column."""
        raise NotImplementedError('Properties must implement a field_type'
                                  ' method')
    @property
    def field_def(self):
        """Full SQL column definition: the type plus an optional NULL marker."""
        suffix = "" if self.required else " NULL"
        return self.field_type() + suffix
class BooleanProperty(Property):
    """A handler for boolean fields."""
    def field_type(self):
        # SQL column type used for booleans
        return "BOOL"
class CharProperty(Property):
    """A handler for text fields.

    :param size: maximum character length used in the VARCHAR declaration
    """
    def __init__(self, size, required=True):
        self.size = size
        super().__init__(required)
    def field_type(self):
        return "VARCHAR({})".format(self.size)
class TextProperty(Property):
    """A handler for long text (not indexed) fields."""
    def field_type(self):
        # arbitrarily long text column
        return "LONGTEXT"
class DatetimeProperty(Property):
    """A handler for datetime fields."""
    def field_type(self):
        # TIMESTAMP pairs with PARSE_DECLTYPES in Database._connect so that
        # sqlite3 converts values to datetime objects
        return "TIMESTAMP"
class ModelMeta(type):
    """Metaclass for the Model class, that creates an attribute _fields_
    containing an ordered dictionary with the field names, and the instance
    of its property."""
    def __new__(cls, name, bases, attrs):
        from collections import OrderedDict
        attrs['_fields_'] = OrderedDict()
        # inherit fields from the bases; reversed so that earlier-listed
        # bases are applied last and therefore win on name clashes
        # NOTE: assumes every base was also created by this metaclass
        # (i.e. it has a _fields_ attribute)
        for base in reversed(bases):
            attrs['_fields_'].update(base._fields_)
        # then register the Property attributes declared on this class itself
        for key, val in attrs.items():
            if isinstance(val, Property):
                attrs['_fields_'][key] = val
        res = super().__new__(cls, name, bases, attrs)
        return res
class Model(metaclass=ModelMeta):
    """Handler for a database table. All methods are defined as class methods,
    so the child classes should be used directly, and not instances of them."""
    @classmethod
    def row(cls, fields, values):
        """Pair up column names with their values into a plain dict."""
        return dict(zip(fields, values))
    @classmethod
    def table_name(cls):
        """Returns the SQL table name. This is the name of the class in lower
        case."""
        return cls.__name__.lower()
    @classmethod
    def field_names(cls):
        """Returns a list containing the fields of the models. This is all
        attributes of the class which are subclasses of the Property class."""
        return list(cls._fields_.keys())
    @classmethod
    def create_table(cls, db):
        """Creates the table in the database."""
        field_defs = []
        for field in cls.field_names():
            field_defs.append(
                '{} {}'.format(field, cls._fields_[field].field_def))
        db.execute("CREATE TABLE {table} ({field_defs});".format(
            table=cls.table_name(),
            field_defs=','.join(field_defs)))
    @classmethod
    def create(cls, db, vals):
        """Inserts a new row on the associated table."""
        fields = cls.field_names()
        values = [vals[n] for n in fields]
        db.execute(
            "INSERT INTO {table} ({fields}) VALUES ({values});".format(
                table=cls.table_name(),
                fields=','.join(fields),
                values=','.join(['?'] * len(fields))),
            values)
        return cls.row(fields, values)
    @classmethod
    def query(cls, db, fields=None, where={}, order=()):
        """Performs a query to the related table,
        with the specified arguments.

        NOTE(review): unlike update/delete, the WHERE clause here appends
        only " ?" after each key, so keys are apparently expected to embed
        the comparison operator (e.g. "age >"); confirm against callers.
        """
        from collections import OrderedDict
        where = OrderedDict(where)
        if not fields:
            fields = cls.field_names()
        sql = "SELECT {} FROM {}".format(','.join(fields), cls.table_name())
        if where:
            sql += " WHERE " + " AND ".join(
                map(lambda x: x + " ?", where.keys()))
        if order:
            sql += " ORDER BY {}".format(','.join(order))
        return [cls.row(fields, vals) for vals in db.query(sql, tuple(where.values()))]
    @classmethod
    def get(cls, db, fields=None, where={}):
        """Performs a query to the related table, returning one or zero
        results. This should be only used when filtering by unique fields,
        or when returning a single random record is not a problem."""
        result = cls.query(db, fields, where)
        return result[0] if result else None
    @classmethod
    def update(cls, db, fields, where):
        """Updates all records in the database matching where conditions
        with the values specified in fields."""
        from collections import OrderedDict
        fields = OrderedDict(fields)
        where = OrderedDict(where)
        sql = "UPDATE {} ".format(cls.table_name())
        sql += "SET "
        sql += ", ".join(map(lambda x: x + " = ?", fields.keys()))
        # leading space added: the original concatenated "?WHERE", relying
        # on the SQL tokenizer to split it
        sql += " WHERE "
        sql += " AND ".join(map(lambda x: x + " = ?", where.keys()))
        db.execute(sql, tuple(fields.values()) + tuple(where.values()))
    @classmethod
    def delete(cls, db, where):
        """Deletes all rows matching the specified criteria from the related
        table."""
        from collections import OrderedDict
        where = OrderedDict(where)
        sql = "DELETE FROM {} WHERE ".format(cls.table_name())
        sql += " AND ".join(map(lambda x: x + " = ?", where.keys()))
        db.execute(sql, tuple(where.values()))
| timypcr/viri | libviri/viriorm.py | viriorm.py | py | 6,268 | python | en | code | 0 | github-code | 13 |
1286794007 | import matplotlib.pyplot as plt
import numpy as np
class Glove(object):
    """Plain-numpy GloVe trainer operating on a dense co-occurrence matrix."""
    def __init__(self, tokens, coocurrence_matrix, word_dimensions=100, x_max=100, alpha=0.75, learning_rate=0.05):
        self.tokens = tokens
        # note that for the cooccurrence matrix you will probably use a sparse matrix
        # gist here: https://gist.github.com/raphaelgyory/dae3ad9afbbfc0591f653844ca77df5b
        self.X = coocurrence_matrix
        self.learning_rate = learning_rate
        # initialize the word vector
        # note that the papers uses only two arrays (one for the words and one for the vectors) and doubles its size
        self.W = (np.random.rand(self.X.shape[0], word_dimensions) - 0.5) / float(word_dimensions)
        self.W_context = (np.random.rand(self.X.shape[0], word_dimensions) - 0.5) / float(word_dimensions)
        self.biases = (np.random.rand(self.X.shape[0]) - 0.5) / float(word_dimensions)
        self.biases_context = (np.random.rand(self.X.shape[0]) - 0.5) / float(word_dimensions)
        # gradients (adagrad accumulators, initialised to ones)
        self.W_gradients = np.ones(shape=self.W.shape, dtype=np.float64)
        self.W_context_gradients = np.ones(shape=self.W_context.shape, dtype=np.float64)
        self.biases_gradients = np.ones(shape=self.biases.shape, dtype=np.float64)
        self.biases_context_gradients = np.ones(shape=self.biases_context.shape, dtype=np.float64)
        # calculate the weight f(Xij)
        self.weights = np.where(self.X < x_max, (self.X/x_max)**alpha, 1)
        # NOTE(review): np.log emits a divide-by-zero RuntimeWarning for the
        # zero entries of X before the -inf values are patched to 0 below.
        self.logXij = np.log(self.X)
        self.logXij = np.where(self.logXij==-np.inf, 0, self.logXij)
        # costs
        self.costs_history = []
    def train(self, epochs=50):
        """Run *epochs* full adagrad passes, recording the cost J after each."""
        # iterate
        for i in range(epochs):
            # calculate the unweighted cost (used to compute the gradients)
            unweighted_costs = self.get_unweighted_costs()
            # compute the gradients
            gradients_word, gradients_context, gradients_word_bias, gradients_context_bias = self.get_gradients(unweighted_costs)
            # adagrad
            self.adagrad(gradients_word, gradients_context, gradients_word_bias, gradients_context_bias)
            # update gradients
            self.update_gradients(gradients_word, gradients_context, gradients_word_bias, gradients_context_bias)
            # calculate costs
            J = self.get_cost(unweighted_costs)
            self.costs_history.append(J)
    def get_unweighted_costs(self):
        # calculate the cost (wiT wj + bi + bj -log Xij)
        # self.W @ self.W_context.T is the dot products of each word vector and each context vector;
        # its shape is (vocab, vocab)
        # self.biases and self.biases_context are vectors (self.W.shape[0], )
        # broadcasting will be performed by numpy so they match the dimension of the dot products
        # np.log returns the log of each member of the cooccurrence matrix
        return self.W @ self.W_context.T + self.biases + self.biases_context - self.logXij
    def get_cost(self, unweighted_costs):
        """Total weighted squared error J summed over all i,j pairs."""
        J = np.sum(np.sum(self.weights * unweighted_costs**2, axis=1), axis=0)
        return J
    def get_gradients(self, unweighted_costs):
        """Return gradients for (W, W_context, biases, biases_context).

        NOTE(review): the weighted costs are summed over axis=0 before the
        products, which aggregates over one matrix dimension -- confirm this
        matches the intended per-pair GloVe update.
        """
        weighted_costs = np.sum(self.weights * unweighted_costs, axis=0)
        return (2 * weighted_costs[:,None] * self.W_context, # gradients_word
                2 * weighted_costs[:,None] * self.W, # gradients_context
                2 * weighted_costs, # gradients_word_bias
                2 * weighted_costs) # gradients_context_bias
    def adagrad(self, gradients_word, gradients_context, gradients_word_bias, gradients_context_bias):
        """Apply one adagrad step: each gradient is scaled by the root of its
        accumulated squared-gradient history."""
        self.W -= self.learning_rate * gradients_word / np.sqrt(self.W_gradients)
        self.W_context -= self.learning_rate * gradients_context / np.sqrt(self.W_context_gradients)
        self.biases -= self.learning_rate * gradients_word_bias / np.sqrt(self.biases_gradients)
        self.biases_context-= self.learning_rate * gradients_context_bias / np.sqrt(self.biases_context_gradients)
    def update_gradients(self, gradients_word, gradients_context, gradients_word_bias, gradients_context_bias):
        """Accumulate squared gradients for the adagrad denominators."""
        self.W_gradients += gradients_word**2
        self.W_context_gradients += gradients_context**2
        self.biases_gradients += gradients_word_bias**2
        self.biases_context_gradients += gradients_context_bias**2
28250116416 | # https://www.programmingexpert.io/programming-fundamentals/assessment/4
def get_n_longest_unique_words(words, n):
# print(words)
# print(n)
valid_words = []
for word in words:
if words.count(word) > 1: #O(len(words))T
continue
sortByLength(valid_words, word)
# print(valid_words)
longest_words = []
while n > 0:
n -= 1
longest_word = valid_words.pop()
longest_words.append(longest_word)
return longest_words
def sortByLength(lst, word):
    """Insert *word* into *lst* in place, keeping the list sorted by
    ascending word length; words of equal length stay in insertion order."""
    i = len(lst)
    # scan backwards past every word strictly longer than the new one
    while i > 0 and len(lst[i - 1]) > len(word):
        i -= 1
    lst.insert(i, word)
# in operator is also O(n)T | avk-ho/programming_exp | python/assessments/programming_fundamentals/longest_unique_words.py | longest_unique_words.py | py | 837 | python | en | code | 0 | github-code | 13 |
71984715857 | #SWEA 9489번 고대 유적
'''
https://swexpertacademy.com/main/talk/solvingClub/problemView.do?solveclubId=AYXI5IoKVCoDFAQK&contestProbId=AXAd8-d6MRoDFARP&probBoxId=AYYK3r76yQwDFARc&type=USER&problemBoxTitle=%EC%97%B0%EC%8A%B5%28%EC%B6%94%EC%B2%9C%EB%AC%B8%EC%A0%9C%29&problemBoxCnt=19
접근 방법
1. 전체리스트에 패딩을 추가한다
2. 1을 만났을 때 좌우로 1이 또 있다면 그 방향에서 가장 끝으로 가서 반대쪽 끝으로 가면서 그 줄의 길이를 잰다
3. 1을 만났을 때 상하로 1이 있다면 그 방향에서 가장 끝으로 가서 그 반대쪽 끝으로 가면서 그 줄의 길이를 잰다
4. 최대로 긴 것을 미리 뽑아낸다
5. 모든 1에 대해서 같은 처리를 한다.
'''
import sys
sys.stdin = open("input.txt", "r")
T = int(input())
for Test_case in range(1, T+1):
cnt = 0
N, M = map(int, input().split()) # N = 세로(줄 수) M = 가로 줄당 개수
ground = [[0] + list(map(int, input().split())) + [0] for _ in range(N)] # 상하좌우 패딩 추가된 유적지
ground = [[0] * (M+2)] + ground + [[0] * (M+2)]
max_length = 0
for i in range(1, N+1):
for j in range(1, M+1):
if ground[i][j] == 1:
if ground[i+1][j] == 1 or ground[i-1][j] == 1: # 좌우로 1이 있을 때
ti = i # 임시 좌표 설정
tj = j
length = 0 # 길이 늘 0으로 초기화
while ground[ti][tj] != 0: # 0이 아닐 동안 (0을 만날 때까지)
ti -= 1 # 가장 왼쪽 끝으로 간다
ti += 1 # 0의 위치 바로 오른쪽 한칸 이동
while ground[ti][tj] != 0: # 0이 아닐 동안
ti += 1 # 가장 오른쪽 끝으로 간다
length += 1 # 가는 동안 길이 하나씩 추가
if max_length < length: # 최대 길이인지 확인
max_length = length
if ground[i][j+1] == 1 or ground[i][j - 1] == 1: # 위아래로 1이 있을 때, 위의 과정 반복
ti = i
tj = j
length = 0
while ground[ti][tj] != 0:
tj -= 1
tj += 1
while ground[ti][tj] != 0:
tj += 1
length += 1
if max_length < length:
max_length = length
print(f'#{Test_case} {max_length}')
| euneuneunseok/TIL | SWEA/SWEA_9489_고대유적.py | SWEA_9489_고대유적.py | py | 2,898 | python | ko | code | 0 | github-code | 13 |
def romanToInt(s):
    """Convert a Roman numeral string to its integer value.

    Scans the numeral from right to left; a symbol is subtracted when the
    symbol processed just before it (i.e. to its right) lists it as a
    valid subtractive prefix, otherwise it is added.
    """
    values = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
    subtract_before = {'V': 'I', 'X': 'I', 'L': 'X', 'C': 'X', 'D': 'C', 'M': 'C', 'Z': '', 'I': ''}
    total = 0
    previous = 'Z'  # sentinel for "no symbol seen yet"
    for ch in reversed(s):
        if ch == subtract_before[previous]:
            total -= values[ch]
        else:
            total += values[ch]
        previous = ch
    return total
s = "MCMXCIV"
print(romanToInt(s)) | ilnazzia/experiments | python_leetcode/0_not_defined/13_easy_2023-05-16.py | 13_easy_2023-05-16.py | py | 429 | python | en | code | 0 | github-code | 13 |
41071625006 | from django.conf import settings
from django.conf.urls import patterns, include, url
from django.contrib import admin
from registration.backends.simple.views import RegistrationView
from beerbookapp.models import UserProfile
# Create a new class that redirects the user to the index page, if successful at logging
class MyRegistrationView(RegistrationView):
    """Registration view that also builds the user's profile from the
    submitted form data and then redirects to the beerbook index.

    NOTE(review): creating and saving the profile inside get_success_url is
    a side effect in what is normally a pure URL hook; confirm this is the
    intended integration point with django-registration.
    """
    def get_success_url(self,request, user):
        profile = UserProfile(user=user)
        # copy the optional profile/user fields from the registration form
        if 'website' in request.POST:
            profile.website = request.POST["website"]
        if 'picture' in request.FILES:
            profile.picture = request.FILES["picture"]
        if 'first_name' in request.POST:
            user.first_name = request.POST["first_name"]
        if 'last_name' in request.POST:
            user.last_name = request.POST["last_name"]
        if 'bio' in request.POST:
            profile.bio = request.POST["bio"]
        user.save()
        profile.save()
        return '/beerbook/'
# URLconf in the pre-Django-1.10 patterns() style: admin, the beerbook app,
# a custom registration view, then the stock django-registration URLs.
urlpatterns = patterns('',
                       # Examples:
                       # url(r'^$', 'BeerBook.views.home', name='home'),
                       # url(r'^blog/', include('blog.urls')),
                       url(r'^admin/', include(admin.site.urls)),
                       url(r'^beerbook/', include('beerbookapp.urls')),
                       url(r'^accounts/register/$', MyRegistrationView.as_view(), name='registration_register'),
                       (r'^accounts/', include('registration.backends.simple.urls')),
)
if settings.DEBUG:
urlpatterns += patterns(
'django.views.static',
(r'^media/(?P<path>.*)',
'serve',
{'document_root': settings.MEDIA_ROOT}), ) | enzoroiz/beerbook | beerbook/urls.py | urls.py | py | 1,588 | python | en | code | 1 | github-code | 13 |
38615816406 | from importlib import import_module
from threading import Event
from DataManager import executioner
import queue
import datetime
from DBmanager import measurement, localdb
class Node:
    """A node that owns a set of devices plus a periodical measurement."""
    def __init__(self, data):
        """
        :param data: check documentation.txt for syntax
        """
        self.devices = {}  # keep a dict of all devices on node
        self.node_id = data['node_id']
        self.experiment_details = data['experiment_details']
        self.measurement = measurement.PeriodicalMeasurement(self.node_id, self.devices, self.experiment_details)

    def initiate_device(self, device_data):
        """
        :param device_data: check documentation.txt for syntax
        :return: 1 if successfully initialized, 0 otherwise
        """
        device_type = device_data.get('device_type')
        # `is None` replaces `== None`: identity is the correct idiom for
        # the None check. Refuse duplicates and missing device types.
        if device_type in self.devices or device_type is None:
            return 0
        device_data['node_id'] = self.node_id
        device = Device(device_data, self.experiment_details)
        self.devices[device_type] = device
        device.checker.start()  # start the queue checker
        if 'initial_commands' in device_data['setup']:
            for cmd in device_data['setup']['initial_commands']:  # execute the initial commands
                device.accept_command(cmd)
        return 1

    def accept_command(self, cmd):
        """
        Processes the commands that affect all the devices on a certain node
        (currently used only to change the interval of periodical
        measurements).
        :param cmd: dictionary, check documentation.txt
        :return: None
        """
        processed = (
            cmd.get('time', (datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))),
            cmd['cmd_id'],
            cmd['args'],
            cmd.get('source', 'internal')
        )
        self.measurement.execute_cmd(*processed)  # pass the command

    def end_node(self):
        """
        Ends all devices on the node. End the periodical measurement on the node.
        :return: None
        """
        for device in self.devices:
            self.devices[device].end()
        self.measurement.end()

    def end_device(self, device):
        """
        End a certain device on the node.
        :param device: str device_type
        :return: None
        """
        self.devices[device].end()
        self.devices.pop(device)
class Device:
    def __init__(self, data, experimental_details):
        """
        One physical device attached to a node; commands are queued here and
        executed asynchronously by an `executioner.Checker` worker.

        :param data: dictionary, check documentation.txt
        :param experimental_details: experiment metadata forwarded to the Checker
        """
        self.data = data
        self.experimental_details = experimental_details
        self.device_type = data['device_type']
        self.device_class = data['device_class']
        self.device_id = data['device_id']
        self.thread_name = str(data['node_id']) + '-' + self.device_class + '-' + self.device_type
        # NOTE(review): device_id is assigned twice; the first line makes
        # 'device_id' effectively mandatory, defeating the .get() fallback
        # below - confirm which behavior is intended.
        self.device_id = data.get('device_id', self.thread_name)
        self.q = queue.Queue()  # Queue object - all commands stack here awaiting execution
        self.q_new_item = Event()  # Event object - notifies that a new command has been added to queue
        # Worker that pops and executes queued commands (started by Node.initiate_device)
        self.checker = executioner.Checker(self.q, self.q_new_item, self.data,
                                           self.thread_name, self.experimental_details)

    def accept_command(self, cmd):
        """
        Normalize a command into the positional list the Checker expects and
        enqueue it on this device's queue.

        :param cmd: dict, check documentation.txt
        :return: None
        """
        # Positional protocol: [time, node_id, device_type, cmd_id, args, source]
        processed = [
            cmd.get('time', (datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))),
            self.data['node_id'],
            self.device_type,
            cmd['cmd_id'],
            cmd['args'],
            cmd.get('source', 'internal')
        ]
        # Special case: PBR command 19 is rewritten to target the configured pump
        if self.device_type == 'PBR' and processed[3] == 19:
            processed[4] = str([0, self.data['setup']['pump_id']])
        self.q.put(processed)  # put it to queue first ...
        self.q_new_item.set()  # ... then wake the checker (order matters)

    def end(self):
        """
        Puts False into the queue, which triggers the checker to exit.

        :return: None
        """
        self.q.put(False)
        self.q_new_item.set()
| SmartBioTech/PBRcontrol | DataManager/datamanager.py | datamanager.py | py | 4,325 | python | en | code | 1 | github-code | 13 |
class TreeNode:
    """Binary tree node holding a value and left/right child links."""
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


class Solution:
    def buildTree(self, preorder, inorder):
        """
        Reconstruct a binary tree from its preorder and inorder traversals.

        Assumes node values are unique (the original relied on `list.index`,
        which makes the same assumption).  Runs in O(n) by indexing inorder
        positions once, instead of the previous O(n^2) repeated
        `list.index` + list slicing.

        :param preorder: list of node values in preorder
        :param inorder: list of node values in inorder
        :return: root TreeNode, or None for empty input
        """
        # Map each value to its inorder position for O(1) root lookups
        inorder_index = {val: i for i, val in enumerate(inorder)}
        pre_pos = 0  # next unconsumed element of `preorder`

        def build(lo, hi):
            # Build the subtree covering inorder[lo:hi+1]; empty range -> None
            nonlocal pre_pos
            if lo > hi:
                return None
            root_val = preorder[pre_pos]
            pre_pos += 1
            node = TreeNode(root_val)
            split = inorder_index[root_val]
            node.left = build(lo, split - 1)
            node.right = build(split + 1, hi)
            return node

        return build(0, len(inorder) - 1)
h = {}  # value -> inorder index; rebuilt on every buildTree call


class Solution2:
    # @param A : list of integers (preorder traversal)
    # @param B : list of integers (inorder traversal)
    # @return the root node in the tree
    def buildTree(self, A, B):
        """
        Reconstruct a binary tree from preorder (A) and inorder (B) traversals.

        BUGFIX: the module-level map `h` was never cleared, so a second call
        with a different tree could still see stale value->index entries from
        the previous tree.  It is now emptied before being rebuilt.
        """
        global h
        n = len(A)
        h.clear()  # drop mappings from any previous tree
        for i in range(n):
            h[B[i]] = i
        return self.build(A, B, 0, n - 1, 0, n - 1)

    def build(self, A, B, ps, pe, ist, ie):
        """Recursively build the subtree for preorder[ps:pe+1] / inorder[ist:ie+1]."""
        if ps > pe:
            return None
        val = A[ps]
        node = TreeNode(val)
        indx = h[val]  # position of the root value in the inorder list
        # The first (indx - ist) preorder values after ps form the left subtree
        node.left = self.build(A, B, ps + 1, ps + indx - ist, ist, indx - 1)
        node.right = self.build(A, B, ps + indx - ist + 1, pe, indx + 1, ie)
        return node
| sundar91/dsa | Tree/bt-from-inorder-preorder.py | bt-from-inorder-preorder.py | py | 1,348 | python | en | code | 0 | github-code | 13 |
7012789785 | import arcpy
# Reproject Beijing_Links.shp into the CRS of an existing reference shapefile.
try:
    # Output coordinate system: borrow the spatial reference of a reference layer
    template = arcpy.Describe("../result/20080829112151_polyline.shp")
    out_coord_sys = template.spatialReference
    # Run the Project tool to reproject the links shapefile into that CRS
    in_shp = "C:\\Users\\user\\Documents\\master_project_code\\result\\Beijing_Links.shp"
    out_shp = "C:\\Users\\user\\Documents\\master_project_code\\result\\Beijing_4326.shp"
    arcpy.Project_management(in_shp, out_shp, out_coord_sys)
except arcpy.ExecuteError:
    # Geoprocessing failure: report the tool's error messages (severity 2)
    print(arcpy.GetMessages(2))
except Exception as ex:
    # Any other failure: report the first exception argument
    print(ex.args[0])
7017376132 | import numpy as np
import json
from scipy.interpolate import interp1d,interp2d
from scipy.special import beta as bfunc
from scipy.special import erf
from astropy.cosmology import Planck15
import astropy.units as u
import sys
sys.path.append('./../code/')
from getData import *
from utilities import calculate_gaussian_2D
import matplotlib.pyplot as plt
def read_lvk_plpeak_data():
    """
    Load hyperposterior samples from the LVK O3b population analysis that used
    the PL+Peak mass model and Default spin model.

    Returns
    -------
    lvk_results : dict
        Dictionary of numpy arrays of hyperposterior samples, keyed by the
        parameter names used throughout this module.
    """
    # Read the gwpopulation result file and grab the posterior sample table
    with open('./../input/o1o2o3_mass_c_iid_mag_iid_tilt_powerlaw_redshift_result.json','r') as jf:
        content = json.load(jf)['posterior']['content']

    # (output key, raw column name) pairs, in the order the result dict is built
    column_pairs = [
        ('alpha', 'alpha'),       # primary-mass power-law slope
        ('mMax', 'mmax'),
        ('mMin', 'mmin'),
        ('fPeak', 'lam'),
        ('mu_m1', 'mpp'),
        ('sig_m1', 'sigpp'),
        ('delta_m', 'delta_m'),
        ('bq', 'beta'),           # mass-ratio power-law index
        ('kappa', 'lamb'),        # redshift evolution
        ('Rtot', 'rate'),
        ('mu_chi', 'mu_chi'),
        ('sig_chi', 'sigma_chi'),
        ('sig_cost', 'sigma_spin'),
        ('f_aligned', 'xi_spin'),
    ]

    lvk_results = {}
    for out_key, raw_key in column_pairs:
        samples = np.array(content[raw_key])
        # Remember that `sigma_chi` as computed by gwpopulation is actually the
        # *variance* of the spin magnitude distribution, so take its square root
        if out_key == 'sig_chi':
            samples = np.sqrt(samples)
        lvk_results[out_key] = samples
    return lvk_results
def read_lvk_gaussian_spin_data():
    """
    Load hyperposterior samples from the LVK O3b population analysis that used
    a (variant of the) PL+Peak mass model and Gaussian effective spin model.

    Returns
    -------
    lvk_results : dict
        Dictionary of numpy arrays of hyperposterior samples.
    """
    # Parse the stored posterior sample file
    with open('../input/posteriors_gaussian_spin_samples_FAR_1_in_1.json','r') as jf:
        raw = json.load(jf)

    # Rename raw sample columns to the keys used throughout this module
    column_pairs = [
        ('bq', 'beta_q'),
        ('mMax', 'mMax'),
        ('kappa', 'kappa'),
        ('alpha', 'lmbda'),
        ('mu_m1', 'm0'),
        ('sig_m1', 'sigM'),
        ('f_peak', 'peak_fraction'),
        ('chiEff_mean', 'chiEff_mean'),
        ('chiEff_std', 'chiEff_std'),
        ('chiP_mean', 'chiP_mean'),
        ('chiP_std', 'chiP_std'),
        ('rho', 'rho_chiEff_chiP'),
    ]
    return {out_key: np.array(raw[raw_key]) for out_key, raw_key in column_pairs}
def plpeak_m1_q(alpha,mMax,mMin,fPeak,mu_m1,sig_m1,delta_m,bq,kappa,R,npts):
    """
    Helper function that computes the merger rate over a grid of primary masses and mass ratios, according to
    a PowerLaw+Peak mass model

    Inputs
    ------
    alpha : float
        Slope on the "power law" piece of the Power-Law+Peak primary mass model
    mMax : float
        Maximum black hole mass
    mMin : float
        Minimum black hole mass
    fPeak : float
        Fraction of primaries occupying the "peak" part of the Power-Law+Peak primary mass model
    mu_m1 : float
        Mean location of the primary mass peak
    sig_m1 : float
        Standard deviation of the primary mass peak
    delta_m : float
        Smoothing length over which the primary mass distribution "turns on" above `mMin`
    bq : float
        Power-law index governing the mass ratio distribution
    kappa : float
        Power-law index governing the (1+z)^kappa redshift evolution of the rate
    R : float
        Overall merger rate (integrated across masses); note that the returned
        densities are referenced to z=0.2 via the (1+0.2)^kappa factor applied below
    npts : int
        Number of grid points to use when constructing primary mass and mass ratio grid

    Returns
    -------
    m1_grid : np.array
        An array of primary mass grid points (length npts, spanning 2-100)
    q_grid : np.array
        An array of mass ratio grid points (length npts+1, spanning 0-1)
    dR_dm1_dq : np.array
        2D array of merger rate densities `dR/(dm1*dq)` defined across `m1_grid` and `q_grid`,
        with shape (npts+1, npts): axis 0 runs over q, axis 1 over m1
    """
    # Define primary mass and mass ratio grid
    # Make grids slightly different sizes to disambiguate dimensions
    m1_grid = np.linspace(2,100,npts)
    q_grid = np.linspace(0,1,npts+1)
    M,Q = np.meshgrid(m1_grid,q_grid)
    # Primary mass probability distribution
    # Start by defining normalized power-law and gaussian components
    pl = (1.-alpha)*M**(-alpha)/(mMax**(1.-alpha) - mMin**(1.-alpha))
    pl[M<mMin] = 0
    pl[M>mMax] = 0
    peak = np.exp(-(M-mu_m1)**2/(2.*sig_m1**2))/np.sqrt(2.*np.pi*sig_m1**2)
    # Identify masses at which smoothing will be applied
    smoothing = np.ones(M.shape)
    to_smooth = (M>mMin)*(M<mMin+delta_m)
    # Then define and apply smoothing factor
    smoothing[to_smooth] = 1./(np.exp(delta_m/(M[to_smooth]-mMin) + delta_m/(M[to_smooth]-mMin-delta_m))+1.)
    smoothing[M<mMin] = 0
    p_m1_unnormed = (fPeak*peak + (1.-fPeak)*pl)*smoothing
    # Similarly identify (m1,q) gridpoints for which smoothing should be applied on m2
    # Define the corresponding smoothing factor
    q_smoothing = np.ones(Q.shape)
    to_smooth = (M*Q>mMin)*(M*Q<mMin+delta_m)
    q_smoothing[to_smooth] = 1./(np.exp(delta_m/((Q*M)[to_smooth]-mMin) + delta_m/((Q*M)[to_smooth]-mMin-delta_m))+1.)
    q_smoothing[(M*Q)<mMin] = 0
    # Define mass ratio distribution, including smoothing
    # (power law in q, conditioned on m1 so that m2 = q*m1 stays above mMin)
    p_q_unnormed = Q**bq/(1.-(mMin/M)**(1.+bq))*q_smoothing
    p_q_unnormed[Q<(mMin/M)] = 0.
    # Normalize the conditional p(q|m1)
    # Occasionally we run into trouble normalizing p(q|m1) when working with m1 values sufficiently small that p(m1)=p(q|m1)=0 for all q
    # In this case, overwrite and set p(q|m1)=0
    p_q = p_q_unnormed/np.trapz(p_q_unnormed,q_grid,axis=0)
    p_q[p_q!=p_q] = 0
    # Combine primary mass and mass ratio distributions and normalize over m1
    # (Riemann-sum normalization over the full 2D grid)
    p_m1_q_unnormed = p_m1_unnormed*p_q
    p_m1_q = p_m1_q_unnormed/(np.sum(p_m1_q_unnormed)*(m1_grid[1]-m1_grid[0])*(q_grid[1]-q_grid[0]))
    # Scale by total rate at z=0.2 and return
    dR_dm1_dq = R*(1.+0.2)**kappa*p_m1_q
    return m1_grid,q_grid,dR_dm1_dq
def default_spin(mu_chi, sig_chi, sig_cost, npts):
    """
    Evaluate the `Default` component-spin model on regular grids.

    Spin magnitudes follow a Beta distribution; spin tilts are modeled as a
    mixture of an isotropic component and a truncated Gaussian peaked at
    alignment (cos(tilt) = 1).

    Inputs
    ------
    mu_chi : float
        Mean of the Beta spin-magnitude distribution
    sig_chi : float
        Standard deviation of the Beta spin-magnitude distribution
    sig_cost : float
        Standard deviation of the preferentially-aligned tilt subpopulation
    npts : int
        Number of spin-magnitude grid points (the tilt grid gets npts+1)

    Returns
    -------
    chi_grid : np.array
        Spin magnitude grid on [0, 1]
    cost_grid : np.array
        cos(tilt) grid on [-1, 1]
    p_chi : np.array
        Beta probability density over `chi_grid`
    p_cost_peak : np.array
        Normalized aligned-subpopulation density over `cost_grid`
    p_cost_iso : np.array
        Isotropic (uniform) density over `cost_grid`.  The full tilt
        distribution is `f_iso*p_cost_iso + (1-f_iso)*p_cost_peak` for some
        mixture fraction `f_iso`.
    """
    # Evaluation grids (different lengths help disambiguate array axes)
    chi_grid = np.linspace(0, 1, npts)
    cost_grid = np.linspace(-1, 1, npts + 1)

    # Convert (mean, std) of the Beta distribution into its shape parameters
    shape_sum = mu_chi * (1. - mu_chi) / sig_chi**2 - 1.
    a_shape = mu_chi * shape_sum
    b_shape = (1. - mu_chi) * shape_sum

    # Beta density over spin magnitude
    p_chi = chi_grid**(a_shape - 1.) * (1. - chi_grid)**(b_shape - 1.) / bfunc(a_shape, b_shape)

    # Gaussian centered on cos(tilt)=1, truncated to [-1, 1] and renormalized
    gauss = np.exp(-(cost_grid - 1.)**2 / (2. * sig_cost**2)) / np.sqrt(2. * np.pi * sig_cost**2)
    trunc_norm = erf(0.) / 2. - erf(-2. / np.sqrt(2. * sig_cost**2)) / 2.
    p_cost_peak = gauss / trunc_norm

    # Isotropic component: uniform density on [-1, 1]
    p_cost_iso = np.full(cost_grid.size, 0.5)

    return chi_grid, cost_grid, p_chi, p_cost_peak, p_cost_iso
def get_lvk_z(nTraces,m1_ref=20,nGridpoints=500):
    """
    Draw `nTraces` LVK PL+Peak hyperposterior samples and trace out the merger
    rate per log primary mass, at reference mass `m1_ref`, as a function of
    redshift.

    Parameters
    ----------
    nTraces : int
        Number of hyperposterior samples to draw (without replacement)
    m1_ref : float
        Primary mass at which the rate is referenced
    nGridpoints : int
        Resolution of the internal mass/ratio grids and the output z grid

    Returns
    -------
    z_grid : np.array
        Redshift grid on [0, 2] (length nGridpoints)
    R_zs : np.array
        Array of shape (nTraces, nGridpoints) of rate traces over `z_grid`
    """
    # Get posterior samples
    lvk_data = read_lvk_plpeak_data()
    z_grid = np.linspace(0,2,nGridpoints)
    R_zs = np.zeros((nTraces,nGridpoints))
    random_inds = np.random.choice(np.arange(lvk_data['alpha'].size),nTraces,replace=False)
    for i in range(nTraces):
        ind = random_inds[i]
        m1_grid,q_grid,R_m1_q = plpeak_m1_q(lvk_data['alpha'][ind],
                                        lvk_data['mMax'][ind],
                                        lvk_data['mMin'][ind],
                                        lvk_data['fPeak'][ind],
                                        lvk_data['mu_m1'][ind],
                                        lvk_data['sig_m1'][ind],
                                        lvk_data['delta_m'][ind],
                                        lvk_data['bq'][ind],
                                        lvk_data['kappa'][ind],
                                        lvk_data['Rtot'][ind],
                                        nGridpoints)
        # Convert to merger rate per log mass
        R_lnm1_q = R_m1_q*m1_grid[np.newaxis,:]
        #R_lnm1 = np.trapz(R_lnm1_q,q_grid,axis=0)
        # Take the q=1 slice (last row of the q grid) rather than marginalizing
        # over q as the commented line above did
        R_lnm1 = R_lnm1_q[-1,:]
        # Interpolate to reference points
        R_z02_interpolator = interp1d(m1_grid,R_lnm1)
        R_z02_ref = R_z02_interpolator(m1_ref)
        #R_z02_ref = np.sum(R_m1_q)*(m1_grid[1]-m1_grid[0])*(q_grid[1]-q_grid[0])
        # Rescale over z grid; R_z02_ref is referenced at z=0.2, hence the denominator
        R_zs[i,:] = R_z02_ref*(1.+z_grid)**lvk_data['kappa'][ind]/(1.+0.2)**lvk_data['kappa'][ind]
    return z_grid,R_zs
def get_lvk_componentSpin(nTraces,m1_ref=20,q_ref=1,nGridpoints=500):
    """
    Draw `nTraces` LVK PL+Peak / Default-spin hyperposterior samples and trace
    the merger rate over component-spin grids, referenced to a fixed mass point.

    Parameters
    ----------
    nTraces : int
        Number of hyperposterior samples to draw (without replacement)
    m1_ref : float
        Primary mass at which dR/dlnm1/dq is referenced
    q_ref : float
        Mass ratio at which dR/dlnm1/dq is referenced
    nGridpoints : int
        Grid resolution for the mass, spin magnitude, and tilt grids

    Returns
    -------
    chi_grid : np.array
        Spin magnitude grid (length nGridpoints)
    cost_grid : np.array
        cos(tilt) grid (length nGridpoints+1)
    R_chi1_chi2 : np.array
        (nTraces, nGridpoints) rates vs chi1=chi2=chi, evaluated at cost1=cost2=1
    R_cost1_cost2 : np.array
        (nTraces, nGridpoints+1) rates vs cost1=cost2, evaluated at chi1=chi2=0.1
    p_chis : np.array
        Marginal spin magnitude densities, one row per trace
    p_costs : np.array
        Marginal cos(tilt) densities (mixture of aligned + isotropic), one row per trace
    """
    lvk_data = read_lvk_plpeak_data()
    R_chi1_chi2 = np.zeros((nTraces,nGridpoints))
    R_cost1_cost2 = np.zeros((nTraces,nGridpoints+1))
    p_chis = np.zeros((nTraces,nGridpoints))
    p_costs = np.zeros((nTraces,nGridpoints+1))
    random_inds = np.random.choice(np.arange(lvk_data['alpha'].size),nTraces,replace=False)
    for i in range(nTraces):
        ind = random_inds[i]
        m1_grid,q_grid,dR_dm1_dq = plpeak_m1_q(lvk_data['alpha'][ind],
                                        lvk_data['mMax'][ind],
                                        lvk_data['mMin'][ind],
                                        lvk_data['fPeak'][ind],
                                        lvk_data['mu_m1'][ind],
                                        lvk_data['sig_m1'][ind],
                                        lvk_data['delta_m'][ind],
                                        lvk_data['bq'][ind],
                                        lvk_data['kappa'][ind],
                                        lvk_data['Rtot'][ind],
                                        nGridpoints)
        # Convert to merger rate per log mass, store
        dR_dlnm1_dq = dR_dm1_dq*m1_grid[np.newaxis,:]
        # Extract rate at reference points
        R_interpolator = interp2d(m1_grid,q_grid,dR_dlnm1_dq)
        dR_dlnm1_dq_ref = R_interpolator(m1_ref,q_ref)
        # Get spin distribution data
        chi_grid,cost_grid,p_chi,p_cost_peak,p_cost_iso = default_spin(lvk_data['mu_chi'][ind],lvk_data['sig_chi'][ind],lvk_data['sig_cost'][ind],nGridpoints)
        # Evaluate merger rate at chi1=chi2=chi, cost1=cost2=1
        p_cost1_cost2_1 = lvk_data['f_aligned'][ind]*p_cost_peak[-1]**2 + (1.-lvk_data['f_aligned'][ind])*p_cost_iso[-1]**2
        p_chi1_chi2 = p_chi**2
        R_chi1_chi2[i,:] = dR_dlnm1_dq_ref*p_cost1_cost2_1*p_chi1_chi2
        # Evaluate merger rate at chi1=chi2=0.1, cost1=cost2
        p_cost1_cost2 = lvk_data['f_aligned'][ind]*p_cost_peak**2 + (1.-lvk_data['f_aligned'][ind])*p_cost_iso**2
        p_chi1_chi2_01 = np.interp(0.1,chi_grid,p_chi)**2
        R_cost1_cost2[i,:] = dR_dlnm1_dq_ref*p_cost1_cost2*p_chi1_chi2_01
        # Store marginal component spin probability distributions
        p_chis[i,:] = p_chi
        p_costs[i,:] = lvk_data['f_aligned'][ind]*p_cost_peak + (1.-lvk_data['f_aligned'][ind])*p_cost_iso
    return chi_grid,cost_grid,R_chi1_chi2,R_cost1_cost2,p_chis,p_costs
def get_lvk_m1_q(nTraces, nGridpoints=500):
    """
    Draw `nTraces` random LVK PL+Peak hyperposterior samples and evaluate the
    merger rate density dR/(dm1 dq) for each on a common (m1, q) grid.

    Parameters
    ----------
    nTraces : int
        Number of hyperposterior samples to draw (without replacement)
    nGridpoints : int
        Grid resolution passed through to `plpeak_m1_q`

    Returns
    -------
    m1_grid : np.array
        Primary mass grid (length nGridpoints)
    q_grid : np.array
        Mass ratio grid (length nGridpoints+1)
    R_m1s_qs : np.array
        Rate densities, shape (nTraces, nGridpoints+1, nGridpoints)
    """
    lvk_data = read_lvk_plpeak_data()
    R_m1s_qs = np.zeros((nTraces, nGridpoints + 1, nGridpoints))
    # Randomly select hyperposterior samples without replacement
    chosen = np.random.choice(np.arange(lvk_data['alpha'].size), nTraces, replace=False)
    # Hyperparameters in the positional order plpeak_m1_q expects
    hyper_keys = ('alpha', 'mMax', 'mMin', 'fPeak', 'mu_m1',
                  'sig_m1', 'delta_m', 'bq', 'kappa', 'Rtot')
    for trace, ind in enumerate(chosen):
        hyper_params = [lvk_data[key][ind] for key in hyper_keys]
        m1_grid, q_grid, R_m1_q = plpeak_m1_q(*hyper_params, nGridpoints)
        R_m1s_qs[trace, :, :] = R_m1_q
    return m1_grid, q_grid, R_m1s_qs
def get_lvk_gaussian_spin():
    """
    Resample the merger rate and effective-spin distribution from the LVK
    Gaussian-spin hyperposterior.

    For every hyperposterior sample this (1) recomputes the detection
    efficiency `xi` by reweighting the injection set, (2) draws a total
    intrinsic event count from its posterior (log-probability
    nObs*log(xi*N) - xi*N, Poisson in the expected number of detections) via
    inverse-CDF sampling, and (3) converts it into a reference rate at
    m1 = 20, q = 1, z = 0.2, per log mass, alongside the normalized 2D
    Gaussian chi_eff--chi_p probability density.

    Returns
    -------
    Xeff_grid : np.array
        chi_eff grid (length 500)
    Xp_grid : np.array
        chi_p grid (length 499)
    R_refs : np.array
        Resampled reference rates, one per hyperposterior sample
    p_Xeff_Xp : np.array
        Normalized chi_eff--chi_p densities, shape (nSamples, 500, 499)
    """
    # Get posterior samples
    lvk_results = read_lvk_gaussian_spin_data()
    # Load dictionary of injections and posterior samples, which we will need in order to resample rate
    injectionDict = getInjections(sample_limit=50000,reweight=False)
    sampleDict = getSamples(sample_limit=3000,reweight=False)
    nObs = len(sampleDict)*1.
    print(injectionDict['m1'].size,"!!!")
    # Grid of dVdz values, which will be needed to compute total merger rate by integrating over redshift
    z_grid = np.arange(0.01,2.31,0.01)
    dVdz_grid = 4.*np.pi*Planck15.differential_comoving_volume(z_grid).to(u.Gpc**3/u.sr).value
    z_grid = np.concatenate([[0.],z_grid])
    dVdz_grid = np.concatenate([[0.],dVdz_grid])
    # Grid over which we will define and normalize effective spin distribution
    Xeff_grid = np.linspace(-1,1,500)
    Xp_grid = np.linspace(0,1,499)
    XEFF,XP = np.meshgrid(Xeff_grid,Xp_grid)
    p_Xeff_Xp = np.zeros((lvk_results['bq'].size,Xeff_grid.size,Xp_grid.size))
    # Instantiate array to hold resampled rates
    R_refs = np.zeros(lvk_results['bq'].size)
    # Loop across population posterior samples
    for i in range(lvk_results['bq'].size):
        # Unpack this sample's hyperparameters
        bq = lvk_results['bq'][i]
        mMax = lvk_results['mMax'][i]
        alpha = lvk_results['alpha'][i]
        mu_m1 = lvk_results['mu_m1'][i]
        sig_m1 = lvk_results['sig_m1'][i]
        f_peak = lvk_results['f_peak'][i]
        kappa = lvk_results['kappa'][i]
        mu_eff = lvk_results['chiEff_mean'][i]
        sig_eff = lvk_results['chiEff_std'][i]
        mu_p = lvk_results['chiP_mean'][i]
        sig_p = lvk_results['chiP_std'][i]
        rho = lvk_results['rho'][i]
        # Evaluate normalized probability distribution over m1 and m2 of injections
        # (Gaussian peak + power law for m1; power law down to 5 Msun for m2)
        p_inj_m1 = f_peak*np.exp(-(injectionDict['m1']-mu_m1)**2/(2.*sig_m1**2))/np.sqrt(2.*np.pi*sig_m1**2) \
                    + (1.-f_peak)*(1.+alpha)*injectionDict['m1']**alpha/(mMax**(1.+alpha) - 5.**(1.+alpha))
        p_inj_m2 = (1.+bq)*injectionDict['m2']**bq/(injectionDict['m1']**(1.+bq) - 5.**(1.+bq))
        p_inj_m1[injectionDict['m1']>mMax] = 0
        p_inj_m2[injectionDict['m2']<5.] = 0
        # Probability distribution over redshift
        # Note that we need this to be correctly normalized, and so we numerically integrate to obtain the appropriate
        # normalization constant over the range of redshifts considered
        p_z_norm = np.trapz((1.+z_grid)**(kappa-1.)*dVdz_grid,z_grid)
        p_inj_z = (1.+injectionDict['z'])**(kappa-1.)*injectionDict['dVdz']/p_z_norm
        # Finally, compute spin probability distribution
        # This is internally normalized
        p_inj_chi = calculate_gaussian_2D(injectionDict['Xeff'],injectionDict['Xp'],\
                        mu_eff,sig_eff**2,mu_p,sig_p**2,rho)
        # Overall detection efficiency (importance-sampled over the injection set)
        xi = np.sum(p_inj_m1*p_inj_m2*p_inj_z*p_inj_chi/(injectionDict['p_draw_m1m2z']*injectionDict['p_draw_chiEff_chiP']))/injectionDict['nTrials']
        # Next, draw an overall intrinsic number of events that occurred in our observation time
        # via inverse-CDF sampling on a log-spaced grid centered on nObs/xi
        #log_Ntot_grid = np.linspace(8,15,10000)
        log_Ntot_grid = np.linspace(np.log(nObs/xi)-3,np.log(nObs/xi)+3,10000)
        Ntot_grid = np.exp(log_Ntot_grid)
        logp_Ntot_grid = nObs*np.log(xi*Ntot_grid)-xi*Ntot_grid
        logp_Ntot_grid -= np.max(logp_Ntot_grid)
        p_Ntot_grid = np.exp(logp_Ntot_grid)
        p_Ntot_grid /= np.trapz(p_Ntot_grid,log_Ntot_grid)
        cdf_Ntot = np.cumsum(p_Ntot_grid)*(log_Ntot_grid[1]-log_Ntot_grid[0])
        cdf_draw = np.random.random()
        log_Ntot = np.interp(cdf_draw,cdf_Ntot,log_Ntot_grid)
        #print(np.log(nObs/xi),log_Ntot)
        R0 = np.exp(log_Ntot)/p_z_norm/2.
        #fig,ax = plt.subplots()
        #ax.plot(log_Ntot_grid,cdf_Ntot)
        #plt.savefig('{0}.pdf'.format(i))
        # Rescale to our reference values, at m1=20, q=1, z=0.2
        # Additionally multiply by m1=20 so that this is a rate per logarithmic mass,
        # rather than a direct rate per unit mass
        p_m20 = f_peak*np.exp(-(20.-mu_m1)**2/(2.*sig_m1**2))/np.sqrt(2.*np.pi*sig_m1**2) \
                    + (1.-f_peak)*(1.+alpha)*20.**alpha/(mMax**(1.+alpha) - 5.**(1.+alpha))
        p_q1 = (1.+bq)/(1. - (5./20.)**(1.+bq))
        R_refs[i] = R0*(1.+0.2)**kappa*p_m20*p_q1*20.
        # Evaluate the normalized chi_eff--chi_p density on the 2D grid
        p_spins = calculate_gaussian_2D(XEFF.reshape(-1),XP.reshape(-1),\
                        mu_eff,sig_eff**2,mu_p,sig_p**2,rho)
        p_Xeff_Xp[i,:] = np.reshape(p_spins,(Xp_grid.size,Xeff_grid.size)).T
        #print(R0,p_m20,p_q1)
    return Xeff_grid,Xp_grid,R_refs,p_Xeff_Xp
if __name__=="__main__":
#samps = read_lvk_plpeak_data()
#fPeaks = samps['plpeak_fPeaks']
get_lvk_gaussian_spin()
| tcallister/autoregressive-bbh-inference | figures/read_O3_LVK_results.py | read_O3_LVK_results.py | py | 19,728 | python | en | code | 3 | github-code | 13 |
73108801617 | # Gabriel Garcia Salvador
# Gustavo Henrique Spiess
# Leonardo Rovigo
# Sidnei Lanser
#PERGUNTAS
# Aplique seu kNN a este problema. Qual é a sua acurácia de classificação?
# R: A acuracia máxima é de 78.33% com 47 acertos e K = 10
# A acurácia pode ser igual a 98% com o kNN. Descubra por que o resultado atual é muito menor.
# Ajuste o conjunto de dados ou k de tal forma que a acurácia se torne 98% e explique o que você fez e
# por quê.
# R: Foi visto que o menor e maior valor tinham uma diferença enorme entre si então foi aplicada a normalização
# para colocar os valores entre 0 e 1 porem ainda só conseguimos uma acurácia de 91.666% com 55 acertos e K = 4
# nesse exercício foi aplicado um while para verificar qual o melhor K possível
import interface
import numpy as np
from scipy.io import loadmat
def main():
    """Load the dataset, normalize it, and grid-search k for the best kNN accuracy."""
    data_dict = loadmat('grupoDados2.mat')  # load the data into dictionaries
    dadosTeste = data_dict['grupoTest']
    dadosTrain = data_dict['grupoTrain']
    data_testRots = data_dict['testRots']
    rotuloTrain = data_dict['trainRots']

    rotuloPrevistoMax = []
    k = 1
    acuraciaMaxima = 0
    acuracia = 0

    # Normalize every test and train row; the raw features have wildly
    # different scales, which distorts kNN distances
    for r in range(len(dadosTeste)):
        dadosTeste[r] = interface.normalizacao(dadosTeste[r])
    for s in range(len(dadosTrain)):
        dadosTrain[s] = interface.normalizacao(dadosTrain[s])

    # Try every k until perfect accuracy is reached or k exceeds the train size.
    # BUGFIX: `acuracia` is a fraction in [0, 1]; the old condition
    # `acuracia != 100` could never be false, so the loop never stopped early.
    while (acuracia != 1 and k <= len(dadosTrain)):
        numCorreto = 0
        rotuloPrevisto = interface.meuKnn(dadosTrain, rotuloTrain, dadosTeste, k)  # run kNN
        for i in range(len(rotuloPrevisto)):  # count correct predictions
            if(rotuloPrevisto[i] == data_testRots[i]):
                numCorreto += 1
        totalNum = len(data_testRots)  # total number of test labels
        acuracia = numCorreto / totalNum  # fraction of correct predictions
        if(acuracia > acuraciaMaxima):  # keep the best (smallest) k found so far
            acuraciaMaxima = acuracia
            numCorretoMaximo = numCorreto
            kMelhor = k
            rotuloPrevistoMax = rotuloPrevisto
        k += 1

    interface.visualizaPontos(dadosTeste, rotuloPrevistoMax, 1, 2)
    print("Acuracia Máxima: " + str(100*acuraciaMaxima) + "%")
    print("Número de Acertos Máximo: " + str(numCorretoMaximo))
    print("Número Total: " + str(totalNum))
    print("O K melhor é: " + str(kMelhor))
    input()
if __name__ == "__main__" :
main()
| lrovigo/bcc_2019_2_IA | Trabalho_4/demoD2.py | demoD2.py | py | 2,719 | python | pt | code | 0 | github-code | 13 |
27736151666 | import os
import tkinter as tk
from tkinter import filedialog, messagebox
import pyttsx3
import speech_recognition as sr
# Function to convert voice to text
def convert_voice_to_text(audio_file_path):
    """Transcribe an audio file via Google's speech recognition API.

    Returns the recognized text, or a human-readable error string when the
    audio is unintelligible or the service cannot be reached.
    """
    speech_engine = sr.Recognizer()
    with sr.AudioFile(audio_file_path) as source:
        try:
            recorded = speech_engine.record(source)
            text = speech_engine.recognize_google(recorded)
        except sr.UnknownValueError:
            text = "Couldn't understand the audio."
        except sr.RequestError:
            text = "Couldn't request results; check your network connection."
    return text
# Function to process voice note
def process_voice_note():
    """Prompt the user for an audio file, transcribe it, and show the text in the UI."""
    # NOTE(review): the voice_notes directory is created but never used in
    # this module - confirm whether it is still needed.
    if not os.path.exists("voice_notes"):
        os.makedirs("voice_notes")
    audio_file_path = filedialog.askopenfilename(filetypes=[("Audio Files", "*.wav;*.mp3")])
    if not os.path.isfile(audio_file_path):
        messagebox.showerror("Error", "File not found. Please provide a valid file path.")
        return
    text = convert_voice_to_text(audio_file_path)
    # Temporarily make the read-only text widget editable to replace its contents
    text_output.config(state=tk.NORMAL)
    text_output.delete(1.0, tk.END)
    text_output.insert(tk.END, text)
    text_output.config(state=tk.DISABLED)
# Function to speak text
def speak_text():
    """Read the transcript shown in the text widget aloud via the local TTS engine."""
    spoken = text_output.get(1.0, tk.END)
    tts_engine = pyttsx3.init()
    tts_engine.say(spoken)
    tts_engine.runAndWait()
# Create the main window
root = tk.Tk()
root.title("Voice to Text Converter")
root.geometry("400x300")

# Load a custom font (replace with your font file)
custom_font = ("Helvetica", 14)

# Create GUI elements with improved design
title_label = tk.Label(root, text="Voice to Text Converter", font=("Helvetica", 20), padx=10, pady=10)
process_button = tk.Button(root, text="Process Voice Note", command=process_voice_note, font=custom_font, bg="green", fg="white")
speak_button = tk.Button(root, text="Speak Text", command=speak_text, font=custom_font, bg="blue", fg="white")
# Read-only transcript area; process_voice_note() toggles its state to write into it
text_output = tk.Text(root, wrap=tk.WORD, state=tk.DISABLED, font=custom_font, bg="lightgray", padx=10, pady=10)

# Place GUI elements using grid layout
title_label.grid(row=0, column=0, columnspan=2)
process_button.grid(row=1, column=0, padx=10, pady=10)
speak_button.grid(row=1, column=1, padx=10, pady=10)
text_output.grid(row=2, column=0, columnspan=2, padx=10, pady=10)

# Start the GUI main loop (blocks until the window is closed)
root.mainloop()
| Swapnil-Singh-99/PythonScriptsHub | Voice to text/voice_to_text.py | voice_to_text.py | py | 2,337 | python | en | code | 19 | github-code | 13 |
27125876643 |
import os
import numpy as np
from glob import glob
import skimage as sk
from skimage import morphology as m
from rectpack_utils import place_rectangles
def check_or_create(path):
    """
    Ensure that the directory `path` exists, creating it (and any missing
    parents) if necessary.

    Parameters
    ----------
    path: string, path of the folder to check/create

    Raises
    ------
    FileExistsError if `path` exists but is not a directory (same behavior
    as the previous isdir-then-makedirs version).
    """
    # exist_ok=True makes this race-free, unlike the previous
    # check-then-create pattern; it still raises if `path` is a file.
    os.makedirs(path, exist_ok=True)
def get_options():
    """Parse command-line options: a required --input file and --output folder."""
    import argparse
    parser = argparse.ArgumentParser(description='Creating heatmap')
    # Both flags share the same metavar/type; only the name and help differ
    for flag, help_text in (('--input', 'input file'),
                            ('--output', 'output folder')):
        parser.add_argument(flag, required=True, metavar="str", type=str,
                            help=help_text)
    return parser.parse_args()
def repositionning(image):
    """
    Reposition (repack) the connected components of an image.

    Components are found on the third ('density') channel: it is binarized,
    dilated with a disk of radius 10 so nearby fragments merge, and labelled;
    `place_rectangles` then packs the labelled components into a new canvas.

    Parameters
    ----------
    image: np.array
        Three-channel image; channel 2 is treated as the density channel.

    Returns
    -------
    new_image: np.array
        The image with the repositioned components.
    indiv_rect:
        Per-component rectangle/mask data, as produced by `place_rectangles`
        (presumably component id -> (sizes, binary mask) - confirm against
        rectpack_utils).
    """
    # only the third or 'density' channel
    # test = image.copy().sum(axis=-1)
    test = image[:,:,2].copy()
    # left_push = np.zeros_like(image)
    # left_mask = np.zeros_like(image)[:,:,0]
    # repositioning = np.zeros_like(image)
    # Binarize, then dilate so nearby fragments merge into a single component
    test[ test > 0] = 255
    test = m.dilation(test, selem=m.disk(10))
    label = sk.measure.label(test)
    new_image, indiv_rect = place_rectangles(label, image)
    return new_image, indiv_rect
def main():
    """Load a heatmap .npy file, repack its connected components, and save the result."""
    options = get_options()
    # num = int(options.comp.split('comp')[-1][0])
    inp = np.load(options.input)
    i_r, mapping = repositionning(inp)
    output = options.output
    check_or_create(output)
    # Output name mirrors the input basename with an 'r' (repositioned) suffix
    name = os.path.join(output, os.path.basename(options.input).replace('.npy', "r.npy"))
    np.save(name, i_r)
    # (legacy per-component export, kept commented out for reference)
    # beg = "heatmaps_comp{}_repos_".format(num) if options.do_comp else "heatmaps_repos_"
    # folder = "./individual_comp{}/".format(num) if options.do_comp else "./individual/"
    # print(beg + options.slide + ".npy")
    # np.save(beg + options.slide + ".npy", i_r)
    # try:
    #     os.mkdir(folder)
    # except:
    #     pass
    # for rid in mapping.keys():
    #     sizes, binary = mapping[rid]
    #     x, y, h, w = sizes
    #     sub_npy = i_npy[x:h, y:w]
    #     sub_npy[(1 - binary.astype(int)).astype(bool)] = 0
    #     print(folder + options.slide + "_heatmaps_{}.npy".format(rid))
    #     np.save(folder + options.slide + "_heatmaps_{}.npy".format(rid), sub_npy)
# Script entry point
if __name__ == '__main__':
    main()
17755307480 | import json
from typing import Dict
from uuid import UUID
from fastapi import FastAPI, HTTPException, status
import httpx
from app.settings.conf import settings
# Single FastAPI application instance for this proxy service
app = FastAPI()

# Base URL of the external offer microservice (from application settings)
API_URL = settings.offer_ms_api_url
async def authorize() -> int:
    """
    Obtain a fresh access token from the offer service using the refresh token.

    On success (HTTP 201) the token is stored on `settings.access_token`.

    :return: the HTTP status code of the auth response (201)
    :raises HTTPException: 503 when the external service refuses the refresh
    """
    # NOTE(review): the token is sent in a literal "Bearer" header rather than
    # the conventional "Authorization: Bearer <token>" - confirm this matches
    # what the offer service expects.
    headers = {"Bearer": settings.refresh_token}
    async with httpx.AsyncClient() as client:
        response = await client.post(API_URL + "auth", headers=headers)
        if response.status_code != status.HTTP_201_CREATED:
            raise HTTPException(
                status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
                detail="Lost external token for offer service..wait a moment and try again",
            )
        settings.access_token = response.json().get("access_token")
        return response.status_code
async def register_product(id: UUID, name: str, description: str) -> int:
    """
    Register a product with the external offer service.

    Makes up to 3 attempts; a 401 response triggers a token refresh via
    `authorize()` before the next attempt.

    :param id: product UUID
    :param name: product name
    :param description: product description
    :return: HTTP status code of the last attempt (201 on success)
    """
    try_count = 3
    async with httpx.AsyncClient() as client:
        payload = json.dumps({"id": str(id), "name": name, "description": description})
        while try_count > 0:
            headers = {"Bearer": settings.access_token}
            response = await client.post(
                API_URL + "products/register", content=payload, headers=headers
            )
            if response.status_code == status.HTTP_201_CREATED:
                break
            else:
                # Expired/missing token: refresh it before retrying
                if response.status_code == status.HTTP_401_UNAUTHORIZED:
                    await authorize()
            try_count -= 1
    return response.status_code
async def get_product_offers(id: UUID) -> Dict:
    """
    Fetch the current offers for a product from the external offer service.

    Makes up to 3 attempts; a 401 response triggers a token refresh via
    `authorize()` before the next attempt.

    :param id: product UUID
    :return: dict with the final 'status_code' and the decoded JSON body 'data'
    """
    try_count = 3
    async with httpx.AsyncClient() as client:
        while try_count > 0:
            headers = {"Bearer": settings.access_token}
            response = await client.get(
                API_URL + f"products/{id}/offers", headers=headers
            )
            if response.status_code == status.HTTP_200_OK:
                break
            else:
                # Expired/missing token: refresh it before retrying
                if response.status_code == status.HTTP_401_UNAUTHORIZED:
                    await authorize()
            try_count -= 1
    return {"status_code": response.status_code, "data": response.json()}
| Caky123/product-aggregator-v1 | app/external_service/offer_handler.py | offer_handler.py | py | 2,162 | python | en | code | 0 | github-code | 13 |
74443894416 | #Bubble Sort
def selectionSort(arr, n):
    """Sort the first n elements of arr in ascending order, in place.

    Exchange-based selection sort: after pass i, arr[i] holds the minimum of
    arr[i:n].  Returns the (mutated) list for convenience.
    """
    for i in range(0, n):
        # BUGFIX: the inner scan must reach the last element (range end n,
        # not n-1); previously arr[n-1] was never compared, so the output
        # could be left unsorted.
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                arr[i], arr[j] = arr[j], arr[i]
    return arr
if __name__ == '__main__':
    # NOTE(review): n is passed as the sort length but is never checked
    # against the number of values actually entered - confirm the two inputs
    # are always consistent.
    n = int(input("Enter number of elements "))
    arr = [x for x in map(int, input().split(" "))]
    arr = selectionSort(arr, n)
    print("Sorted array ",arr)
17173167350 | # Take input
# Read the target value
n = int(input())

# Binary search for the largest m such that m*(m+1)/2 <= n
# (the number of complete rows of a triangle built from n items)
lo, hi = 0, n
best = 0
while lo <= hi:
    mid = lo + (hi - lo) // 2
    if mid * (mid + 1) // 2 <= n:
        best = mid   # mid is feasible; try something larger
        lo = mid + 1
    else:
        hi = mid - 1  # mid overshoots; shrink the upper bound

# Report the answer
print(best)
| SiddhantAttavar/NPS-INR-Cyber-Programming-2021 | Prelims/Problem6/Solution.py | Solution.py | py | 281 | python | en | code | 1 | github-code | 13 |
74059002898 | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 27 15:23:12 2017
@author: socib
"""
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime
from matplotlib.patches import Rectangle
import matplotlib.ticker as tick
import matplotlib.dates as mdates
from matplotlib.pyplot import *
import lib.query_data_trends as query_data_trends
def plot_data_trends(figs_path_store):
    """Plot monthly user/access trend figures and save them as PNG files.

    Args:
        figs_path_store: directory prefix (including trailing separator)
            where 'users_trends' and 'data_access_trends' images are written.

    NOTE(review): ``np`` is not imported in this module; it presumably comes
    from ``from matplotlib.pyplot import *`` — confirm, or import numpy
    explicitly.  ``DataFrame.convert_objects`` is deprecated in modern
    pandas (use ``pd.to_numeric``/``astype`` instead).
    """
    input_data = query_data_trends.query_data_trends()
    #input_log_path = 'C:\Users\socib\Desktop\monthly_trend.csv'
    #column_names = ['period','number_users', 'number_accesses', 'total_data_volume', 'number_countries']
    #input_data = pd.read_csv( input_log_path, delimiter = '\t',
    #                 names=column_names,
    #                 header=None,
    #                 decimal='.',
    #                 na_values=['-'] )
    # Blank (whitespace-only) cells become NaN before numeric coercion.
    input_data = input_data.replace(r'^\s+$', np.nan, regex=True)
    input_data=input_data.convert_objects(convert_numeric=True)
    #time = pd.to_datetime(input_data.period, format='%m/%Y')
    time = pd.to_datetime(input_data.period, format='%Y-%m-%d')
    # Figure 1: unique users (top) and countries of origin (bottom).
    fig, ax = plt.subplots()
    subplot(211)
    plot(time,input_data.number_users, linestyle='-', lw=1, marker='o', ms=3)
    plt.gca().xaxis.set_major_locator(mdates.MonthLocator(interval=1))
    plt.tick_params(
        axis='x',          # changes apply to the x-axis
        which='both',      # both major and minor ticks are affected
        bottom='off',      # ticks along the bottom edge are off
        top='off',         # ticks along the top edge are off
        labelbottom='off') # labels along the bottom edge are off
    plt.yticks(np.arange(0, max(input_data.number_users)+20, 25), fontsize=6)
    plt.grid(color='grey', linestyle='--', linewidth=0.2)
    plt.title('Number of unique users', fontsize=8, loc='right')
    subplot(212)
    plt.plot(time,input_data.number_countries, linestyle='-', lw=1, marker='o', ms=3)
    plt.gca().xaxis.set_major_locator(mdates.MonthLocator(interval=1))
    plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%b-%Y'))
    plt.yticks(np.arange(0, max(input_data.number_countries)+15, 10), fontsize=6)
    plt.grid(color='grey', linestyle='--', linewidth=0.2)
    plt.title('Users countries of origin', fontsize=8, loc='right')
    plt.xticks(rotation=90, fontsize=7)
    plt.tight_layout()
    savefig(figs_path_store + 'users_trends', dpi=800, bbox_inches='tight')
    # Figure 2: dataset accesses (top) and transferred volume (bottom).
    fig, ax = plt.subplots()
    subplot(211)
    plot(time,input_data.number_accesses, linestyle='-', lw=1, marker='o', ms=3)
    plt.gca().xaxis.set_major_locator(mdates.MonthLocator(interval=1))
    plt.tick_params(axis='x', which='both', bottom='off', top='off',labelbottom='off')
    plt.yticks(np.arange(0, max(input_data.number_accesses)+2000000, 2000000), fontsize=6)
    plt.grid(color='grey', linestyle='--', linewidth=0.2)
    plt.title('Number of accesses to the datasets', fontsize=8, loc='right')
    plt.gca().yaxis.set_major_formatter(tick.FormatStrFormatter('%2.2e'))
    subplot(212)
    #plt.plot(time,input_data.total_data_volume)
    plt.plot(time,input_data.total_data_volume, linestyle='-', lw=1, marker='o', ms=3)
    fig.autofmt_xdate()
    plt.xticks(rotation=90, fontsize=7)
    plt.gca().xaxis.set_major_locator(mdates.MonthLocator(interval=1))
    plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%b-%Y'))
    plt.yticks(np.arange(0, max(input_data.total_data_volume)+50, 50), fontsize=6)
    plt.grid(color='grey', linestyle='--', linewidth=0.2)
    plt.title('Total Giga-Bytes transferred', fontsize=8, loc='right')
    #plt.fill_between(time, input_data.total_data_volume,color='m')
    plt.tight_layout()
    savefig(figs_path_store + 'data_access_trends', dpi=800, bbox_inches='tight')
    # Summary aggregates — computed but not returned or used below;
    # TODO confirm whether these were meant to be returned/logged.
    total_GB_transferred = int(round(input_data.total_data_volume.sum()))
    users_avg = int(round(input_data.number_users.mean()))
    countries_avg = int(round(input_data.number_countries.mean()))
| cmunozmas/processing_logs_thredds | lib/plot_data_trends.py | plot_data_trends.py | py | 4,159 | python | en | code | 1 | github-code | 13 |
27124058199 | # utils.py
# Math library
# Author: Sébastien Combéfis
# Version: February 8, 2018
from scipy.integrate import quad
def fact(n):
    """Computes the factorial of a natural number.

    Pre: 'n' is a non-negative integer.
    Post: Returns the factorial of 'n' (fact(0) == 1).
    Throws: ValueError if n < 0
    """
    if n < 0:
        raise ValueError("negative value")
    # Iterative product; avoids shadowing the builtin `sum` and the
    # redundant special-case branches of the original implementation.
    result = 1
    for i in range(2, n + 1):
        result *= i
    return result
def roots(a, b, c):
    """Computes the real roots of the ax^2 + bx + c = 0 polynomial.

    Pre: a != 0.
    Post: Returns a tuple with zero, one or two elements corresponding
        to the real roots of the ax^2 + bx + c polynomial.

    BUG FIX: in Python 3, ``negative ** 0.5`` returns a *complex* number
    instead of raising, so the old try/except never triggered and complex
    values were returned for a negative discriminant.  Also, the
    repeated-root case now returns a 1-tuple (as documented) instead of
    a bare scalar.
    """
    delta = b * b - 4 * a * c
    if delta < 0:
        # No real roots.
        return ()
    if delta == 0:
        # One repeated real root.
        return (-b / (2 * a),)
    sqrt_delta = delta ** 0.5
    return ((-b + sqrt_delta) / (2 * a), (-b - sqrt_delta) / (2 * a))
def integrate(function, lower, upper):
    """Approximates the definite integral of a function between two bounds.

    Pre: 'function' is a valid Python expression in the variable x,
        'lower' <= 'upper', and the function is continuous and integrable
        between 'lower' and 'upper'.
    Post: Returns a float approximation of the integral from 'lower' to
        'upper' of the specified 'function'.

    SECURITY NOTE: the expression is evaluated with eval(); only pass
    trusted input.
    """
    # quad returns (value, estimated_absolute_error); only the value is kept.
    value, _abserr = quad(lambda x: eval(function), lower, upper)
    return value
if __name__ == '__main__':
    # Smoke-test each helper when the module is run as a script.
    print(fact(2))
    print(roots(1, 0, 1))
    print(integrate('x ** 2 - 1', -1, 1))
| Moeuris/AdvancedPython2BA-Labo1 | utils.py | utils.py | py | 1,544 | python | en | code | 0 | github-code | 13 |
3415519930 | # -*- coding: utf-8 -*-
from PyQt5.QtWidgets import (QWidget, QSlider, QApplication, QHBoxLayout, QVBoxLayout)
from PyQt5.QtCore import QObject, Qt, pyqtSignal
from PyQt5.QtGui import QPainter, QFont, QColor, QPen
import sys
class Communicate(QObject):
    """Signal container exposing an int-valued Qt signal (updateBW)."""
    updateBW = pyqtSignal(int)
class BurningWidget(QWidget):
def __init__(self):
super().__init__()
def initUI(self):
self.setMinimunsize(1, 30)
self.value = 75
self.num = [75, 150, 225, 300, 375, 450, 525,600, 675]
def setValue(self, value):
self.value = value
def paintEvent(self, e):
qp = QPainter()
qp.begin(self)
self.drawWiget(qp)
qp.end()
def drawWidget(self, qp):
MAX_CAPACITY = 700
OVER_CAPACITY = 750
font = QFont("Serif", 7, QFont.Light)
qp.setFont(font)
size = self.size()
| Joker3Chen/Scrapy-Web-Java | Scrapy-Python/custom_comp_module.py | custom_comp_module.py | py | 887 | python | en | code | 0 | github-code | 13 |
41139202989 | from Indicators.TechIndicator import TechnicalIndicator
from talib import STOCH
from pandas import DataFrame
from pandas import concat
__author__ = 'Pedro Henrique Veronezi e Sa'
class TechnicalIndicatorSTOCH(TechnicalIndicator):
    """
    Wrapper for the Stochastic from TA-lib
    References:
        https://github.com/mrjbq7/ta-lib
        http://www.tadoc.org/indicator/STOCH.htm
    """
    def __init__(self, fastk_period=5, slowk_period=3, slowk_matype=0, slowd_period=3, slowd_matype=0):
        """
        Constructor.

        Args:
            fastk_period: lookback for the fast %K line.
            slowk_period / slowd_period: smoothing periods for slow %K / %D.
            slowk_matype / slowd_matype: TA-lib moving-average type codes.

        Returns:
            self
        """
        super(TechnicalIndicatorSTOCH, self).__init__()
        self.__fastk_period = fastk_period
        self.__slowk_period = slowk_period
        self.__slowk_matype = slowk_matype
        self.__slowd_period = slowd_period
        self.__slowd_matype = slowd_matype
    def _calc_indicator(self, OHLCV_input):
        """
        Calculates the Stochastic technical indicator using a wrapper for the TA-lib
        Args:
            :param OHLCV_input: the dataframe with the Open, High, Low, Close and Volume values
            :type OHLCV_input: pandas DataFrame
        Returns:
            DataFrame with the slow %K and slow %D columns,
            size (n_observations, 2).
        """
        # Each try/except flattens a 2-D (n, 1) column selection to 1-D;
        # a plain 1-D Series raises IndexError and is used as-is.
        # Presumably this guards against duplicate-labelled columns —
        # TODO confirm the upstream dataframe shape.
        try:
            high = OHLCV_input['high'].values[:, 0]
        except IndexError:
            high = OHLCV_input['high'].values
        try:
            low = OHLCV_input['low'].values[:, 0]
        except IndexError:
            low = OHLCV_input['low'].values
        try:
            close = OHLCV_input['close'].values[:, 0]
        except IndexError:
            close = OHLCV_input['close'].values
        slowk, slowd = STOCH(high, low, close, self.__fastk_period, self.__slowk_period, self.__slowk_matype,
                             self.__slowd_period, self.__slowd_matype)
        slowk = DataFrame(slowk)
        slowd = DataFrame(slowd)
        output = concat([slowk, slowd], axis=1, ignore_index=True)
        # output.columns = ['STOCH%d_%d' % (self.__fastk_period, self.__slowk_period)]
        return output
    def _get_max_periodNaN(self):
        """
        Getter for the number of bars to be gathered form previous periods
        Returns:
            An integer representing the number of bars to be added
        """
        # NOTE(review): the matype arguments are MA *type codes*, not
        # periods — including them in this max() looks suspicious; confirm
        # against how the base class consumes this value.
        # Defines the max for this application
        return int(max(self.__fastk_period, self.__slowk_period, self.__slowk_matype, self.__slowd_period,
                       self.__slowd_matype))
| veronezipedro/TechIndicators | Indicators/TechIndSTOCH.py | TechIndSTOCH.py | py | 2,550 | python | en | code | 0 | github-code | 13 |
class Vertex:
    """A graph node with a name, geo-coordinates, a rating and weighted out-edges."""

    def __init__(self, name, latitude, longitude, rating):
        self.name = name
        self.latitude = latitude
        self.longitude = longitude
        self.rating = rating
        # Adjacency map: neighbour name -> edge weight.
        self.neighbors = {}

    def addNeighbor(self, nbr, weight):
        """Record a directed edge to *nbr* (a Vertex) with the given weight."""
        self.neighbors[nbr.name] = weight

    def getName(self):
        """Return this vertex's identifier."""
        return self.name

    def getWeight(self, nbr):
        """Return the weight of the edge leading to *nbr* (a Vertex)."""
        return self.neighbors[nbr.name]

    def getNeighbors(self):
        """Return the names of all one-hop neighbours."""
        return self.neighbors.keys()
class Graph:
    """Directed, weighted graph whose vertices are keyed by name."""

    def __init__(self):
        self.verticies = {}  # vertex name -> Vertex object

    def addVertex(self, vertex):
        """Register *vertex* under its name."""
        self.verticies[vertex.name] = vertex

    def getVertex(self, name):
        """Return the Vertex called *name*, or None if absent."""
        try:
            return self.verticies[name]
        except KeyError:
            return None

    def getDistance(self, from_vertex, to_vertex):
        """Great-circle distance in miles between two vertices (uses geopy)."""
        import geopy.distance
        coords_1 = (from_vertex.latitude, from_vertex.longitude)
        coords_2 = (to_vertex.latitude, to_vertex.longitude)
        return geopy.distance.distance(coords_1, coords_2).miles

    def addEdge(self, from_vertex, to_vertex, distance=0):
        """Add a directed (one-way) edge from *from_vertex* to *to_vertex*."""
        from_vertex.addNeighbor(to_vertex, distance)

    def getVertices(self):
        """Return a list of all vertex names."""
        return list(self.verticies)

    def dfs(self, visited, vertexName):
        """Recursive depth-first traversal; prints each vertex on first visit."""
        if vertexName not in visited:
            print(vertexName)
            visited.add(vertexName)
            for neighbour in self.getVertex(vertexName).neighbors:
                self.dfs(visited, neighbour)
        return visited

    def bfs(self, visited, vertexName):
        """Breadth-first traversal; prints each vertex in visit order."""
        queue = []
        queue.append(vertexName)
        visited.append(vertexName)
        while queue:
            s = queue.pop(0)
            print(s)
            for neighbour in self.getVertex(s).neighbors:
                if neighbour not in visited:
                    queue.append(neighbour)
                    visited.append(neighbour)
        return visited

    def bellmanFord(self, s, t):
        """Shortest path (as a list of names) from s to t; detects negative cycles."""
        distance = {v: float('inf') for v in self.getVertices()}
        distance[s] = 0
        paths = {v: [] for v in self.getVertices()}
        paths[s] = [s]
        for _ in range(len(self.getVertices()) - 1):  # relax all edges n-1 times
            for u in self.getVertices():
                vertex = self.getVertex(u)
                for v in vertex.getNeighbors():
                    if distance[u] + vertex.getWeight(self.getVertex(v)) < distance[v]:
                        distance[v] = distance[u] + vertex.getWeight(self.getVertex(v))
                        paths[v] = paths[u] + [v]
        # One extra pass: any further improvement implies a negative cycle.
        for u in self.getVertices():
            vertex = self.getVertex(u)
            for v in vertex.getNeighbors():
                if distance[u] + vertex.getWeight(self.getVertex(v)) < distance[v]:
                    return ('Graph has a negative-weight cycle')
        return paths[t]

    def dijkstra(self, s, t):
        """Shortest path (as a list of names) from s to t, non-negative weights.

        BUG FIX: the heap now stores (distance, name) pairs so the vertex
        with the smallest tentative distance is popped first; previously
        bare names were pushed, so the heap ordered vertices
        alphabetically, and the `visited` set was filled but never checked.
        """
        import heapq
        distance = {v: float('inf') for v in self.getVertices()}
        distance[s] = 0
        paths = {v: [] for v in self.getVertices()}
        paths[s] = [s]
        visited = set()
        pq = [(0, s)]
        while pq:
            dist_u, u = heapq.heappop(pq)
            if u in visited:
                continue  # stale heap entry for an already-settled vertex
            visited.add(u)
            vertex = self.getVertex(u)
            for v in vertex.getNeighbors():
                alt = dist_u + vertex.getWeight(self.getVertex(v))
                if alt < distance[v]:
                    distance[v] = alt
                    paths[v] = paths[u] + [v]
                    heapq.heappush(pq, (alt, v))
        return paths[t]
def query(term, location):
    """Search the Yelp Fusion businesses endpoint for *term* near *location*.

    Returns the (possibly cached) response: a list of business dicts.
    NOTE(review): relies on ``make_request_with_cache``, which is neither
    defined nor imported in this module as shown — confirm it is provided
    elsewhere.
    """
    # Modify this part for interative display
    term=term
    location=location
    # base url and parameters
    baseurl= 'https://api.yelp.com/v3/businesses/search'
    params = {'term': term,
            'location': location
            }
    data = make_request_with_cache(baseurl, params)
    return data #list of dictionaries
def category(data):
    """Map each business name to a list of its (up to two) category titles.

    Args:
        data: list of Yelp business dicts, each with 'name' and a non-empty
            'categories' list of {'title': ...} dicts.

    BUG FIX: the old code assigned ``cat_2`` only when a business had a
    second category, so a stale value from a *previous* business leaked
    into single-category entries (and the first such business raised
    NameError).  Now only the categories actually present are included.
    """
    categories = {}
    for business in data:
        titles = [c['title'] for c in business['categories'][:2]]
        categories[business['name']] = titles
    return categories
def maincode(data, categories):
    """Build a directed graph of businesses.

    Two distinct businesses are linked (in both directions, since every
    ordered pair is considered) when they share the same category set or
    the same rating; the edge weight is their geographic distance.
    """
    # One Vertex per business, built from the Yelp payload.
    vertices = [
        Vertex(
            business['name'],
            business['coordinates']['latitude'],
            business['coordinates']['longitude'],
            business['rating'],
        )
        for business in data
    ]

    graph = Graph()
    for vertex in vertices:
        graph.addVertex(vertex)

    # Connect every qualifying ordered pair.
    for vertex1 in vertices:
        for vertex2 in vertices:
            if vertex1.name == vertex2.name:
                continue
            same_categories = set(categories[vertex1.name]) == set(categories[vertex2.name])
            if same_categories or vertex1.rating == vertex2.rating:
                graph.addEdge(vertex1, vertex2, graph.getDistance(vertex1, vertex2))
    return graph
44405823321 | from django.shortcuts import render,redirect
from django.contrib import messages
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.urls import reverse_lazy, reverse
from django.contrib.auth.models import User, auth
from admins.models import Products
from .models import CustomerCart,CustomerCheckout,customerPayedProducts
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse
from .forms import CustomerCheckoutForm
import uuid
import razorpay
# Create your views here.
def index(request):
    """Render the storefront landing page with all active products."""
    products = Products.objects.filter(is_active=1)
    return render(request,'customer/admintemplate.html',{'products':products})
def registercustomer(request):
    """Handle customer sign-up.

    GET renders the registration form; POST validates the submitted
    details and creates a Django auth User, then redirects to login.
    """
    if request.method == 'POST':
        first_name = request.POST['fname']
        last_name = request.POST['lname']
        username = request.POST['username']
        email = request.POST['email']
        pass1 = request.POST['pass1']
        pass2 = request.POST['pass2']
        if pass1 == pass2:
            if User.objects.filter(username = username).exists():
                messages.info(request,"username already exist")
                return redirect('registercustomer')
            elif User.objects.filter(email = email).exists():
                messages.info(request,"email is already exists")
                return redirect('registercustomer')
            else:
                user = User.objects.create_user(
                    first_name = first_name,
                    last_name = last_name,
                    username = username,
                    email = email,
                    password = pass1
                )
                user.save()
                messages.info(request,'user is created')
                return redirect('logincustomer')
        else:
            messages.info(request,"password not matched")
            # BUG FIX: this branch redirected to 'registration', unlike every
            # other error branch ('registercustomer'); return the user to the
            # registration form consistently.
            return redirect('registercustomer')
    else:
        return render(request,'customer/register/registercustomer.html')
def logincustomer(request):
    """Authenticate a customer.

    Already-authenticated users are bounced to the admin dashboard;
    GET renders the login form; POST attempts authentication.
    """
    if request.user.is_authenticated:
        return HttpResponseRedirect(reverse('admindashboard') )
    else:
        if request.method == 'POST':
            username = request.POST['username']
            password = request.POST['password']
            user = auth.authenticate(
                username = username,
                password = password
            )
            if user is not None:
                auth.login(request, user)
                return redirect('/')
            else:
                messages.info(request, 'invalid credintial')
                return redirect('logincustomer')
        else:
            return render(request, 'customer/register/logincustomer.html')
@login_required(login_url = reverse_lazy('logincustomer'))
def logoutcustomer(request):
    """Log the current customer out and return to the home page."""
    auth.logout(request)
    return redirect('/')
def homepage(request):
    """Product listing page; also loads the user's cart (empty for guests)."""
    products = Products.objects.filter(is_active=1)
    usercart = []
    if request.user.is_authenticated:
        usercart = CustomerCart.objects.filter(customer = request.user)
    return render(request,'customer/products.html',{'products':products,'usercart':usercart})
@csrf_exempt
@login_required
def addproducttocart(request):
    """AJAX endpoint: add the POSTed product id to the user's cart.

    NOTE(review): csrf_exempt disables CSRF protection on a state-changing
    endpoint — confirm this is intentional.
    """
    if request.is_ajax():
        product_id = int(request.POST['product'])
        user = request.user
        cart_instance = CustomerCart(product_id = product_id,
                                    customer = user)
        cart_instance.save()
    return JsonResponse({'result':'success'})
@csrf_exempt
@login_required
def removeproductfromcart(request):
    """AJAX endpoint: remove all cart rows for the POSTed product id."""
    if request.is_ajax():
        product_id = int(request.POST['product'])
        user = request.user
        cart_instance = CustomerCart.objects.filter(customer = user,product=product_id)
        cart_instance.delete()
    return JsonResponse({'result':'success'})
@login_required(login_url = reverse_lazy('logincustomer'))
def viewcustomercart(request):
    """Render the cart page with its items, total price and a checkout form."""
    usercart = CustomerCart.objects.filter(customer = request.user).select_related('product')
    totalprice = sum(item.product.price for item in usercart)
    checkoutForm = CustomerCheckoutForm()
    return render(request,'customer/customercart.html',{
        'usercart':usercart,
        'totalprice':totalprice,
        'checkoutform':checkoutForm
    })
@login_required(login_url = reverse_lazy('logincustomer'))
def removeproductcartpage(request,cart_item_id):
    """Remove a single cart row by id, then return to the cart page."""
    user = request.user
    cart_instance = CustomerCart.objects.filter(customer = user,id=cart_item_id)
    cart_instance.delete()
    return HttpResponseRedirect(reverse('viewcustomercart'))
@login_required
def checkoutcustomer(request):
    """Create a Razorpay order for the user's cart and render the payment page.

    POST: reads the delivery address/phone, totals the cart, creates a
    Razorpay order, persists the checkout plus its ordered products, and
    renders the Razorpay checkout form.  Other methods redirect to the
    product list.
    """
    if request.method == 'POST':
        address = request.POST['address']
        phone = request.POST['phone']
        usercart = CustomerCart.objects.filter(customer = request.user).select_related('product')
        totalprice = sum(item.product.price for item in usercart)
        receipt = str(uuid.uuid1())
        # SECURITY: the API key/secret are hard-coded; move them to Django
        # settings or environment variables before deploying.
        client = razorpay.Client(auth=("rzp_test_bAYqeZhjXN8pf0", "cgw5fGdAZHz9CO1GCGp2UJG6"))
        DATA = {
            'amount':totalprice*100,  # Razorpay expects the amount in paise
            'currency':'INR',
            # BUG FIX: use the generated receipt id; previously the
            # hard-coded placeholder 'masupreiept' was sent while the uuid
            # was only stored locally.
            'receipt': receipt,
            'payment_capture':1,
            'notes':{}
        }
        order_details = client.order.create(data=DATA)
        customercheckout_order_instance = CustomerCheckout(customer = request.user,
                        order_id = order_details.get('id'),
                        total_amount = totalprice,
                        reciept_num = receipt,
                        delivery_address = address,
                        delivery_phone = phone)
        customercheckout_order_instance.save()
        customercheckout = CustomerCheckout.objects.get(id = customercheckout_order_instance.id)
        # Snapshot each cart item against this checkout.
        for item in usercart:
            orderedproduct_instance = customerPayedProducts(customer = request.user,
                            product_name = item.product.product_name,
                            price = item.product.price,
                            product_description = item.product.product_description,
                            checkout_details = customercheckout)
            orderedproduct_instance.save()
        context = {'order_id' : order_details.get('id'),
                    'amount' : totalprice,
                    'amountscript' : totalprice*100,
                    'currency' : 'INR',
                    'companyname' : 'Mashupcommrz',
                    'username' : request.user.first_name+' '+request.user.last_name,
                    'useremail' : request.user.email,
                    'phonenum' : phone,
                    'rzpkey' : 'rzp_test_bAYqeZhjXN8pf0'
                    }
        return render(request,'customer/checkoutform.html',context)
    else:
        return HttpResponseRedirect(reverse('products'))
@csrf_exempt
@login_required(login_url = reverse_lazy('logincustomer'))
def markpaymentsuccess(request):
    """AJAX callback after Razorpay payment: record payment ids and clear cart.

    NOTE(review): the Razorpay signature is stored but not verified
    server-side here — confirm verification happens elsewhere, otherwise
    a forged callback could mark an order as paid.
    """
    if request.is_ajax():
        order_id = request.POST['order_id']
        payment_id = request.POST['payment_id']
        payment_signature = request.POST['payment_signature']
        user = request.user
        customercart_order_instance = CustomerCheckout.objects.get(order_id = order_id,
                                customer=user)
        customercart_order_instance.payment_signature = payment_signature
        customercart_order_instance.payment_id = payment_id
        customercart_order_instance.payment_complete = 1
        customercart_order_instance.save()
        # Payment succeeded: empty the customer's cart.
        customercart_instance = CustomerCart.objects.filter(customer = user)
        customercart_instance.delete()
    return JsonResponse({'result':'success'})
| subinkhader/Ecommerce | customer/views.py | views.py | py | 8,303 | python | en | code | 0 | github-code | 13 |
14470534921 | import atexit
import bisect
import multiprocessing as mp
from collections import deque
import cv2
import torch
from abandoned_bag_heuristic import SimpleTracker
from detectron2.data import MetadataCatalog
from detectron2.engine.defaults import DefaultPredictor
from detectron2.utils.video_visualizer import VideoVisualizer
import detectron2.utils.video_visualizer
from detectron2.utils.visualizer import ColorMode, Visualizer
SAVE_PREDICTIONS = False
SAVED_PREDICTIONS = []
import pickle
def draw_instance_predictions(visualizer, frame, predictions, tracker):
    """
    Draw instance-level prediction results on an image.
    Args:
        visualizer (VideoVisualizer): supplies metadata and color assignment.
        frame (ndarray): an RGB image of shape (H, W, C), in the range [0, 255].
        predictions (Instances): the output of an instance detection/segmentation
            model. Following fields will be used to draw:
            "pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle").
        tracker (SimpleTracker): bag/person tracker used to overlay
            attachment lines and "abandoned" labels.
    Returns:
        output (VisImage): image object with visualizations.
    """
    frame_visualizer = Visualizer(frame, visualizer.metadata)
    num_instances = len(predictions)
    if num_instances == 0:
        return frame_visualizer.output
    boxes = predictions.pred_boxes.tensor.numpy() if predictions.has("pred_boxes") else None
    scores = predictions.scores if predictions.has("scores") else None
    classes = predictions.pred_classes.numpy() if predictions.has("pred_classes") else None
    keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None
    if predictions.has("pred_masks"):
        masks = predictions.pred_masks
        # mask IOU is not yet enabled
        # masks_rles = mask_util.encode(np.asarray(masks.permute(1, 2, 0), order="F"))
        # assert len(masks_rles) == num_instances
    else:
        masks = None
    # Wrap detections so the visualizer can assign temporally-stable colors.
    detected = [
        detectron2.utils.video_visualizer._DetectedInstance(classes[i], boxes[i], mask_rle=None, color=None, ttl=8)
        for i in range(num_instances)
    ]
    colors = visualizer._assign_colors(detected)
    labels = detectron2.utils.video_visualizer._create_text_labels(classes, scores,
                                                                   visualizer.metadata.get("thing_classes", None))
    if visualizer._instance_mode == ColorMode.IMAGE_BW:
        # any() returns uint8 tensor
        frame_visualizer.output.img = frame_visualizer._create_grayscale_image(
            (masks.any(dim=0) > 0).numpy() if masks is not None else None
        )
        alpha = 0.3
    else:
        alpha = 0.5
    frame_visualizer.overlay_instances(
        boxes=None if masks is not None else boxes,  # boxes are a bit distracting
        masks=masks,
        labels=labels,
        keypoints=keypoints,
        assigned_colors=colors,
        alpha=alpha,
    )
    # Bag/person association overlay: a green line links an attended bag to
    # its owner, a red line (and an 'abandoned' label) marks an unattended one.
    for bag_id in tracker.prev_frame_ids['bags']:
        bag_center = tracker.all_centers['bags'][bag_id]
        if bag_id in tracker.bag_person_association:
            person_id = tracker.bag_person_association[bag_id]
            if person_id is not None and person_id in tracker.prev_frame_ids['persons']:
                person_center = tracker.all_centers['persons'][person_id]
                if tracker.is_unattended(bag_id):
                    frame_visualizer.draw_line(
                        [bag_center[0], person_center[0]],
                        [bag_center[1], person_center[1]],
                        'r'
                    )
                else:
                    frame_visualizer.draw_line(
                        [bag_center[0], person_center[0]],
                        [bag_center[1], person_center[1]],
                        'g'
                    )
        if tracker.is_unattended(bag_id):
            frame_visualizer.draw_text(
                'abandoned',
                tuple(bag_center[0:2]),
                color='r'
            )
    return frame_visualizer.output
class VisualizationDemo(object):
    """Runs a detectron2 predictor over images/video and renders overlays,
    including the abandoned-bag tracking visualization."""

    def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False):
        """
        Args:
            cfg (CfgNode):
            instance_mode (ColorMode):
            parallel (bool): whether to run the model in different processes from visualization.
                Useful since the visualization logic can be slow.
        """
        self.metadata = MetadataCatalog.get(
            cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused"
        )
        self.cpu_device = torch.device("cpu")
        self.instance_mode = instance_mode
        self.parallel = parallel
        # SimpleTracker(150, 200): distance thresholds, presumably in
        # pixels — TODO confirm against abandoned_bag_heuristic.
        self.tracker = SimpleTracker(150, 200)
        if parallel:
            num_gpu = torch.cuda.device_count()
            self.predictor = AsyncPredictor(cfg, num_gpus=num_gpu)
        else:
            self.predictor = DefaultPredictor(cfg)
    def run_on_image(self, image):
        """
        Args:
            image (np.ndarray): an image of shape (H, W, C) (in BGR order).
                This is the format used by OpenCV.
        Returns:
            predictions (dict): the output of the model.
            vis_output (VisImage): the visualized image output.
        """
        vis_output = None
        predictions = self.predictor(image)
        # Convert image from OpenCV BGR format to Matplotlib RGB format.
        image = image[:, :, ::-1]
        visualizer = Visualizer(image, self.metadata, instance_mode=self.instance_mode)
        if "panoptic_seg" in predictions:
            panoptic_seg, segments_info = predictions["panoptic_seg"]
            vis_output = visualizer.draw_panoptic_seg_predictions(
                panoptic_seg.to(self.cpu_device), segments_info
            )
        else:
            if "sem_seg" in predictions:
                vis_output = visualizer.draw_sem_seg(
                    predictions["sem_seg"].argmax(dim=0).to(self.cpu_device)
                )
            if "instances" in predictions:
                instances = predictions["instances"].to(self.cpu_device)
                vis_output = visualizer.draw_instance_predictions(predictions=instances)
        return predictions, vis_output
    def _frame_from_video(self, video):
        # Generator over decoded frames until the stream ends or fails.
        while video.isOpened():
            success, frame = video.read()
            if success:
                yield frame
            else:
                break
    def run_on_video(self, video):
        """
        Visualizes predictions on frames of the input video.
        Args:
            video (cv2.VideoCapture): a :class:`VideoCapture` object, whose source can be
                either a webcam or a video file.
        Yields:
            ndarray: BGR visualizations of each video frame.
        """
        video_visualizer = VideoVisualizer(self.metadata, self.instance_mode)
        def process_predictions(frame, predictions, tracker):
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            if "panoptic_seg" in predictions:
                panoptic_seg, segments_info = predictions["panoptic_seg"]
                vis_frame = video_visualizer.draw_panoptic_seg_predictions(
                    frame, panoptic_seg.to(self.cpu_device), segments_info
                )
            elif "instances" in predictions:
                predictions = predictions["instances"].to(self.cpu_device)
                # Feed detections into the bag/person tracker before drawing.
                tracker.update(boxes=predictions.pred_boxes.tensor.numpy(), labels=predictions.pred_classes.numpy())
                if SAVE_PREDICTIONS:
                    SAVED_PREDICTIONS.append(predictions)
                    if len(SAVED_PREDICTIONS) == 100:
                        with open('predictions.pkl', 'wb') as fp:
                            pickle.dump(SAVED_PREDICTIONS, fp)
                        print('Saving done!')
                vis_frame = draw_instance_predictions(video_visualizer, frame, predictions, tracker)
            elif "sem_seg" in predictions:
                vis_frame = video_visualizer.draw_sem_seg(
                    frame, predictions["sem_seg"].argmax(dim=0).to(self.cpu_device)
                )
            # Converts Matplotlib RGB format to OpenCV BGR format
            vis_frame = cv2.cvtColor(vis_frame.get_image(), cv2.COLOR_RGB2BGR)
            return vis_frame
        frame_gen = self._frame_from_video(video)
        if self.parallel:
            # Keep the async predictor's pipeline full before draining results.
            buffer_size = self.predictor.default_buffer_size
            frame_data = deque()
            for cnt, frame in enumerate(frame_gen):
                frame_data.append(frame)
                self.predictor.put(frame)
                if cnt >= buffer_size:
                    frame = frame_data.popleft()
                    predictions = self.predictor.get()
                    yield process_predictions(frame, predictions, self.tracker)
            while len(frame_data):
                frame = frame_data.popleft()
                predictions = self.predictor.get()
                yield process_predictions(frame, predictions, self.tracker)
        else:
            for frame in frame_gen:
                yield process_predictions(frame, self.predictor(frame), self.tracker)
class AsyncPredictor:
    """
    A predictor that runs the model asynchronously, possibly on >1 GPUs.
    Because rendering the visualization takes considerably amount of time,
    this helps improve throughput when rendering videos.
    """
    class _StopToken:
        # Sentinel placed on the task queue to tell workers to exit.
        pass
    class _PredictWorker(mp.Process):
        # One worker process per device; pulls (idx, image) tasks and
        # pushes (idx, result) back so the parent can restore order.
        def __init__(self, cfg, task_queue, result_queue):
            self.cfg = cfg
            self.task_queue = task_queue
            self.result_queue = result_queue
            super().__init__()
        def run(self):
            predictor = DefaultPredictor(self.cfg)
            while True:
                task = self.task_queue.get()
                if isinstance(task, AsyncPredictor._StopToken):
                    break
                idx, data = task
                result = predictor(data)
                self.result_queue.put((idx, result))
    def __init__(self, cfg, num_gpus: int = 1):
        """
        Args:
            cfg (CfgNode):
            num_gpus (int): if 0, will run on CPU
        """
        num_workers = max(num_gpus, 1)
        self.task_queue = mp.Queue(maxsize=num_workers * 3)
        self.result_queue = mp.Queue(maxsize=num_workers * 3)
        self.procs = []
        for gpuid in range(max(num_gpus, 1)):
            cfg = cfg.clone()
            cfg.defrost()
            cfg.MODEL.DEVICE = "cuda:{}".format(gpuid) if num_gpus > 0 else "cpu"
            self.procs.append(
                AsyncPredictor._PredictWorker(cfg, self.task_queue, self.result_queue)
            )
        self.put_idx = 0
        self.get_idx = 0
        # Out-of-order results buffered here, kept sorted by index.
        self.result_rank = []
        self.result_data = []
        for p in self.procs:
            p.start()
        atexit.register(self.shutdown)
    def put(self, image):
        # Enqueue an image; indices are 1-based and monotonically increasing.
        self.put_idx += 1
        self.task_queue.put((self.put_idx, image))
    def get(self):
        self.get_idx += 1  # the index needed for this request
        if len(self.result_rank) and self.result_rank[0] == self.get_idx:
            res = self.result_data[0]
            del self.result_data[0], self.result_rank[0]
            return res
        while True:
            # make sure the results are returned in the correct order
            idx, res = self.result_queue.get()
            if idx == self.get_idx:
                return res
            insert = bisect.bisect(self.result_rank, idx)
            self.result_rank.insert(insert, idx)
            self.result_data.insert(insert, res)
    def __len__(self):
        # Number of tasks submitted but not yet retrieved.
        return self.put_idx - self.get_idx
    def __call__(self, image):
        # Synchronous convenience wrapper: submit one image and wait.
        self.put(image)
        return self.get()
    def shutdown(self):
        # One stop token per worker process.
        for _ in self.procs:
            self.task_queue.put(AsyncPredictor._StopToken())
    @property
    def default_buffer_size(self):
        # How many frames to keep in flight before draining results.
        return len(self.procs) * 5
| roym899/abandoned_bag_detection | predictor.py | predictor.py | py | 11,940 | python | en | code | 14 | github-code | 13 |
24296771464 | # -*- coding: utf-8 -*-
"""Convert a plain YAML file with application configuration into a CloudFormation template with SSM parameters."""
import sys
from datetime import datetime
from datetime import timezone
from functools import partial
from functools import wraps
from typing import Callable
from typing import Dict
from typing import List
import click
import yaml
from flyingcircus.core import Resource
from flyingcircus.core import Stack
from flyingcircus.service.ssm import SSMParameter
from ssmash.config import InvalidatingConfigKey
from ssmash.converter import convert_hierarchy_to_ssm
from ssmash.invalidation import create_lambda_invalidation_stack
from ssmash.loader import EcsServiceInvalidator
from ssmash.loader import get_cfn_resource_from_options
from ssmash.util import clean_logical_name
from ssmash.yamlhelper import SsmashYamlLoader
# TODO move helper functions to another module
# TODO tests for helper functions
#: Prefix for specifying a CloudFormation import as a CLI parameter
CFN_IMPORT_PREFIX = "!ImportValue:"
@click.group("ssmash", chain=True, invoke_without_command=True, help=__doc__)
@click.option(
    "-i",
    "--input",
    "--input-file",
    "input_file",
    type=click.File("r"),
    default="-",
    help="Where to read the application configuration YAML file",
)
@click.option(
    "-o",
    "--output",
    "--output-file",
    "output_file",
    type=click.File("w"),
    default="-",
    help="Where to write the CloudFormation template file",
)
@click.option(
    "--description",
    type=str,
    default="Application configuration",
    help="The description for the CloudFormation stack.",
)
def run_ssmash(input_file, output_file, description: str):
    # Intentionally empty: this is a chained Click group, so all real work
    # happens in the resultcallback (process_pipeline) once every
    # subcommand has contributed its processor.
    pass
@run_ssmash.resultcallback()
def process_pipeline(processors, input_file, output_file, description: str):
    """Assemble and run the processor chain produced by the subcommands.

    Each subcommand returns a ``processor(appconfig, stack)`` callable;
    this callback brackets them with the default parameter creation,
    embedded-invalidation handling, and template writing steps.
    """
    # Create basic processor inputs
    appconfig = _load_appconfig_from_yaml(input_file)
    stack = _initialise_stack(description)
    # Augment processing functions with default loader and writer
    processors = (
        [_create_ssm_parameters]
        + processors
        + [_create_embedded_invalidations]
        + [partial(_write_cfn_template, output_file)]
    )
    # Apply all chained commands
    for processor in processors:
        processor(appconfig, stack)
def appconfig_processor(func: Callable) -> Callable:
    """Decorator to convert a Click command into a custom processor for application configuration.

    The decorated command captures its CLI arguments and returns a closure
    taking ``(appconfig, stack)``; any ValueError raised inside is
    surfaced to the user as a Click usage error.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        def processor(appconfig: dict, stack: Stack):
            try:
                return func(appconfig, stack, *args, **kwargs)
            except ValueError as ex:
                raise click.UsageError(str(ex)) from ex
        return processor
    return wrapper
@run_ssmash.command(
    "invalidate-ecs",
    options_metavar="(--cluster-name|--cluster-import) CLUSTER "
    "(--service-name|--service-import) SERVICE "
    "(--role-name|--role-import) ROLE ",
)
@click.option(
    "--cluster-name",
    type=str,
    default=None,
    help="The cluster that contains the ECS Service to invalidate (as a name or ARN).",
    metavar="ARN",
)
@click.option(
    "--cluster-import",
    type=str,
    default=None,
    help="Alternatively, specify the cluster as a CloudFormation import.",
    metavar="EXPORT_NAME",
)
@click.option(
    "--service-name",
    type=str,
    default=None,
    help="The ECS Service that depends on this configuration (as a name or ARN).",
    metavar="ARN",
)
@click.option(
    "--service-import",
    type=str,
    default=None,
    help=("Alternatively, specify the ECS Service as a CloudFormation export."),
    metavar="EXPORT_NAME",
)
@click.option(
    "--role-name",
    type=str,
    default=None,
    help="The IAM role to use for invalidating this service (as an ARN).",
    metavar="ARN",
)
@click.option(
    "--role-import",
    type=str,
    default=None,
    help="Alternatively, specify the IAM role as a CloudFormation export.",
    metavar="EXPORT_NAME",
)
@appconfig_processor
def invalidate_ecs_service(
    appconfig,
    stack,
    cluster_name,
    cluster_import,
    service_name,
    service_import,
    role_name,
    role_import,
):
    """Invalidate the cache in an ECS Service that uses these parameters,
    by restarting the service.
    """
    # Name/import pairs are validated inside EcsServiceInvalidator
    # (exactly one of each pair must be provided).
    invalidator = EcsServiceInvalidator(
        cluster_name=cluster_name,
        cluster_import=cluster_import,
        service_name=service_name,
        service_import=service_import,
        role_name=role_name,
        role_import=role_import,
    )
    # Every SSM parameter created so far becomes a dependency trigger.
    all_parameters = [
        r for r in stack.Resources.values() if isinstance(r, SSMParameter)
    ]
    stack.merge_stack(
        invalidator.create_resources(all_parameters).with_prefixed_names(
            "InvalidateEcs"
        )
    )
@run_ssmash.command(
"invalidate-lambda",
options_metavar="(--function-name|--function-import) FUNCTION "
"(--role-name|--role-import) ROLE ",
)
@click.option(
"--function-name",
type=str,
default=None,
help="The Lambda Function to invalidate (as a name or ARN).",
metavar="ARN",
)
@click.option(
"--function-import",
type=str,
default=None,
help="Alternatively, specify the Lambda Function as a CloudFormation import.",
metavar="EXPORT_NAME",
)
@click.option(
"--role-name",
type=str,
default=None,
help="The IAM role to use for invalidating this Lambda (as an ARN).",
metavar="ARN",
)
@click.option(
"--role-import",
type=str,
default=None,
help=("Alternatively, specify the IAM role as a CloudFormation export."),
metavar="EXPORT_NAME",
)
@appconfig_processor
def invalidate_lambda(
    appconfig, stack, function_name, function_import, role_name, role_import
):
    """Invalidate the cache in a Lambda Function that uses these parameters,
    by restarting the Lambda Execution Context.

    The function and role may each be supplied directly (``*_name``) or as
    a CloudFormation export (``*_import``).
    """
    # TODO be able to invalidate all lambda functions in an entire stack
    # Unpack the resource references
    function = get_cfn_resource_from_options("function", function_name, function_import)
    role = get_cfn_resource_from_options("role", role_name, role_import)
    # Use a custom Lambda to invalidate the function iff it's dependent resources
    # have changed
    stack.merge_stack(
        create_lambda_invalidation_stack(
            function=function,
            # Every SSM parameter already in the stack is a dependency.
            dependencies=[
                r for r in stack.Resources.values() if isinstance(r, SSMParameter)
            ],
            role=role,
        ).with_prefixed_names("InvalidateLambda")
    )
def _create_ssm_parameters(appconfig: dict, stack: Stack):
    """Create SSM parameters for every item in the application configuration."""
    # Strip the ssmash-internal metadata section before converting the
    # remaining hierarchy into SSM parameter resources.
    config_without_meta = {k: v for k, v in appconfig.items() if k != ".ssmash-config"}
    stack.merge_stack(
        convert_hierarchy_to_ssm(config_without_meta).with_prefixed_names("SSMParam")
    )
def _create_embedded_invalidations(appconfig: dict, stack: Stack):
    """Invalidate the cache in applications that use some of these parameters
    (by restarting the application), as specified by configuration embedded
    inline in the input file.

    Raises:
        ValueError: if a parameter references an application that has no
            invalidator defined under ``.ssmash-config -> invalidations``.
    """
    # Invalidators are declared in the file's metadata section.
    invalidatable_services = appconfig.get(".ssmash-config", {}).get("invalidations")
    if not invalidatable_services:
        return
    # Work on a copy with the metadata section removed.
    clean_config = dict(appconfig)
    clean_config.pop(".ssmash-config", None)
    invalidated_resources = _get_invalidated_resources(clean_config)
    for appname, appresources in invalidated_resources.items():
        invalidator = invalidatable_services.get(appname)
        if not invalidator:
            # TODO this error message is a bit fragile
            raise ValueError(
                f"Parameter {appresources[0].Properties.Name} invalidates service {appname}, but that service is not defined."
            )
        stack.merge_stack(
            invalidator.create_resources(appresources).with_prefixed_names(
                "Invalidate" + clean_logical_name(appname)
            )
        )
def _get_invalidated_resources(appconfig: dict) -> Dict[str, List[Resource]]:
    """Recursively map application names to the resources that invalidate them.

    Returns:
        A dictionary of {application_name: [cfn_resource]}
    """
    collected: Dict[str, List[Resource]] = {}

    def _extend(appname, resources):
        # Accumulate resources per application, creating the bucket on demand.
        collected.setdefault(appname, []).extend(resources)

    for key, value in appconfig.items():
        if isinstance(key, InvalidatingConfigKey):
            for appname in key.invalidated_applications:
                _extend(appname, key.dependent_resources)
        if isinstance(value, dict):
            # Recurse into nested configuration sections and merge the results.
            for appname, nested_resources in _get_invalidated_resources(value).items():
                _extend(appname, nested_resources)
    return collected
def _initialise_stack(description: str) -> Stack:
    """Create a basic Flying Circus stack, customised for ssmash"""
    stack = Stack(Description=description)
    # Imported locally - presumably to avoid a circular import at module
    # load time (TODO confirm).
    from ssmash import __version__
    # Stamp the template so generated output can be traced back to a
    # ssmash version and generation time (UTC, ISO-8601).
    stack.Metadata["ssmash"] = {
        "generated_timestamp": datetime.now(tz=timezone.utc).isoformat(),
        "version": __version__,
    }
    return stack
def _load_appconfig_from_yaml(input) -> dict:
    """Load a YAML description of the application configuration."""
    parsed = yaml.load(input, SsmashYamlLoader)
    # PyYAML yields None (not an empty dict) for an empty document, so
    # normalise that case explicitly.
    if parsed is None:
        return {}
    return parsed
def _write_cfn_template(output, appconfig: dict, stack: Stack):
    """Write the CloudFormation template"""
    # NOTE(review): appconfig is accepted but unused here; it looks like it
    # is kept for a shared processor signature - confirm before removing.
    output.write(stack.export("yaml"))
if __name__ == "__main__":
    sys.exit(run_ssmash())
| garyd203/ssmash | src/ssmash/cli.py | cli.py | py | 9,623 | python | en | code | 1 | github-code | 13 |
8851315914 | #aleart / Confirmation.
from selenium import webdriver
import time
# Launch Chrome using the locally installed driver binary.
browser = webdriver.Chrome(executable_path="C:\DRIVERS\chromedriver.exe")
browser.get("http://testautomationpractice.blogspot.com/")
browser.maximize_window()

# Click the button that raises the JavaScript alert, then wait for it.
browser.find_element_by_xpath("//button[contains(text(),'Click Me')]").click()
time.sleep(5)

# Switch focus to the alert and dismiss it (use .accept() to confirm instead).
alert_box = browser.switch_to.alert
alert_box.dismiss()

browser.close()
| Basavakiran134/Simplilearn | PopUps.py | PopUps.py | py | 399 | python | en | code | 0 | github-code | 13 |
def replace_space(s, l):
    """URL-encode the spaces in the first *l* characters of *s*.

    Args:
        s: Source string; characters beyond index l are ignored.
        l: Number of leading characters of *s* that hold the true content.

    Returns:
        A new string where each space in s[:l] is replaced by "%20".
    """
    # str.replace runs in C and avoids the O(n^2) cost of building the
    # result one character at a time with ``+=``.
    return s[:l].replace(" ", "%20")
def reverse_remove_space(s, l):
    """Encode spaces as "%20" in-place style, filling the buffer from the right.

    The buffer is *s* itself (assumed to carry enough trailing padding); the
    first *l* characters are the true content.  Characters left of the
    written region keep their original values.
    """
    buf = list(s)
    write_pos = len(buf)
    # Walk the content backwards so the expanded text never overwrites
    # characters that have not been read yet.
    for read_pos in range(l - 1, -1, -1):
        ch = buf[read_pos]
        if ch == " ":
            buf[write_pos - 3 : write_pos] = "%20"
            write_pos -= 3
        else:
            buf[write_pos - 1] = ch
            write_pos -= 1
    return "".join(buf)
if __name__ == "__main__":
    # Demo: the buffer holds 13 "true" characters plus 4 spaces of padding
    # (one extra pair of characters per space that gets expanded to "%20").
    s = "Mr John Smith    "
    l = 13
    expected = "Mr%20John%20Smith"
    # Both implementations should print True.
    result = replace_space(s, l)
    print(result == expected)
    ans = reverse_remove_space(s, l)
    print(ans == expected)
16349992190 | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 26 16:51:38 2021
This is the code for tesing PCA(max in projection variance)
@author: Yingjian Song
"""
from Principle_component_analysis import Principle_Component_Analysis as PCA
import matplotlib.pyplot as plt
from sklearn import datasets
import numpy as np
# prepare the data
iris = datasets.load_iris()
X = iris.data
y = iris.target
names = iris.feature_names
labels = iris.target_names
# find new feature space for data
clf = PCA()
clf.fit(X)
# transform data to new feature space, only keep 2 features in this case
X_trans = clf.transform(X,2)
np.linalg.norm(X_trans[:,1])
#plot my PCA transformed data in 2 dimension
plt.figure(figsize=(8, 4))
plt.subplot(121)
plt.scatter(X_trans[y == 0, 0], X_trans[y == 0, 1], c='r')
plt.scatter(X_trans[y == 1, 0], X_trans[y == 1, 1], c='g')
plt.scatter(X_trans[y == 2, 0], X_trans[y == 2, 1], c='b')
plt.title('my PCA')
plt.xlabel('PCA_1')
plt.ylabel('PCA_2')
plt.legend(labels, loc='best', fancybox=True)
#compare with sklearn PCA
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
sk_PCA = pca.fit_transform(X)
#plot sklearn transformed data in 2 dimension
plt.subplot(122)
plt.scatter(sk_PCA[y == 0, 0], sk_PCA[y == 0, 1], c='r')
plt.scatter(sk_PCA[y == 1, 0], sk_PCA[y == 1, 1], c='g')
plt.scatter(sk_PCA[y == 2, 0], sk_PCA[y == 2, 1], c='b')
plt.title('sklearn PCA')
plt.xlabel('PCA_1')
plt.ylabel('PCA_2')
plt.legend(labels, loc='best', fancybox=True) | syj63016/Machine-Learning | machine_learning/PCA/PCA_test.py | PCA_test.py | py | 1,474 | python | en | code | 2 | github-code | 13 |
26268807778 | #Circular Primes
import random
from itertools import permutations
def check_prime(n):
    """Probabilistic primality test based on Fermat's little theorem.

    Runs up to 300 random-base Fermat trials.  Composites are rejected with
    very high probability, though Carmichael numbers can in principle fool
    a Fermat test (this is not Miller-Rabin).

    Args:
        n: Integer to test.

    Returns:
        True if n is (probably) prime, False otherwise.
    """
    num_checks = 300
    check_list = [2, 3, 5, 7, 11]
    if n in check_list:
        return True
    # Guard small and invalid inputs: randint(2, n - 1) needs n >= 4, and
    # no integer below 2 is prime (the original crashed for n <= 1).
    if n < 4:
        return False
    for _ in range(num_checks):
        # Fermat's little theorem: a^(n-1) = 1 (mod n) when n is prime.
        base = random.randint(2, n - 1)
        if pow(base, n - 1, n) != 1:
            return False
    # Bug fix: the original returned True after a single passing trial;
    # all num_checks trials must pass before declaring "probably prime".
    return True
def rotation(n):
    """Return the set of all digit rotations of *n*, as integers.

    Leading zeros created by a rotation disappear in the int conversion.
    """
    digits = str(n)
    return {int(digits[k:] + digits[:k]) for k in range(len(digits))}
def circular_prime(n):
    """True iff every digit rotation of *n* passes the primality test."""
    return all(check_prime(m) for m in rotation(n))
# Quick smoke test: 20 is composite, so this prints False.
print(circular_prime(20))
| nezawr/ProjectEuler | Problem35.py | Problem35.py | py | 700 | python | en | code | 0 | github-code | 13 |
74252424978 |
# Opcode tables, indexed by opcode number (an Aheui-style instruction set,
# judging by the single Hangul-jamo comments naming each consonant - confirm).
# OP_REQSIZE: operands required on the stack before the op may run;
# OP_STACKDEL / OP_STACKADD: how many values each op pops / pushes.
OP_REQSIZE = [0, 0, 2, 2, 2, 2, 1, 0, 1, 0, 1, 0, 2, 0, 1, 0, 2, 2, 0, 1, 1, 0, 0, 2, 1, 0]
OP_STACKDEL = [0, 0, 2, 2, 2, 2, 1, 0, 1, 0, 1, 0, 2, 0, 1, 0, 2, 2, 0, 1, 1, 0, 0, 0, 0, 0]
OP_STACKADD = [0, 0, 1, 1, 1, 1, 0, 1, 2, 0, 0, 0, 1, 0, 0, 0, 1, 2, 0, 0, 0, 1, 1, 0, 0, 0]
# Special storage selectors and the number of storages.
VAL_QUEUE = 21
VAL_PORT = 27
STORAGE_COUNT = 28
# Primitive opcodes; the comment-only jamo lines mark unassigned slots
# (0, 1, 13, 15).
# ㄱ
# ㄲ
OP_DIV = 2 # ㄴ
OP_ADD = 3 # ㄷ
OP_MUL = 4 # ㄸ
OP_MOD = 5 # ㄹ
OP_POP = 6 # ㅁ
OP_PUSH= 7 # ㅂ
OP_DUP = 8 # ㅃ
OP_SEL = 9 # ㅅ
OP_MOV = 10 # ㅆ
OP_NONE= 11 # ㅇ
OP_CMP = 12 # ㅈ
# ㅉ
OP_BRZ = 14
# ㅋ
OP_SUB = 16 # ㅌ
OP_SWAP= 17 # ㅍ
OP_HALT= 18 # ㅎ
## end of primitive
# I/O pseudo-ops and special (negative) control-flow opcodes.
OP_POPNUM = 19
OP_POPCHAR = 20
OP_PUSHNUM = 21
OP_PUSHCHAR = 22
OP_BRPOP2 = -3 # special
OP_BRPOP1 = -2 # special
OP_JMP = -1 # special
# Convenience groupings used by opcode-classification checks.
OP_BRANCHES = [OP_BRZ, OP_BRPOP1, OP_BRPOP2]
OP_JUMPS = OP_BRANCHES + [OP_JMP]
OP_BINARYOPS = [OP_DIV, OP_ADD, OP_MUL, OP_MOD, OP_CMP, OP_SUB]
71831387858 | #!/usr/bin/env python3
'''
Developed By: Dhanish Vijayan
Company: Elementz Engineers Guild Pvt Ltd
https://www.elementzonline.com/blog/running-mqtt-broker-in-raspberry-pi
addapted by David Torrens (https://github.com/grayerbeard/mqtt) based on info from
http://www.steves-internet-guide.com/into-mqtt-python-client/
Was revised to work with Python3
'''
import paho.mqtt.client as mqtt
from time import sleep as time_sleep
from utility import fileexists
from datetime import datetime
from sys import exit as sys_exit
from config import class_config
config = class_config()

# Load overrides from an existing config file, or write a default one for
# the user to edit on first run.
if fileexists(config.config_filename):
    print("will try to read Config File : ", config.config_filename)
    config.read_file()  # overwrites from file
else:  # no file so file needs to be writen
    config.write_file()
    print("New Config File Made with default values, you probably need to edit it")

count = 0

# Most recent MQTT message, written by on_message() from paho's network
# thread.  Bug fix: initialised to None (the original's bare ``global``
# statement at module level was a no-op, so the main loop raised NameError
# whenever it printed before the first message arrived).
latest_msg = None


def on_connect(self, client, userdata, rc):
    ''' Result Codes
    0: Connection successful
    1: Connection refused - incorrect protocol version
    2: Connection refused - invalid client identifier
    3: Connection refused - server unavailable
    4: Connection refused - bad username or password
    5: Connection refused - not authorised
    6-255: Currently unused.
    Subscribing in on_connect() means that if we lose the connection and
    reconnect then subscriptions will be renewed.'''
    # NOTE(review): paho-mqtt v1 invokes on_connect(client, userdata, flags, rc);
    # here the first parameter (named ``self``) receives the client instance,
    # which is why self.subscribe() works - confirm against the paho version in use.
    print("Connected with result code " + str(rc))
    self.subscribe(config.topic)


def on_message(client, userdata, msg):
    """Record the latest message so the polling loop below can report it."""
    global latest_msg
    latest_msg = msg


client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect(config.broker_address, config.broker_port, 60)

# Run the network loop in a background thread; the main thread just polls.
client.loop_start()

try:
    # Poll slower than the expected publish rate and report the latest
    # message each time round.
    while True:
        count += 1
        time_sleep(1.33 * config.scan_delay)
        payload = latest_msg.payload.decode() if latest_msg is not None else "<no message received yet>"
        print("My Loop Count (every " + str(1.33 * config.scan_delay) + " seconds) : " + str(count)
              + " Message Received on Topic \"" + config.topic + "\" was >" + payload + "<")
except KeyboardInterrupt:
    print(".........Ctrl+C pressed... I am stopping")
    payload = latest_msg.payload.decode() if latest_msg is not None else "<no message received>"
    print("Last Message Received on Topic : \"" + config.topic + "\" was >" + payload + "<")
    client.loop_stop()
    time_sleep(2.5)
    sys_exit()
15271436799 | from utils import *
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from collections import Counter
sns.set(style="darkgrid")
save_name = "log/histogram.png"
def create_entity_dicts(all_tuples):
    """Index the triples three ways and print alignment-cardinality stats.

    Args:
        all_tuples: iterable of (head, relation, tail) triples.

    Returns:
        e1_to_multi_e2: {(head, rel): [tails]}
        e2_to_multi_e1: {(tail, rel): [heads]}
        ent_to_freq: {entity: number of triples it appears in}
        ent_set: set of all entities seen
    """
    e1_to_multi_e2 = {}
    e2_to_multi_e1 = {}
    e12_to_multi_r={}
    ent_set=set()
    ent_to_freq={}
    for tup in all_tuples:
        e1, rel, e2 = tup
        ent_set.add(e1)
        ent_set.add(e2)
        if e1 not in ent_to_freq:
            ent_to_freq[e1] = 1
        else:
            ent_to_freq[e1] +=1
        if e2 not in ent_to_freq:
            ent_to_freq[e2] =1
        else:
            ent_to_freq[e2] +=1
        if (e1, rel) in e1_to_multi_e2:
            e1_to_multi_e2[(e1, rel)].append(e2)
        else:
            e1_to_multi_e2[(e1, rel)] = [e2]
        if (e2, rel) in e2_to_multi_e1:
            e2_to_multi_e1[(e2, rel)].append(e1)
        else:
            e2_to_multi_e1[(e2, rel)] = [e1]
        if (e1, e2) in e12_to_multi_r:
            e12_to_multi_r[(e1, e2)].append(rel)
        else:
            e12_to_multi_r[(e1, e2)]= [rel]
    print("triple_num: {}, entity_num: {}".format(len(all_tuples), len(ent_set)))
    # Count how many (head, rel) keys map to exactly one tail vs several
    # (1-1 vs 1-n), and the mirror statistics for the other two indexes.
    count_1_1=0
    count_1_n=0
    count_n_1=0
    count_n_n=0
    for k, e2s in e1_to_multi_e2.items():
        if len(e2s)==1:
            count_1_1 +=1
        if len(e2s)>1:
            #print(k, e2s)
            count_1_n +=1
    print("e1_to_multi_e2. count_1_1: {}, count_1_n: {}".format(count_1_1, count_1_n))
    # count_1_1 is deliberately reset and reused for each index.
    count_1_1=0
    for k, e1s in e2_to_multi_e1.items():
        if len(e1s)==1:
            count_1_1 +=1
        if len(e1s)>1:
            #print(k, e2s)
            count_n_1 +=1
    print("e2_to_multi_e1. count_1_1: {}, count_n_1: {}".format(count_1_1, count_n_1))
    count_1_1=0
    for e12, rs in e12_to_multi_r.items():
        if len(rs)==1:
            count_1_1 +=1
        if len(rs)>1:
            print(e12, rs)
            count_n_n +=1
    print("e12_to_multi_r. count_1_1: {}, count_n_n: {}".format(count_1_1, count_n_n))
    print('-------')
    #print("e1_to_multi_e2: {}".format( e1_to_multi_e2))
    #print("e2_to_multi_e1: {}".format( e2_to_multi_e1))
    return e1_to_multi_e2, e2_to_multi_e1, ent_to_freq, ent_set
def count_ent_degree(triples, is_sorted=False):
    """Count, per entity, how many triples it appears in as head or tail.

    Args:
        triples: iterable of (head, relation, tail) triples.
        is_sorted: when True, order the result by descending degree.

    Returns:
        dict mapping entity -> degree.
    """
    degree = Counter()
    for head, _, tail in triples:
        degree[head] += 1
        degree[tail] += 1
    if is_sorted:
        # dicts preserve insertion order, so the sorted ordering sticks.
        return {ent: cnt for ent, cnt in sorted(degree.items(), key=lambda kv: kv[1], reverse=True)}
    return dict(degree)
def filter_pairs_by_degree_interval(pair_degree, degree_interval):
    """Return the pairs whose degree lies in the half-open interval [lo, hi)."""
    lo, hi = degree_interval[0], degree_interval[1]
    return {pair for pair, deg in pair_degree.items() if lo <= deg < hi}
def gold_standard_compare(gold_set, exp_set):
    """Precision, recall and F1 of *exp_set* measured against *gold_set*.

    Prints the raw counts (hits, predicted, gold) as a side effect.
    """
    hits = gold_set & exp_set
    print(len(hits), len(exp_set), len(gold_set))
    if not hits:
        # No overlap at all: avoid dividing by zero in the F1 formula.
        return 0, 0, 0
    precision = len(hits) / len(exp_set)
    recall = len(hits) / len(gold_set)
    f1 = 2 * precision * recall / (precision + recall)
    return precision, recall, f1
def count_pair_degree(ent_degree_1, ent_degree_2, links):
    """Average degree of each aligned entity pair across the two KGs."""
    return {(e1, e2): (ent_degree_1[e1] + ent_degree_2[e2]) / 2 for (e1, e2) in links}
def get_ent_freq(ent_to_freq, ent_set, freq_threshold=250):
    """Frequencies of the entities in *ent_set*, capped at *freq_threshold*.

    Entities missing from *ent_to_freq* are silently skipped.

    Args:
        ent_to_freq: mapping entity -> raw frequency.
        ent_set: entities to report on.
        freq_threshold: maximum frequency to report (larger values are clamped).

    Returns:
        Counter mapping entity -> clamped frequency.
    """
    subset_ent_to_freq = Counter()
    for ent in ent_set:
        if ent in ent_to_freq:
            # Bug fix: the cap was hard-coded to 250, silently ignoring the
            # freq_threshold parameter.
            subset_ent_to_freq[ent] = min(ent_to_freq[ent], freq_threshold)
    return subset_ent_to_freq
def get_freq_distribution(data):
    """Histogram of the values of *data*: value -> how many keys carry it."""
    return Counter(data.values())
def calcualte_data_freq(dataset):
    """Plot valid/test entity-frequency histograms for *dataset*.

    Frequencies are computed three ways (against the overlap KG, against
    KG1 - labelled ConceptNet - and against KG2 - labelled SWOW) and the
    six bar charts are saved as a single PNG under log/.
    """
    data_folder = '../../../datasets/'+dataset+'/'
    print("dataset: {}".format(data_folder))
    rel_triples_1, _, _ = read_relation_triples(data_folder + 'rel_triples_1')
    rel_triples_2, _, _ = read_relation_triples(data_folder + 'rel_triples_2')
    rel_triples_overlap12, _, _ = read_relation_triples(data_folder + 'rel_triples_overlap12')
    rel_triples_valid, _, _ = read_relation_triples(data_folder + 'rel_triples_valid')
    rel_triples_test, _, _ = read_relation_triples(data_folder + 'rel_triples_test')
    _, _, ent_to_freq1, ent_set1 = create_entity_dicts(rel_triples_1|rel_triples_valid|rel_triples_test)
    _, _, ent_to_freq2, ent_set2 = create_entity_dicts(rel_triples_2|rel_triples_valid|rel_triples_test)
    _, _, ent_to_freq12, ent_set12 = create_entity_dicts(rel_triples_overlap12)
    _, _, ent_to_freq_valid, ent_set_valid = create_entity_dicts(rel_triples_valid)
    _, _, ent_to_freq_test, ent_set_test = create_entity_dicts(rel_triples_test)
    #freq on whole kg1
    #ent_set_valid_freq = get_ent_freq(ent_to_freq1, ent_set_valid)
    #ent_set_test_freq = get_ent_freq(ent_to_freq1, ent_set_test)
    def _get_freq_distribution(ent_to_freq_all):
        # Frequency histograms of the valid/test entities, looked up in the
        # given frequency table (closure over ent_set_valid / ent_set_test).
        ent_set_valid_freq = get_ent_freq(ent_to_freq_all, ent_set_valid)
        ent_set_test_freq = get_ent_freq(ent_to_freq_all, ent_set_test)
        cnt_valid = get_freq_distribution(ent_set_valid_freq)
        cnt_test = get_freq_distribution(ent_set_test_freq)
        return cnt_valid, cnt_test
    cnt_valid12, cnt_test12 = _get_freq_distribution(ent_to_freq12)
    cnt_valid1, cnt_test1 = _get_freq_distribution(ent_to_freq1)
    cnt_valid2, cnt_test2 = _get_freq_distribution(ent_to_freq2)
    #def _plot_freq_distribution(ent_to_freq_all , title, save_name):
    f, (ax1, ax2, ax3, ax4, ax5, ax6) = plt.subplots(6, 1, figsize=(64, 32))
    all_keys = set(cnt_test12.keys())|set(cnt_valid12.keys())| set(cnt_test1.keys())|set(cnt_valid1.keys())|set(cnt_test2.keys())|set(cnt_valid2.keys())
    def _merge_keys(keys, dic):
        # Pad every histogram with zero counts so all share the same x-axis.
        for k in keys:
            if k not in dic:
                dic[k]=0
        return dic
    cnt_valid12 = _merge_keys(all_keys, cnt_valid12)
    cnt_valid1 = _merge_keys(all_keys, cnt_valid1)
    cnt_valid2 = _merge_keys(all_keys, cnt_valid2)
    cnt_test12 = _merge_keys(all_keys, cnt_test12)
    cnt_test1 = _merge_keys(all_keys, cnt_test1)
    cnt_test2 = _merge_keys(all_keys, cnt_test2)
    d = {"entity_freq": list(all_keys),
         "t-count-12": list(cnt_test12.values()),
         "v-count-12": list(cnt_valid12.values()),
         "t-count-1": list(cnt_test1.values()),
         "v-count-1": list(cnt_valid1.values()),
         "t-count-2": list(cnt_test2.values()),
         "v-count-2": list(cnt_valid2.values()),
         }
    df = pd.DataFrame(data=d)
    ax1 = sns.barplot(x="entity_freq", y="v-count-12", data=df, ax=ax1)
    ax1.set_xlabel("Valid_Entity_Freq_on_OverlapKG" )
    ax1.set_ylabel("Entity-Num")
    ax1.xaxis.set_major_locator(ticker.MultipleLocator(5))
    ax1.xaxis.set_major_formatter(ticker.ScalarFormatter())
    ax2 = sns.barplot(x="entity_freq", y="t-count-12", data=df, ax=ax2)
    ax2.set_xlabel("Test_Entity_Freq_on_OverlapKG" )
    ax2.set_ylabel("Entity-Num" )
    ax2.xaxis.set_major_locator(ticker.MultipleLocator(5))
    ax2.xaxis.set_major_formatter(ticker.ScalarFormatter())
    #ax2.set_title(title)
    #ax2 = sns.relplot(x="entity_freq", y="t-count", kind="line", data=df)
    ax3 = sns.barplot(x="entity_freq", y="v-count-1", data=df, ax=ax3)
    ax3.set_xlabel("Valid_Entity_Freq_on_ConceptNet" )
    ax3.set_ylabel("Entity-Num-on-ConceptNet")
    ax3.xaxis.set_major_locator(ticker.MultipleLocator(5))
    ax3.xaxis.set_major_formatter(ticker.ScalarFormatter())
    ax4 = sns.barplot(x="entity_freq", y="t-count-1", data=df, ax=ax4)
    ax4.set_xlabel("Test_Entity_Freq_on_ConceptNet" )
    ax4.set_ylabel("Entity-Num" )
    ax4.xaxis.set_major_locator(ticker.MultipleLocator(5))
    ax4.xaxis.set_major_formatter(ticker.ScalarFormatter())
    ax5 = sns.barplot(x="entity_freq", y="v-count-2", data=df, ax=ax5)
    ax5.set_xlabel("Valid_Entity_Freq_on_SWOW" )
    ax5.set_ylabel("Entity-Num")
    ax5.xaxis.set_major_locator(ticker.MultipleLocator(5))
    ax5.xaxis.set_major_formatter(ticker.ScalarFormatter())
    ax6 = sns.barplot(x="entity_freq", y="t-count-2", data=df, ax=ax6)
    ax6.set_xlabel("Test_Entity_Freq_on_SWOW" )
    ax6.set_ylabel("Entity-Num" )
    ax6.xaxis.set_major_locator(ticker.MultipleLocator(5))
    ax6.xaxis.set_major_formatter(ticker.ScalarFormatter())
    plt.tight_layout()
    save_name = 'log/{}_valid_test_ent_freq_distributions.png'.format(dataset)
    plt.savefig(save_name, format='png')
    print("save {}".format(save_name))
def calcualte_data_degree(dataset):
    """Plot valid/test entity-degree histograms (currently broken scratch).

    NOTE(review): every line that would define ``cnt_valid`` / ``cnt_test``
    is commented out below, so calling this function raises NameError.
    It appears to be leftover scratch code kept for reference.
    """
    #ent_set_valid_freq = get_ent_freq(ent_to_freq2, ent_set_valid)
    #ent_set_test_freq = get_ent_freq(ent_to_freq2, ent_set_test)
    #ent_degree_1 = count_ent_degree(rel_triples_1, is_sorted=True)
    #ent_degree_2 = count_ent_degree(rel_triples_2, is_sorted=True)
    #ent_degree_valid = count_ent_degree(rel_triples_valid, is_sorted=True)
    #ent_degree_test = count_ent_degree(rel_triples_test, is_sorted=True)
    #cnt_valid = get_freq_distribution(ent_degree_valid)
    #cnt_test = get_freq_distribution(ent_degree_test)
    #average_count = sum(np.array(list(cnt.keys()))*np.array(list(cnt.values())))/ sum(np.array(list(cnt.values())))
    #rint("average_degree: {}. cnt: {}".format( average_degree, cnt))
    f, (ax1, ax2) = plt.subplots(2, 1, figsize=(7, 5), sharex=True)
    keys = set(cnt_test.keys())|set(cnt_valid.keys())
    for k in keys:
        if k not in cnt_test:
            cnt_test[k]=0
        if k not in cnt_valid:
            cnt_valid[k]=0
    d = {"test_entity_degree":list(cnt_test.keys()), "t-count": list(cnt_test.values()),
         "valid_entity_degree":list(cnt_valid.keys()), "v-count": list(cnt_valid.values())}
    df = pd.DataFrame(data=d)
    ax2 = sns.barplot(x="test_entity_degree", y="t-count", data=df, ax=ax2)
    ax2.set_ylabel("Entity-Num")
    df = pd.DataFrame(data=d)
    ax1 = sns.barplot(x="valid_entity_degree", y="v-count", data=df, ax=ax1)
    ax1.set_ylabel("Entity-Num")
    plt.savefig('log/{}_valid_test_ent_degree_distribution.png'.format(dataset), format='png')
    #ent_links_valid = read_links(data_folder+'ent_links_valid')
    #pair_degree_gold = count_pair_degree(ent_degree_1, ent_degree_2, ent_links_valid)
    #print("pari_degree_valid:{}".format(pair_degree_gold))
    #ent_links = read_links(data_folder+'/'+'ent_links_test')
    #pair_degree_gold = count_pair_degree(ent_degree_1, ent_degree_2, ent_links)
    #print("pari_degree_test:{}".format(pair_degree_gold))
def run(dataset, data_split, method, degree_interval=None):
    """Load triples and links for one method's result folder.

    NOTE(review): the per-degree-interval evaluation that gave this module
    its name is commented out below; as written, the function only loads
    the data and computes pair degrees, discarding the second assignment's
    predecessor.
    """
    if degree_interval is None:
        degree_interval = [1, 6, 11, 16, 21, 1000000]
    data_folder = '../../../datasets/'+dataset+'/'
    result_folder = '../../../output/results/'+method+'/'+dataset+'/'+data_split+'/'
    # Descend into the single timestamped sub-folder of the results dir.
    result_folder += list(os.walk(result_folder))[0][1][0] + '/'
    assert os.path.exists(result_folder)
    assert os.path.exists(data_folder)
    rel_triples_1, _, _ = read_relation_triples(data_folder + 'rel_triples_1')
    rel_triples_2, _, _ = read_relation_triples(data_folder + 'rel_triples_2')
    rel_triples_valid, _, _ = read_relation_triples(data_folder + 'rel_triples_valid')
    rel_triples_test, _, _ = read_relation_triples(data_folder + 'rel_triples_test')
    ent_degree_1 = count_ent_degree(rel_triples_1)
    ent_degree_2 = count_ent_degree(rel_triples_2)
    ent_degree_valid = count_ent_degree(rel_triples_valid)
    ent_degree_test = count_ent_degree(rel_triples_test)
    ent_links = read_links(data_folder+'/'+data_split+'/'+'ent_links_test')
    pair_degree_gold = count_pair_degree(ent_degree_1, ent_degree_2, ent_links)
    ent_links_valid = read_links(data_folder+'/'+data_split+'/'+'ent_links_valid')
    pair_degree_gold = count_pair_degree(ent_degree_1, ent_degree_2, ent_links_valid)
    #id_ent_dict_1, id_ent_dict_2 = id2ent_by_ent_links_index(ent_links)
    #aligned_ent_id_pair_set = read_alignment_results(result_folder+'alignment_results_12')
    #aligned_ent_pair_set = set([(id_ent_dict_1[e1], id_ent_dict_2[e2]) for (e1, e2) in aligned_ent_id_pair_set])
    #pair_degree_exp = count_pair_degree(ent_degree_1, ent_degree_2, aligned_ent_pair_set)
    #pairs_gold = filter_pairs_by_degree_interval(pair_degree_gold, [1, 1000000])
    #pairs_exp = filter_pairs_by_degree_interval(pair_degree_exp, [1, 1000000])
    #p, r, f1 = gold_standard_compare(pairs_gold, pairs_exp)
    #print('[%d, %d): [P, R, F1] = [%.4f, %.4f, %.4f]' % (1, 1000000, p, r, f1))
    #f1s = []
    #ps = []
    #rs = []
    #for i in range(len(degree_interval)-1):
    #    pairs_gold = filter_pairs_by_degree_interval(pair_degree_gold, [degree_interval[i], degree_interval[i+1]])
    #    pairs_exp = filter_pairs_by_degree_interval(pair_degree_exp, [degree_interval[i], degree_interval[i+1]])
    #    p, r, f1 = gold_standard_compare(pairs_gold, pairs_exp)
    #    print('[%d, %d): [P, R, F1] = [%.4f, %.4f, %.4f]' % (degree_interval[i], degree_interval[i+1], p, r, f1))
    #    f1s.append(f1)
    #    ps.append(p)
    #    rs.append(r)
    #return ps, rs, f1s
if __name__ == '__main__':
    # Entry point: generate the frequency-distribution plots for each
    # dataset; the per-method degree evaluation below is commented out.
    #dataset = 'DBP_en_DBP_fr_15K_V1'
    datasets = ['C_S_V0']
    for dataset in datasets:
        #calcualte_data_degree(dataset)
        calcualte_data_freq(dataset)
    #data_split = '721_5fold/1'
    #p_r_f1 = 'r'
    #methods = ['MTransE', 'IPTransE', 'JAPE', 'KDCoE', 'BootEA', 'GCN_Align', 'AttrE', 'IMUSE', 'SEA', 'RSN4EA',
    #           'MultiKE', 'RDGCN']
    #res = [[0 for i in range(len(methods))] for j in range(4)]
    #cnt = 0
    #for method in methods:
    #    ps, rs, f1s = run(dataset, data_split, method, degree_interval=[1, 6, 11, 16, 1000000])
    #    results = ps
    #    if p_r_f1 == 'r':
    #        results = rs
    #    elif p_r_f1 == 'f1':
    #        results = f1s
    #    res[0][cnt] = results[0]
    #    res[1][cnt] = results[1]
    #    res[2][cnt] = results[2]
    #    res[3][cnt] = results[3]
    #    cnt += 1
    #for i in range(4):
    #    output = ''
    #    for j in range(len(methods)):
    #        output += str(res[i][j])
    #        if j != len(methods) - 1:
    #            output += '\t'
    #    print(output)
30839852232 | import albumentations as albu
import segmentation_models_pytorch as smp
logs_path = '/wdata/segmentation_logs/'
folds_file = '/wdata/folds.csv'
load_from = '/wdata/segmentation_logs/fold_1_siamse-senet154/checkpoints/best.pth'
multiplier = 5
main_metric = 'dice'
minimize_metric = False
device = 'cuda'
val_fold = 1
folds_to_use = (2, 3, 4, 5, 6, 7, 8)
n_classes = 3
input_channels = 3
crop_size = (320, 320)
val_size = (1024, 1024)
batch_size = 4
num_workers = 4
val_batch_size = 1
shuffle = True
lr = 1e-4
momentum = 0.0
decay = 0.0
loss = 'focal_dice'
optimizer = 'adam_gcc'
fp16 = False
alias = 'fold_'
model_name = 'siamse-senet154'
scheduler = 'steps'
steps = [15, 25]
step_gamma = 0.25
augs_p = 0.5
best_models_count = 5
epochs = 30
weights = 'imagenet'
limit_files = None
preprocessing_fn = smp.encoders.get_preprocessing_fn('senet154', weights)
train_augs = albu.Compose([albu.OneOf([albu.RandomCrop(crop_size[0], crop_size[1], p=1.0)
], p=1.0),
albu.Flip(p=augs_p),
albu.RandomBrightnessContrast(brightness_limit=0.1, contrast_limit=0.1, p=augs_p),
], p=augs_p)
valid_augs = albu.Compose([albu.PadIfNeeded(min_height=val_size[0], min_width=val_size[1], p=1.0)]) | chenliang1111CL/change-detection | SpaceNet7_Multi-Temporal_Solutions-master/5-MaksimovKA/code/config.py | config.py | py | 1,299 | python | en | code | 0 | github-code | 13 |
24770375449 | """
Calculate the M-O RDF from a trajectory given as an .xyz file. The metal atom must be the first in the file.
The first argument is the filename; the second is the box size in angstroms.
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import argparse
import crdfgen
from scipy.integrate import trapz
# Global matplotlib styling for the RDF plots (dpi, font sizes, inward ticks).
mpl.rcParams['figure.dpi'] = 200
mpl.rcParams['axes.titlesize'] = 24
mpl.rcParams['axes.labelsize'] = 13
mpl.rcParams['lines.linewidth'] = 1
mpl.rcParams['lines.markersize']= 5
mpl.rcParams['xtick.labelsize'] = 12
mpl.rcParams['ytick.labelsize'] = 12
mpl.rcParams['xtick.direction'] = 'in'
mpl.rcParams['ytick.direction'] = 'in'
mpl.rcParams['xtick.top'] = True
mpl.rcParams['ytick.right'] = True
mpl.rcParams['axes.linewidth'] = 1
def get_args():
    """Parse the command-line options for the RDF calculation."""
    cli = argparse.ArgumentParser()
    # Positional arguments: trajectory file plus the two element symbols.
    cli.add_argument("filename", action='store', type=str, help='Trajectory filename (.xyz,)')
    cli.add_argument('elem_1', action='store', type=str, help='Atomic symbol of element 1')
    cli.add_argument('elem_2', action='store', type=str, help='Atomic symbol of element 2')
    # Optional flags controlling plotting, geometry and binning.
    cli.add_argument('-p', '--plot', action='store_true', default=False, help='Generate a RDF plot')
    cli.add_argument('-l', '--boxlength', action='store', type=float, help='Length of the box in Å')
    cli.add_argument('-f', '--firstframe', action='store', type=int, default=0,
                     help='First frame of the trajectory to calculate the RDF from')
    cli.add_argument('-w', '--binwidth', action='store', type=float, help='Size of the bins to plot the RDF in')
    return cli.parse_args()
def get_n_atoms(traj_file_lines):
    """Return the atom count from the first line of an xyz trajectory.

    Args:
        traj_file_lines: trajectory file content as a list of lines; in the
            standard xyz format line 0 starts with the number of atoms.

    Raises:
        SystemExit: via exit() if the first line does not start with an int.
    """
    try:
        return int(traj_file_lines[0].split()[0])
    except (ValueError, IndexError):
        # IndexError covers an empty file or a blank first line, which the
        # original code let escape as an unhandled exception.
        exit('It looks like the trajectory is malformatted')
def get_elem1_elem2_ids(xyzs, elem1_name, elem2_name):
    """
    From a set of xyzs get the indexes of element 1 and element 2 as two lists.
    Symbol comparison is case-insensitive.

    :param xyzs: (list(list))
    :param elem1_name: (str)
    :param elem2_name: (str)
    :return: (list, list)
    """
    target1 = elem1_name.lower()
    target2 = elem2_name.lower()
    elem1_ids = [i for i, atom in enumerate(xyzs) if atom[0].lower() == target1]
    elem2_ids = [i for i, atom in enumerate(xyzs) if atom[0].lower() == target2]
    return elem1_ids, elem2_ids
def get_rdf_arrays(xyz_traj_filename, elem1, elem2, box_length, bin_size, first_frame):
    """
    From an MD(/MC) trajectory filename compute the radial distribution function (RDF) from elem1–elem2 e.g. Pd–O.
    Note the whole file will be read into memory, which may be slow/impossible if the trajectory is large

    :param xyz_traj_filename: (str)
    :param elem1: (str)
    :param elem2: (str)
    :param box_length: (float) Å
    :param bin_size: (float) Å
    :param first_frame: (int) first frame of the trajectory to read
    :return:
    """
    try:
        box_length = float(box_length)
        bin_size = float(bin_size)
    except ValueError:
        exit('Box length and bin size MUST be numbers')
    if not os.path.exists(xyz_traj_filename):
        exit(f'Could not open {xyz_traj_filename}. Please make sure it exists')
    traj_file_lines = open(xyz_traj_filename, 'r').readlines()
    n_atoms = get_n_atoms(traj_file_lines)
    # Iterate from the first frame
    n = first_frame
    # Set up the lists of the bin edges and the total frequency of atoms found
    # (histogram range is [0, L/2], the largest unambiguous distance under PBC).
    n_bins, bin_edges = int(box_length / (2 * bin_size)), None
    cummulative_hist = np.zeros(n_bins)
    # Each xyz frame occupies (n_atoms + 2) lines: count line, comment line,
    # then one line per atom.
    while n*(n_atoms + 2) < len(traj_file_lines):
        xyzs = []
        for line in traj_file_lines[2 + n * (n_atoms + 2):n_atoms + 2 + n * (n_atoms + 2)]:
            atom_label, x, y, z = line.split()[:4]
            xyzs.append([atom_label, float(x), float(y), float(z)])
        # If the first trajectory point, get the element ids of elem1 and elem2 which should not change
        if n == first_frame:
            elem1_ids, elem2_ids = get_elem1_elem2_ids(xyzs, elem1_name=elem1, elem2_name=elem2)
        # Use Cython extension to construct the supercell and calculate the distances between elem1 – elem2
        dists = crdfgen.get_distances(xyzs, elem1_ids, elem2_ids, box_length)
        # Histogram the distances
        hist, bin_edges = np.histogram(dists, bins=n_bins, range=(0.0, box_length / 2.0))
        cummulative_hist += np.array(hist)
        n += 1
    # NOTE(review): if no frames were read (n == first_frame) this divides
    # by zero - confirm callers always pass a valid first_frame.
    average_hist = cummulative_hist / (n - first_frame)
    # Frequencies and bin edges -> r and densities for plotting p(r) vs r
    r_vals = [(bin_edges[i] + bin_edges[i + 1]) / 2.0 for i in range(len(bin_edges) - 1)]
    # Divide the frequency by the volume between r and r + dr to get the density
    rho_vals = [average_hist[i] / (4.0 / 3.0 * np.pi * (bin_edges[i+1] ** 3 - bin_edges[i] ** 3)) for i in
                range(len(average_hist))]
    # Normalise by the total density to get the pair correlation function
    total_density = sum(average_hist) / (4.0 / 3.0 * np.pi * bin_edges[-1]**3)
    g_vals = np.array(rho_vals) / total_density
    return r_vals, g_vals, total_density
def get_int_r(gs, rho, rs):
    """Cumulative integral of g(r): n(r) = rho * 4*pi * int g(r') r'^2 dr'.

    Mirrors the original trapezoidal scheme, where entry i integrates over
    the first i sample points only (so entries 0 and 1 are both zero).

    Args:
        gs: Pair-correlation values g(r) at each sample point.
        rho: Total number density used to normalise g(r).
        rs: Radial sample points (assumed increasing).

    Returns:
        List of running integrals, one per entry of rs.
    """
    integrand = [r * r * g for r, g in zip(rs, gs)]
    integrals = []
    running = 0.0
    for i in range(len(rs)):
        if i >= 2:
            # Accumulate the trapezoid between points i-2 and i-1; this turns
            # the original full re-integration per point (O(n^2), via the
            # since-removed scipy.integrate.trapz) into one O(n) pass.
            running += 0.5 * (integrand[i - 2] + integrand[i - 1]) * (rs[i - 1] - rs[i - 2])
        integrals.append(rho * 4.0 * np.pi * running)
    return integrals
def main():
    """Compute the RDF for the requested pair and print (optionally plot) it."""
    args = get_args()
    rs, gs, rho = get_rdf_arrays(xyz_traj_filename=args.filename, elem1=args.elem_1, elem2=args.elem_2,
                                 box_length=args.boxlength, bin_size=args.binwidth, first_frame=args.firstframe)
    if args.plot:
        # g(r) on the left axis, its running integral on a twinned right axis.
        fig, ax = plt.subplots()
        ax.plot(rs, gs, lw=1.5)
        ax.set_ylabel('$g(r)$')
        ax2 = ax.twinx()
        ax2.plot(rs, get_int_r(gs, rho, rs), ls='--', c='k')
        ax2.set_ylabel('int($g(r)$)')
        ax2.set_ylim(-0.05, 10.0)
        ax.set_xlabel(f'$r$({args.elem_1}–{args.elem_2}) / Å')
        ax.set_xlim(0.0, args.boxlength/2.0)
        ax.set_ylim(-0.01, 2.0)
        plt.savefig('rdf.png', dpi=300)
    # Always print the numeric r and g(r) arrays (rounded for readability).
    print('r:   ', [np.round(r, 4) for r in rs], sep='\n')
    print()
    print('g(r): ', [np.round(d, 4) for d in gs], sep='\n')
    return None
if __name__ == '__main__':
    main()
| t-young31/MDutils | MDutils/rdfgen.py | rdfgen.py | py | 6,512 | python | en | code | 1 | github-code | 13 |
26863184795 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('countries', '0004_auto_20150903_0156'),
]
operations = [
migrations.AlterField(
model_name='chart',
name='segments',
field=models.PositiveIntegerField(null=True, choices=[(3, '3'), (4, '4'), (5, '5'), (6, '6'), (7, '7'), (8, '8'), (9, '9'), (10, '10'), (11, '11'), (12, '12')], blank=True),
),
migrations.AlterField(
model_name='country',
name='code',
field=models.CharField(max_length=3, verbose_name='Country code 3-char', unique=True),
),
]
| sentinel-project/sentinel-app | sentinel/countries/migrations/0005_auto_20150905_1915.py | 0005_auto_20150905_1915.py | py | 740 | python | en | code | 0 | github-code | 13 |
35441253060 | # minzhou@bu.edu
def sort_in_place(arr):
n = len(arr)
for i in range(n):
for j in range(0, n - i - 1):
if arr[j] > arr[j + 1]:
arr[j], arr[j + 1] = arr[j + 1], arr[j]
def move_left(arr):
n = len(arr)
i = -1
for j in range(n):
if (arr[j] > 0):
i += 1
# swapping of arr
arr[i], arr[j] = arr[j], arr[i]
def main():
arr = [2, 6, -6, -3, 1, 19]
print('Original array is:', arr)
sort_in_place(arr)
move_left(arr)
print('Sorted array is:', arr)
main() | minzhou1003/intro-to-programming-using-python | practice7/additional_problem5.py | additional_problem5.py | py | 586 | python | en | code | 0 | github-code | 13 |
11586760774 | from mas.psf_generator import PhotonSieve, PSFs
from mas.forward_model import add_noise, get_measurements
from mas.data import strands
from mas.measure import compare_ssim
from bayes_opt import BayesianOptimization
from mas.deconvolution import ista
import numpy as np
from matplotlib import pyplot as plt
# %% problem -----
truth = strands[0:1]
ps = PhotonSieve()
wavelengths = np.array([33.4e-9, 40e-9])
psfs = PSFs(
ps,
source_wavelengths=wavelengths,
measurement_wavelengths=wavelengths,
num_copies=10
)
measured = get_measurements(psfs=psfs, sources=truth, real=True)
measured = add_noise(measured, model='poisson', max_count=10)
def cost(lam_exp, time_step_exp):
recon = ista(
psfs=psfs,
measurements=measured,
lam=10**lam_exp,
time_step=10**time_step_exp,
iterations=100
)[0]
cost = compare_ssim(
truth[0],
recon
)
plt.subplot(1, 3, 3)
plt.title('Reconstruction - SSIM {:.3f}'.format(cost))
plt.imshow(recon)
plt.axis('off')
plt.xlabel('lam_exp={:.3f}\ntime_step_exp={:.3f}')
plt.show()
plt.pause(.05)
return cost if cost > 0 else 0
# %% optimization -----
# Bounded region of parameter space
pbounds = {'lam_exp': (-6, -3), 'time_step_exp':(-5, -2)}
optimizer = BayesianOptimization(
cost,
pbounds=pbounds,
random_state=1,
)
# %% optimize -----
plt.figure(figsize=(8,3))
plt.subplot(1, 3, 1)
plt.title('Truth')
plt.imshow(truth[0])
plt.axis('off')
plt.subplot(1, 3, 2)
plt.title('Measured')
plt.imshow(measured[0])
plt.axis('off')
plt.pause(.05)
try:
np.seterr(all='ignore')
optimizer.maximize(
acq='ei',
# kappa=0.1,
init_points=10,
n_iter=25,
)
# plot scatterplot of previous trials on keyboard interrupt
except KeyboardInterrupt:
pass
plt.figtext(0.02, 0.02, str(optimizer.max), fontsize='xx-small')
plt.savefig('ista_best.png', dpi=300)
plt.figure()
searched = np.array([
[
x['params']['lam_exp'],
x['params']['time_step_exp'],
x['target']
] for x in optimizer.res
])
plt.tricontourf(searched[:, 0], searched[:, 1], searched[:, 2])
plt.plot(searched[:, 0], searched[:, 1], 'ko', ms=3)
plt.xlabel('lam_exp')
plt.ylabel('time_step_exp')
plt.tight_layout()
plt.savefig('ista_search.png', dpi=300)
| UIUC-SINE/old_website | content/reports/csbs_4modes/bayesian_optimization/bayesian_ista.py | bayesian_ista.py | py | 2,349 | python | en | code | 1 | github-code | 13 |
9494269054 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
from enum import Enum, unique
"""
-------------------------------------------------------
|value | observerTypeEnum |
-------------------------------------------------------
| "*" | observerTypeEnum.state |
-------------------------------------------------------
| "whatever" | observerTypeEnum.element |
-------------------------------------------------------
| ["str","str", ...] | observerTypeEnum.listOfElements|
-------------------------------------------------------
"""
@unique
class observerTypeEnum(Enum):
unknown = 0
state = 1
element = 2
listOfElements = 3
@classmethod
def typeOf(cls, what):
"""
Return the type of the observer
:param str|Array what: a string to evaluate
:return: the type of the string
:rtype: observerTypeEnum
"""
result = observerTypeEnum.unknown
if isinstance(what, str):
result = (observerTypeEnum.state
if (what == "*")
else observerTypeEnum.element)
elif hasattr(what, "__len__"):
result = observerTypeEnum.listOfElements
return result
| fredericklussier/ObservablePy | observablePy/ObserverTypeEnum.py | ObserverTypeEnum.py | py | 1,298 | python | en | code | 0 | github-code | 13 |
70303036497 | import random
import math
# removed the menu stuff because I want to just work these exercises and get to my own projects for now.
# this is more of my own learning practice and I'll be adding in some more complex stuff like unit testing
# print statements (yes, I did look up redirecting sys.stdout and using doctest), but then I'll be working that
# when I get in to building out network stacks from RFCs since I feel comfortable in that domain.
answers = ["It is decidely so.", "Yes.", "The answer is certain.",
"Maybe", "It is uncertain, try again later.", "The future is cloudy",
"No", "Definitely not."]
def magic(question):
if len(question) <= 0:
raise ValueError("Invalid question, try again.")
if not question[-1] == '?':
raise ValueError("That is not a question")
return answers[math.floor((random.random() * len(answers)))]
if __name__=="__main__":
print(magic("Hello?")) | marinme/learning | Magic 8 Ball/magic.py | magic.py | py | 940 | python | en | code | 0 | github-code | 13 |
9036791408 | from odoo import models, fields, api
class Vehicle(models.Model):
_name = "vehicle"
_inherit = ['mail.thread','mail.activity.mixin']
_description = "Mantenimiento de vehiculos"
tipo_vehiculo = fields.Selection([
('50 pasajeros', '50 Pasajeros'),
('30 pasajeros', '30 Pasajeros'),
('15 pasajeros', '15 Pasajeros'),
('5 pasajeros', '5 Pasajeros'),
], string = 'Cantidad de pasajeros', default='other', tracking=True, required = True)
brand = fields.Char(string = "Marca", required = True)
car_model = fields.Char(string = "Modelo", required = True)
year = fields.Date(string = "Año", required = True)
plate_number = fields.Char(string = "Placa", required = True)
image = fields.Binary(string = "Imagen")
state = fields.Selection([
('disponible', 'Disponible'),
('reservado', 'Reservado'),
], string='States', required=True, readonly=True, copy=False,
tracking=True, default='disponible')
def button_disponible(self):
self.state = 'disponible'
def button_reservado(self):
self.state = 'reservado' | OnilyValera/Transportation-Odoo | transportacion/models/vehicle.py | vehicle.py | py | 1,162 | python | en | code | 1 | github-code | 13 |
12832534745 | #Realizar un programa que sea capaz de convertir los grados centígrados en grados Farenheit y viceversa.
celsius=0
farenheit=0
#Pedir grados celsius
celsius=float(input("Introduce los grados celsius:"))
#Realizar la operación
farenheit=1.8*celsius+32
#Dar el resultado
print("El resultado es",farenheit, " grados Farenheit") | Jorgediiazz/EjerciciosPython | ej2.py | ej2.py | py | 328 | python | es | code | 0 | github-code | 13 |
70196268177 | from selenium import webdriver
import time
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException, ElementClickInterceptedException
# log in w/ facebook, so we need your fb credentials here
EMAIL = "YOUR_FB_EMAIL"
PWD = "YOUR_FB_PWD"
TINDER_URL = "https://tinder.com/"
chrome_driver_path = "C:\Development\chromedriver.exe"
driver = webdriver.Chrome(executable_path=chrome_driver_path)
driver.get(TINDER_URL)
time.sleep(4)
driver.find_element_by_xpath('//*[@id="content"]/div/div[1]/div/main/div[1]/div/div/div/div/header/div/div[2]/div[2]/button').click()
time.sleep(4)
driver.find_element_by_xpath('//*[@id="modal-manager"]/div/div/div[1]/div/div[3]/span/div[2]/button').click()
time.sleep(5)
base_window = driver.window_handles[0]
fb_login_window = driver.window_handles[1]
driver.switch_to.window(fb_login_window)
print(driver.title)
email = driver.find_element_by_id("email")
pwd = driver.find_element_by_id("pass")
email.send_keys(EMAIL)
pwd.send_keys(PWD)
pwd.send_keys(Keys.ENTER)
time.sleep(5)
driver.switch_to.window(base_window)
print(driver.title)
# - Click ALLOW for location.
driver.find_element_by_xpath('//*[@id="modal-manager"]/div/div/div/div/div[3]/button[1]').click()
time.sleep(2)
# - Click NOT INTERESTED for notifications.
driver.find_element_by_xpath('//*[@id="modal-manager"]/div/div/div/div/div[3]/button[1]').click()
time.sleep(2)
# - Click I ACCEPT for cookies
driver.find_element_by_xpath('//*[@id="content"]/div/div[2]/div/div/div[1]/button').click()
time.sleep(5)
# max of 100 swipes a day on Free tier
for n in range(100):
try:
driver.find_element_by_xpath('//*[@id="content"]/div/div[1]/div/main/div[1]/div/div/div[1]/div[1]/div[2]/div[4]/button').click()
#Catches the cases where there is a "Matched" pop-up in front of the "Like" button:
except ElementClickInterceptedException:
try:
match_popup = driver.find_element_by_css_selector(".itsAMatch a")
match_popup.click()
#Catches the cases where the "Like" button has not yet loaded, so wait 2 seconds before retrying.
except NoSuchElementException:
time.sleep(2)
| erinfeaser311/automated-tinder-swipe-right-bot | main.py | main.py | py | 2,192 | python | en | code | 0 | github-code | 13 |
4682819282 | # 4
# 0 0 0 0
# 0 1 0 0
# 0 0 0 1
# 1 0 0 0
grid = []
rows = int(input())
for _ in range(rows):
grid.append(list(map(int,input().split())))
dp = [[0]*rows]*rows
n = rows-1
for i in range(n,-1,-1):
for j in range(n,-1,-1):
if (i==j and i==n):
dp[i][j] = 1
else:
option1 = 0 if i==n else dp[i+1][j]
option2 = 0 if j==n <rows else dp[i][j+1]
dp[i][j] = option1+option2
if grid[i][j]==1:
dp[i][j]=0
if grid[n][n]==1:
print(0)
else:
print(dp[0][0])
| SatyasaiNandigam/competitive-coding-solutions | grid_ways.py | grid_ways.py | py | 590 | python | en | code | 0 | github-code | 13 |
28376417289 | import numpy as np
class vec_spin:
def __init__(self,s):
Vectorx =[0.5* np.sqrt((a - 1) * (2 *s + 2 - a))for a in range(2, int((2 * s) + 2))]
Vectory = [0.5j * np.sqrt((a - 1) * (2 * s + 2 - a))for a in range(2, int((2 * s) + 2))]
Vectorz = [(s+1 -a) for a in range(1, int((2 * s)+2))]
self.x =np.diag(Vectorx,k=1)+np.diag(Vectorx,k=-1)
self.y = (np.diag(Vectory, k=1) - np.diag(Vectory, k=-1))*-1
self.z = np.diag(Vectorz, k=0)
def ABCD(s,i,N):
Spin = vec_spin(s)
if i==1:
Sx= np.kron(Spin.x, np.identity(int(s * 2 + 1) ** (N - 1)))
Sy = np.kron(Spin.y, np.identity(int(s * 2 + 1) ** (N - 1)))
Sz = np.kron(Spin.z, np.identity(int(s * 2 + 1) ** (N - 1)))
elif (i<N):
Sx = np.kron(np.kron(np.identity(int(s*2+1)**(i-1)), Spin.x), np.identity(int(s * 2 + 1) ** (N - 1)))
Sy = np.kron(np.kron(np.identity(int(s * 2 + 1) ** (i - 1)), Spin.y), np.identity(int(s * 2 + 1) ** (N - 1)))
Sz = np.kron(np.kron(np.identity(int(s * 2 + 1) ** (i - 1)), Spin.z), np.identity(int(s * 2 + 1) ** (N - 1)))
else:
Sx = np.kron(np.identity(int(s * 2 + 1) ** (N - 1)), Spin.x)
Sy = np.kron(np.identity(int(s * 2 + 1) ** (N - 1)), Spin.y)
Sz = np.kron(np.identity(int(s * 2 + 1) ** (N - 1)), Spin.z)
#print("\n")
#print(Sx,"\n\n")
#print(Sy, "\n\n")
#print(Sz, "\n\n")
Spins = [Sx,Sy,Sz]
return Spins
def Hamil(s,N,j,h):
spiny=[]
wynik = [np.zeros((int((2*s+1)**N), int((2*s+1)**N)), dtype = complex) for i in range(3)]
for i in range(0,N+1):
spiny.append(ABCD(s,1,N))
for i in range (len(spiny)-1):
wynik[0] += spiny[i].x @ spiny[i+1].x
wynik[1] += spiny[i].y @ spiny[i+1].y
wynik[2] += spiny[i].z @ spiny[i+1].z
wynik[0] += spiny[N - 1].x @ spiny[0].x
wynik[1] += spiny[N - 1].y @ spiny[0].y
wynik[2] += spiny[N - 1].z @ spiny[0].z
ostateczny = []
for i in range(0,3):
ostateczny = wynik[i]
zmienna = np.zeros((int((s * 2 + 1) ** N),int( (s * 2 + 1) ** N)), dtype=complex)
Suma_H_Z = []
for i in range(N):
Suma_H_Z +=spiny[i].z
return (j*ostateczny)-(h*Suma_H_Z)
def MIN_energy(H): ## minimum energii punkt 2
A = np.linalg.eig(Hamil(s,N,1,1))
N =3
s =0.5
print(ABCD(s,1,N))
#Spin = vec_spin(s)
##print(a1.x)
##print(a1.y)
##print(a1.z)
#spins= ABCD(s,1,N)
#print(spins)
#print(Hamil(s,N,1,1))
| aszpatowski/JSP2019 | pythonforscientist/zadaniestare.py | zadaniestare.py | py | 2,684 | python | en | code | 0 | github-code | 13 |
10844848112 | from ally.container.ioc import injected
from ally.design.context import Context, defines, requires, optional
from ally.design.processor import HandlerProcessorProceed
from ally.http.spec.server import IDecoderHeader, IEncoderHeader
from collections import deque, Iterable
import re
# --------------------------------------------------------------------
class Request(Context):
'''
The request context.
'''
# ---------------------------------------------------------------- Required
headers = requires(dict)
# ---------------------------------------------------------------- Optional
parameters = optional(list)
# ---------------------------------------------------------------- Defined
decoderHeader = defines(IDecoderHeader, doc='''
@rtype: IDecoderHeader
The decoder used for reading the headers.
''')
class Response(Context):
'''
The response context.
'''
# ---------------------------------------------------------------- Defined
headers = defines(dict, doc='''
@rtype: dictionary{string, string}
The raw headers for the response.
''')
encoderHeader = defines(IEncoderHeader, doc='''
@rtype: IEncoderPath
The path encoder used for encoding paths that will be rendered in the response.
''')
# --------------------------------------------------------------------
@injected
class HeaderHandler(HandlerProcessorProceed):
'''
Provides encoder/decoder for handling HTTP headers.
'''
useParameters = False
# If true then if the data is present in the parameters will override the header.
separatorMain = ','
# The separator used in splitting value and attributes from each other.
separatorAttr = ';'
# The separator used between the attributes and value.
separatorValue = '='
# The separator used between attribute name and attribute value.
def __init__(self):
assert isinstance(self.useParameters, bool), 'Invalid use parameters flag %s' % self.useParameters
assert isinstance(self.separatorMain, str), 'Invalid main separator %s' % self.separatorMain
assert isinstance(self.separatorAttr, str), 'Invalid attribute separator %s' % self.separatorAttr
assert isinstance(self.separatorValue, str), 'Invalid value separator %s' % self.separatorValue
super().__init__()
self.reSeparatorMain = re.compile(self.separatorMain)
self.reSeparatorAttr = re.compile(self.separatorAttr)
self.reSeparatorValue = re.compile(self.separatorValue)
def process(self, request:Request, response:Response, **keyargs):
'''
@see: HandlerProcessorProceed.process
Provide the headers encoders and decoders.
'''
assert isinstance(request, Request), 'Invalid request %s' % request
assert isinstance(response, Response), 'Invalid response %s' % response
if Request.decoderHeader not in request: # Only add the decoder if one is not present
request.decoderHeader = DecoderHeader(self, request.headers, request.parameters
if Request.parameters in request and self.useParameters else None)
if Response.encoderHeader not in response: # Only add the encoder if one is not present
response.encoderHeader = EncoderHeader(self)
if response.headers: response.encoderHeader.headers.update(response.headers)
response.headers = response.encoderHeader.headers
# --------------------------------------------------------------------
class DecoderHeader(IDecoderHeader):
'''
Implementation for @see: IDecoderHeader.
'''
__slots__ = ('handler', 'headers', 'parameters', 'parametersUsed')
def __init__(self, handler, headers, parameters=None):
'''
Construct the decoder.
@param handler: HeaderHandler
The header handler of the decoder.
@param headers: dictionary{string, string}
The header values.
@param parameters: list[tuple(string, string)]
The parameter values, this list will have have the used parameters removed.
'''
assert isinstance(handler, HeaderHandler), 'Invalid handler %s' % handler
assert isinstance(headers, dict), 'Invalid headers %s' % headers
assert parameters is None or isinstance(parameters, list), 'Invalid parameters %s' % parameters
self.handler = handler
self.headers = {hname.lower():hvalue for hname, hvalue in headers.items()}
self.parameters = parameters
if parameters: self.parametersUsed = {}
def retrieve(self, name):
'''
@see: IDecoderHeader.retrieve
'''
assert isinstance(name, str), 'Invalid name %s' % name
name = name.lower()
value = self.readParameters(name)
if value: return self.handler.separatorMain.join(value)
return self.headers.get(name)
def decode(self, name):
'''
@see: IDecoderHeader.decode
'''
assert isinstance(name, str), 'Invalid name %s' % name
name = name.lower()
value = self.readParameters(name)
if value:
parsed = []
for v in value: self.parse(v, parsed)
return parsed
value = self.headers.get(name)
if value: return self.parse(value)
# ----------------------------------------------------------------
def parse(self, value, parsed=None):
'''
Parses the provided value.
@param value: string
The value to parse.
@param parsed: list[tuple(string, dictionary{string, string}]
The parsed values.
@return: list[tuple(string, dictionary{string, string}]
The parsed values, if parsed is provided then it will be the same list.
'''
assert isinstance(value, str), 'Invalid value %s' % value
handler = self.handler
assert isinstance(handler, HeaderHandler)
parsed = [] if parsed is None else parsed
for values in handler.reSeparatorMain.split(value):
valAttr = handler.reSeparatorAttr.split(values)
attributes = {}
for k in range(1, len(valAttr)):
val = handler.reSeparatorValue.split(valAttr[k])
attributes[val[0].strip()] = val[1].strip().strip('"') if len(val) > 1 else None
parsed.append((valAttr[0].strip(), attributes))
return parsed
def readParameters(self, name):
'''
Read the parameters for the provided name.
@param name: string
The name (lower case) to read the parameters for.
@return: deque[string]
The list of found values, might be empty.
'''
if not self.parameters: return
assert isinstance(name, str), 'Invalid name %s' % name
assert name == name.lower(), 'Invalid name %s, needs to be lower case only' % name
value = self.parametersUsed.get(name)
if value is None:
value, k = deque(), 0
while k < len(self.parameters):
if self.parameters[k][0].lower() == name:
value.append(self.parameters[k][1])
del self.parameters[k]
k -= 1
k += 1
self.parametersUsed[name] = value
return value
class EncoderHeader(IEncoderHeader):
'''
Implementation for @see: IEncoderHeader.
'''
__slots__ = ('handler', 'headers')
def __init__(self, handler):
'''
Construct the encoder.
@param handler: HeaderHandler
The header handler of the encoder.
'''
assert isinstance(handler, HeaderHandler), 'Invalid handler %s' % handler
self.handler = handler
self.headers = {}
def encode(self, name, *value):
'''
@see: IEncoderHeader.encode
'''
assert isinstance(name, str), 'Invalid name %s' % name
handler = self.handler
assert isinstance(handler, HeaderHandler)
values = []
for val in value:
assert isinstance(val, Iterable), 'Invalid value %s' % val
if isinstance(val, str): values.append(val)
else:
value, attributes = val
attributes = handler.separatorValue.join(attributes)
values.append(handler.separatorAttr.join((value, attributes)) if attributes else value)
self.headers[name] = handler.separatorMain.join(values)
| galiminus/my_liveblog | components/ally-http/ally/http/impl/processor/header.py | header.py | py | 8,635 | python | en | code | 0 | github-code | 13 |
3462581591 | import datetime
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import smtplib
host = "smtp.gmail.com"
port = 587
username = "abcdef18032015@gmail.com"
password = "Shubham96"
from_ = username
to_list = "abcdef18032015@gmail.com"
class MessageUser():
user_details = []
messages = []
email_messages = []
base_message = """Hi {name}!
Thank you for the purchase on {date}.
We hope you are exicted about using it. Just as a
reminder the purcase total was ${total}.
Have a great one!
Team CFE
"""
def add_user(self, name, amount, email=None):
name = name[0].upper() + name[1:].lower()
amount = "%.2f" %(amount)
today = datetime.date.today()
dates = "{today.day}/{today.month}/{today.year}".format(today = today)
detail = {
"name": name,
"amount": amount,
"date":dates
}
if email is not None: # if email != None
detail["email"] = email
self.user_details.append(detail)
def get_details(self):
return self.user_details
def make_messages(self):
if len(self.user_details) > 0:
for detail in self.get_details():
name = detail["name"]
amount = detail["amount"]
date = detail["date"]
mess = self.base_message
new_message = mess.format(
name=name,
date=date,
total=amount
)
user_email = detail.get("email")
if user_email:
user_data = {
"email":user_email,
"message" : new_message
}
self.email_messages.append(user_data)
else:
self.messages.append(new_message)
return self.messages
return []
def send_email(self):
self.make_messages()
if len(self.email_messages) > 0:
for detail in self.email_messages:
user_email = detail["email"]
user_message = detail["message"]
try:
email = smtplib.SMTP(host,port)
email.ehlo()
email.starttls()
email.login(username,password)
message = MIMEMultipart("alternative")
message["Subject"] = "Billing Update"
message["From"] = from_
message["To"] = user_email
m1 = MIMEText(user_message,"plain")
message.attach(m1)
email.sendmail(from_, [user_email], message.as_string())
email.quit()
except smtplib.SMTPException:
print("Unable to connect")
return True
return False
obj = MessageUser()
obj.add_user("Justin", 123.32, email='abcdef18032015@gmail.com')
obj.add_user("jOhn", 94.23, email='abcdef18032015@gmail.com')
obj.add_user("Sean", 93.23, email='abcdef18032015@gmail.com')
obj.add_user("Emilee", 193.23, email='abcdef18032015@gmail.com')
obj.add_user("Marie", 13.23, email='abcdef18032015@gmail.com')
obj.get_details()
obj.send_email()
| shubhammuramkar/pythonwork | data/message.py | message.py | py | 3,062 | python | en | code | 0 | github-code | 13 |
8628272064 | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'dataset', views.dataset, name='dataset'),
url(r'paper', views.paper, name='paper'),
url(r'about', views.about, name='about'),
url(r'keywords', views.keywords, name='keywords'),
url(r'annotation/(?P<questionid>\d+)/$', views.annotation, name='annotion'),
url(r'label', views.label, name='label'),
url(r'signin', views.signin, name='signin'),
url(r'login', views.login, name='login'),
url(r'logout', views.logout, name='logout'),
]
| wujindou/SMA | sma/urls.py | urls.py | py | 569 | python | en | code | 0 | github-code | 13 |
20848920543 | from django.contrib.auth.decorators import (
login_required,
permission_required,
)
from django.contrib.auth.mixins import (
LoginRequiredMixin,
PermissionRequiredMixin,
)
from django.db.models import Q
from django.shortcuts import (
get_object_or_404,
redirect,
render,
)
from django.utils import timezone
from django.views.generic import (
CreateView,
ListView,
)
from .forms import PostForm
from .models import Post
class PostList(ListView):
context_object_name = 'posts'
template_name = 'blog/post_list.html'
paginate_by = 2
def get_queryset(self):
posts = Post.objects.filter(published_date__lte=timezone.now())
keyword = self.request.GET.get('keyword')
if keyword:
posts = posts.filter(
Q(title__icontains=keyword) | Q(text__icontains=keyword)
)
return posts
def post_detail(request, pk):
post = get_object_or_404(Post, pk=pk)
return render(request, 'blog/post_detail.html', {'post': post})
class PostNew(LoginRequiredMixin, PermissionRequiredMixin, CreateView):
permission_required = 'blog.add_post'
form_class = PostForm
template_name = 'blog/post_edit.html'
def form_valid(self, form):
form.instance.author = self.request.user
return super().form_valid(form)
@login_required
@permission_required('blog.change_post', raise_exception=True)
def post_edit(request, pk):
post = get_object_or_404(Post, pk=pk)
if request.method == 'POST':
form = PostForm(request.POST, instance=post)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.save()
return redirect('blog:post_detail', pk=post.pk)
else:
form = PostForm(instance=post)
return render(request, 'blog/post_edit.html', {'form': form})
@login_required
@permission_required('blog.view_draft_posts', raise_exception=True)
def post_draft_list(request):
posts = Post.objects.filter(published_date__isnull=True)
return render(request, 'blog/post_draft_list.html', {'posts': posts})
@login_required
@permission_required('blog.publish_post', raise_exception=True)
def post_publish(request, pk):
post = get_object_or_404(Post, pk=pk)
post.publish()
return redirect('blog:post_detail', pk=pk)
@login_required
@permission_required('blog.delete_post', raise_exception=True)
def post_remove(request, pk):
post = get_object_or_404(Post, pk=pk)
post.delete()
return redirect('blog:post_list')
| ftnext/nextstep_djangogirls_tutorial | apps/blog/views.py | views.py | py | 2,569 | python | en | code | 0 | github-code | 13 |
20529071390 | import matplotlib.pyplot as plt
# prepare data values
sclices = [7,2,2,13]
activities = ['sleeping','eating','working','playing']
# draw a pic
plt.pie(sclices,labels=activities,autopct='%1.1f%%')
plt.title('Pie Graph')
plt.show() | lesenelir/LshAIWorkPrograms | MatplotlibwwL/plt05-pyplot-pie.py | plt05-pyplot-pie.py | py | 233 | python | en | code | 0 | github-code | 13 |
16471972933 | from db import RedisClient
from crawler import Crawler
from setting import *
import sys
class Fetcher:
def __init__(self):
self.redis = RedisClient()
self.crawler = Crawler()
def is_over_threshold(self):
"""
判断是否达到了代理池数量上限
"""
if self.redis.count() >= POOL_UPPER_THRESHOLD:
return True
else:
return False
def run(self):
print('获取器开始执行')
if not self.is_over_threshold():
for func in self.crawler.get_funclist():
# 从各个代理IP网站开始获取IP代理地址
proxies = self.crawler.get_proxies(func)
sys.stdout.flush()
for proxy in proxies:
# 将获取的proxy加入到redis队列
self.redis.add(proxy)
| qingchunjun/proxy_pool | fetcher.py | fetcher.py | py | 885 | python | en | code | 5 | github-code | 13 |
13897936248 | from django.contrib.auth import get_user_model
from django.db import transaction
from rest_framework import serializers
from scholarships.models import RequiredDocument, Scholarship
User = get_user_model()
class RequiredDocumentSerializer(serializers.ModelSerializer):
class Meta:
model = RequiredDocument
fields = [
'id',
'name',
'description',
'attachment',
]
class ScholarshipSerializer(serializers.ModelSerializer):
admin = serializers.SlugRelatedField(slug_field='email', read_only=True)
evaluators = serializers.SlugRelatedField(queryset=User.objects.filter(is_evaluator=True),
slug_field='email', many=True)
required_documents = RequiredDocumentSerializer(many=True)
class Meta:
model = Scholarship
fields = [
'id',
'name',
'description',
'admin',
'evaluators',
'is_active',
'created',
'required_documents',
]
@transaction.atomic
def create(self, validated_data):
required_documents_data = validated_data.pop('required_documents')
instance = super().create(validated_data)
RequiredDocument.objects.bulk_create(
RequiredDocument(
scholarship=instance,
**data,
)
for data in required_documents_data
)
return instance
def update(self, instance, validated_data):
if 'is_active' in validated_data:
instance.is_active = validated_data['is_active']
instance.save()
if 'evaluators' in validated_data:
instance.evaluators.add(*validated_data['evaluators'])
return instance
class ScholarshipSimpleSerializer(serializers.ModelSerializer):
class Meta:
model = Scholarship
fields = [
'id',
'name',
'description',
]
class ScholarshipOverviewSerializer(serializers.ModelSerializer):
rooms_in_progress = serializers.IntegerField()
rooms_won = serializers.IntegerField()
rooms_lost = serializers.IntegerField()
class Meta:
model = Scholarship
fields = [
'id',
'name',
'rooms_in_progress',
'rooms_won',
'rooms_lost',
]
| javierdiazp/scholarships | scholarships/serializers.py | serializers.py | py | 2,434 | python | en | code | 0 | github-code | 13 |
11443346307 | import torch.utils.data as data
import numpy as np
import pickle
import os
__author__ = "Rana Hanocka"
__license__ = "MIT"
__maintainer__ = "Francis Rhys Ward"
"""
Modifications made to: collate_fn
Functionality: padding of meshes to same size in same batch
"""
class BaseDataset(data.Dataset):
def __init__(self, opt):
self.opt = opt
self.mean = 0
self.std = 1
self.ninput_channels = None
super(BaseDataset, self).__init__()
def get_mean_std(self):
""" Computes Mean and Standard Deviation from Training Data
If mean/std file doesn't exist, will compute one
:returns
mean: N-dimensional mean
std: N-dimensional standard deviation
ninput_channels: N
(here N=5)
"""
mean_std_cache = os.path.join(self.root, 'mean_std_cache.p')
if not os.path.isfile(mean_std_cache):
print('computing mean std from train data...')
# doesn't run augmentation during m/std computation
num_aug = self.opt.num_aug
self.opt.num_aug = 1
mean, std = np.array(0), np.array(0)
for i, data in enumerate(self):
if i % 500 == 0:
print('{} of {}'.format(i, self.size))
features = data['edge_features']
mean = mean + features.mean(axis=1)
std = std + features.std(axis=1)
mean = mean / (i + 1)
std = std / (i + 1)
transform_dict = {'mean': mean[:, np.newaxis], 'std': std[:, np.newaxis],
'ninput_channels': len(mean)}
with open(mean_std_cache, 'wb') as f:
pickle.dump(transform_dict, f)
print('saved: ', mean_std_cache)
self.opt.num_aug = num_aug
# open mean / std from file
with open(mean_std_cache, 'rb') as f:
transform_dict = pickle.load(f)
print('loaded mean / std from cache')
self.mean = transform_dict['mean']
self.std = transform_dict['std']
self.ninput_channels = transform_dict['ninput_channels']
def collate_fn(batch):
"""Creates mini-batch tensors
We should build custom collate_fn rather than using default collate_fn
"""
meta = {}
keys = batch[0].keys()
for key in keys:
temp = [d[key] for d in batch]
# Resize meshes to same size with padding to make sure meshes can be batched
if key == 'edge_features':
max_num_faces = max([t.shape[1] for t in temp])
[t.resize((5, max_num_faces), refcheck=False) for t in temp if t.shape[1] != max_num_faces]
a = np.array(temp)
meta.update({key: a})
else:
a = np.array(temp)
meta.update({key: a})
return meta
| andwang1/BrainSurfaceTK | models/MeshCNN/data/base_dataset.py | base_dataset.py | py | 2,847 | python | en | code | 11 | github-code | 13 |
2846964410 | from flask import Flask, jsonify, request
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_restx import Api, Resource
from flask_swagger_ui import get_swaggerui_blueprint
app = Flask(__name__)
# Initialize Flask-RestX API
api = Api(app, version='1.0', title='Task Management API', description='API endpoints for managing tasks')
# Configure the SQLAlchemy database connection
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:rawad.18@localhost:5432/task_manager'
# Initialize SQLAlchemy and database migration
db = SQLAlchemy(app)
migrate = Migrate(app, db)
# Configure Swagger UI
SWAGGER_URL = '/swagger'
API_URL = '/swagger.json'
swaggerui_blueprint = get_swaggerui_blueprint(
SWAGGER_URL,
API_URL,
config={
'app_name': "Task Management API"
}
)
app.register_blueprint(swaggerui_blueprint, url_prefix=SWAGGER_URL)
class Task(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(255))
description = db.Column(db.Text)
completed = db.Column(db.Boolean, default=False)
priority = db.Column(db.String(50), default='low')
due_date = db.Column(db.Date)
category = db.Column(db.String(50))
def __init__(self, title, description, priority, due_date, category):
self.title = title
self.description = description
self.priority = priority
self.due_date = due_date
self.category = category
@api.route('/tasks')
class TasksResource(Resource):
def get(self):
tasks = Task.query.all()
result = []
for task in tasks:
result.append({
'id': task.id,
'title': task.title,
'description': task.description,
'completed': task.completed,
'priority': task.priority,
'due_date': task.due_date,
'category': task.category
})
return jsonify(result)
def post(self):
data = request.get_json()
title = data.get('title')
description = data.get('description')
priority = data.get('priority')
due_date = data.get('due_date')
category = data.get('category')
task = Task(title=title, description=description, priority=priority, due_date=due_date, category=category)
db.session.add(task)
db.session.commit()
return jsonify({'message': 'Task created successfully'})
@api.route('/tasks/<int:task_id>')
class TaskResource(Resource):
    """Single-task endpoint: fetch, partially update, or delete by primary key."""
    def get(self, task_id):
        """Return one task as JSON, or a 404 payload if the id is unknown."""
        task = Task.query.get(task_id)
        if not task:
            return jsonify({'message': 'Task not found'}), 404
        return jsonify({
            'id': task.id,
            'title': task.title,
            'description': task.description,
            'completed': task.completed,
            'priority': task.priority,
            'due_date': task.due_date,
            'category': task.category
        })
    def put(self, task_id):
        """Partially update a task; only keys present in the JSON body are applied."""
        task = Task.query.get(task_id)
        if not task:
            # Bug fix: previously returned 200 with an error body; a missing
            # resource must yield 404, consistent with get() and delete().
            return jsonify({'error': 'Task not found'}), 404
        data = request.get_json()
        # Apply only the fields the client supplied (partial-update semantics).
        for field in ('title', 'description', 'completed', 'priority', 'due_date', 'category'):
            if field in data:
                setattr(task, field, data[field])
        db.session.commit()
        return jsonify({'message': 'Task updated successfully'})
    def delete(self, task_id):
        """Delete a task; 404 if it does not exist."""
        task = Task.query.get(task_id)
        if not task:
            return jsonify({'message': 'Task not found'}), 404
        db.session.delete(task)
        db.session.commit()
        return {'message': 'Task deleted successfully'}, 200
if __name__ == '__main__':
    app.run(debug=True)
# Visualize a single character in an OpenGL window (python)
# https://stackoverflow.com/questions/60738691/visualize-a-single-character-in-an-opengl-window-python
import os
import sys
import numpy
import freetype
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
os.chdir(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../../resource'))
def load_font(filename, size):
    """Rasterize ASCII glyphs 32..127 from a TrueType font into one GL texture.

    Side effects (module globals):
      font_texture_id -- alpha texture holding all glyphs side by side
      font_height     -- pixel height (max ascender + max descender)
      font_map        -- char -> (pixel width, u0, u1) atlas coordinates
    """
    global font_texture_id, font_height, font_map
    face = freetype.Face(filename)
    face.set_char_size(size*64)  # FreeType char sizes are in 1/64th points
    face_list = []
    font_bitmap_width = 0
    ascender, descender = 0, 0
    # First pass: measure total atlas width and shared baseline metrics.
    for c in range(32, 128):
        face.load_char(chr(c), freetype.FT_LOAD_RENDER | freetype.FT_LOAD_FORCE_AUTOHINT)
        face_list.append((chr(c), font_bitmap_width))
        font_bitmap_width += face.glyph.bitmap.width
        ascender = max(ascender, face.glyph.bitmap_top)
        descender = max(descender, face.glyph.bitmap.rows-face.glyph.bitmap_top)
    font_height = ascender + descender
    font_map = {}
    font_bitmap = numpy.zeros((font_height, font_bitmap_width), dtype=numpy.ubyte)
    # Second pass: blit each glyph into its column of the atlas.
    for c, x in face_list:
        face.load_char(c, freetype.FT_LOAD_RENDER | freetype.FT_LOAD_FORCE_AUTOHINT)
        y = ascender - face.glyph.bitmap_top
        w, h = face.glyph.bitmap.width, face.glyph.bitmap.rows
        font_bitmap[y:y+h, x:x+w].flat = face.glyph.bitmap.buffer
        font_map[c] = (w, x / font_bitmap_width, (x+w) / font_bitmap_width)
    font_texture_id = glGenTextures(1)
    glBindTexture(GL_TEXTURE_2D, font_texture_id)
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
    # Tightly packed rows (1-byte alignment) are required for odd glyph widths.
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
    glTexImage2D(GL_TEXTURE_2D, 0, GL_ALPHA, font_bitmap_width, font_height, 0, GL_ALPHA, GL_UNSIGNED_BYTE, font_bitmap)
def display():
    """GLUT display callback: draw "Hello World!" using the glyph atlas."""
    glClearColor(0, 0, 0, 0)
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    glColor(1, 1, 1, 1)
    # Glyphs are alpha-only; blend them over the cleared background.
    glEnable(GL_BLEND)
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
    glEnable(GL_TEXTURE_2D)
    glBindTexture(GL_TEXTURE_2D, font_texture_id)
    glBegin(GL_QUADS)
    x, y = 40, 20
    for c in "Hello World!":
        if c in font_map:
            # One textured quad per glyph; u0..u1 select its atlas column.
            w, h = font_map[c][0], font_height
            u0, u1 = font_map[c][1], font_map[c][2]
            glTexCoord2f(u0, 0)
            glVertex2f(x, y)
            glTexCoord2f(u1, 0)
            glVertex2f(x + w, y)
            glTexCoord2f(u1, 1)
            glVertex2f(x + w, y + h)
            glTexCoord2f(u0, 1)
            glVertex2f(x, y + h)
            x += w + 5  # 5 px letter spacing
    glEnd()
    glutSwapBuffers()
def reshape(width, height):
    """GLUT reshape callback: pixel-exact orthographic projection, y-down."""
    glViewport(0, 0, width, height)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    # Top-left origin so text coordinates behave like screen coordinates.
    glOrtho(0, width, height, 0, -1, 1)
    glMatrixMode(GL_MODELVIEW)
# GLUT boilerplate: double-buffered RGB window, callbacks, then load the atlas.
glutInit(sys.argv)
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH)
glutInitWindowSize(400, 100)
glutCreateWindow('Hello World - FreeType')
glutDisplayFunc(display)
glutReshapeFunc(reshape)
load_font('font/FreeSans.ttf', 64)
glutMainLoop()
import random
import math
class Ball:
    """A bouncing sprite with slightly randomized wandering motion."""
    def __init__(self, ball, vector):
        # ball: a pygame surface; vector: an (angle, speed) pair.
        self.x = 1
        self.y = 1
        self.vector = vector
        self.speed = None
        # Countdown that controls how often the random wander bias is re-rolled.
        self.iterations = 1000
        self.random_direction = random.randrange(0, 2)
        # generate random coordinates here
        randX = random.randrange(0, 800)
        randY = random.randrange(0, 600)
        # check that random location isn't close to bounds
        while randX > 600 or randX < 100:
            randX = random.randrange(0, 800)
        while randY > 400 or randY < 100:
            randY = random.randrange(0, 600)
        self.location = [randX, randY]
        self.ballrect = ball.get_rect()
        self.initmove(self.location)
    def setSpeed(self, x, y):
        """
        if x > 1:
            x = 1
            y = 1
        if y > 1:
            x = 1
            y = 1
        """
        # Accumulate (x, y) into the stored speed pair; the triple-quoted
        # block above is disabled clamping logic kept by the original author.
        self.x += x
        self.y += y
        self.speed = [self.x, self.y]
    def move(self):
        """Advance one frame: jitter the heading, bounce off walls, move the rect."""
        # decompose angle and speed
        (angle, z) = self.vector
        # add random movement
        # add or subtract a scalar value to only one axis per 1000 iterations
        tmp = random.random() * 0.2
        op = random.randrange(0, 2)
        # NOTE(review): the two if/elif pairs below apply the identical jitter
        # regardless of random_direction -- the flag currently has no effect.
        if op == 0 and self.random_direction == 0:
            angle += tmp
        elif op == 1 and self.random_direction == 0:
            angle -= tmp
        if op == 0 and self.random_direction == 1:
            angle += tmp
        elif op == 1 and self.random_direction == 1:
            angle -= tmp
        # if the new coordinates cause a bump against the wall, change direction
        if self.ballrect.left < 0 or self.ballrect.right > 800:
            angle *= -1
        if self.ballrect.top < 0 or self.ballrect.bottom > 600:
            z *= -1
        # re-compose angle and speed
        self.vector = (angle, z)
        # update the rectangle
        # NOTE(review): ballrect is moved by the raw (angle, z) pair while the
        # computed (dx, dy) stored in self.speed is never used -- this looks
        # like a bug; confirm the intended displacement.
        self.speed = self.calcnewpos()
        self.ballrect = self.ballrect.move(self.vector)
        # update the iteration counter for smooth random movement and reset the random direction flag
        self.iterations -= 1
        if self.iterations <= 0:
            self.iterations = 1000
            self.random_direction = random.randrange(0, 2)
    def calcnewpos(self):
        """Return the (dx, dy) displacement for the current (angle, speed) vector."""
        (angle, z) = self.vector
        (dx, dy) = (z*math.cos(angle), z*math.sin(angle))
        return dx, dy
    def initmove(self, loc):
        """Translate the rect to its random start location."""
        self.ballrect = self.ballrect.move(loc)
    def draw(self, ball, screen):
        """Blit the ball surface at the rect's current position."""
        screen.blit(ball, self.ballrect)
def update(self):
self.move() | Zidane8998/PyGene | game_objects/ball.py | ball.py | py | 2,616 | python | en | code | 0 | github-code | 13 |
#@ type: compute
#@ parents:
#@ - func1
#@ - func2
#@ - func3
#@ - func4
#@ corunning:
#@ mem2:
#@ trans: mem2
#@ type: rdma
import struct
import pickle
from typing import List
import cv2
import numpy as np
INPUT1 = "data/predict_store"
INPUT2 = "data/boxes_store"
class Box:
    """Axis-aligned bounding box plus a compressed image payload.

    Wire format: a flat int list [wmin, hmin, wmax, hmax, *compressed_img].
    """

    def __init__(self, wmin: int, hmin: int, wmax: int, hmax: int, compressed_img: List[int]):
        self.wmin = wmin
        self.hmin = hmin
        self.wmax = wmax
        self.hmax = hmax
        self.compressed_img = compressed_img

    def serialize(self):
        """Flatten this box into its wire-format list."""
        corners = [self.wmin, self.hmin, self.wmax, self.hmax]
        return corners + self.compressed_img

    @staticmethod
    def deserialize(obj):
        """Rebuild a Box from its wire-format list."""
        wmin, hmin, wmax, hmax = obj[:4]
        return Box(wmin, hmin, wmax, hmax, obj[4:])
def getFromServer(output, remoteIndex):
    """Read one length-prefixed pickled record from *output* at byte offset
    *remoteIndex* and return (record, new_offset).

    The record layout is a native-order unsigned-int byte length followed by
    a pickle payload of that many bytes.
    """
    with open(output, "r+b") as fin:
        fin.seek(remoteIndex, 0)
        (length,) = struct.unpack('@I', fin.read(4))
        payload = fin.read(length)
    return pickle.loads(payload), remoteIndex + 4 + length
def test():
    """Overlay predicted digit labels and bounding boxes on the source image.

    Reads length-prefixed pickled blocks (predictions and boxes) from the two
    shared stores and writes the annotated image next to the input.
    """
    remoteIndexBox = 0
    remoteIndexPre = 0
    boxBlockCount = 0
    preBlockCount = 0
    # Each store begins with a 4-byte native-order unsigned block count.
    with open(INPUT1, "r+b") as fin:
        fin.seek(remoteIndexPre, 0)
        countBytes = fin.read(4)
        remoteIndexPre += 4
        preBlockCount = struct.unpack('@I', countBytes)[0]
    with open(INPUT2, "r+b") as fin:
        fin.seek(remoteIndexBox, 0)
        countBytes = fin.read(4)
        remoteIndexBox += 4
        boxBlockCount = struct.unpack('@I', countBytes)[0]
    filename = "data/1234567890.png"
    output_name = "data/1234567890_ocr.png"
    img = cv2.imread(filename)
    h, w, _ = img.shape
    # Walk both stores in lockstep: one list of boxes plus one list of labels
    # per block.
    while boxBlockCount > 0:
        boxes, remoteIndexBox = getFromServer(INPUT2, remoteIndexBox)
        predictions, remoteIndexPre = getFromServer(INPUT1, remoteIndexPre)
        for boxByte, prediction in zip(boxes, predictions):
            box = Box.deserialize(boxByte)
            img = cv2.rectangle(img, (box.wmin, box.hmin), (box.wmax, box.hmax), (87, 201, 0), 2)
            img = cv2.putText(img, str(prediction), ((box.wmin + box.wmax) // 2, box.hmin), cv2.FONT_HERSHEY_COMPLEX, min(h, w) / 500, (255, 144, 30), 2)
        boxBlockCount -= 1
    cv2.imwrite(output_name, img)
    print("Finish output image")
    return {}
if __name__ == "__main__":
test()
| zerotrac/CSE291_mnist | Mnist_test/func5.py | func5.py | py | 2,468 | python | en | code | 2 | github-code | 13 |
# usage:
# python3 train_model.py --train_data /Users/joshgardner/Documents/UM-Graduate/UMSI/LED_Lab/s17/model_build_infrastructure/job_runner/1496853720-josh_gardner-clinicalskills/week_3/week_3_sum_feats.csv --output_loc .
from sklearn import linear_model
import argparse
import pandas as pd
from sklearn.externals import joblib
import os
def train_model(data_fp, label_col = 'dropout_current_week'):
    """Train a logistic-regression dropout model from a weekly-features CSV.

    data_fp: path to a CSV of per-user weekly features; the 'userID' and
        'week' identifier columns are dropped before training.
    label_col: name of the binary label column.
    Returns the fitted sklearn LogisticRegression.
    """
    data = pd.read_csv(data_fp).drop(['userID', 'week'], axis = 1)
    # get X and Y from data
    x_cols = [c for c in data.columns if c != label_col]
    # Bug fix: DataFrame.as_matrix() was deprecated and removed in pandas 1.0;
    # .values is the long-standing equivalent and works on old and new pandas.
    X = data[x_cols].values
    Y = data[label_col].values
    # train model
    logreg = linear_model.LogisticRegression(C=1e5)
    logreg.fit(X, Y)
    return logreg
def main(data_fp, output_loc = './data/output'):
    """Train the model from *data_fp* and persist it as mod.pkl under *output_loc*."""
    # reads data, trains model, and writes result to specified path
    # call train_model() to build
    mod = train_model(data_fp=data_fp)
    # write output; can be loaded with joblib.load()
    joblib.dump(mod, os.path.join(output_loc, 'mod.pkl'))
if __name__ == '__main__':
    # parse args
    parser = argparse.ArgumentParser(description='Train predictive model.')
    parser.add_argument('--train_data', required=True, help='path to training data')
    parser.add_argument('--output_loc', required=False, help='path to output directory for trained model',
                        default='./data/output')
    args = parser.parse_args()
    # Bug fix: main() has no `output_fp` parameter, so the previous call
    # main(..., output_fp=...) raised TypeError; the keyword is `output_loc`.
    main(data_fp=args.train_data, output_loc=args.output_loc)
from project import socketio, app
from project.model.subscriber.smart_tv_subscriber import SmartTvSubscriber
from project.model.publisher.smart_tv_publisher import SmartTvPublisher
from project.model.service.smart_tv_service import SmartTvService
from flask import request, jsonify
from time import sleep
import random
from datetime import datetime
tv_on = False
@app.route("/change_tv_status", methods=["POST"])
def change_status():
command = request.json["lock"]
block_tv(command)
value = 'locked' if SmartTvService().last_record()['block'] else 'unlocked'
SmartTvPublisher().start()
return (
jsonify(
{"info": f"Tv's status is {value}"}
),
200
)
@socketio.on("tvConnect")
def tv_connect():
global tv_on
tv_on = True
subscriber = SmartTvSubscriber()
random.seed(datetime.now())
SmartTvService().insert_data(dict(block=False)) # Set random here
subscriber.start()
while True:
sleep(1)
if not tv_on:
subscriber.stop()
break
@socketio.on("tvDisconnect")
def tv_disconnect():
global tv_on
tv_on = False
@socketio.on("tvBlock")
def block_tv(blocked):
if blocked:
info = {"info": "Tv is locked"}
socketio.emit("TvInformation", info)
socketio.emit("RedColor")
SmartTvService().insert_data(dict(block=True))
else:
info = {"info": "Tv unlocked"}
socketio.emit("TvInformation", info)
socketio.emit("NormalColor")
SmartTvService().insert_data(dict(block=False))
| BabyMonitorSimulation/BabyMonitorSoS | project/controller/smart_tv_controller.py | smart_tv_controller.py | py | 1,575 | python | en | code | 0 | github-code | 13 |
# DESCRIPTION:
# Given a string of numbers, you must perform a method in which you will
# translate this string into text, based on the phone keypad.
#
# For example if you get "22" return "b", if you get "222" you will return "c".
# If you get "2222" return "ca".
#
# Further details:
#
# 0 is a space in the string.
# 1 is used to separate letters with the same number.
# always transform the number to the letter with the maximum value, as long as
# it does not have a 1 in the middle. So, "777777" --> "sq"
# and "7717777" --> "qs".
# you cannot return digits.
# Given a empty string, return empty string.
# Return a lowercase string.
# Examples:
# "443355555566604466690277733099966688" --> "hello how are you"
# "55282" --> "kata"
# "22266631339277717777" --> "codewars"
# "66885551555" --> "null"
# "833998" --> "text"
# "000" --> " "
# Digit -> letters on a standard phone keypad ('0' maps to a space).
KEYPAD = {
    '0': [' '],
    '2': ['a', 'b', 'c'],
    '3': ['d', 'e', 'f'],
    '4': ['g', 'h', 'i'],
    '5': ['j', 'k', 'l'],
    '6': ['m', 'n', 'o'],
    '7': ['p', 'q', 'r', 's'],
    '8': ['t', 'u', 'v'],
    '9': ['w', 'x', 'y', 'z'],
}


def phone_words(strng: str) -> str:
    """Decode a keypad digit string into lowercase text.

    Repeated digits cycle through a key's letters ('22' -> 'b'); '1'
    separates two letters typed on the same key; '0' inserts spaces; counts
    past the end of a key wrap onto its last letter ('777777' -> 'sq').
    """
    # Pass 1: record the running press-count after each digit. A '1' resets
    # the count without changing the "current" digit, so a following press of
    # the same digit starts a fresh ascending streak.
    presses = []
    current, count = '', 0
    for digit in strng:
        if digit == '1':
            count = 0
            continue
        if digit == current:
            count += 1
        else:
            current, count = digit, 1
        presses.append((digit, count))
    # Pass 2: keep only the peak of each ascending streak -- an entry survives
    # unless the next entry is the same digit with a strictly larger count.
    peaks = [
        cur
        for cur, nxt in zip(presses, presses[1:] + [None])
        if nxt is None or nxt[0] != cur[0] or nxt[1] <= cur[1]
    ]
    # Pass 3: map each (digit, count) to text, wrapping long runs onto the
    # key's last letter.
    pieces = []
    for digit, count in peaks:
        if digit == '0':
            pieces.append(' ' * count)
            continue
        letters = KEYPAD[digit]
        if count <= len(letters):
            pieces.append(letters[count - 1])
        else:
            full, rem = divmod(count, len(letters))
            pieces.append(letters[-1] * full)
            if rem:
                pieces.append(letters[rem - 1])
    return ''.join(pieces)
| Darya-Kuzmich/my-codewars-solutions | 6_kyu/phonewords.py | phonewords.py | py | 2,204 | python | en | code | 0 | github-code | 13 |
# This script will allow you to:
# 1. Choose by 4 color bands (tell me a value)
# 2. Choose by value (tell me colors)
# TODO:
# make it easier to read the ohm value. for example:
# 300000000 --> 300,000,000 ohms.
# or 300000000 --> 300M ohms
# currently the value is printed as: 300000000.0
from os import system
import re
# value entered:
# Selection key -> band color name ('a'/'b' extend past the digits).
color_mapping = {
    "0": "Black",
    "1": "Brown",
    "2": "Red",
    "3": "Orange",
    "4": "Yellow",
    "5": "Green",
    "6": "Blue",
    "7": "Violet",
    "8": "Grey",
    "9": "White",
    "a": "Gold",
    "b": "Silver"
}
# Third-band color -> multiplier applied to the two significant digits.
ohm_multiplier_mapping = {
    "Black": 1,
    "Brown": 10,
    "Red": 100,
    "Orange": 1000,
    "Yellow": 10000,
    "Green": 100000,
    "Blue": 1000000,
    "Violet": 10000000,
    "Grey": 100000000,
    "White": 10000000000,
    "Gold": .1,
    "Silver": .01
}
# Fourth-band color -> tolerance string.
tolerance_mapping = {
    "Brown": "+/- 1%",
    "Red": "+/- 2%",
    "Green": "+/- 0.5%",
    "Blue": "+/- 0.25%",
    "Violet": "+/- 0.1%",
    "Grey": "+/- 0.05%",
    "Gold": "+/- 5%",
    "Silver": "+/- 10%"
}
# Multipliers in menu order for the value-to-colors flow.
multiplier_list = [
    1,
    10,
    100,
    1000,
    10000,
    100000,
    1000000,
    10000000,
    100000000,
    1000000000,
    .1,
    .01
]
def clear():
    """Clear the terminal screen (POSIX 'clear')."""
    _ = system('clear')
def colors_to_value(user_input):
    """Translate a 4-character band code into a human-readable resistance.

    user_input: four keys from color_mapping, e.g. "564a" for
    green/blue/yellow/gold. Bands 1-2 are the significant digits, band 3
    the multiplier, band 4 the tolerance.

    Returns e.g. "560000.0 ohms +/- 5%".
    """
    # The mapping keys ARE the digit characters, so the original round-trip
    # (key -> color -> reverse-index back to key) was redundant; use the
    # input characters directly.
    band1_key = user_input[0]
    band2_key = user_input[1]
    # third band is the multiplier of the resistor value
    band3_color = color_mapping.get(user_input[2])
    band3_multiplier = ohm_multiplier_mapping.get(band3_color)
    # fourth band is the tolerance of the resistor value
    band4_color = color_mapping.get(user_input[3])
    band4_tolerance = tolerance_mapping.get(band4_color)
    # Build the value using the multiplier.
    resistor_value = float(band1_key + band2_key) * band3_multiplier
    return f"{str(resistor_value)} ohms {band4_tolerance}"
# This function displays the menu for selection, validates the user input, calls
# the colors_to_value function and displays the result
def color_band_selection():
    """Interactive prompt: read a 4-character color code and print its value."""
    # Print out the color selection menu for the user to select.
    for key, value in color_mapping.items():
        print(f'{key}) {value}')
    print("r) Return to main menu")
    # a color code is entered here
    user_input = input("Enter your selection: ")
    user_input = user_input.lower()
    # TODO more error checking
    if 'r' in user_input:
        # return to calling function
        return
    elif len(user_input) != 4:
        # Bug fix: was `len(user_input) is not 4` -- identity comparison with
        # an int literal is implementation-dependent (SyntaxWarning on 3.8+).
        print("You must enter exactly 4 characters")
        input("Press any key to return to main menu...")
    else:
        # return a string that identifies the value
        msg = colors_to_value(user_input)
        clear()
        print(f"Your resistor value is: {msg}")
        print("")
        input("Press enter to continue...")
def value_to_colors(first_digit, second_digit, multiplier_list_index):
    """Print the band colors for a resistance given as two digits + multiplier index."""
    band1_color = color_mapping.get(first_digit)
    band2_color = color_mapping.get(second_digit)
    multiplier_value = multiplier_list[multiplier_list_index]
    # Reverse-lookup the multiplier's color by its numeric value.
    band3_color = list(ohm_multiplier_mapping.keys())[
        list(ohm_multiplier_mapping.values()).index(multiplier_value)]
    value = float(first_digit + second_digit) * multiplier_value
    print("")
    print("*" * 50)
    print(
        f'Your resistor color coding is: {band1_color} {band2_color} {band3_color}: {value} ohms')
    print("*" * 50)
    print("")
    # The 4th band is informational only -- show the tolerance options.
    print("Select the 4th band color for specific tolerance:")
    for key, value in tolerance_mapping.items():
        print(f'{key}: {value}')
    input("Press enter to continue...")
def validate_character(user_input):
    """Return True iff *user_input* is exactly one digit character 0-9.

    Prints an explanatory message and returns False otherwise.
    """
    if len(user_input) > 1:
        print("input error --> Too many characters. Try again.")
        return False
    # Bug fix: the previous pattern ^[0-9]*$ also matched the empty string,
    # letting an empty entry through to a later float() crash; require at
    # least one digit.
    if not re.match("^[0-9]+$", user_input):
        print("input error --> Use only number values 0-9. Try again.")
        return False
    return True
if __name__ == '__main__':
    # Interactive menu loop. Guarded so importing this module for its helper
    # functions no longer starts the CLI as a side effect.
    while True:
        clear()
        print("=== 4 Band Resistor selection ===")
        print("1. Choose by color bands (tell me the value)")
        print("2. Choose by value (tell me the color bands)")
        print("3. Quit")
        try:
            choice = int(input("> "))
            # Bug fix: the comparisons below used `is 1`/`is 2` (identity, not
            # equality) which is implementation-dependent for int literals.
            if choice == 1:
                # Choose by color bands
                clear()
                print("Select the color by entering the corresponding number value.")
                print(" e.g. for a color band of green, blue, yellow, gold --> enter 564a")
                color_band_selection()
            elif choice == 2:
                # Choose by value
                clear()
                valid_value = False
                while not valid_value:
                    first_digit = input(
                        "Enter the FIRST digit of the value of the resistor (e.g. 5 for 56000): ")
                    valid_value = validate_character(first_digit)
                valid_value = False
                while not valid_value:
                    second_digit = input(
                        "Enter the SECOND digit of the value of the resistor (e.g. 6 for 56000): ")
                    valid_value = validate_character(second_digit)
                for i, item in enumerate(multiplier_list):
                    print(f'{i}) {item}')
                while True:
                    multiplier_list_index = int(input(
                        "Select the multiplier value of the resistor (e.g. 4 for 10000, or think 4 zeros): "))
                    # Bug fix: range(0, len+1) accepted an index one past the
                    # end of multiplier_list, crashing value_to_colors().
                    if multiplier_list_index in range(len(multiplier_list)):
                        break
                    else:
                        input(
                            "input error --> Incorrect selection. Press enter to try again.")
                value_to_colors(first_digit, second_digit, multiplier_list_index)
            else:
                clear()
                break
        except ValueError:
            continue
#NB: python is case sensitive
class clsCoordinate:
    """A 3D point with fixed-precision string formatting for AutoCAD scripts."""

    def __init__(self, xx, yy, zz):
        # Dead local `key = 0` removed -- it was assigned and never used.
        self.x = xx
        self.y = yy
        self.z = zz

    def ptStr2(self):
        """Return "x,y" with 4 decimal places (2D form)."""
        return format(self.x, '.4f') + ',' + format(self.y, '.4f')

    def ptStr(self):
        """Return "x,y,z" with 4 decimal places."""
        return format(self.x, '.4f') + ',' + format(self.y, '.4f') + ',' + format(self.z, '.4f')

    def cprint(self):
        """Print the 3D coordinate string (plus a blank line) to stdout."""
        print(self.ptStr() + '\n')

    def fprint(self, fp):
        """Write the 3D coordinate string and a newline to an open file."""
        fp.write(self.ptStr() + '\n')
def pointStr2(x, y):
    """Format an (x, y) pair as "x,y" with four decimal places."""
    return f"{x:.4f},{y:.4f}"
def startLine(fp):
    """Emit the AutoCAD LINE command keyword."""
    fp.write('LINE\n')
def StartPline(fp):
    """Emit the AutoCAD PLINE (polyline) command keyword."""
    fp.write('PLINE\n')
def closeLines(fp):
    """Emit 'C' -- the close option for the current polyline."""
    fp.write('C\n')
def drawLine(fp, pt1, pt2):
    """Write a LINE command for the segment pt1 -> pt2.

    Bug fix: the original used the VB-style `&` concatenation operator,
    which raises TypeError on Python strings; joined with `+` instead.
    """
    fp.write('LINE ' + pt1.ptStr() + ' ' + pt2.ptStr() + ' ')
def drawBox3(fp, ptA, Lx, Ly):
    """Draw an Lx-by-Ly rectangle as a closed polyline starting at ptA.

    NOTE(review): superseded by the shorter drawBox4(); kept for reference.
    """
    dist = clsCoordinate(0,0,0)
    StartPline(fp)
    #fp.write(ptA.ptStr2() + '\n')
    #Bottom LH Corner
    ptA.fprint(fp)
    #Top LH Corner
    dist.x = 0
    dist.y = Ly
    fp.write('@' + dist.ptStr2() + '\n' )
    #Top RH Corner
    dist.x = Lx
    dist.y = 0
    fp.write('@' + dist.ptStr2() + '\n' )
    #Bottom RH Corner
    dist.x = 0
    dist.y = -Ly
    fp.write('@' + dist.ptStr2() + '\n' )
    closeLines(fp)
def drawBox4(fp, ptA, Lx, Ly):
    """Draw an Lx-by-Ly rectangle as a closed polyline starting at ptA."""
    StartPline(fp)
    ptA.fprint(fp)
    # Relative moves: up the left side, across the top, down the right side;
    # the close command draws the bottom edge back to the start point.
    for dx, dy in ((0, Ly), (Lx, 0), (0, -Ly)):
        fp.write('@' + pointStr2(dx, dy) + '\n')
    closeLines(fp)
def ScriptWriter1(scrFile):
    """Write a simple test script: one 2:1 rectangle plus a zoom-extents."""
    ptA = clsCoordinate(0,0,0)
    #ptA.x = 5
    #ptA.y=7
    #print(ptA.ptStr2())
    Ly=16
    Lx=2*Ly
    drawBox4(scrFile,ptA,Lx,Ly)
    scrFile.write('ZOOM E')
def ScriptWriter2(scrFile):
    """Write a script drawing a building's plan, four elevations and a section.

    ptA is mutated between boxes to walk around the plan; pt0 keeps the origin.
    """
    pt0 = clsCoordinate(0,0,0)
    ptA = clsCoordinate(0,0,0)
    BuildingHeight=2.4
    BuildingWidth=8
    BuildingLength=2*BuildingWidth
    #Plan
    drawBox4(scrFile,ptA,BuildingLength,BuildingWidth)
    #Elevation 1
    ptA.y = ptA.y-BuildingHeight
    drawBox4(scrFile,ptA,BuildingLength,BuildingHeight)
    #Elevation 2
    ptA.x = ptA.x + BuildingLength
    drawBox4(scrFile,ptA,BuildingWidth,BuildingHeight)
    #Elevation 3
    ptA.x = pt0.x - BuildingWidth
    drawBox4(scrFile,ptA,BuildingWidth,BuildingHeight)
    #Elevation 4
    ptA.x = ptA.x - BuildingLength
    drawBox4(scrFile,ptA,BuildingLength,BuildingHeight)
    #Section
    ptA.x = pt0.x + BuildingLength + BuildingWidth
    drawBox4(scrFile,ptA,BuildingWidth,BuildingHeight)
    scrFile.write('ZOOM E')
def CmdMain():
    """Generate DrawBox2.scr containing the building plan/elevation script."""
    scrFile = open('DrawBox2.scr', 'w')
    try:
        ScriptWriter2(scrFile)
    finally:
        # Ensure the script file is closed even if the writer fails.
        scrFile.close()
    print('All Done!')
#-------------------------
#MAIN
#-------------------------
if __name__ == '__main__':
    # Guarded so importing this module no longer runs the generator.
    CmdMain()
# END MAIN
#=========================
import platform
import logging
import asyncio
from bleak import BleakClient
from bleak import BleakClient
from bleak import _logger as logger
from bleak.uuids import uuid16_dict
from adq import Adq_save, time, graf, show, sleep
from threading import Thread
UART_TX_UUID = "6e400002-b5a3-f393-e0a9-e50e24dcca9e" #Nordic NUS characteristic for TX
UART_RX_UUID = "6e400003-b5a3-f393-e0a9-e50e24dcca9e" #Nordic NUS characteristic for RX
dataFlag = False
AS = Adq_save()
d1_connected = False
d2_connected = False
#g = graf([],[])
c_address = ""
to1=float()
to2=float()
desfase_adq = 0
def notification_handler(sender, data):
    """Simple notification handler which prints the data received."""
    global dataFlag, c_address, AS, g
    #print(f"{c_address}: {data}")
    # Feed the raw packet into the shared acquisition object, tagged with the
    # device address currently stored in c_address.
    AS.decode(data, c_address)
    #Thread(target=g.update_graf,args=(AS.dev1['ay'], AS.dev2['ay'],)).start()
    # Signal the polling loops in connect_device*() that fresh data arrived.
    # NOTE(review): c_address is only updated by those loops AFTER this flag
    # is seen, so a packet may be attributed to the previous device -- confirm.
    dataFlag = True
async def connect_device1(address, loop):
    """Connect to the first BLE board and stream its UART notifications forever."""
    global to1
    async with BleakClient(address, loop=loop) as client:
        #wait for BLE client to be connected
        x = await client.is_connected()
        print(f"Connected: {address}")
        to1 = time()  # connection timestamp, used to report acquisition offset
        #wait for data to be sent from client
        await client.start_notify(UART_RX_UUID, notification_handler)
        while True :
            #give some time to do other tasks
            await asyncio.sleep(0.05)
            #check if we received data
            global dataFlag, c_address
            if dataFlag :
                dataFlag = False
                c_address = address
                # NOTE(review): the read result is discarded; the payload is
                # already consumed in notification_handler -- confirm this
                # extra characteristic read is needed at all.
                data = await client.read_gatt_char(UART_RX_UUID)
async def connect_device2(address, loop):
    """Connect to the second BLE board, staggered 10 s after device 1.

    NOTE(review): near-duplicate of connect_device1 apart from the delay and
    the timestamp global -- a single parameterized coroutine would remove the
    duplication.
    """
    global to1,to2,g
    # Stagger the second connection so both boards don't pair simultaneously.
    await asyncio.sleep(10)
    async with BleakClient(address, loop=loop) as client:
        #wait for BLE client to be connected
        x = await client.is_connected()
        print(f"Connected: {address}")
        to2 = time()  # connection timestamp for the acquisition-offset report
        #wait for data to be sent from client
        await client.start_notify(UART_RX_UUID, notification_handler)
        while True :
            #give some time to do other tasks
            await asyncio.sleep(0.05)
            #check if we received data
            global dataFlag, c_address
            if dataFlag :
                dataFlag = False
                c_address = address
                data = await client.read_gatt_char(UART_RX_UUID)
if __name__ == "__main__":
#this is MAC of our BLE device
"""
Gera1: 08:3A:F2:B7:6C:22
Gera2: C8:F0:9E:9E:72:DA
"""
address1 = (
"08:3A:F2:B7:6C:22"
)
address2 = (
"C8:F0:9E:9E:72:DA"
)
loop = asyncio.get_event_loop()
loop.create_task(connect_device1(address1, loop))
loop.create_task(connect_device2(address2, loop))
try:
loop.run_forever()
except KeyboardInterrupt:
print(f"desfase_adq: {to2-to1}")
loop.stop()
AS.s_device1()
        AS.s_device2()
72337987539 | """Set configuration for the model
"""
import argparse
import multiprocessing
import torch
def str2float(s):
    """Parse a float that may be written as a fraction, e.g. '8/255'."""
    if '/' in s:
        s1, s2 = s.split('/')
        s = float(s1)/float(s2)
    return float(s)
def parser_setting(parser):
    """Attach all experiment arguments to *parser* and return it."""
    base_args = parser.add_argument_group('base arguments')
    base_args.add_argument(
        '--local_rank', type=int, default=-1, metavar='N', help='Local process rank.'
    )
    base_args.add_argument(
        '--save-path', type=str, default='./baselineCNN/best_model',
        help='save path for best model'
    )
    base_args.add_argument(
        '--workers', type=int, default=multiprocessing.cpu_count()-1, metavar='N',
        help='dataloader threads'
    )
    base_args.add_argument(
        '--padding', type=int, default=4, help='base padding size'
    )
    base_args.add_argument(
        '--img-size', type=int, default=32, help='cropped image size'
    )
    base_args.add_argument(
        '--dataset', type=str, default='cifar10',
        choices=['mnist', 'fmnist', 'cifar10', 'cifar100', 'svhn'],
        help='Dataset name'
    )
    base_args.add_argument(
        '--model', type=str, default='resnet',
        choices=['resnet', 'vgg', 'custom']
    )
    base_args.add_argument(
        "--data-root-path", type=str, default='/media/data/benchmark', help='data path'
    )
    base_args.add_argument(
        "--n_cpu", type=int, default=multiprocessing.cpu_count(),
        help="number of cpu threads to use during batch generation"
    )
    base_args.add_argument(
        "--device-ids", type=int, nargs='*', default=[0], help="device id"
    )
    trn_args = parser.add_argument_group('training hyper params')
    trn_args.add_argument(
        '--epochs', type=int, default=300, metavar='N',
        help='number of epochs to train (default: auto)'
    )
    trn_args.add_argument(
        '--batch-size', type=int, default=256,
        help='input batch size for training (default: auto)'
    )
    trn_args.add_argument(
        '--test-batch-size', type=int, default=256,
        help='input batch size for testing (default: auto)'
    )
    trn_args.add_argument(
        # Bug fix: the inline "#2256" comment previously swallowed the help
        # kwarg (and closing comma), silently dropping the help text.
        # The default was at some point 2256.
        '--seed', type=int, default=22, help='Seed for reproducibility'
    )
    trn_args.add_argument(
        '--resume', action='store_true', default=False, help='if resume or not'
    )
    trn_args.add_argument(
        '--resume-model', type=str, default=None, help='resume model path'
    )
    opt_args = parser.add_argument_group('optimizer params')
    opt_args.add_argument(
        '--lr', type=float, default=0.1, metavar='LR', help='learning rate (default: auto)'
    )
    opt_args.add_argument(
        '--b1', type=float, default=0.5, help='momentum (default: 0.9)'
    )
    opt_args.add_argument(
        '--b2', type=float, default=0.99, help='momentum (default: 0.9)'
    )
    oth_args = parser.add_argument_group('others')
    oth_args.add_argument(
        "--sample-interval", type=int, default=1000, help="interval between image samples"
    )
    oth_args.add_argument(
        "--dev-interval", type=int, default=500, help="interval between image samples"
    )
    detector_args = parser.add_argument_group('Detector')
    detector_args.add_argument(
        # Bug fix: without type=int a CLI-supplied value stayed a str while
        # the default was an int.
        '--lid-k', type=int, default=30, help='the number of k (k-nearnest) in LID'
    )
    attack_args = parser.add_argument_group('Attack')
    # Deeply Supervised Discriminative Learning for Adversarial Defense (baseline)의
    # setting을 최대한 따를 것
    attack_args.add_argument(
        '--save-adv', action='store_true', default=False, help='if save adversarial examples'
    )
    attack_args.add_argument(
        '--attack-name', type=str, default='FGSM',
        choices=['Clean', 'FGSM', 'BIM', 'CW', 'PGD', 'DF', 'FAB', 'SQUARE', 'PGD2', 'AUTO']
    )
    attack_args.add_argument(
        '--target-attack', type=str, default='FGSM',
        choices=['Clean', 'FGSM', 'BIM', 'CW', 'PGD', 'DF', 'FAB', 'SQUARE', 'PGD2', 'AUTO']
    )
    attack_args.add_argument(
        # argparse applies `type` to string defaults, so this becomes a float.
        '--eps', type=str2float, default='8/255', help="For bound eta"
    )
    # arguments for PGD
    attack_args.add_argument(
        '--pgd-iters', type=int, default=10, help="# of iteration for PGD attack"
    )
    attack_args.add_argument(
        '--pgd-alpha', type=float, help="Magnitude of perturbation"
    )
    attack_args.add_argument(
        '--pgd-random-start', action='store_true', default=False,
        help="If ture, initialize perturbation using eps"
    )
    # arguments for C&W
    attack_args.add_argument(
        '--cw-c', type=str2float, default=1e-4, help="loss scaler"
    )
    attack_args.add_argument(
        '--cw-kappa', type=float, default=0, help="minimum value on clamping"
    )
    attack_args.add_argument(
        '--cw-iters', type=int, default=100, help="# of iteration for CW grdient descent"
    )
    attack_args.add_argument(
        '--cw-lr', type=float, default=0.1, help="learning rate for CW attack"
    )
    attack_args.add_argument(
        '--cw-targeted', action='store_true', default=False, help="d"
    )
    # arguments for i-FGSM
    attack_args.add_argument(
        '--bim-step', type=int, default=10, help="Iteration for iterative FGSM"
    )
    # argument for DeepFool
    attack_args.add_argument(
        '--df-step', type=int, default=30, help="Iteration for DeepFool"
    )
    attack_args.add_argument(
        '--df-overshoot', type=float, default=0.02, help="parameter for enhancing the noise"
    )
    # argument for FAB
    attack_args.add_argument(
        '--fab-step', type=int, default=30, help="Iteration for DeepFool"
    )
    attack_args.add_argument(
        '--fab-n-restarts', type=int, default=3, help="Iteration for DeepFool"
    )
    return parser
def get_config():
    """Build the experiment namespace: parsed CLI args plus derived fields."""
    parser = argparse.ArgumentParser(description="PyTorch Defense by distance-based model")
    default_parser = parser_setting(parser)
    args, _ = default_parser.parse_known_args()
    # Bug fix: `torch.cuda.is_available` was referenced without calling it;
    # a bound function object is always truthy, so the CPU fallback could
    # never trigger on CUDA-less machines.
    args.device = torch.device(f'cuda:{args.device_ids[0]}' if torch.cuda.is_available() else 'cpu')
    # number of input classes
    # CelebA: Female/Male
    # Cifar100: A hundred classes
    # The rest: Ten classes
    args.num_class = 100 if args.dataset == 'cifar100' else 10
    args.fab_num_class = args.num_class
    # consider low/high confidence in C&W attack
    if args.attack_name.lower() == 'cw':
        args.attack_save_path = f"{args.attack_name}_{args.cw_kappa}_{args.cw_lr}"
    elif args.attack_name.lower() == 'pgd':
        args.attack_save_path = f"{args.attack_name}_{args.eps}"
    else:
        args.attack_save_path = f"{args.attack_name}"
    if args.target_attack.lower() == 'cw':
        args.target_attack_save_path = f"{args.target_attack}_{args.cw_kappa}_{args.cw_lr}"
    elif args.target_attack.lower() == 'pgd':
        args.target_attack_save_path = f"{args.target_attack}_{args.eps}"
    else:
        args.target_attack_save_path = f"{args.target_attack}"
    return args
| lepoeme20/Adversarial-Detection | config.py | config.py | py | 7,062 | python | en | code | 0 | github-code | 13 |
import argparse
import json
from typing import Mapping
from typing import Tuple
import jschon
from ._main import process_json_doc
from ._yaml import create_yaml_processor
from ._yaml import YamlIndent
def _make_parser(*, prog: str, description: str) -> argparse.ArgumentParser:
    """Build the CLI parser shared by both console entry points."""
    parser = argparse.ArgumentParser(
        prog=prog,
        description=description,
    )
    parser.add_argument('path', help='path to the JSON / YAML document')
    parser.add_argument(
        '--schema', required=True, metavar='/path/to/schema.json', help='path to the JSON Schema document'
    )
    parser.add_argument(
        '--dry-run',
        '-n',
        help='if set, result is not persisted back to the original file',
        action='store_true',
    )
    parser.add_argument('--indent', type=int, default=4, help='indent size')
    parser.add_argument(
        '--yaml-indent',
        # Parsed from a "MAPPING,SEQUENCE,OFFSET" triple, e.g. "2,4,2".
        type=lambda s: YamlIndent(*map(int, s.split(','))),
        metavar='MAPPING,SEQUENCE,OFFSET',
        default=YamlIndent(2, 4, 2),
        help='YAML indent size',
    )
    return parser
def _is_yaml_path(path: str) -> bool:
return path.endswith('.yaml') or path.endswith('.yml')
def _load_doc_and_schema(
    args: argparse.Namespace,
) -> Tuple[jschon.json.JSONCompatible, Mapping[str, jschon.json.JSONCompatible]]:
    """Load the target document (JSON or YAML, chosen by extension) and its schema."""
    with open(args.path) as f:
        if _is_yaml_path(args.path):
            yaml = create_yaml_processor(indent=args.yaml_indent)
            doc_data = yaml.load(f)
        else:
            doc_data = json.load(f)
    # The schema itself is always JSON.
    with open(args.schema) as f:
        schema_data = json.load(f)
    return doc_data, schema_data
def _maybe_persist(doc_data: jschon.json.JSONCompatible, args: argparse.Namespace) -> None:
    """Write the processed document back to args.path unless --dry-run was given."""
    if args.dry_run:
        return
    if _is_yaml_path(args.path):
        with open(args.path, 'w') as f:
            yaml = create_yaml_processor(indent=args.yaml_indent)
            yaml.dump(doc_data, f)
    else:
        with open(args.path, 'w') as f:
            json.dump(doc_data, f, indent=args.indent)
def sort_main() -> None:
    """Entry point for ``jschon-sort``: reorder a document to match its schema."""
    jschon.create_catalog('2020-12')
    args = _make_parser(
        prog='jschon-sort',
        description="Sorts a JSON or YAML document to match a JSON Schema's order of properties",
    ).parse_args()
    doc, schema = _load_doc_and_schema(args)
    sorted_doc = process_json_doc(doc_data=doc, schema_data=schema, sort=True)
    _maybe_persist(sorted_doc, args)
def remove_additional_props_main() -> None:
    """Entry point for ``jschon-remove-additional-props``: strip undeclared properties."""
    jschon.create_catalog('2020-12')
    args = _make_parser(
        prog='jschon-remove-additional-props',
        description="Processes a JSON or YAML document to remove additional properties not defined in the schema",
    ).parse_args()
    doc, schema = _load_doc_and_schema(args)
    cleaned_doc = process_json_doc(doc_data=doc, schema_data=schema, remove_additional_props=True)
    _maybe_persist(cleaned_doc, args)
| ikonst/jschon-sort | jschon_tools/cli.py | cli.py | py | 2,994 | python | en | code | 3 | github-code | 13 |
35909073758 | from datetime import datetime
from uuid import uuid4
from app.domain.entities.enrolment import Enrolment
def test_enrolment_init():
    """
    Ensure the enrollment data matches constructor values
    and the status is appropriately set.
    """
    # Dummy values: ids/secret are random UUIDs, creation time is "now".
    enrolment_id = str(uuid4())
    shared_secret = str(uuid4())
    internal_ref = str(uuid4())
    created_at = datetime.now()

    enrolment = Enrolment(
        enrolment_id=enrolment_id,
        internal_reference=internal_ref,
        shared_secret=shared_secret,
        created=created_at,
    )

    assert enrolment.enrolment_id == enrolment_id
    assert enrolment.internal_reference == internal_ref
    assert enrolment.shared_secret == shared_secret
    assert enrolment.created == created_at
| ACWIC/employer-callback | tests/domain/entities/test_enrolment.py | test_enrolment.py | py | 624 | python | en | code | 0 | github-code | 13 |
35401680575 | import unittest
import warnings
try:
# Suppress warning from inside tensorflow
warnings.filterwarnings("ignore", message="module 'sre_constants' is deprecated")
import tensorflow as tf
tf.random.set_seed(1234)
except ImportError:
tf = None
from matrepr import to_html, to_latex, to_str
def generate_fixed_value(m, n):
    """Build an (m, n) integer matrix whose entries encode their position.

    Cell (r, c) holds (r + 1) * row_factor + c, where row_factor reserves
    enough decimal digits for any column index, so every value is unique
    and identifies its own row/column in rendered output.

    Returns:
        (tf.Tensor, list[list[int]]): the same data as an int64 tensor and
        as a plain nested list (for assertions against rendered cells).
    """
    row_factor = 10 ** (1 + len(str(n)))
    # Build each row directly; the previous version pre-filled rows with 1s
    # and then overwrote every cell, which was dead work.
    data = [[(r + 1) * row_factor + c for c in range(n)] for r in range(m)]
    return tf.constant(data, dtype=tf.int64), data
@unittest.skipIf(tf is None, "TensorFlow not installed")
class TensorFlowTests(unittest.TestCase):
    """Rendering tests for TensorFlow dense and sparse tensors."""

    def setUp(self):
        # Random tensors sparsified by zeroing every entry below 0.6
        # (tf.random.set_seed at import time keeps these deterministic).
        rand1d = tf.random.uniform(shape=(50,)).numpy()
        rand1d[rand1d < 0.6] = 0
        self.rand1d = tf.convert_to_tensor(rand1d)
        rand2d = tf.random.uniform(shape=(50, 30)).numpy()
        rand2d[rand2d < 0.6] = 0
        self.rand2d = tf.convert_to_tensor(rand2d)
        rand3d = tf.random.uniform(shape=(50, 30, 10)).numpy()
        rand3d[rand3d < 0.6] = 0
        self.rand3d = tf.convert_to_tensor(rand3d)
        # Each entry is (fallback_ok, tensor): when fallback_ok is False,
        # test_no_crash asserts the HTML output is a real table, not a
        # <pre> fallback rendering.
        self.tensors = [
            (True, tf.constant(5)),
            (False, tf.constant([])),
            (False, tf.constant([1, 2, 3, 4])),
            (False, tf.constant([[1, 2], [1003, 1004]])),
            (False, tf.sparse.from_dense(tf.constant([[1, 2], [1003, 1004]]))),
            (False, self.rand1d),
            (False, tf.sparse.from_dense(self.rand1d)),
            (False, self.rand2d),
            (False, tf.sparse.from_dense(self.rand2d)),
            (True, self.rand3d),
            (False, tf.sparse.from_dense(self.rand3d)),
            (False, tf.sparse.SparseTensor(indices=[[0, 3], [2, 4]], values=[10, 20], dense_shape=[3, 10])),
        ]

    def test_no_crash(self):
        # Smoke test: every tensor kind renders non-trivially in all formats.
        for fallback_ok, tensor in self.tensors:
            res = to_str(tensor, title=True)
            self.assertGreater(len(res), 5)
            res = to_html(tensor, title=True)
            self.assertGreater(len(res), 5)
            if not fallback_ok:
                self.assertNotIn("<pre>", res)
            res = to_latex(tensor, title=True)
            self.assertGreater(len(res), 5)

    def test_contents_2d(self):
        # Position-encoded values let us assert exactly which cells appear.
        source_tensor, data = generate_fixed_value(8, 8)
        for to_sparse in (False, True):
            tensor = tf.sparse.from_dense(source_tensor) if to_sparse else source_tensor
            res = to_html(tensor, notebook=False, max_rows=20, max_cols=20, title=True, indices=True)
            for row in data:
                for value in row:
                    self.assertIn(f"<td>{value}</td>", res)
            # Truncated rendering must still keep all four corner cells.
            trunc = to_html(tensor, notebook=False, max_rows=5, max_cols=5, title=True, indices=True)
            for value in (data[0][0], data[0][-1], data[-1][0], data[-1][-1]):
                self.assertIn(f"<td>{value}</td>", trunc)
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| alugowski/matrepr | tests/test_tensorflow.py | test_tensorflow.py | py | 2,963 | python | en | code | 3 | github-code | 13 |
21374406472 | # -*- coding: utf-8 -*-
"""
Created on Sun Jun 17 12:42:46 2018
@author: Keerthi
"""
import numpy as np
import cv2
import time
# Separable 2-D convolution: apply a 101x1 column kernel, then a 1x101 row
# kernel, instead of a single 101x101 2-D kernel. The explicit Python loops
# are deliberate (this is a timing exercise); only the timer API is fixed.
img = cv2.imread('lena_gray.jpg', 0)
img_arr = np.asarray(img)
gx1 = np.random.rand(101, 1)  # vertical 1-D kernel
gx2 = np.random.rand(1, 101)  # horizontal 1-D kernel
K = gx1.shape[0]
L = gx2.shape[1]
N = img_arr.shape[0]
M = img_arr.shape[1]
store_gx1 = np.zeros((N, M))
store_gx2 = np.zeros((N, M))
# BUG FIX: time.clock() was deprecated in 3.3 and removed in Python 3.8;
# time.perf_counter() is the recommended timer for elapsed intervals.
start = time.perf_counter()
img_padx1 = np.pad(img_arr, 50, 'constant')
for i in range(1, N+1):
    for j in range(1, M+1):
        # Rows i-1 .. i+99 of the padded image are the 101 rows centred on
        # original pixel (i-1, j-1); a single padded column j+49.
        gx1_pixel = np.sum(np.multiply(gx1, img_padx1[i-1:i-1+K, j+49:j+50]))
        store_gx1[i-1, j-1] = gx1_pixel
img_padx2 = np.pad(store_gx1, 50, 'constant')
for i in range(1, N+1):
    for j in range(1, M+1):
        gx2_pixel = np.sum(np.multiply(gx2, img_padx2[i+49:i+50, j-1:j-1+L]))
        store_gx2[i-1, j-1] = gx2_pixel
end = time.perf_counter() - start
print("Time taken for 1D convolution: ", end)
cv2.imshow('g', store_gx2)
cv2.waitKey(0)
cv2.destroyAllWindows() | keerthana-kannan/UB-Projects | computer vision/PA1/1d_101.py | 1d_101.py | py | 1,049 | python | en | code | 0 | github-code | 13 |
17113938934 | import logging
import gzip
import re
import time
import requests
from dotenv import load_dotenv
from os import environ, makedirs, path, remove
import shutil
from agr_literature_service.lit_processing.utils.sqlalchemy_utils import create_postgres_session
from agr_literature_service.lit_processing.data_ingest.pubmed_ingest.pubmed_update_resources_nlm import \
update_resource_pubmed_nlm
from agr_literature_service.lit_processing.data_ingest.utils.file_processing_utils import \
download_file
from agr_literature_service.lit_processing.data_ingest.pubmed_ingest.pubmed_update_references_single_mod \
import update_data
from agr_literature_service.lit_processing.utils.db_read_utils import sort_pmids, \
retrieve_all_pmids, get_mod_abbreviations
from agr_literature_service.lit_processing.utils.tmp_files_utils import init_tmp_dir
# Bare-message log format: these scripts emit human-readable progress lines.
logging.basicConfig(format='%(message)s')
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# NCBI directory (FTP-over-HTTPS) holding the daily PubMed update files.
updatefileRootURL = "https://ftp.ncbi.nlm.nih.gov/pubmed/updatefiles/"
sleep_time = 30   # seconds to pause between per-mod update batches
default_days = 8  # how many recent daily files to fetch by default
# Side effect at import time: ensure the shared tmp directory exists.
init_tmp_dir()
def update_all_data():  # pragma: no cover
    """Nightly driver: refresh NLM resource data, then apply PubMed's daily
    update files to the references of every mod (plus mod-less papers).

    Failures in the two prerequisite steps abort the run; per-mod update
    failures are logged and the loop continues. Nothing is raised.
    """
    ## take 18 sec
    logger.info("Updating resource:")
    try:
        update_resource_pubmed_nlm()
    except Exception as e:
        logger.info("Error occurred when updating resource info.\n" + str(e))
        return
    db_session = create_postgres_session(False)
    ## take 8 sec
    logger.info("Retrieving all pmids:")
    try:
        pmids_all = retrieve_all_pmids(db_session)
    except Exception as e:
        logger.info("Error occurred when retrieving pmid list from database.\n" + str(e))
        db_session.close()
        return
    logger.info("Retrieving pmids from PubMed daily update file:")
    # NOTE(review): deleted_pmids_for_mod is returned by the parser but not
    # acted on here -- confirm deletions are handled elsewhere.
    (updated_pmids_for_mod, deleted_pmids_for_mod) = download_and_parse_daily_update(db_session,
                                                                                    set(pmids_all))
    db_session.close()
    resourceUpdated = 1
    # 'NONE' covers papers not associated with any mod.
    for mod in [*get_mod_abbreviations(), 'NONE']:
        if mod == 'NONE':
            logger.info("Updating pubmed papers that are not associated with a mod:")
        else:
            logger.info("Updating pubmed papers for " + mod + ":")
        pmids = updated_pmids_for_mod.get(mod, set())
        try:
            update_data(mod, '|'.join(list(pmids)), resourceUpdated)
        except Exception as e:
            logger.info("Error occurred when updating pubmed papers for " + mod + "\n" + str(e))
        # Throttle between batches to be gentle on the upstream services.
        time.sleep(sleep_time)
def download_and_parse_daily_update(db_session, pmids_all):  # pragma: no cover
    """Download recent PubMed daily-update files and split them per PMID.

    For every updated PMID already present in the database, writes a
    standalone <pmid>.xml (re-wrapped in the file's original header) under
    XML_PATH/pubmed_xml/. Deleted PMIDs are collected from the
    <DeleteCitation> section of each file.

    Returns:
        (updated_pmids_for_mod, deleted_pmids_for_mod): dicts keyed by mod
        abbreviation, populated via sort_pmids().
    """
    load_dotenv()
    base_path = environ.get('XML_PATH', "")
    xml_path = base_path + "pubmed_xml/"
    json_path = base_path + "pubmed_json/"
    # Start from clean output directories; a failed cleanup is only logged.
    try:
        if path.exists(xml_path):
            shutil.rmtree(xml_path)
        if path.exists(json_path):
            shutil.rmtree(json_path)
    except OSError as e:
        logger.info("Error deleting old xml/json: %s" % (e.strerror))
    makedirs(xml_path)
    makedirs(json_path)
    updated_pmids_for_mod = {}
    deleted_pmids = []
    dailyfileNames = get_daily_update_files()
    for dailyfileName in dailyfileNames:
        updated_pmids = []
        dailyFileUrl = updatefileRootURL + dailyfileName
        dailyFile = base_path + dailyfileName
        download_file(dailyFileUrl, dailyFile)
        with gzip.open(dailyFile, 'rb') as f_in:
            decompressed_content = f_in.read()
        # Each chunk (except the tail) is one <PubmedArticle> record.
        records = decompressed_content.decode('utf-8').split("</PubmedArticle>")
        endOfcontent = records.pop()
        deleteRecords = []
        if "<DeleteCitation>" in endOfcontent:
            # NOTE(review): "</deleteRecords>" appears not to occur in PubMed
            # XML (the closing tag is </DeleteCitation>), so this split
            # returns the whole tail; it still works because each line is
            # re-checked for '<PMID Version' below -- confirm intended.
            deleteRecords = endOfcontent.split("<DeleteCitation>")[1].split("</deleteRecords>")[0].split('\n')
        header = None
        for record in records:
            if header is None:
                # The very first chunk carries the XML declaration/DTD header
                # before <PubmedArticleSet>; remember it for re-wrapping.
                header_lines = record.split("<PubmedArticleSet>")
                header = header_lines[0].replace('\n', '')
                record = header_lines[1]
            lines = record.split('\n')
            for line in lines:
                if '<PMID Version="1">' in line:
                    pmid = line.split('>')[1].split('<')[0]
                    # Only keep PMIDs we already track in the database.
                    if pmid in pmids_all:
                        updated_pmids.append(pmid)
                        logger.info(f"generating xml file for PMID:{pmid}")
                        # Collapse whitespace so the record is a single line.
                        record = re.sub(r'\s*\n\s*', '', record)
                        record = record.strip()
                        with open(xml_path + pmid + ".xml", "w") as f_out:
                            f_out.write(header + "<PubmedArticleSet>" + record + "</PubmedArticle></PubmedArticleSet>\n")
        for record in deleteRecords:
            if record.startswith('<PMID Version'):
                pmid = record.split('>')[1].split('<')[0]
                if pmid in pmids_all and pmid not in deleted_pmids:
                    deleted_pmids.append(pmid)
        logger.info(f"{dailyfileName}: {len(updated_pmids)} PMIDs")
        if len(updated_pmids) > 0:
            sort_pmids(db_session, updated_pmids, updated_pmids_for_mod)
        # Remove the downloaded .gz once processed.
        remove(dailyFile)
    logger.info(f"deleted PMIDs: {len(deleted_pmids)}")
    deleted_pmids_for_mod = {}
    if len(deleted_pmids) > 0:
        sort_pmids(db_session, deleted_pmids, deleted_pmids_for_mod)
    for mod in updated_pmids_for_mod:
        print(mod, len(updated_pmids_for_mod[mod]))
    return (updated_pmids_for_mod, deleted_pmids_for_mod)
def get_daily_update_files(days=None):
    """Return names of the most recent PubMed daily update files.

    Scrapes the NCBI updatefiles directory listing and walks it from the
    bottom (newest entries) upward, keeping ``*.xml.gz`` links. Filenames
    look like ``pubmed23n1436.xml.gz`` (one per day).

    NOTE(review): the ``> days`` check only breaks after len(dailyFiles)
    reaches days + 1, so up to days + 1 names can be returned -- confirm
    whether this off-by-one is intentional.
    """
    if days is None:
        days = default_days
    response = requests.request("GET", updatefileRootURL)
    files = response.text.split("<a href=")
    dailyFiles = []
    # NOTE(review): this drops the final anchor chunk of the page entirely
    # -- confirm nothing needed is lost there.
    files.pop()
    # Walk from the end of the listing (most recent files first).
    while len(files) > 0:
        file = files.pop()
        if len(dailyFiles) > days:
            break
        if ".html" not in file and ".gz.md5" not in file and ".xml.gz" in file:
            # Keep just the quoted href value, without the quotes.
            dailyFiles.append(file.split(">")[0].replace('"', ''))
    return dailyFiles
# Run the full nightly update when executed as a script.
if __name__ == "__main__":
    update_all_data()
| alliance-genome/agr_literature_service | agr_literature_service/lit_processing/data_ingest/pubmed_ingest/pubmed_update_references_all_mods.py | pubmed_update_references_all_mods.py | py | 6,731 | python | en | code | 1 | github-code | 13 |
36929261535 | from utils import *
from loader import *
from config import *
from segmentation_models_pytorch import UnetPlusPlus
import torch
import numpy as np
import torch.optim as optim
from tqdm import tqdm
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.autograd import Variable
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data.sampler import SubsetRandomSampler
import warnings
warnings.filterwarnings("ignore")
def main():
    """Train a UNet++ (efficientnet-b1 encoder) on GTAV semantic segmentation.

    Splits the dataset into train/validation by random index sampling, trains
    with CrossEntropyLoss, logs to TensorBoard, and checkpoints the model
    whenever validation loss improves.
    """
    Model = UnetPlusPlus(
        encoder_name="efficientnet-b1",
        encoder_depth=5,
        encoder_weights=None,
        in_channels=3,
        classes=35,
        activation=None,
    )
    Model.cuda()
    # Random train/validation split over the same underlying dataset.
    train_dataset = GTAVDataset(mode="train", dataset_path=args.dataset_path)
    indices = list(range(len(train_dataset)))
    np.random.shuffle(indices)
    split = int(np.floor(args.validation_split * len(train_dataset)))
    train_idx, valid_idx = indices[split:], indices[:split]
    train_sampler = SubsetRandomSampler(train_idx)
    valid_sampler = SubsetRandomSampler(valid_idx)
    train_loader = DataLoader(
        train_dataset, batch_size=args.batch_size, num_workers=2, sampler=train_sampler
    )
    val_loader = DataLoader(
        train_dataset, batch_size=args.batch_size, num_workers=2, sampler=valid_sampler
    )
    # NOTE(review): `scheduler` is only created on the "adam" path, but
    # scheduler.step() at the bottom runs every epoch regardless -- running
    # with --optimizer sgd raises NameError.
    if args.optimizer == "adam":
        optimizer = optim.Adam(Model.parameters(), lr=args.learning_rate)
        scheduler = ReduceLROnPlateau(
            optimizer=optimizer, mode="min", patience=10, verbose=True, factor=0.5
        )
    elif args.optimizer == "sgd":
        optimizer = optim.SGD(Model.parameters(), lr=args.learning_rate, momentum=0.9)
    criterion = nn.CrossEntropyLoss()
    criterion.cuda()
    # Best (lowest) validation loss seen so far; 1.0 is the initial bar.
    best_Model = 1.0
    tb = SummaryWriter("logs")
    # Only id2code is used below (RGB mask <-> class-id mapping).
    code2id, id2code, name2id, id2name = one_hot_encoded()
    if not os.path.isdir(args.ModelSavePath):
        os.makedirs(args.ModelSavePath)
    print("Start Training")
    for epoch in range(args.epochs):
        for i, data in enumerate(train_loader):
            optimizer.zero_grad()
            inputs, labels = data
            # RGB masks -> per-pixel one-hot class encodings, per sample.
            labels = [
                rgb_to_onehot(labels[X, :, :, :], id2code)
                for X in range(labels.shape[0])
            ]
            labels = torch.from_numpy(np.asarray(labels))
            true_labels = labels.cuda()
            # CrossEntropyLoss wants integer class indices: argmax the one-hot.
            inputs, labels = Variable(inputs.cuda()), Variable(
                torch.argmax(labels, -1).cuda()
            )
            outputs = Model(inputs)
            preds = torch.softmax(outputs, dim=1)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            if (i + 1) % 100 == 0:
                print(
                    "Epoch: [%d/%d], Step: [%d/%d], Loss: %.4f"
                    % (epoch + 1, args.epochs, i + 1, len(train_loader), loss.item())
                )
                tb.add_scalar(
                    "train_loss_iter", loss.item(), epoch * len(train_loader) + i
                )
                tb.add_figure(
                    "predictions vs. actuals",
                    plot_net_predictions(
                        inputs, true_labels, preds, args.batch_size, id2code
                    ),
                    global_step=i + len(train_loader) * epoch,
                )
        with torch.no_grad():
            Model.eval()
            val_loss = 0.0
            val_dice = 0.0
            val_iou = 0.0
            for i, data in enumerate(tqdm(val_loader)):
                inputs, labels = data
                labels = [
                    rgb_to_onehot(labels[X, :, :, :], id2code)
                    for X in range(labels.shape[0])
                ]
                labels = torch.from_numpy(np.asarray(labels))
                inputs = Variable(inputs.cuda())
                outputs = Model(inputs)
                preds = torch.softmax(outputs, dim=1)
                # NOTE(review): unlike the training step, this feeds softmaxed
                # `preds` and CPU one-hot `labels` (no argmax, no .cuda()) to
                # CrossEntropyLoss -- the two paths are inconsistent; confirm.
                val_loss += criterion(preds, labels).item()
                val_dice += dice_score(preds, labels).item()
                val_iou += iou_score(preds, labels).item()
            val_loss /= len(val_loader)
            val_dice /= len(val_loader)
            val_iou /= len(val_loader)
            print(
                "Validation Dice: %.4f, Validation Loss: %.4f, Validation IoU: %.4f"
                % (val_dice, val_loss, val_iou)
            )
            tb.add_scalar("Validation Dice", val_dice, epoch)
            tb.add_scalar("Validation Loss", val_loss, epoch)
            tb.add_scalar("Validation IoU", val_iou, epoch)
            Model.train()
            # Checkpoint on improved validation loss.
            if val_loss < best_Model:
                best_Model = val_loss
                torch.save(Model.state_dict(), args.ModelSavePath + "best_Model.pkl")
                print("Saving best Model")
        scheduler.step(val_loss)
# Script entry point.
if __name__ == "__main__":
    main()
| Rituraj-commits/Semantic-Segmentation | train.py | train.py | py | 4,951 | python | en | code | 7 | github-code | 13 |
20524269740 | import hashlib
# Puzzle salt (Advent of Code 2016, day 14). NOTE: shadows the builtin input().
input = "qzyelonm"
# Overridden with the worked example's salt -- remove this line to run the
# real puzzle input.
input = "abc"
index = 0  # current hash index being examined by the driver loop
found = 0  # number of confirmed one-time-pad keys found so far
def hash(str2hash):
    """Return the hex MD5 digest of *str2hash*.

    NOTE: intentionally shadows the builtin ``hash`` to match existing
    callers in this script.
    """
    # hexdigest() is already lowercase, so the previous .lower() was redundant.
    return hashlib.md5(str2hash.encode()).hexdigest()
def next1000(r):
    """Return True if any of the next 1000 hashes contains five *r* in a row.

    Scans MD5(input + str(index + 1 + j)) for j in 0..999, where ``input``
    and ``index`` are module-level globals maintained by the driver loop.
    """
    for j in range(1000):
        result2 = hash(input + str(index + 1 + j))
        for k in range(len(result2)-4):
            if r == result2[k] and r == result2[k+1] and r == result2[k+2] and r == result2[k+3] and r == result2[k+4]:
                # Debug output: which index produced a confirmed key.
                print(index, found)
                # print(result2)
                return True
    return False
def check(result):
    """Count *result* as a key if its first triplet is confirmed.

    Per the puzzle rules only the FIRST run of three identical characters
    matters, hence the unconditional return after the first triplet found.
    ``found`` is incremented when next1000() sees a quintuple of the same
    character within the following 1000 hashes.
    """
    global found
    for i in range(len(result)-2):
        r = result[i]
        if r == result[i+1] and r == result[i+2]:
            if next1000(r):
                # print(result)
                found += 1
            return
# Part 1 asks for the index producing the 64th key; the 68 here (and the
# notes below) apparently reflect debugging an off-by-some issue with the
# first-triplet rule.
# while found < 64:
while found < 68:
    result = hash(input + str(index))
    check(result)
    index += 1
# The loop increments once past the last key's index, hence the -1.
print(index-1)
# part 1: 15168 but in wrong place index 66 and example at index 67
| Lesley55/AdventOfCode | 2016/14/part1.py | part1.py | py | 994 | python | en | code | 1 | github-code | 13 |
def build_response_dictionary(response):
    """Map each question id in a poll response to its answer.

    ``poll_response_questions`` and ``poll_response_answers`` are parallel
    comma-separated strings; the i-th question id pairs with the i-th answer.

    Returns:
        dict: ``{'question_id': 'answer'}`` for every question.
    """
    question_ids = response.poll_response_questions.split(",")
    question_answers = response.poll_response_answers.split(",")
    # zip() pairs the two parallel lists directly instead of indexing by hand.
    # Malformed rows with fewer answers than questions are truncated rather
    # than raising IndexError as the old loop did.
    return dict(zip(question_ids, question_answers))
def build_questionid_list(questions):
    """Return the ``question_id`` of every question, preserving input order."""
    # Comprehension replaces the manual append loop.
    return [question.question_id for question in questions]
def get_question_type(questions, question_id):
    """Extract a question's type from an in-memory list (avoids extra DB calls).

    Returns None when no question matches *question_id*.
    """
    return next(
        (q.question_type for q in questions if q.question_id == question_id),
        None,
    )
def get_question_from_list(questions, q_id):
    """Return the question object whose id equals *q_id*, or None if absent."""
    return next((q for q in questions if q.question_id == q_id), None)
| porowns/geopoll | utils/poll.py | poll.py | py | 956 | python | en | code | 0 | github-code | 13 |
11598986930 | #!/usr/bin/env python3
# vim: ts=4 sw=4 et:
'''
cli implementations of many mutagen metadata functions,
created for several compressed audio formats, with the
intention of mainly being used to tag recordings of live
concerts, & convert filepaths to tags or tags to filenames
'''
import re
import sys
import os
from optparse import OptionParser as OP
from mutagen import File
from mutagen.mp3 import EasyMP3 as MP3
from libspatter.taggr import Subster
from libspatter import __version__
class Confirmer():
    ''' user confirmation agent

    Decides whether file-modifying actions should proceed, honoring the
    --noact/--confirm options and an interactive y/a/n prompt, where "a"
    means "yes to this and all remaining files".
    '''
    def __init__(self, optt):
        self.opt = optt
        # Confirmation only matters when at least one mutating option was given.
        self.use = any([optt.tag, optt.add, optt.remove, optt.clear,
                        optt.pattern, optt.tag2fn])
        self._all = False

    def confirm(self):
        ''' honor certain options regarding actual file-writing,
            and input of y or n to proceed with file-writing. '''
        if not self.use:
            # No mutating options: nothing to confirm (falsy, as before).
            return False
        if self.opt.noact:
            return False
        if self._all:
            return True
        if not self.opt.confirm:
            return True
        # BUG FIX: raw_input() is Python 2 only; this file targets Python 3.
        resp = input('confirm changes? (y/a/[n])').strip().lower()
        # BUG FIX: the old resp[0] indexing crashed on an empty reply;
        # pressing Enter now defaults to "no", as the [n] prompt implies.
        if resp.startswith('y'):
            return True
        if resp.startswith('a'):
            self._all = True
            return True
        return False
class Speaker():
    ''' wrapper to simplify printing user messages

    Apparently aligns follow-up messages into a second output column:
    ``self.x`` accumulates the length of '\\n\\t'-prefixed messages and
    '\\t'-prefixed messages are padded/sliced against it -- the exact
    layout intent should be confirmed against real output.
    '''
    def __init__(self, quiet):
        # Running column offset used by the '\t' branch below; never reset.
        self.x = 0
        self.quiet = quiet
    def speak(self, strng):
        ''' print formatted messages '''
        if not self.quiet:
            if strng[:2] == '\n\t':
                # Continuation line: indent to column 48.
                # (The trailing comma inside print() calls here is a Python 2
                # leftover; in Python 3 it has no effect.)
                print(' ' *48, strng[2:],)
                self.x += len(strng)
            elif strng[:1] == '\t':
                # Pad generously, then trim by the accumulated offset.
                strng += ' '*120
                strng = strng[self.x:]
                print(strng[1:],)
            else:
                print(strng,)
def main():
    """Command-line entry point.

    Parses options, then for each file either prints its tags, edits tags
    (MP3s via EasyMP3, everything else via mutagen.File), derives tags from
    the filename (--pattern/--fn2tag), or renames the file from its tags
    (--tag2fn).
    """
    args = sys.argv
    err = 0
    if 'id3help' in args:
        from mutagen.easyid3 import EasyID3
        for key in EasyID3.valid_keys.keys():
            print(key,)
    from optparse import OptionParser as OP
    OP = OP()
    OP.usage = ("%prog [options] filenames")
    OP.epilog = '%s id3help: for help with id3 tags' % os.path.basename(args[0])
    OP.add_option('-t', '--tag', dest='tag', action='append',
                  help="set a tag", metavar='tag=value')
    OP.add_option('-a', '--add', dest='add', action='append',
                  help='set/add values to a tag, without removing any existing values',
                  metavar='tag=value')
    OP.add_option('-p', '--pattern', dest='pattern', action='store',
                  help='substitution pattern from filename', metavar="'%n %t.flac'")
    OP.add_option('--fn2tag', dest='pattern', action='store',
                  help='same as -p | --pattern')
    OP.add_option('-r', '--remove', dest='remove', action='append',
                  help='remove a tag value or entire tag', metavar="'tag' or 'tag=value'")
    OP.add_option('-j', '--justify', dest='justify', action='store_true',
                  help='zero-justify tracknumbers')
    OP.add_option('--clear', dest='clear', action='store_true', help='clear all tags')
    OP.add_option('-n', '--noact', dest='noact', action='store_true',
                  help="just show what changes would be made")
    OP.add_option('-c', '--confirm', dest='confirm', action='store_true',
                  help='show changes and prompt for confirmation to save')
    OP.add_option('-f', '--files', dest='filenames', action='append',
                  help='one or more filenames/globs')
    OP.add_option('-q', '--quiet', dest='quiet', action='store_true', help='no output to stdout')
    OP.add_option('--tag2fn', dest='tag2fn', action='store',
                  help='substitution pattern from tags', metavar="'%n %t.flac'")
    OP.add_option('-s', '--filter', dest='symbols', action='store',
                  help='one or more characters to filter from tags used to build filenames',
                  metavar="'!@$&*/\?'")
    OP.add_option('-m', '--map', dest='map', action='store',
                  help='replace all instances of a char with another char\nin conjunction with --tag2fn',
                  metavar="/ -")
    OP.add_option('-i', '--index', dest='idx', action='store_true',
                  help='index files by filename order (persistent file order)')
    OP.add_option('-v', '--version', dest='vers', action='store_true', help='show version')
    argstr = ' '.join(args)
    if len(args) < 2:
        OP.print_usage()
        # print("version %s" % __version__)
        print('-h|--help for help')
        sys.exit(1)
    # Reject a value-taking option immediately followed by another option
    # (e.g. "-t -r"), which would otherwise swallow it as the value.
    p = '(-t|--tag|-a|--add|-p|--pattern|-r|--remove|-f|--files)\ +?\-[^\ ]*'
    mo = re.search(p, argstr)
    if mo:
        print('illegal option combination: ', mo.group())
        sys.exit(1)
    (opt, fnames) = OP.parse_args()
    if opt.vers:
        print ('%s %s' % (OP.get_prog_name(), __version__))
    if opt.filenames:
        fnames += opt.filenames
    for fname in fnames:
        if not os.path.exists(fname):
            print('%s: no such file' % fname)
            err += 1
    if err:
        sys.exit(err)
    cfmr = Confirmer(opt)
    fnum = 0
    idx = 0  # NOTE(review): never used below
    if opt.pattern:
        subster = Subster(opt.pattern)
    elif opt.tag2fn:
        subster = Subster(opt.tag2fn, 'tag2fn')
    else:
        subster = Subster('', '')
    # True when any tag-modifying option was given.
    modded = any([opt.clear, opt.remove, opt.add, opt.tag, opt.pattern, opt.justify])
    spkr = Speaker(opt.quiet)
    # Longest basename, used to align the "-->" rename column below.
    top_length = 0
    for fname in fnames:
        bfname = os.path.basename(fname)
        top_length = len(bfname) if len(bfname) > top_length else top_length
    for fname in fnames:
        fnum += 1
        vals = {}
        keys = []  # NOTE(review): never used below
        origfn = fname
        if os.path.splitext(fname)[1] == '.mp3':
            # MP3 branch: collect all edits into `vals`, then apply at once.
            try:
                mf = MP3(fname)
            except IOError:
                spkr.speak("\ncan't open %s" % fname)
                continue
            spkr.speak("processing %s" % fname)
            if opt.clear:
                mf.clear()
            for action in opt.remove or []:
                # Split "tag=value"; a bare "tag" yields an empty value.
                k, v = (action.split('=', 1)+[''])[:2]
                vals[k] = mf.pop(k, [])
                if k and not v:
                    vals[k] = []
                elif v and v in vals[k]:
                    vals[k].remove(v)
            for action in opt.tag or []:
                k, v = (action.split('=', 1)+[''])[:2]
                vals[k] = [v]
            for action in opt.add or []:
                k, v = (action.split('=', 1)+[''])[:2]
                if vals.get(k, []):
                    vals[k] += mf.pop(k, [])
                else:
                    vals[k] = mf.pop(k, [])
                vals[k].extend([v])
            if subster.pattern:
                # Harvest tag values parsed out of the filename.
                d = subster.getdict(fname)
                for k in d:
                    values = d.get(k, [])
                    if not isinstance(values, list):
                        values = [values]
                    try:
                        vals[k].extend(values)
                    except KeyError:
                        vals[k] = values
            if opt.justify:
                if not vals.get('tracknumber'):
                    vals['tracknumber'] = fnum
                # Zero-pad tracknumber to the width of the file count.
                width = len(str(len(fnames)))
                n = width - len(str(vals['tracknumber']))
                vals['tracknumber'] = [n*'0' + str(vals['tracknumber'])]
            if not modded:
                # Nothing to change: just display the current tags.
                if not opt.quiet:
                    print(mf.pprint())
                continue
            if opt.noact or opt.confirm:
                for k in vals:
                    print(k+'='+str(vals[k]))
            if opt.noact:
                continue
            if opt.confirm and not cfmr.confirm():
                continue
            for k in vals:
                try:
                    mf.update({k:vals[k]})
                    # mf.save( )
                except ValueError:
                    pass
            mf.save()
        else:
            # Generic branch (FLAC/Ogg/etc.): edits applied directly to mf.
            try:
                # print(fname)
                mf = File(fname)
            except IOError:
                spkr.speak("can't open %s" % fname)
                continue
            spkr.speak(os.path.basename(fname))
            if opt.idx:
                trn = mf.get('tracknumber', None)
                # NOTE(review): unicode() is Python 2 only -- on Python 3 the
                # --index path raises NameError; presumably should be str(fnum).
                mf['idx'] = unicode(fnum)
                if trn:
                    mf['idx'] += trn
                mf.save()
                print(' indexed')
            if opt.clear:
                mf.clear()
                spkr.speak('\n\ttags cleared..')
            for action in opt.remove or []:
                k, v = (action.split('=', 1)+[''])[:2]
                t = mf.pop(k, [])
                if v and v in t:
                    t.remove(v)
                    spkr.speak(str(k) + ' removes ' + str(v))
                if v and t:
                    # Re-store the remaining values for this tag.
                    mf.update({k:t})
            for action in opt.tag or []:
                if '=' in action:
                    k, v = action.split('=', 1)
                    if k and v:
                        mf.update({k:[v]})
                        spkr.speak('\t\ttag set: ' + k + '=' + v)
            for action in opt.add or []:
                if '=' in action:
                    k, v = action.split('=', 1)
                    mf.update({k:mf.get(k, [])+[v]})
                    spkr.speak('\n\ttag appended: ' + k + '=' + v)
            if subster.mode == 'fn2tag':
                d = subster.getdict(fname)
                for k in d:
                    mf.update({k:d[k]})
                    spkr.speak('\n\tfrom filename: ' + k + '=' + d[k])
            if subster.mode == 'tag2fn':
                # Build a new filename from tag values. getfnlist() apparently
                # alternates literal text and tag names; the `lit` flip below
                # selects which each item is -- confirm against Subster.
                fname = ''
                fnlist = subster.getfnlist()
                if 'tracknumber' in fnlist:
                    tn = 1
                else:
                    tn = 0
                lit = True
                for item in fnlist:
                    lit = not lit
                    if lit:
                        # Accept either 'track' or 'tracknumber' in patterns.
                        if not tn and item == 'tracknumber':
                            item = 'track'
                        if tn and item == 'track':
                            item = 'tracknumber'
                        if item.startswith('track') and opt.justify:
                            subst = mf[item][0].rjust(2, '0')
                        else:
                            subst = mf[item][0]
                        if opt.symbols:
                            # Strip user-supplied unwanted characters.
                            pat = '['+opt.symbols+']'
                            subst = re.sub(pat, '', subst)
                            subst = subst.strip()
                        fname += subst
                    else:
                        fname += item
                # '/' would split the name into directories; flatten it.
                if '/' in fname:
                    fname = re.sub('/', '-', fname)
                # if opt.map:
                #     fname = map(fname,opt.map)
            if opt.noact or opt.confirm:
                pass  # NOTE(review): dead branch -- no preview is printed here
            if not any([modded, opt.tag2fn, opt.quiet]):
                print(mf.pprint(),)
            if cfmr.confirm():
                if opt.tag2fn:
                    if opt.map:
                        # "--map 'a b'" replaces every a with b in the name.
                        a, b = opt.map.split()
                        fname = re.sub(a, b, fname)
                    pth = os.path.join(os.path.dirname(origfn), fname)
                    # Align the "-->" column to the longest basename.
                    second_column = top_length+2
                    tab = (second_column-len(os.path.basename(origfn)))*' '
                    try:
                        os.rename(origfn, pth)
                        print(tab + '--> ' + fname),
                        # spkr.speak( 'renamed... ' + fname )
                    except IOError:
                        raise IOError
                else:
                    mf.save()
                    spkr.speak('\tsaved!')
| balinbob/spatter | libspatter/spatter.py | spatter.py | py | 11,979 | python | en | code | 0 | github-code | 13 |
38586812823 | # GetAppStats
#
import requests
import os
import datetime, time
import mysql.connector as mysql
from biokbase.catalog.Client import Catalog
from biokbase.narrative_method_store.client import NarrativeMethodStore
requests.packages.urllib3.disable_warnings()
"""
THIS IS A SCRIPT MADE TO BACKFILL THE QUEUE TIMES FOR THE APP STATS RETRIEVED FROM APP CATALOG.
THIS PROBABLY ONLY NEEDED TO BE RUN THE ONE TIME, AND WILL NOT BE RELEVANT ONCE WE SWITCH OVER TO EE2.
"""
# Service clients and DB connection settings all come from the environment;
# a missing variable fails fast with KeyError at import time.
catalog = Catalog(url=os.environ["CATALOG_URL"], token=os.environ["METRICS_USER_TOKEN"])
nms = NarrativeMethodStore(url=os.environ["NARRATIVE_METHOD_STORE"])
sql_host = os.environ["SQL_HOST"]
query_on = os.environ["QUERY_ON"]
# Insures all finish times within last day.
# NOTE: evaluated once at import time, so a long-running process keeps a
# stale "yesterday".
yesterday = datetime.date.today() - datetime.timedelta(days=1)
def get_user_app_stats(
    start_date=datetime.datetime.combine(yesterday, datetime.datetime.min.time()),
    end_date=datetime.datetime.combine(yesterday, datetime.datetime.max.time()),
):
    """
    Gets a raw exec-stats dump from the app catalog for a date window.

    Accepts datetimes or "YYYY-MM-DD" strings; defaults cover yesterday
    (computed once at import time, see module-level ``yesterday``).

    NOTE(review): despite the window arguments, the request below hardcodes
    begin=0, so the catalog is asked for everything up to end_date --
    presumably deliberate for this one-off backfill; confirm before reuse.
    """
    # From str to datetime, defaults to zero time.
    if type(start_date) == str:
        start_date = datetime.datetime.strptime(start_date, "%Y-%m-%d")
        end_date = datetime.datetime.strptime(end_date, "%Y-%m-%d")
    # Due to issue with method filtering only by creation_time need to grab
    # all 14 days before begin date to insure getting all records with a possible
    # finish_time within the time window specified. (14 days, 24 hours, 60 mins, 60 secs)
    # NOTE: strftime("%s") is platform-dependent (not in the C standard), and
    # `begin` ends up unused because of the begin=0 override below.
    begin = int(start_date.strftime("%s")) - (14 * 24 * 60 * 60)
    end = int(end_date.strftime("%s"))
    # print("BEGIN: " + str(begin))
    # print("END: " + str(end))
    # time_interval = {'begin': begin , 'end': end}
    time_interval = {"begin": 0, "end": end}
    stats = catalog.get_exec_raw_stats(time_interval)
    return stats
def helper_concatenation(var_pre, var_post):
    """Join two name fields as "pre/post", substituting "Not Specified" for None.

    Returns None when both fields are missing (None or "Not Specified"),
    e.g. for module and app/func name pairs.
    """
    pre = "Not Specified" if var_pre is None else var_pre
    post = "Not Specified" if var_post is None else var_post
    if pre == "Not Specified" and post == "Not Specified":
        return None
    return pre + "/" + post
def backfill_queue_times_in_app_stats(start_date=None, end_date=None):
    """Backfill queue_time on metrics.user_app_usage from catalog exec stats.

    queue_time = exec_start_time - creation_time (rounded seconds). Records
    carrying a job_id are matched directly on it; older records without one
    are matched on the (user, app, timing, error, hash, func) tuple.

    Args:
        start_date, end_date: optional "YYYY-MM-DD" strings or datetimes;
            both must be given or both omitted (the default window comes
            from get_user_app_stats).
    Raises:
        ValueError: if only one of start_date/end_date is supplied.
    Returns:
        1 on completion (legacy success flag).
    """
    if start_date is not None or end_date is not None:
        if start_date is not None and end_date is not None:
            app_usage_list = get_user_app_stats(start_date, end_date)
        else:
            raise ValueError("If start_date or end_date is set, then both must be set.")
    else:
        app_usage_list = get_user_app_stats()
    metrics_mysql_password = os.environ["METRICS_MYSQL_PWD"]
    # connect to mysql
    db_connection = mysql.connect(
        host=sql_host, user="metrics", passwd=metrics_mysql_password, database="metrics"
    )
    cursor = db_connection.cursor()
    query = "use " + query_on
    cursor.execute(query)
    wo_job_prep_cursor = db_connection.cursor(prepared=True)
    # BUG FIX: the finish_date predicate previously read
    # "finish_date - FROM_UNIXTIME(%s)", an arithmetic expression evaluated
    # as a boolean, which matches every row whose finish_date DIFFERS from
    # the parameter. It must be an equality test like the other predicates.
    update_queue_times_without_job_id = (
        "update metrics.user_app_usage "
        "set queue_time = %s "
        "where job_id is null and username = %s "
        "and app_name = %s "
        "and start_date = FROM_UNIXTIME(%s) "
        "and finish_date = FROM_UNIXTIME(%s) "
        "and run_time = %s and is_error = %s "
        "and git_commit_hash = %s and func_name = %s ;"
    )
    with_job_prep_cursor = db_connection.cursor(prepared=True)
    update_queue_times_with_job_id = (
        "update metrics.user_app_usage set queue_time = %s where job_id = %s ;"
    )
    num_wo_job_id_updates = 0
    num_with_job_id_updates = 0
    # update each record.
    for record in app_usage_list:
        is_error = False
        if record["is_error"] == 1:
            is_error = True
        # Queue time: seconds the job waited between submission and start.
        # (Local renamed from `input`, which shadowed the builtin.)
        if "job_id" not in record:
            params = [
                round((record["exec_start_time"] - record["creation_time"])),
                record["user_id"],
                helper_concatenation(record["app_module_name"], record["app_id"]),
                round(record["exec_start_time"]),
                round(record["finish_time"]),
                round((record["finish_time"] - record["exec_start_time"])),
                is_error,
                record["git_commit_hash"],
                helper_concatenation(record["func_module_name"], record["func_name"]),
            ]
            # DO update
            wo_job_prep_cursor.execute(update_queue_times_without_job_id, params)
            num_wo_job_id_updates += 1
        else:
            params = [
                round((record["exec_start_time"] - record["creation_time"])),
                record.get("job_id"),
            ]
            with_job_prep_cursor.execute(update_queue_times_with_job_id, params)
            num_with_job_id_updates += 1
    db_connection.commit()
    print("TOTAL WO JOB UPDATES : " + str(num_wo_job_id_updates))
    print("TOTAL WITH JOB UPDATES : " + str(num_with_job_id_updates))
    return 1
backfill_queue_times_in_app_stats()
| kbase/metrics | source/custom_scripts/backfill_app_stats_queue_times.py | backfill_app_stats_queue_times.py | py | 5,518 | python | en | code | 1 | github-code | 13 |
73607397456 | from typing import Iterable, Optional, TypeVar
import torch
from torcheval.metrics.functional.classification.precision import (
_binary_precision_update,
_precision_compute,
_precision_param_check,
_precision_update,
)
from torcheval.metrics.metric import Metric
TPrecision = TypeVar("TPrecision")
TBinaryPrecision = TypeVar("TBinaryPrecision")
class MulticlassPrecision(Metric[torch.Tensor]):
"""
Compute the precision score, the ratio of the true positives and the sum of
true positives and false positives.
Its functional version is :func:`torcheval.metrics.functional.multiclass_precision`.
We cast NaNs to 0 in case some classes have zero instances in the predictions.
See also :class:`BinaryPrecision <BinaryPrecision>`
Args:
num_classes (int):
Number of classes.
average (str):
- ``"micro"`` (default): Calculate the metrics globally.
- ``"macro"``: Calculate metrics for each class separately, and return their unweighted mean.
Classes with 0 true and predicted instances are ignored.
- ``"weighted"``: Calculate metrics for each class separately, and return their weighted sum.
Weights are defined as the proportion of occurrences of each class in "target".
Classes with 0 true and predicted instances are ignored.
- ``None``: Calculate the metric for each class separately, and return
the metric for every class.
Examples::
>>> import torch
>>> from torcheval.metrics import MulticlassPrecision
>>> metric = MulticlassPrecision(num_classes=4)
>>> input = torch.tensor([0, 2, 1, 3])
>>> target = torch.tensor([0, 1, 2, 3])
>>> metric.update(input, target)
>>> metric.compute()
tensor(0.5000)
>>> metric = MulticlassPrecision(average=None, num_classes=4)
>>> input = torch.tensor([0, 2, 1, 3])
>>> target = torch.tensor([0, 1, 2, 3])
>>> metric.update(input, target)
>>> metric.compute()
tensor([1., 0., 0., 1.])
>>> metric = MulticlassPrecision(average="macro", num_classes=2)
>>> input = torch.tensor([0, 0, 1, 1, 1])
>>> target = torch.tensor([0, 0, 0, 0, 1])
>>> metric.update(input, target)
>>> metric.compute()
tensor(0.5833)
>>> metric = MulticlassPrecision(num_classes=4)
>>> input = torch.tensor([[0.9, 0.1, 0, 0], [0.1, 0.2, 0.4, 0.3], [0, 1.0, 0, 0], [0, 0, 0.2, 0.8]])
>>> target = torch.tensor([0, 1, 2, 3])
>>> metric.update(input, target)
>>> metric.compute()
tensor(0.5)
"""
def __init__(
self: TPrecision,
*,
num_classes: Optional[int] = None,
average: Optional[str] = "micro",
device: Optional[torch.device] = None,
) -> None:
super().__init__(device=device)
_precision_param_check(num_classes, average)
self.num_classes = num_classes
self.average = average
if average == "micro":
self._add_state("num_tp", torch.tensor(0.0, device=self.device))
self._add_state("num_fp", torch.tensor(0.0, device=self.device))
self._add_state("num_label", torch.tensor(0.0, device=self.device))
else:
# num_classes has been verified as a positive integer. Add this line to bypass pyre.
assert isinstance(
num_classes, int
), f"num_classes must be a integer, but got {num_classes}"
self._add_state(
"num_tp",
torch.zeros(num_classes, device=self.device),
)
self._add_state(
"num_fp",
torch.zeros(num_classes, device=self.device),
)
self._add_state(
"num_label",
torch.zeros(num_classes, device=self.device),
)
@torch.inference_mode()
# pyre-ignore[14]: inconsistent override on *_:Any, **__:Any
def update(
self: TPrecision, input: torch.Tensor, target: torch.Tensor
) -> TPrecision:
"""
Update states with the ground truth labels and predictions.
Args:
input (Tensor): Tensor of label predictions.
It could be the predicted labels, with shape of (n_sample, ).
It could also be probabilities or logits with shape of (n_sample, n_class).
``torch.argmax`` will be used to convert input into predicted labels.
target (Tensor): Tensor of ground truth labels with shape of (n_sample, ).
"""
input = input.to(self.device)
target = target.to(self.device)
num_tp, num_fp, num_label = _precision_update(
input, target, self.num_classes, self.average
)
self.num_tp += num_tp
self.num_fp += num_fp
self.num_label += num_label
return self
@torch.inference_mode()
def compute(self: TPrecision) -> torch.Tensor:
"""
Return the precision score.
0 is returned if no calls to ``update()`` are made before ``compute()`` is called.
"""
return _precision_compute(
self.num_tp, self.num_fp, self.num_label, self.average
)
@torch.inference_mode()
def merge_state(self: TPrecision, metrics: Iterable[TPrecision]) -> TPrecision:
for metric in metrics:
self.num_tp += metric.num_tp.to(self.device)
self.num_fp += metric.num_fp.to(self.device)
self.num_label += metric.num_label.to(self.device)
return self
class BinaryPrecision(MulticlassPrecision):
"""
Compute the precision score for binary classification tasks, which is calculated
as the ratio of the true positives and the sum of true positives and false positives.
Its functional version is :func:`torcheval.metrics.functional.binary_precision`.
We cast NaNs to 0 when classes have zero positive instances in prediction labels
(when TP + FP = 0).
See also :class:`MulticlassPrecision <MulticlassPrecision>`
Args:
threshold (float, default = 0.5): Threshold for converting input into predicted labels for each sample.
``torch.where(input < threshold, 0, 1)`` will be applied to the ``input``.
Examples::
>>> import torch
>>> from torcheval.metrics import BinaryPrecision
>>> metric = BinaryPrecision()
>>> input = torch.tensor([0, 1, 0, 1])
>>> target = torch.tensor([1, 0, 1, 1])
>>> metric.update(input, target)
>>> metric.compute()
tensor(0.5) # 1 / 2
>>> metric = BinaryPrecision(threshold=0.7)
>>> input = torch.tensor([0, 0.9, 0.6, 0.7])
>>> target = torch.tensor([1, 0, 1, 1])
>>> metric.update(input, target)
>>> metric.compute()
tensor(0.5) # 1 / 2
"""
def __init__(
self: TBinaryPrecision,
*,
threshold: float = 0.5,
device: Optional[torch.device] = None,
) -> None:
super().__init__(num_classes=2, device=device)
self.threshold = threshold
@torch.inference_mode()
def update(
self: TBinaryPrecision,
input: torch.Tensor,
target: torch.Tensor,
) -> TBinaryPrecision:
"""
Update states with the ground truth labels and predictions.
input (Tensor): Tensor of the predicted labels/logits/probabilities, with shape of (n_sample, ).
``torch.where(input < threshold, 0, 1)`` will be used to convert input into predicted labels.
target (Tensor): Tensor of ground truth labels with shape of (n_sample,).
"""
input = input.to(self.device)
target = target.to(self.device)
num_tp, num_fp, num_label = _binary_precision_update(
input, target, self.threshold
)
self.num_tp += num_tp
self.num_fp += num_fp
self.num_label += num_label
return self
| pytorch/torcheval | torcheval/metrics/classification/precision.py | precision.py | py | 8,095 | python | en | code | 155 | github-code | 13 |
29030579564 | # -*- coding: utf-8 -*-
from dateutil import parser
import datetime
import matplotlib.colors as colors
import matplotlib.cm as cmx
import matplotlib.pylab as pl
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import matplotlib.font_manager as fm
import operator
from matplotlib import cm
from colorspacious import cspace_converter
from collections import OrderedDict
import matplotlib.ticker as tck
def css_astronauts():
cmaps = OrderedDict()
class Astronaut:
def __init__(self, name, uid, gender, DOB):
self.name = name
self.uid = uid
self.gender = gender
self.DOB = parser.parse(DOB)
self.missions = []
self.mission_time = []
self.total_time = 0
self.num_of_missions = len(self.missions)
self.age = (datetime.datetime.utcnow()+datetime.timedelta(hours=8)-self.DOB).days/365.2425
self.EVAs = []
self.EVA_time =[]
self.total_eva_time = 0
def getDetails(self):
print("Name: ", self.name)
print("Gender: ",self.gender)
print("Date of Birth: ",self.DOB)
print("Age: ",round(self.age,1)," Years Old")
print("Number of Missions: ",self.num_of_missions)
for mission in self.missions:
print(mission.name)
def addMission(self,mission):
self.missions.append(mission)
self.num_of_missions=len(self.missions)
self.mission_time.append(mission.duration)
self.total_time = sum(self.mission_time)
def addEVA(self,EVA):
self.EVAs.append(EVA)
self.num_of_evas = len(self.EVAs)
self.EVA_time.append(EVA.duration)
self.total_eva_time = sum(self.EVA_time)
class Mission:
def __init__(self,name,mid,start,end,crews):
self.name = name
self.mid = mid
self.start_date = parser.parse(start)
if end==0:
self.status = "Ongoing"
self.end_date = datetime.datetime.utcnow()+datetime.timedelta(hours=8)
else:
self.status = "Completed"
self.end_date = parser.parse(end)
self.crews = crews
self.duration = (self.end_date-self.start_date).total_seconds()/24/3600
for crew in crews:
crew.addMission(self)
self.number_of_crew = len(self.crews)
class EVA:
def __init__(self, eva_id,start_time, end_time, crews, mission):
self.eva_id = eva_id
self.crews = crews
self.start_time = parser.parse(start_time)
self.end_time = parser.parse(end_time)
self.duration = (self.end_time-self.start_time).total_seconds()/3600
for crew in crews:
crew.addEVA(self)
#%% Add Astronauts
# Name, uid, gender, date of birth
ylw = Astronaut('杨利伟','ylw','Male', '1965/06/21')
nhs = Astronaut('聂海胜','nhs','Male', '1964/10/16')
fjl = Astronaut('费俊龙','fjl','Male', '1965/5/5')
zzg = Astronaut('翟志刚','zzg','Male', '1966/10/10')
jhp = Astronaut('景海鹏','jhp','Male', '1966/10/24')
lbm = Astronaut('刘伯明','lbm','Male','1966/9/17')
lw = Astronaut('刘旺','lw','Male','1969/3/25')
ly = Astronaut('刘洋','ly','Female','1978/10/06')
zxg = Astronaut('张晓光','zxg','Male','1966/5/1')
wyp = Astronaut('王亚平','wyp','Female','1980/01/01')
cd = Astronaut('陈冬','cd','Male','1978/12/01')
thb = Astronaut('汤洪波','thb','Male','1975/10/1')
ygf = Astronaut('叶光富','ygf','Male', '1980/9/1')
astro = [ylw,nhs,fjl,zzg,lbm,jhp,lw,ly,zxg,wyp,cd,thb,ygf]
# Add Missions
# Name, MID, Start, End,[crew1, crew2, crew3]
sz5 = Mission("神舟五号",'sz5','2003/10/15 9:00:00','2003/10/16 6:22:00',[ylw])
sz6 = Mission("神舟六号",'sz6','2005/10/12 9:00:00','2005/10/17 4:33:00',[fjl,nhs])
sz7 = Mission("神舟七号",'sz7','2008/09/25 21:10:00','2008/09/28 17:37:00',[zzg,lbm,jhp])
sz9 = Mission("神舟九号",'sz9','2012/06/16 18:37:00','2012/06/29 10:03:00',[jhp,lw,ly])
sz10 = Mission("神舟十号",'sz10','2013/06/11 17:38:00','2013/06/26 8:07:00',[nhs,zxg,wyp])
sz11= Mission("神舟十一号",'sz11','2016/10/17 7:30:00','2016/11/18 13:59:00',[jhp,cd])
sz12= Mission("神舟十二号",'sz12','2021/06/17 9:22:00','2021/09/17 13:34:00',[nhs,lbm,thb])
sz13 = Mission("神舟十三号",'sz13','2021/10/16 00:23:56',0,[zzg,wyp,ygf])
missions = [sz5,sz6,sz7,sz9,sz10,sz11,sz12,sz13]
# Add EVAs
#EID,start, end,[crew1,crew2],mission
sz7eva1 = EVA("神舟七号第一次",'2008/9/27 16:35','2008/9/27 17:01',[zzg,lbm],sz7)
sz12eva1 = EVA("神舟十二号第一次",'2021/7/5 8:11','2021/7/5 14:57',[lbm,thb],sz12)
sz12eva2 = EVA("神舟十二号第二次",'2021/8/21 8:38','2021/8/21 14:33',[nhs,lbm],sz12)
sz13eva1 = EVA('神舟十三号第一次','2021/11/07/18:51','2021/11/08 01:16',[zzg,wyp],sz13)
EVAs = [sz7eva1,sz12eva1,sz12eva2,sz13eva1]
#%% Prepare Data for plot
sorted_astro = sorted(astro,key = operator.attrgetter("num_of_missions"))
fprop_title = fm.FontProperties(fname='font/ZhiMangXing-Regular.ttf')
fprop = fm.FontProperties(fname='font/NotoSansSC-Regular.otf')
y_names,x_vals = zip(*[(i.name,float(i.num_of_missions)) for i in sorted_astro])
y_pos = np.arange(len(sorted_astro))
#%% color palettes
color_eva= ['#1e295b','#193852','#3989b9','#79a49e','#3c7ba6','#161c37','#c6ca74','#1a425b']
#%% EVA Time
eva_total = [hty.total_eva_time for hty in astro]
c = np.zeros((len(EVAs),len(astro)))
fig1,ax = plt.subplots(figsize=(16,12),dpi=300)
astro_names = [hty.name for hty in astro]
legend_names =[eva.eva_id for eva in EVAs]
bottom = 0
for eva in EVAs:
idx_eva = EVAs.index(eva)
for crew in eva.crews :
idx_crew = astro.index(crew)
c[idx_eva,idx_crew] = eva.duration
ptt = plt.bar(astro_names,c[idx_eva],bottom=bottom,color = color_eva[idx_eva])
#axx.bar_label(ptt,label_type='center',fmt='%.2f')
bottom +=c[idx_eva]
I=plt.legend(legend_names,prop=fprop,loc='upper center',facecolor='black',ncol=len(astro),frameon=False)
plt.plot(astro_names,eva_total,'.r')
for text in I.get_texts():
text.set_color('white')
# add data labels
for rect in ax.patches:
height = rect.get_height()
width = rect.get_width()
x = rect.get_x()
y = rect.get_y()
label_text = f'{height:.2f}'
label_x = x+width/2
label_y = y+height/2
if height>0:
ax.text(label_x,label_y,label_text, color='white',ha = 'center', va = 'center',fontsize = 8)
#make labels
for i in range(len(astro)):
datastr = "{:.2f}".format(eva_total[i])
plt.annotate(datastr,xy=(astro_names[i],eva_total[i]),ha='center',va='bottom',color='white')
plt.xlabel("航天员", fontproperties=fprop,fontsize=20,color='white')
ymax = np.amax(eva_total)
ax.xaxis.set_ticks(np.arange(0,len(astro)))
ax.xaxis.set_ticklabels(astro_names,fontproperties=fprop,fontsize=16,color='white')
ax.set_yticks(np.arange(0,ymax+10,step=1))
ax.set_yticklabels(np.arange(0,ymax+10,step=1),fontproperties=fprop,fontsize=16,color='white')
#data labels
plt.ylabel("出舱时间(小时)", fontproperties=fprop,fontsize=20,color='white')
plt.ylim(0,np.amax(eva_total)+2)
plt.title("中国航天员出舱时间统计",fontproperties=fprop_title,fontsize=40,color='white')
now = datetime.datetime.utcnow()+datetime.timedelta(hours=8)
ax.yaxis.set_minor_locator(tck.AutoMinorLocator())
ax.text(.4, 0.95,"截至北京时间:"+ now.strftime("%Y/%m/%d %H:%M:%S.%f"), fontproperties=fprop,color="gray",transform=ax.transAxes,va='center')
ax.text(.45, 0.92,"绘制:@Vony7", fontproperties=fprop,color="gray", transform=ax.transAxes)
ax.set_facecolor("black")
plt.rcParams['savefig.facecolor']='black'
ax.spines['bottom'].set_color('white')
ax.spines['top'].set_color('white')
ax.spines['right'].set_color('white')
ax.spines['left'].set_color('white')
ax.tick_params(axis='x', colors='white')
ax.tick_params(axis='y', which='both',colors='white')
plt.savefig('astronauts-eva-time-stacked.png')
#%% plot different missions
#color_sets= ['#800000','#483d8b','#daa520','#79a49e','#3c7ba6','#194852','#161c37','#1a425b','#c6ca74']
color_sets=['#5899DA','#E8743B','#19A979','#ED4A7B','#945ECF','#13A4B4','#525DF4','#BF399E','#6C8893','#EE6868','#2F6497']
# plot bars in stack manner
c = np.zeros((len(missions),len(astro)))
figx,axx = plt.subplots(figsize=(16,12),dpi=300)
astro_names = [hty.name for hty in astro]
legend_names =[sz.name for sz in missions]
astro_total = [hty.total_time for hty in astro]
bottom = 0
for sz in missions:
idx_sz = missions.index(sz)
for crew in sz.crews:
idx_crew = astro.index(crew)
c[idx_sz,idx_crew] = sz.duration
#ptt = plt.bar(astro_names,c[idx_sz],bottom=bottom,color=color_sets[-idx_sz:])
ptt = plt.bar(astro_names,c[idx_sz],bottom=bottom,color=color_sets[idx_sz])
#axx.bar_label(ptt,label_type='center',fmt='%.2f')
bottom +=c[idx_sz]
I=plt.legend(legend_names,prop=fprop,loc='upper center',facecolor='black',ncol=len(astro),frameon=False)
plt.plot(astro_names,astro_total,'.r')
for text in I.get_texts():
text.set_color('white')
# add data labels
for rect in axx.patches:
height = rect.get_height()
width = rect.get_width()
x = rect.get_x()
y = rect.get_y()
label_text = f'{height:.2f}'
label_x = x+width/2
label_y = y+height/2
if height>0:
axx.text(label_x,label_y,label_text, color='white',ha = 'center', va = 'center',fontsize = 8)
#make labels
for i in range(len(astro)):
datastr = "{:.2f}".format(astro_total[i])
plt.annotate(datastr,xy=(astro_names[i],astro_total[i]),ha='center',va='bottom',color='white')
plt.xlabel("航天员", fontproperties=fprop,fontsize=20,color='white')
ymax = np.amax(astro_total)
axx.xaxis.set_ticks(np.arange(0,len(astro)))
axx.xaxis.set_ticklabels(astro_names,fontproperties=fprop,fontsize=16,color='white')
axx.set_yticks(np.arange(0,ymax+10,step=10))
axx.set_yticklabels(np.arange(0,ymax+10,step=10),fontproperties=fprop,fontsize=16,color='white')
#data labels
plt.ylabel("在轨时间(天)", fontproperties=fprop,fontsize=20,color='white')
plt.ylim(0,np.amax(astro_total)+10)
plt.title("中国航天员在轨时间统计",fontproperties=fprop_title,fontsize=40,color='white')
now = datetime.datetime.utcnow()+datetime.timedelta(hours=8)
axx.yaxis.set_minor_locator(tck.AutoMinorLocator())
axx.text(.4, 0.95,"截至北京时间:"+ now.strftime("%Y/%m/%d %H:%M:%S.%f"), fontproperties=fprop,color="gray",transform=axx.transAxes,va='center')
axx.text(.4, 0.92,"绘制:@Vony7", fontproperties=fprop,color="gray", transform=axx.transAxes)
axx.set_facecolor("black")
plt.rcParams['savefig.facecolor']='black'
axx.spines['bottom'].set_color('white')
axx.spines['top'].set_color('white')
axx.spines['right'].set_color('white')
axx.spines['left'].set_color('white')
axx.tick_params(axis='x', colors='white')
axx.tick_params(axis='y', which='both',colors='white')
plt.savefig('astronauts-mission-time-stacked.png')
if __name__=='__main__':
css_astronauts()
| Bourshevik0/Astronaut-Mission-Time | Astronaut_Times.py | Astronaut_Times.py | py | 12,074 | python | en | code | 0 | github-code | 13 |
6699028187 | from distutils.file_util import write_file
from multiprocessing import managers
import re
import csv
import nltk
from nltk.corpus import stopwords
import time
from ast import literal_eval
from datascience import *
import math
class text_filter():
def __init__(self,language):
self.language=language
self.manager=csv_manager()
self.utilities=utilities()
self.wnl = nltk.WordNetLemmatizer()
self.porter = nltk.PorterStemmer()
self.lancaster = nltk.LancasterStemmer()
pass
def cleaning_data(self,path):
reviews_raw,summarys_raw=self.manager.read_file(path,8,6)
summarys=self.filter(summarys_raw,"lem")
reviews=self.filter(reviews_raw,"lem")
self.manager.write_file("lem.csv",[summarys,reviews])
summarys=self.filter(summarys_raw,"stem")
reviews=self.filter(reviews_raw,"stem")
self.manager.write_file("stem.csv",[summarys,reviews])
summarys=self.filter(summarys_raw,"stem_lancaster")
reviews=self.filter(reviews_raw,"stem_lancaster")
self.manager.write_file("lancaster.csv",[summarys,reviews])
summarys=self.filter(summarys_raw,"stem_porter")
reviews=self.filter(reviews_raw,"stem_porter")
self.manager.write_file("porter.csv",[summarys,reviews])
def remove_stopwords(self,sentence):
#self.utilities.sayhi(self.remove_stopwords.__name__)
return [token for token in sentence if token.lower() not in stopwords.words(self.language)]
def remove_punctuation(self,sentence):
punc = r'''!()-[]{};:'"\,<>./?@#$%^&*_~'''
return [token for token in sentence if token.lower() not in punc]
def stem_word(self,word):
regexp = r'^(.*?)(ing|ly|ed|ious|ies|ive|es|s|ment)?$'
stem, suffix = re.findall(regexp, word)[0]
return stem
def stem_sentence(self,sentence):
#self.utilities.sayhi(self.stem_sentence.__name__)
return [self.stem_word(token) for token in sentence]
def lemmatization(self,sentence):
return [self.wnl.lemmatize(token) for token in sentence]
def stem_porter(self,sentence):
return [self.porter.stem(token) for token in sentence]
def stem_lancaster(self,sentence):
return [self.lancaster.stem(token) for token in sentence]
def filter(self, array,method):
self.utilities.sayhi(self.filter.__name__ +" "+method)
if method=="lem":
return [self.lemmatization(self.remove_punctuation(self.remove_stopwords(nltk.word_tokenize(sentence)))) for sentence in array]
elif method=="stem":
return [self.stem_sentence(self.remove_punctuation(self.remove_stopwords(nltk.word_tokenize(sentence)))) for sentence in array]
elif method=="stem_lancaster":
return [self.stem_porter(self.remove_punctuation(self.remove_stopwords(nltk.word_tokenize(sentence)))) for sentence in array]
elif method=="stem_porter":
return [self.stem_lancaster(self.remove_punctuation(self.remove_stopwords(nltk.word_tokenize(sentence)))) for sentence in array]
class text_analysis():
def __init__(self):
self.manager=csv_manager()
self.tdm_summarys={}
self.tdm_reviews={}
self.dtm_summarys={}
self.dtm_reviews={}
def generate_tdm(self,path):
reviews,summarys=self.manager.read_file(path,0,1)
reviewsDict={}
summarysDict={}
dtm_reviews={}
dtm_summarys={}
for doc in range(len(reviews)):
'''for word in literal_eval(reviews[doc]):
if word.lower() not in reviewsDict.keys():
reviewsDict[word.lower()]={"total":1,doc:1}
elif doc not in reviewsDict[word.lower()].keys():
reviewsDict[word.lower()][doc]=1
reviewsDict[word.lower()]["total"]+=1
else:
reviewsDict[word.lower()][doc]+=1
reviewsDict[word.lower()]["total"]+=1'''
for word in literal_eval(reviews[doc]):
#TDM
if word.lower() not in reviewsDict.keys():
reviewsDict[word.lower()]={"total":1,doc:1}
elif doc not in reviewsDict[word.lower()].keys():
reviewsDict[word.lower()][doc]=1
reviewsDict[word.lower()]["total"]+=1
else:
reviewsDict[word.lower()][doc]+=1
reviewsDict[word.lower()]["total"]+=1
#DTM
if doc not in dtm_reviews.keys():
dtm_reviews[doc]={"terms":{word.lower():{"times":1}}}
elif word.lower() not in dtm_reviews[doc].keys():
dtm_reviews[doc]["terms"][word.lower()]={"times":1}
else:
dtm_reviews[doc]["terms"][word.lower()]["times"]+=1
for word in literal_eval(summarys[doc]):
#TDM
if word.lower() not in summarysDict.keys():
summarysDict[word.lower()]={"total":1,doc:1}
elif doc not in summarysDict[word.lower()].keys():
summarysDict[word.lower()][doc]=1
summarysDict[word.lower()]["total"]+=1
else:
summarysDict[word.lower()][doc]+=1
summarysDict[word.lower()]["total"]+=1
#DTM
if doc not in dtm_summarys.keys():
dtm_summarys[doc]={"terms":{word.lower():{"times":1}}}
elif word.lower() not in dtm_summarys[doc].keys():
dtm_summarys[doc]["terms"][word.lower()]={"times":1}
else:
dtm_summarys[doc]["terms"][word.lower()]["times"]+=1
'''docs=[]
for index in range(len(summarys)):
print(index)
doc={'doc':index}
for word in summarysDict:
#print(summarysDict[word].keys())
if index in summarysDict[word].keys():
doc[word]=summarysDict[word][index]
else:
doc[word]=0
docs.append(doc)'''
#t = Table().from_records(docs)
'''t = Table().from_records([
{'column1':'data1','column2':1},
{'column1':'data2','column2':2},
{'column1':'data3','column2':3}
])'''
#summarysTable=Table().from_records([summarysDict])
'''for word in summarysDict:
summarysTable.append_column(word)'''
#print(t.rows)
self.tdm_summarys=summarysDict
self.tdm_reviews=reviewsDict
self.dtm_summarys=dtm_summarys
self.dtm_reviews=dtm_reviews
print(self.tdm_summarys)
print("Total terminos en summarys: "+str(len(self.tdm_summarys.keys())))
print("Total terminos en reviews: "+str(len(self.tdm_reviews.keys())))
def obtain_tf_idf(self):
total_docs=len(self.dtm_summarys.keys())
for doc in self.dtm_summarys:
total_terms_in_doc=len(self.dtm_summarys[doc]["terms"].keys())
for term in self.dtm_summarys[doc]["terms"]:
tf=self.dtm_summarys[doc]["terms"][term]["times"]/total_terms_in_doc
idf=math.log(total_docs/self.tdm_summarys[term]["total"])
tf_idf=tf*idf
self.dtm_summarys[doc]["terms"][term]["tf-idf"]=tf_idf
self.dtm_summarys[doc]["terms"][term]["tf"]=tf
self.dtm_summarys[doc]["terms"][term]["idf"]=idf
self.manager.write_file_tf_idf("dtm_summarys.csv",self.dtm_summarys)
for doc in self.dtm_reviews:
total_terms_in_doc=len(self.dtm_reviews[doc]["terms"].keys())
for term in self.dtm_reviews[doc]["terms"]:
tf=self.dtm_reviews[doc]["terms"][term]["times"]/total_terms_in_doc
idf=math.log(total_docs/self.tdm_reviews[term]["total"])
tf_idf=tf*idf
self.dtm_reviews[doc]["terms"][term]["tf-idf"]=tf_idf
self.dtm_reviews[doc]["terms"][term]["tf"]=tf
self.dtm_reviews[doc]["terms"][term]["idf"]=idf
self.manager.write_file_tf_idf("dtm_reviews.csv",self.dtm_reviews)
class csv_manager():
def __init__(self):
self.utilities=utilities()
def read_file(self,file,column_s,column_r):
self.utilities.sayhi(self.read_file.__name__)
reviews=[]
summarys=[]
with open(file) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count > 0:
review=row[column_r]
summary=row[column_s]
reviews.append(review)
summarys.append(summary)
else:
line_count= 1
return reviews,summarys
def write_file(self,file,data):
self.utilities.sayhi(self.write_file.__name__)
with open(file, mode='w',newline="") as write_file:
writer = csv.writer(write_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for entry in range(len(data[0])):
writer.writerow([data[0][entry],data[1][entry]])
def write_file_tf_idf(self,file,data):
with open(file, mode='w',newline="") as write_file:
writer = csv.writer(write_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerow(["Document","Term","tf","idf","tf-idf"])
for doc in data:
for term in data[doc]["terms"]:
writer.writerow([doc,term,data[doc]["terms"][term]["tf"],data[doc]["terms"][term]["idf"],data[doc]["terms"][term]["tf-idf"]])
def write_file_words(self,file,data_docs,data_words):
with open(file, mode='w',newline="") as write_file:
writer = csv.writer(write_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
my_columns=["Word"]+data_docs.keys()
writer.writerow(my_columns)
for term in data_words:
index_control=1
aux_arr=[]
for doc in data_words[term]:
if doc=="total":
continue
index_control+=doc
writer.writerow([term]+[])
class utilities():
def __init__(self):
self.time=0
pass
def sayhi(self,function):
print(self.get_timer()+" : [ "+function+":executing ]")
#print("Executing "+function+" function")
def get_timer(self):
return str(time.perf_counter())
def main():
tf=text_filter("english")
#tf.cleaning_data("reviews_Baby_5_final_dataset.csv")
analyzer=text_analysis()
analyzer.generate_tdm("resources/stem.csv")
analyzer.obtain_tf_idf()
main() | Javier2405/nl-processing | nl_processing.py | nl_processing.py | py | 11,234 | python | en | code | 0 | github-code | 13 |
14478731454 | vertlg = run.recipe
fig, ax = plt.subplots(figsize=(8 ,8), subplot_kw=dict(aspect="equal",anchor='SE'))
#
data = [float(x.split()[0]) for x in vertlg]
ingredients = [x.split()[-1] for x in vertlg]
data = run.ser
print(data)
ingredients = ["Furcht\n Angst",
"Vertrauen\n Akzeptanz",
" Freude\n Heiterkeit",
"Erwartung\n Interesse",
"Wut\n Ärger",
"Ekel\n Abscheu",
"Traurigkeit\n Kummer",
"Überraschung\n Erstaunung"]
print(" ")
print("Furcht,Vertrauen,Freude;Erwartung,Wut,Ekel,Traurigkeit,Überraschung")
#print(ingredients)
print(" ")
#
#
def func(pct, allvals):
absolute = int(pct/100.*np.sum(allvals))
# print(pct)
return "{:.1f}%\n({:d} Wrd)".format(pct, absolute )
#
wedges, texts, autotexts = ax.pie(data, autopct=lambda pct: func(pct, data),
textprops=dict(color="b"))
#
plt.setp(autotexts, size=10, weight="bold")
plt.pie(data,labels=ingredients, colors=['yellowgreen','greenyellow','yellow','orange','red','purple','navy','green'])
#
#
ax.set_title("Plutschik 8 Emotionen: ")
my_circle=plt.Circle( (0,0), 0.7, color='lightgrey')
#
p=plt.gcf()
p.gca().add_artist(my_circle)
#plt.show()
plotname = f"Donut_{tsname}.png"
plt.savefig(f"Donut_{tsname}.png", bbox_inches='tight') | pdittric/g_repo | donut_plot.py | donut_plot.py | py | 1,361 | python | en | code | 0 | github-code | 13 |
72063070419 | '''
weight_in_lb =input('How much do you weigh in pounds? ')
weight_in_kg = 0.453592 * float(weight_in_lb)
weight_in_kg = str(weight_in_kg)
print('You weigh ' + weight_in_kg + 'Kg ')
'''
weight = input('What is your weight?')
unit = input('(L)bs or (K)g ? ')
if unit.lower() == 'l':
weight = 0.45 * int(weight)
print(f'You weigh {weight}Kg')
elif unit.lower() == 'k':
weight = int(weight) / 0.45
print(f'You weigh {weight}lbs') | 1realjoeford/learning-python | Exercises/weightconverter.py | weightconverter.py | py | 445 | python | en | code | 1 | github-code | 13 |
16429457481 | import subprocess,logging
from multiprocessing import Process, Queue
import sys,os,tempfile
sys.path.insert(0,os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from geotaste.imports import *
from dash.testing.application_runners import import_app
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
import requests,bs4,flask
NAPTIME = int(os.environ.get('NAPTIME', 5))
def _nap(naptime=NAPTIME): time.sleep(naptime)
def test_showhide_components(dash_duo):
    """End-to-end check of the welcome modal and the show/hide filter panels.

    Walks through the three ways of opening the welcome modal (initial load,
    info button, logo click), closing it each time, then toggles every
    ``.button_showhide`` button group (top panels, member/book panels, and
    each filter card) and asserts the matching ``#body-*`` element's
    ``display`` style flips between ``none`` and ``block``.
    """
    app = get_app()
    dash_duo.start_server(app.app)

    # modal should be open on first load
    assert dash_duo.find_element('#welcome-modal .modal-title').text == WELCOME_HEADER
    assert dash_duo.find_element('#donotcite').text == DONOTCITE

    def assert_modal_closed():
        """Assert the welcome modal's title element is gone from the DOM.

        NOTE: the failure assert must live in the ``else`` branch.  The
        original pattern put ``assert False`` inside the ``try``, where the
        broad ``except Exception`` swallowed the AssertionError — so the
        check could never actually fail.
        """
        try:
            dash_duo.find_element('#welcome-modal .modal-title')
        except Exception:
            pass  # correctly not found
        else:
            assert False, 'should be gone'

    # close modal
    dash_duo.multiple_click('#welcome-modal .btn-close', 1)
    _nap()
    assert_modal_closed()

    # open modal via the info button
    dash_duo.multiple_click('#welcome_modal_info_btn', 1)
    _nap(5 if NAPTIME < 5 else NAPTIME)
    assert dash_duo.find_element('#welcome-modal .modal-title').text == WELCOME_HEADER
    # close it again
    dash_duo.multiple_click('#welcome-modal .btn-close', 1)
    _nap()
    assert_modal_closed()

    # open modal via the logo
    dash_duo.multiple_click('#logo_popup', 1)
    _nap(5 if NAPTIME < 5 else NAPTIME)
    assert dash_duo.find_element('#welcome-modal .modal-title').text == WELCOME_HEADER
    # close it again
    dash_duo.multiple_click('#welcome-modal .btn-close', 1)
    _nap()
    assert_modal_closed()

    assert dash_duo.find_element('#logo_popup').text == SITE_TITLE

    def check_showhide(button_ids, close=False):
        """Click each show/hide button; its body must appear, and — when
        *close* is set — disappear again after a second click."""
        for btn_id in button_ids:
            body_id = '#body-' + btn_id.split('-', 1)[-1]
            logger.debug(body_id)
            assert dash_duo.wait_for_style_to_equal(body_id, 'display', 'none')
            logger.debug(btn_id)
            dash_duo.multiple_click(btn_id, 1)
            assert dash_duo.wait_for_style_to_equal(body_id, 'display', 'block')
            if close:
                dash_duo.multiple_click(btn_id, 1)
                assert dash_duo.wait_for_style_to_equal(body_id, 'display', 'none')

    button_showhide_ids = [
        '#' + x.get_attribute('id')
        for x in dash_duo.find_elements('.button_showhide')
    ]
    member_cards1 = [x for x in button_showhide_ids if '-MP-' in x and 'Card' in x and 'Filter_1' in x]
    member_cards2 = [x for x in button_showhide_ids if '-MP-' in x and 'Card' in x and 'Filter_2' in x]
    book_cards1 = [x for x in button_showhide_ids if '-BP-' in x and 'Card' in x and 'Filter_1' in x]
    book_cards2 = [x for x in button_showhide_ids if '-BP-' in x and 'Card' in x and 'Filter_2' in x]

    top_panel_ids = [
        '#button_showhide-Filter_1',
        '#button_showhide-Filter_2',
    ]
    mpbp_ids = [
        '#button_showhide-MP-Filter_1',
        '#button_showhide-BP-Filter_1',
        '#button_showhide-MP-Filter_2',
        '#button_showhide-BP-Filter_2',
    ]
    # top panels stay open; member/book panels are toggled closed again
    check_showhide(top_panel_ids, close=False)
    check_showhide(mpbp_ids, close=True)

    ## member cards (open the enclosing panel first, close it after)
    dash_duo.multiple_click('#button_showhide-MP-Filter_1', 1)
    check_showhide(member_cards1, close=True)
    dash_duo.multiple_click('#button_showhide-MP-Filter_1', 1)

    dash_duo.multiple_click('#button_showhide-MP-Filter_2', 1)
    check_showhide(member_cards2, close=True)
    dash_duo.multiple_click('#button_showhide-MP-Filter_2', 1)

    ## book cards
    dash_duo.multiple_click('#button_showhide-BP-Filter_1', 1)
    check_showhide(book_cards1, close=True)
    dash_duo.multiple_click('#button_showhide-BP-Filter_1', 1)

    dash_duo.multiple_click('#button_showhide-BP-Filter_2', 1)
    check_showhide(book_cards2, close=True)
    dash_duo.multiple_click('#button_showhide-BP-Filter_2', 1)
def test_suites(dash_duo, done=True):
    """Exercise the hidden test-suite buttons and the member filter cards.

    Covers: switching to the table tab, applying and clearing nationality
    filters via the test-suite buttons, the membership-year range inputs,
    and the member-name dropdown.

    Args:
        dash_duo: pytest-dash browser fixture.
        done: when truthy, run the full interaction sequence; pytest leaves
            default-valued parameters alone, so this is not a fixture.
    """
    app = get_app()
    dash_duo.start_server(app.app)
    dash_duo.multiple_click('#welcome-modal .btn-close', 1)
    _nap()
    dash_duo.multiple_click('#tab_table', 1)
    _nap()
    dash_duo.wait_for_contains_text('#tblview', 'landmarks')

    # open both filter panels and both member panels
    for idx in [
        '#button_showhide-Filter_1',
        '#button_showhide-Filter_2',
        '#button_showhide-MP-Filter_1',
        '#button_showhide-MP-Filter_2',
    ]:
        dash_duo.multiple_click(idx, 1)
    _nap()

    # reveal the test-suite buttons
    dash_duo.multiple_click('#test_suite_btn', 1)
    _nap()

    if done:
        # btn1 puts {'member_nationalities': ['France']} in
        # L.member_panel.nation_card.store
        dash_duo.multiple_click('#test_suite_btn1', 1)
        _nap()
        dash_duo.wait_for_contains_text('#store_desc-Filter_1', 'France')
        dash_duo.wait_for_contains_text('#tblview', 'members')

        # btn2 puts {'member_nationalities': ['United States']} in
        # R.member_panel.nation_card.store
        dash_duo.multiple_click('#test_suite_btn2', 1)
        _nap()
        dash_duo.wait_for_contains_text('#store_desc-Filter_2', 'United States')
        dash_duo.wait_for_contains_text('#tblview', 'comparing')

        # clear right by clicking the card-level clear
        dash_duo.multiple_click('#button_clear-MemberNationalityCard-MP-Filter_2', 1)
        _nap()
        dash_duo.wait_for_text_to_equal('#store_desc-MemberNationalityCard-MP-Filter_2', BLANK)
        dash_duo.wait_for_text_to_equal('#store_desc-Filter_2', BLANK)

        # clear left by clicking the top-level clear
        dash_duo.multiple_click('#button_clear-Filter_1', 1)
        _nap()
        dash_duo.wait_for_text_to_equal('#store_desc-Filter_1', BLANK)
        dash_duo.wait_for_text_to_equal('#store_desc-MemberNationalityCard-MP-Filter_1', BLANK)

        # nationality card graph updates when a filter is applied (btn5)
        dash_duo.multiple_click('#button_showhide-MemberNationalityCard-MP-Filter_1', 1)
        _nap()
        dash_duo.wait_for_contains_text('#graph-MemberNationalityCard-MP-Filter_1', '4031')
        dash_duo.multiple_click('#test_suite_btn5', 1)
        _nap()
        try:
            dash_duo.wait_for_contains_text('#graph-MemberNationalityCard-MP-Filter_1', '4031')
            assert False, 'should not contain class'
        except TimeoutException:
            assert True

        # clear left again via the top-level clear
        dash_duo.multiple_click('#button_clear-Filter_1', 1)
        _nap()

        # membership-year range card: btn6 sets 1932-1939
        dash_duo.multiple_click('#button_showhide-MemberNationalityCard-MP-Filter_1', 1)
        dash_duo.multiple_click('#button_showhide-MembershipYearCard-MP-Filter_1', 1)
        _nap()
        dash_duo.wait_for_contains_text('#input_start-MembershipYearCard-MP-Filter_1', '1919')
        dash_duo.multiple_click('#test_suite_btn6', 1)
        _nap()
        dash_duo.wait_for_contains_text('#input_start-MembershipYearCard-MP-Filter_1', '1932')
        dash_duo.wait_for_contains_text('#input_end-MembershipYearCard-MP-Filter_1', '1939')

        # manually retype the range as 1920-1930
        start_input = dash_duo.find_element('#input_start-MembershipYearCard-MP-Filter_1')
        end_input = dash_duo.find_element('#input_end-MembershipYearCard-MP-Filter_1')
        start_input.send_keys(Keys.BACKSPACE * 4)  # erase the 4-digit year
        start_input.send_keys('1920')
        end_input.send_keys(Keys.BACKSPACE * 4)
        end_input.send_keys('1930')
        _nap()
        dash_duo.multiple_click('#input_btn-MembershipYearCard-MP-Filter_1', 1)
        _nap()
        dash_duo.wait_for_contains_text('#input_start-MembershipYearCard-MP-Filter_1', '1920')
        dash_duo.wait_for_contains_text('#input_end-MembershipYearCard-MP-Filter_1', '1930')
        _nap()
        dash_duo.multiple_click('#button_clear-MembershipYearCard-MP-Filter_1', 1)
        _nap()
        dash_duo.wait_for_text_to_equal('#store_desc-MembershipYearCard-MP-Filter_1', BLANK)
        dash_duo.wait_for_text_to_equal('#store_desc-Filter_1', BLANK)

        # member-name dropdown: bad query shows "No results found",
        # a real member name filters the store
        dash_duo.multiple_click('#button_showhide-MemberNameCard-MP-Filter_1', 1)
        _nap()
        # renamed from `input` to avoid shadowing the builtin
        name_input = dash_duo.find_element('#input-MemberNameCard-MP-Filter_1 input')
        _nap()
        bad = 'Ryan Heuser'
        name_input.send_keys(bad)
        _nap()
        # no options match `bad`, so the empty message should show
        dash_duo.wait_for_text_to_equal(".Select-noresults", "No results found")
        name_input.send_keys(Keys.BACKSPACE * len(bad))
        name_input.send_keys('James Joyce')
        name_input.send_keys(Keys.ENTER)
        dash_duo.wait_for_text_to_equal("#store_desc-MemberNameCard-MP-Filter_1", "James Joyce")
        dash_duo.multiple_click('#button_clear-MemberNameCard-MP-Filter_1', 1)
        _nap()
        dash_duo.wait_for_text_to_equal('#store_desc-MemberNameCard-MP-Filter_1', BLANK)
        dash_duo.wait_for_text_to_equal('#store_desc-Filter_1', BLANK)
def test_query_strings(dash_duo):
    """End-to-end check that URL query parameters (filters, tabs) drive the app state.

    Tries each host in TEST_HOSTS with a headless Chrome driver; at least one
    host must be reachable or the final assert fails.
    """
    app = get_app()
    dash_duo.start_server(app.app)
    options = webdriver.ChromeOptions()
    options.add_argument('--headless')
    driver = webdriver.Chrome(options=options)#, executable_path="/usr/local/bin/chromedriver"
    connected = False
    for host in TEST_HOSTS:
        # Skip hosts that refuse the initial connection; remember if any worked.
        try:
            driver.get(f'{host}')
        except Exception as e:
            logger.error(e)
            continue
        connected = True
        logger.debug('Testing no filters')
        _nap()
        assert driver.find_element_by_id('store_desc-Filter_1').text == BLANK
        assert driver.find_element_by_id('store_desc-Filter_2').text == BLANK
        logger.debug('Testing one filter')
        driver.get(f'{host}?member_gender=Female')
        _nap()
        el = driver.find_element_by_id('store_desc-Filter_1')
        assert 'Female' in el.text
        driver.get(f'{host}?member_gender2=Male')
        _nap()
        el = driver.find_element_by_id('store_desc-Filter_2')
        assert 'Male' in el.text
        logger.debug('Testing two filters')
        driver.get(f'{host}?member_gender=Female&member_gender2=Male')
        _nap()
        el = driver.find_element_by_id('store_desc-Filter_1')
        assert 'Female' in el.text
        el = driver.find_element_by_id('store_desc-Filter_2')
        assert 'Male' in el.text
        logger.debug('Testing tab')
        driver.get(f'{host}?tab=table')
        _nap()
        el = driver.find_element_by_id('tblview')
        assert el.is_displayed()
        driver.get(f'{host}?tab=map')
        _nap()
        el = driver.find_element_by_id('tblview')
        assert not el.is_displayed()
        logger.debug('Testing tab2')
        driver.get(f'{host}?tab=table&tab2=book&member_gender=Female&member_gender2=Male')
        _nap()
        el = driver.find_element_by_id('maintbl-container')
        assert 'Fiction' in el.text
        assert 'Poetry' in el.text
        # logger.debug('Testing lat/long/zoom query params')
        # driver.get(f'{host}?lat=48.85697&lon=2.32748&zoom=16.23372')
        # _nap()
        # el = driver.find_element_by_id('mainmap')
        # assert el.is_displayed()
        # The map tab must not leak lat/lon/zoom into the URL by default.
        driver.get(f'{host}?tab=map')
        _nap()
        assert 'lat=' not in driver.current_url
        driver.find_element_by_id('test_suite_btn').click()
        _nap()
        # driver.find_element_by_id('test_suite_btn4').click()
        # _nap()
        # assert 'lat=' in driver.current_url
        # assert 'lon=' in driver.current_url
        # assert 'zoom=' in driver.current_url
        # close
        # NOTE(review): driver.close() is skipped when any assert above fails,
        # leaking the browser process — consider wrapping in try/finally.
        driver.close()
    assert connected, "Never connected"
def test_get_server():
    """Smoke-test that the server/app factory helpers return the expected types."""
    # Bare Flask server straight from the factory.
    assert isinstance(get_server(), flask.app.Flask)
    # A dry run should hand back the Flask instance without actually serving.
    # (Needs testing in a separate thread when not a dry run?)
    assert isinstance(run(port=1991, dry_run=True), flask.app.Flask)
    # The Dash wrapper exposes the Dash app and its underlying Flask server.
    dash_wrapper = get_app()
    assert isinstance(dash_wrapper, DashApp)
    assert isinstance(dash_wrapper.app, Dash)
    assert isinstance(dash_wrapper.app.server, flask.app.Flask)
def _apprun(q):
    """Multiprocessing helper: start the app on port 1992 and report the result via queue *q*."""
    q.put(run(port=1992))
def test_run():
try:
queue = Queue()
try:
from pytest_cov.embed import cleanup_on_sigterm
except ImportError:
pass
else:
cleanup_on_sigterm()
p = Process(target=_apprun, args=(queue,))
try:
p.start()
time.sleep(10)
finally:
p.terminate()
time.sleep(5)
p.join()
assert True, 'server running succeeded'
except Exception:
assert False, 'server running failed' | Princeton-CDH/geotaste | tests/test_app.py | test_app.py | py | 13,301 | python | en | code | 0 | github-code | 13 |
39148463108 | #!/bin/python3
#https://www.hackerrank.com/challenges/beautiful-binary-string/problem
import sys
def minSteps(n, B):
    """Greedy left-to-right count of flips needed to destroy every '010' pattern.

    B is a list of 0/1 ints of length n; on each match the trailing 0 is
    flipped in place (mutating the caller's list) and the scan jumps past it.
    Returns the number of flips.
    """
    flips = 0
    i = 0
    while i + 2 < n:
        if (B[i], B[i + 1], B[i + 2]) == (0, 1, 0):
            B[i + 2] = 1  # flip the trailing zero, as the original scan does
            flips += 1
            i += 3        # the flipped window cannot participate in another match
        else:
            i += 1
    return flips
# Read the string length and the binary string from stdin (HackerRank format).
n = int(input().strip())
B = input().strip()
# Convert to a list of ints so minSteps can flip characters in place.
l = [int(i) for i in B]
result = minSteps(n, l)
print(result) | saumya-singh/CodeLab | HackerRank/Strings/Beautiful_Binary_String.py | Beautiful_Binary_String.py | py | 589 | python | en | code | 0 | github-code | 13 |
38765272715 | import requests
import json
token = "MEDQ5xp/hAlwDei/yjIB2AlB38LRfEoVw9l40ge7tVO812AJ1oBn1wF7sAX9/uqN04K0hIbclbI//FIrFQrg6uWZk75yFI6LGO3sQ7EgOJAuBWuFFQfvKf8ZxBoRif3BvNPx3au68NAhH/UdP0jMqCOZ3Dnkp0DpaNpYUwS1nM8vNeC6l96tt8f0e0GW/3UtSaBg4PzK5SU8FTlXLCyL+YpObBmdrirCb5VsWy1nAbLkFESaVXEmwKOSB59kd"
base_uri = "https://www.momentoftop.com/dnd/v0/"
class Character:
    """Thin HTTP client for the D&D character REST API."""

    def __init__(self, name):
        self.name = name
        # Fetch the full character list once at construction time.
        self.all_characters = requests.get(base_uri + "characters",
                                           headers={'Authorization': token}).json()

    def create_character(self, char_class, alignment):
        """Create this character on the server with the given class and alignment."""
        self.char_class = char_class
        self.alignment = alignment  # fixed: was `algnment`, a NameError
        payload = {
            'name': self.name,      # fixed: bare `name` was undefined here
            'charClass': char_class,
            'alignment': alignment,
        }
        return requests.put(base_uri + "characters",
                            data=json.dumps(payload),
                            headers={'Authorization': token})

    def get_character(self):
        """Return this character's attributes as a dict.

        NOTE(review): unlike attack(), the name is not percent-encoded here —
        confirm whether names with spaces are expected for this endpoint.
        """
        attributes = requests.get(base_uri + "characters/" + self.name,
                                  headers={'Authorization': token}).json()
        return attributes

    def attack(self, enemy):
        """Attack *enemy* (a character name); returns the raw HTTP response."""
        payload = {"defender": enemy}
        # Spaces in names must be percent-encoded for the URL path.
        return requests.post(base_uri + "characters/" + self.name.replace(" ", "%20") + "/attack",
                             data=json.dumps(payload),
                             headers={'Authorization': token})
| MarcScott/dungeons_and_dragons_client | dandipy/dandi.py | dandi.py | py | 1,496 | python | en | code | 0 | github-code | 13 |
def select_sort(alist):
    """
    Selection sort: repeatedly move the minimum of *alist* into a new list.

    Like the original, this CONSUMES alist (it is empty on return) and
    returns a freshly built sorted list.
    """
    list_sort = []
    # Fixed: the original removed elements from `alist` while iterating it
    # inside `for x in alist` (skipping items, relying on the outer while to
    # recover) and recomputed min() for every element — O(n^3) overall.
    while alist:
        smallest = min(alist)
        alist.remove(smallest)
        list_sort.append(smallest)
    return list_sort
l = [5, 6, 21, 1, 2, 1, 15, 3, 7, 16]
print('select ', select_sort(l))
def bubble_sort(alist):
    """Bubble sort *alist* in place and return it.

    After sweep k the k largest elements are parked at the tail, so every
    sweep can stop one position earlier than the previous one.
    """
    for unsorted_len in reversed(range(1, len(alist))):
        for j in range(unsorted_len):
            if alist[j] > alist[j + 1]:
                alist[j], alist[j + 1] = alist[j + 1], alist[j]
    return alist
l = [5, 6, 1, 21, 1, 2, 15, 3, 7, 16]
print('bubble ', bubble_sort(l))
def insertion_sort(alist):
    """Insertion sort *alist* in place and return it.

    Each element is lifted out and larger items of the sorted prefix are
    shifted right until its slot is found.
    """
    for pos in range(1, len(alist)):
        current = alist[pos]
        hole = pos
        while hole > 0 and alist[hole - 1] > current:
            alist[hole] = alist[hole - 1]
            hole -= 1
        alist[hole] = current
    return alist
l = [5, 6, 1, 21, 1, 2, 15, 3, 7, 16]
print('insertion1 ', insertion_sort(l))
# Insertion sorting algorithm second realization
def insertion_sort2(alist):
    """Insertion sort via adjacent swaps: bubble each element left to its place."""
    for idx in range(1, len(alist)):
        pos = idx
        while pos and alist[pos - 1] > alist[pos]:
            alist[pos - 1], alist[pos] = alist[pos], alist[pos - 1]
            pos -= 1
    return alist
l = [5, 6, 1, 21, 1, 2, 15, 3, 7, 16]
print('insertion2 ', insertion_sort2(l))
def qsort(alist):
    """Return a sorted copy of *alist* (functional quicksort, first element as pivot)."""
    if not alist:
        return []
    pivot = alist[0]
    smaller = [x for x in alist if x < pivot]
    equal = [x for x in alist if x == pivot]
    larger = [x for x in alist if x > pivot]
    return qsort(smaller) + equal + qsort(larger)
l = [5, 6, 1, 21, 1, 2, 15, 3, 7, 16]
print('quicksort ', qsort(l))
| slavkoBV/solved-tasks-SoftGroup-course | list_sort.py | list_sort.py | py | 1,916 | python | en | code | 1 | github-code | 13 |
23460490813 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from 爬虫.scrapy_lianjia.scrapy_lianjia import settings
import pymysql
from 爬虫.scrapy_lianjia.scrapy_lianjia.items import ScrapylianjiaItem
class ScrapyLianjiaPipeline(object):
    """Default no-op pipeline: passes every scraped item through unchanged."""

    def process_item(self, item, spider):
        """Return *item* untouched so downstream pipelines receive it."""
        return item
class scrapyLianjiaPipeline(object):
    """Persist each scraped listing into the MySQL table `lianjia_spider`."""
    def __init__(self):
        # Connection parameters come from the project's settings module.
        # NOTE(review): the connection/cursor are never closed — consider a
        # close_spider() hook.
        self.conn = pymysql.connect(host=settings.MYSQL_HOSTS,user=settings.MYSQL_USER,passwd=settings.MYSQL_PASSWORD,port=
                                    settings.MYSQL_PORT, db=settings.MYSQL_DB)
        self.cursor = self.conn.cursor()
    def process_item(self, item, spider):
        # Parameterized INSERT; pymysql substitutes the %(name)s placeholders
        # safely from the dict below.
        self.sql = 'insert into lianjia_spider(name,price,url,search_name) values (%(name)s,%(price)s,%(url)s,%(search_name)s);'
        self.value = {
            'name':item['name'],
            'price':item['price'],
            'url':item['url'],
            'search_name':item['search_name']
        }
        try:
            self.cursor.execute(self.sql, self.value)
            self.conn.commit()
        except:
            # NOTE(review): bare except swallows the real error; only a
            # "data insert failed" message is printed. Consider logging it.
            self.conn.rollback()
            print('数据插入失败')
        return item
70059112339 | #!/usr/bin/env python
""" Node label check for OpenShift V3 """
# Adding the ignore because it does not like the naming of the script
# to be different than the class name
# pylint: disable=invalid-name
# pylint: disable=wrong-import-position
# pylint: disable=broad-except
# pylint: disable=line-too-long
import argparse
import time
import logging
logging.basicConfig(
format='%(asctime)s - %(relativeCreated)6d - %(levelname)-8s - %(message)s',
)
logger = logging.getLogger()
logger.setLevel(logging.WARN)
# Our jenkins server does not include these rpms.
# In the future we might move this to a container where these
# libs might exist
#pylint: disable=import-error
from openshift_tools.monitoring.ocutil import OCUtil
from openshift_tools.monitoring.metric_sender import MetricSender
ocutil = OCUtil()
def runOCcmd(cmd, base_cmd='oc'):
    """Run *cmd* through ocutil, logging the command line and its wall-clock duration."""
    logger.info(base_cmd + " " + cmd)
    started = time.time()
    result = ocutil.run_user_cmd(cmd, base_cmd=base_cmd)
    logger.info("oc command took %s seconds", str(time.time() - started))
    return result
def runOCcmd_yaml(cmd, base_cmd='oc'):
    """Run *cmd* through ocutil with YAML-parsed output, logging the duration."""
    logger.info(base_cmd + " " + cmd)
    started = time.time()
    parsed = ocutil.run_user_cmd_yaml(cmd, base_cmd=base_cmd)
    logger.info("oc command took %s seconds", str(time.time() - started))
    return parsed
def get_type(hostname):
    """Classify a host as 'master', 'infra' or 'compute' from its hostname.

    'master' wins over 'infra' when both substrings occur; anything else is
    treated as a compute node.
    """
    for marker in ('master', 'infra'):
        if marker in hostname:
            return marker
    return 'compute'
def check_label_on_host(host_labels):
    """Validate one node's label dict against what its type requires.

    Returns True when every required label is present (matching its value
    where one is mandated) and no role label banned for this type is
    attached; otherwise each discrepancy plus a suggested `oc label` fix is
    logged and False is returned.
    """
    result = True
    # Some nodes have been seen without even 'hostname'/'type' labels.
    # Fixed: direct indexing raised KeyError for exactly those nodes instead
    # of returning False as intended.
    if not (host_labels.get('hostname') and host_labels.get('type')):
        return False
    hostname = host_labels['hostname']
    host_type = host_labels['type']
    # Per-type contract: a key mapped to None is required but its value is
    # free; ban_labels holds the role labels this node type must NOT carry.
    if host_type == 'master':
        need_labels = {
            'hostname': None,  # required key, value not important
            'kubernetes.io/hostname': None,  # required key, value not important
            'node-role.kubernetes.io/master': "true",
            'region': None,  # required key, value not important
            'type': 'master',
        }
        ban_labels = {
            'node-role.kubernetes.io/infra': "true",
            'node-role.kubernetes.io/compute': "true",
        }
    elif host_type == 'infra':
        need_labels = {
            'hostname': None,  # required key, value not important
            'kubernetes.io/hostname': None,  # required key, value not important
            'node-role.kubernetes.io/infra': "true",
            'region': None,  # required key, value not important
            'type': 'infra',
        }
        ban_labels = {
            'node-role.kubernetes.io/master': "true",
            'node-role.kubernetes.io/compute': "true",
        }
    else:
        need_labels = {
            'hostname': None,  # required key, value not important
            'kubernetes.io/hostname': None,  # required key, value not important
            'node-role.kubernetes.io/compute': "true",
            'region': None,  # required key, value not important
            'type': 'compute',
        }
        ban_labels = {
            'node-role.kubernetes.io/master': "true",
            'node-role.kubernetes.io/infra': "true",
        }
    # Portability fix: dict.has_key()/iteritems() do not exist on Python 3;
    # `in` and .items() behave identically on Python 2.
    for key, value in need_labels.items():
        logger.debug("-----> checking for needed label: [" + key + "]")
        if key in host_labels:
            if value and (host_labels[key] != value):
                # Key is present but its mandated value does not match.
                logger.info('This node '+ hostname + ' needs label: [' + key + '] which does not match required:' + value)
                logger.info('#Command to fix this on master : oc label node/'+host_labels["kubernetes.io/hostname"]+' '+key+':'+value)
                result = False
        else:
            # A single missing key already marks the whole node as wrong.
            logger.info('This node '+ hostname + ' needs label: [' + key + ']')
            logger.info('#Fix: ossh root@'+ hostname +' -c "grep '+key+' /etc/origin/node/node-config.yaml"')
            logger.info('#And(master): oc label node/'+host_labels["kubernetes.io/hostname"]+' '+key+':THE_VALUE_YOU_GET_FROM_ABOVE')
            result = False
    for key, value in ban_labels.items():
        logger.debug("-----> checking for banned label: [" + key + "]")
        if key in host_labels:
            logger.info('This node '+ hostname + ' has banned label: [' + key + ']')
            logger.info('#Command to fix this on master : oc label node/'+host_labels["kubernetes.io/hostname"]+' '+key+'-')
            result = False
    return result
def parse_args():
    """ parse the args from the cli

    Side effect: raises the module logger's level to INFO for one -v and
    DEBUG for two or more.
    """
    parser = argparse.ArgumentParser(description='Check all the nodes label Status')
    parser.add_argument('-v', '--verbose', action='count', default=0,
                        help='verbosity level, specify multiple')
    parser.add_argument('--namespace', default="default", help='service namespace')
    args = parser.parse_args()
    if args.verbose > 0:
        logger.setLevel(logging.INFO)
    if args.verbose > 1:
        logger.setLevel(logging.DEBUG)
    return args
def check_label_status():
    """Count the nodes whose labels fail validation.

    Returns 0 when every node passes check_label_on_host; any non-zero
    value means that many nodes are mislabeled.
    """
    # Fixed: the original also kept a `result_ip` counter that was declared,
    # never updated, and then summed into the return value — dead code.
    bad_nodes = 0
    label_info = runOCcmd_yaml("get node ")
    for item in label_info['items']:
        labels = item['metadata']['labels']
        if not check_label_on_host(labels):
            bad_nodes += 1
    return bad_nodes
def main():
    """ check all the node labels see if anything miss

    Runs the label check and pushes the failure count to Zabbix via
    MetricSender under the key 'openshift.nodes.label.status'.
    """
    args = parse_args()
    logger.debug("args: ")
    logger.debug(args)
    #result = test_saml_pod(args=args, )
    # 0 means all nodes are labeled correctly; anything else is a count of
    # mislabeled nodes.
    label_status = check_label_status()
    #send the value to zabbix
    mts = MetricSender(verbose=args.verbose)
    mts.add_metric({'openshift.nodes.label.status': label_status})
    mts.send_metrics()
if __name__ == "__main__":
main()
| openshift/openshift-tools | scripts/monitoring/cron-send-node-labels-status.py | cron-send-node-labels-status.py | py | 8,303 | python | en | code | 161 | github-code | 13 |
21278400370 | import pandas as pd
import numpy as np
import os
import sys
import urllib
import argparse
from bs4 import BeautifulSoup
from tqdm import tqdm
import glob
import shutil
import numpy as np
import pandas as pd
from sklearn import *
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
def ParseArguments():
    """Build and parse the CLI for secondary filtering / one-hot encoding.

    Reads sys.argv; returns the argparse.Namespace with one required
    positional (the annotated file) plus the cutoff/encoding options below.
    """
    parser = argparse.ArgumentParser(description = 'Apply secondary cutoff filtering and one hot encode based on desired variable (must be a column name in uniprot annotation sheet).')
    parser.add_argument('annotated_file', help = 'The name of the annotated file created by uniprot_annotation script.')
    parser.add_argument('-pv', '-p-value_cutoff', help = 'Desired p-value cutoff for secondary filtering (default = <=0.05).', type = float, default = 0.05)
    parser.add_argument('-fc', '-fold-change_cutoff', help = 'Desired fold-change cutoff value (default = >=1.5).', type = float, default = 1.5)
    parser.add_argument('-et', '-encoding_target', help = 'Target for one-hot encoding analysis. Must be a column name in uniprot annotated file generated from uniprot_annotation script (default = GO Localization).', type = str, default = 'GO Localization')
    parser.add_argument('-sc', '-score_cutoff', help = 'Desired cutoff for andromeda score. General values: >40 = high confidence, 10-40 = medium confidence, <10 = low confidence. Default = 10', type = float, default = 10)
    parser.add_argument('-bh', '-bh_correction', help = 'Optionally, implement the Benjamini-Hochberg correction to filter proteins based on a FDR associated with P-Value testing. Supply a desired FDR cutoff as an argument. (i.e. 0.05) *NOTE* This means that of the data you filter 5 percent of it will be false-positives! Set a lower FDR value for more stringent filtering. Default = 0.0', type = float, default = 0.0)
    parser.add_argument('-kmeans', '-kmeans_clustering', help = 'Argument for kmeans clustering. Default = False', type = str, default = False)
    parser.add_argument('-top_f', '-top_features', help = 'Desired number of top ontological features to pass on to k-means clustering. Default = 100', type = int, default = 100)
    parser.add_argument('-e_clusters', '-explicit_clusters', help = 'Explicitly set the desired number of clusters for k-means clustering. (default = False, uses optimized k)', type = int, default = False)
    args = parser.parse_args()
    return args
def FindIdealNumberOfClusters(Data, lower_lim, upper_lim, spacing):
    """Sweep k-means over k in [lower_lim, upper_lim] stepping by *spacing*.

    Returns {k: mean silhouette score} so the caller can pick the k with the
    highest average silhouette.
    """
    scores = {}
    for k in np.arange(lower_lim, upper_lim + spacing, spacing):
        model = KMeans(n_clusters=k, random_state=10)
        model.fit(Data)
        labels = model.predict(Data)
        scores[k] = silhouette_score(Data, labels)
    return scores
def PlotBarCharts():
    """Placeholder for bar-chart plotting; not implemented yet (always returns None)."""
    return None
def CleanupAndGenerateNonRedundantList(file_df, et_arg):
    """Strip evidence codes from the *et_arg* GO column, one-hot encode the
    resulting terms per protein, and keep only the most frequent features.

    Returns (encoded_df, n_largest): the one-hot matrix restricted to the top
    features (rows with all zeros dropped) and the per-feature counts.
    NOTE(review): relies on the module-level global `top_features_arg`, and
    mutates *file_df* by adding an '<et_arg> Clean' column — both worth
    confirming/refactoring.
    """
    master_GO_list = []
    for index, row in file_df.iterrows():
        encoding_target = str(row[et_arg])
        encoding_target = encoding_target.split(', ')
        clean_encoding_target_list = []
        #remove evidence codes in the list
        # assumes each term ends in a fixed 4-char evidence suffix — TODO confirm
        for target in encoding_target:
            target = target[:-4]
            clean_encoding_target_list.append(target)
        file_df.loc[index, et_arg + ' Clean'] = ', '.join(clean_encoding_target_list)
        for thing in clean_encoding_target_list:
            master_GO_list.append(thing)
    master_GO_list = list(set(master_GO_list))
    #one hot encoding
    # get_dummies on the term list yields the column set; reindexing by the
    # protein ids gives an all-NaN frame which is zero-filled, then 1s are set.
    encoded_df = pd.get_dummies(master_GO_list)
    encoded_df = encoded_df.reindex(file_df['Fasta headers'])
    encoded_df = encoded_df.fillna(0.0)
    for index, row in file_df.iterrows():
        GO_list = row[et_arg + ' Clean'].split(', ')
        protein_id = row['Fasta headers']
        for element in GO_list:
            # NOTE(review): chained assignment (df[col][row] = 1) may not write
            # through on newer pandas — prefer .loc[protein_id, element].
            encoded_df[element][protein_id] = 1
    try:
        # Drop proteins whose only "term" was the stringified NaN placeholder.
        encoded_df = encoded_df[encoded_df['nan'] != 1.0]
    except:
        # no 'nan' column present — nothing to drop
        pass
    GO_analysis_df = pd.DataFrame(encoded_df.sum(), columns = [et_arg + ' Sum'])
    total_GO_count = GO_analysis_df[et_arg + ' Sum'].sum()
    number_of_domains = len(GO_analysis_df)
    total_protein_count = len(encoded_df)
    # Keep the globally configured number of most frequent features.
    n_largest = pd.DataFrame(GO_analysis_df[et_arg + ' Sum'].nlargest(top_features_arg))
    n_largest_sum = n_largest[et_arg + ' Sum'].sum()
    percentage_of_dataset = 100*(n_largest_sum/total_GO_count)
    print(str(top_features_arg) + ' top GO features composes ' + str(percentage_of_dataset) + ' percent of total features')
    encoded_df = encoded_df[n_largest.index]
    # Drop proteins that have none of the retained top features.
    encoded_df = encoded_df.loc[(encoded_df != 0.0).any(1)]
    return encoded_df, n_largest
args = ParseArguments()
file_arg = args.annotated_file
pv_cut_arg = args.pv
fc_cut_arg = args.fc
et_arg = args.et
kmeans_arg = args.kmeans
sc_cut_arg = args.sc
top_features_arg = args.top_f
bh_cutoff_arg = args.bh
e_clusters_arg = args.e_clusters
file_df = pd.read_csv(file_arg, index_col = 0)
#test Benjamini-Hochberg
if bh_cutoff_arg != 0.0:
print('Implementing Benjamini-Hochberg corrections ... ')
print('FDR set to ' + str(bh_cutoff_arg))
file_df['P-Value Ranks'] = file_df['P-Value'].rank(axis = 0)
file_df['BH Correction'] = (file_df['P-Value Ranks']/len(file_df))*bh_cutoff_arg
BH_filter_list = []
for index, row in file_df.iterrows():
test_stat = row['BH Correction'] - row['P-Value']
if test_stat > 0:
BH_filter_list.append(row['P-Value Ranks'])
else:
pass
maximum_rank_cutoff = max(BH_filter_list)
bh_cutoff_filter = file_df['P-Value Ranks'] <= maximum_rank_cutoff
#filtering
if bh_cutoff_arg == 0.0:
print('No BH-correction implemented ... P-Values greater than ' + str(pv_cut_arg) + ' filtered out.')
pval_filter = file_df['P-Value'] <= pv_cut_arg
file_df = file_df[pval_filter]
else:
file_df = file_df[bh_cutoff_filter]
fc_filter = file_df['Imputed Average'] >= fc_cut_arg
sc_filter = file_df['Score'] > sc_cut_arg
file_df = file_df[fc_filter & sc_filter]
#domain cleanup and generation of non-redundant encoding target list
EncodingInformation = CleanupAndGenerateNonRedundantList(file_df, et_arg)
#kmeans clustering
encoded_df = EncodingInformation[0]
if kmeans_arg != False:
if e_clusters_arg == False:
max_clusters = top_features_arg - 1
else:
max_clusters = e_clusters_arg
silhouette_dictionary = FindIdealNumberOfClusters(encoded_df, 2, max_clusters, 1)
keys = list(silhouette_dictionary.keys())
values = list(silhouette_dictionary.values())
maximum = max(values)
value_max_idx = values.index(maximum)
num_clusters = keys[value_max_idx]
kmeans = KMeans(n_clusters=num_clusters, random_state=10)
kmeans.fit(encoded_df)
cluster_labels = kmeans.predict(encoded_df)
encoded_df['ClusterLabel'] = cluster_labels
encoded_df = encoded_df.sort_values('ClusterLabel')
cluster_domain_dictionaries = {}
for cluster in cluster_labels:
tmp_encoded_df = encoded_df[encoded_df['ClusterLabel'] == cluster]
tmp_encoded_df = tmp_encoded_df.drop(['ClusterLabel'], axis=1)
tmp_GO_stats = pd.DataFrame(tmp_encoded_df.sum(), columns=['GOsum'])
tmp_GO_stats = tmp_GO_stats[tmp_GO_stats['GOsum'] != 0.0]
tmp_GO_stats.index.name = et_arg
#tmp_domain_stats.plot(kind='bar')
cluster_domain_dictionaries.update({cluster : tmp_GO_stats.to_dict()})
encoded_df = pd.DataFrame(encoded_df['ClusterLabel'])
encoded_df.to_csv(et_arg.replace(' ', '') + 'KMeansClustering_' + str(num_clusters) + 'Clusters.csv')
file_df_fasta_indexed = file_df.set_index('Fasta headers')
detailed_clustered_df = file_df_fasta_indexed.loc[encoded_df.index]
detailed_clustered_df = detailed_clustered_df.merge(encoded_df, left_index = True, right_index = True)
detailed_clustered_df.to_csv(et_arg.replace(' ', '') + 'DetailedClusteredDF.csv')
else:
pass
| ejp-lab/EJPLab_Computational_Projects | PhotoCrosslinking/SecondaryFiltering_GOClustering.py | SecondaryFiltering_GOClustering.py | py | 8,137 | python | en | code | 9 | github-code | 13 |
15864958487 | """parkpow URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import url
from django.urls import path, include
from django.contrib import admin
from main import views as v
from django.conf.urls.static import static
urlpatterns = [
path('', v.base, name='base'),
path('dashboard/', v.dashboard, name='demo_dashboard'),
path('vehicle/', v.vehicle, name='demo_vehicle'),
path('alert/', v.alert, name='demo_alert'),
path('admin/', admin.site.urls),
path('accounts/', include('allauth.urls')),
path('authorization/', v.vehicleView, name='demo_authorization'),
path('authorization/vehicle_new/<str:auth_flag>', v.vehicleNew, name='vehicle_new'),
path('authorization/vehicle_view', v.vehicleView, name='vehicle_view'),
path('authorization/vehicle_delete/<int:vehicle_id>/<str:auth_flag>', v.vehicleDelete, name='vehicle_delete'),
path('authorization/vehicle_edit/<int:vehicle_id>/<str:auth_flag>', v.vehicleEdit, name='vehicle_edit'),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL,
document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
| senwebdev/parking-power | parkpow/urls.py | urls.py | py | 1,852 | python | en | code | 0 | github-code | 13 |
3608500531 | import json
from time import sleep
from werkzeug.serving import run_simple
from werkzeug.wrappers import Request, Response
from app.config import ConfigReader
from app.iptables import IPTables
from app.logging import get_logger
from app.storage import Storage
from app.utils import is_valid_ip, is_valid_uuid4, resolve_hostname
storage = Storage()
log = get_logger(__name__)
def build_response(message: str, code: int) -> Response:
    """
    Build JSON response.

    *message* must already be JSON-encoded text — callers pass pre-quoted
    strings, so no serialization happens here.
    """
    return Response(message, content_type='application/json', status=code)
def bad_token():
    # Shared 403 response for every failed-authentication path.
    return build_response('"Could not verify access token."', code=403)
@Request.application
def application(request):
    """
    Define the WSGI application to be run by the server.

    Validates the ?token= query parameter and the caller's IP, then opens an
    iptables rule for new IPs (201) or acknowledges existing ones (200);
    anything invalid gets the shared 403.
    """
    cfg = ConfigReader()
    ipt = IPTables(cfg)
    token = request.args.get('token')
    # NOTE(review): request.host may include a port — confirm resolve_hostname
    # tolerates 'host:port' input.
    src_ip = resolve_hostname(request.host)
    if not (is_valid_uuid4(token) and is_valid_ip(src_ip)):
        log.warning('Invalid Token <%s> or SRC IP <%s>', token, src_ip)
        return bad_token()
    token_instance = storage.verify_token(token)
    if token_instance:
        ipt.get_chain()
        if not ipt.has_rule(src_ip):
            # First time we see this IP with a valid token: allow and record it.
            ipt.add_rule(src_ip)
            storage.log_access_request(src_ip, token_instance)
            log.info('Allowing inbound traffic from new IP: %s', src_ip)
            return build_response(
                f'"Allowing inbound traffic from new IP: {src_ip}"', code=201
            )
        log.info('Allowing inbound traffic from existing IP: %s', src_ip)
        return build_response(
            f'"Allowing inbound traffic from existing IP: {src_ip}"', code=200
        )
    else:
        log.warning('Invalid Token: %s', token)
        return bad_token()
def run_main(cfg):
    """
    Convenience method. Run a simple server and load the app.

    *cfg* must expose api_host and api_port (port may be a string).
    """
    run_simple(cfg.api_host, int(cfg.api_port), application)
| radupotop/opensesame | api/api.py | api.py | py | 1,916 | python | en | code | 1 | github-code | 13 |
41963359913 | from unittest import TestCase
from py_stringmatching import SoftTfIdf, Jaro, JaroWinkler
from preprocessing.preprocessing import bag_of_words
from preprocessing.word_vector_similarity import WordVectorSimilarity
class TestWordVectorSimilarity(TestCase):
s1_simple = 'Ursin Brunner Tester Nonexisting'
s2_simple = 'Ursin Brunerx Test Randomword Second'
s3_simple = 'Peter Brunner Homeworld'
s1_complex = 'Linksys EtherFast 8-Port 10/100 Switch - EZXS88W,Linksys EtherFast 8-Port 10/100 Switch - EZXS88W/ 10/100 ' \
'Dual-Speed Per-Port/ Perfect For Optimizing 10BaseT And 100BaseTX Hardware On The Same Network/ Speeds Of ' \
'Up To 200Mbps In Full Duplex Operation/ Eliminate Bandwidth Constraints And Clear Up Bottlenecks '
s2_complex = 'Linksys EtherFast EZXS88W Ethernet Switch - EZXS88W,Linksys EtherFast 8-Port 10/100 Switch (New/Workgroup),' \
'LINKSYS '
def test_get_word_vector_similarities(self):
all_bag_of_words = []
s1_tokenized = bag_of_words(self.s1_simple)
s2_tokenized = bag_of_words(self.s2_simple)
s3_tokenized = bag_of_words(self.s3_simple)
all_bag_of_words.append(s1_tokenized)
all_bag_of_words.append(s2_tokenized)
all_bag_of_words.append(s3_tokenized)
sim_engine = WordVectorSimilarity(all_bag_of_words, sim_func=JaroWinkler().get_raw_score, threshold=0.8)
word_similarities_vector = sim_engine.get_word_vector_similarities_simple(s1_tokenized, s2_tokenized)
print(word_similarities_vector)
def test_get_word_vector_similarities_tf_idf_simple_examples(self):
all_bag_of_words = []
s1_tokenized = bag_of_words(self.s1_simple)
s2_tokenized = bag_of_words(self.s2_simple)
s3_tokenized = bag_of_words(self.s3_simple)
all_bag_of_words.append(s1_tokenized)
all_bag_of_words.append(s2_tokenized)
all_bag_of_words.append(s3_tokenized)
sim_engine = WordVectorSimilarity(all_bag_of_words, sim_func=Jaro().get_raw_score, threshold=0.8)
word_similarities_vector = sim_engine.get_word_vector_similarities_simple(s1_tokenized, s2_tokenized)
print(word_similarities_vector)
self.assertEqual(len(word_similarities_vector), 10)
def test_get_word_vector_similarities_tf_idf(self):
all_bag_of_words = []
s1_tokenized = bag_of_words(self.s1_complex)
s2_tokenized = bag_of_words(self.s2_complex)
all_bag_of_words.append(s1_tokenized)
all_bag_of_words.append(s2_tokenized)
sim_engine = WordVectorSimilarity(all_bag_of_words, sim_func=Jaro().get_raw_score, threshold=0.8)
word_similarities_vector = sim_engine.get_word_vector_similarities_tf_idf(s1_tokenized, s2_tokenized)
print(word_similarities_vector)
self.assertEqual(len(word_similarities_vector), 8) | brunnurs/PA1 | preprocessing/test_wordVectorSimilarity.py | test_wordVectorSimilarity.py | py | 2,881 | python | en | code | 0 | github-code | 13 |
3440316452 | from TrainStationClass import TrainStationLogic
class WagonLogic:  # wagons are attached to trains
    """Cargo wagon: buys one good type at its start station, sells at its end station."""

    def __init__(self, pStartTrainstation, pEndTrainstation, pType, pPlayer):
        self.capacity = 50                        # maximum units the wagon can carry per load
        self.type = pType                         # the kind of good this wagon trades
        self.amount = 0                           # units currently loaded
        self.startTrainstation = pStartTrainstation
        self.endTrainstation = pEndTrainstation
        self.player = pPlayer

    def loadWagon(self):
        """Buy goods at the start station when selling at the end station is profitable.

        Returns True when goods were bought (possibly fewer than capacity if
        the station runs low), False when no profit is possible.
        """
        start = self.startTrainstation.logic
        end = self.endTrainstation.logic
        if start.PRICES[self.type] >= end.PRICES[self.type]:
            print('kein Gewinn möglich')
            return False
        # Fixed: the two duplicated branches (full wagon vs. remaining stock)
        # collapse to min(); leftover debug prints of the storage were removed.
        to_load = min(self.capacity, start.STORAGE[self.type])
        self.amount += to_load
        self.player.money -= to_load * start.PRICES[self.type]
        start.STORAGE[self.type] -= to_load
        return True

    def unloadWagon(self):
        """Sell the wagon's goods at the end station.

        Returns True when the full load fit into the station's storage, False
        when the station overflowed (it is then filled to maxStorage).
        """
        end = self.endTrainstation.logic
        stored = end.STORAGE[self.type]
        if self.amount + stored <= end.maxStorage:
            end.STORAGE[self.type] += self.amount
            self.player.money += self.amount * end.PRICES[self.type]
            return True
        # Overflow: the station only takes part of the load. Fixed: the old
        # elif compared against STORAGE instead of maxStorage (vacuously true),
        # and the player was paid for goods the station had to discard.
        delivered = end.maxStorage - stored
        end.STORAGE[self.type] = end.maxStorage
        self.player.money += delivered * end.PRICES[self.type]
        # NOTE(review): self.amount is never reset after unloading — confirm
        # the train logic empties the wagon elsewhere.
        return False
| davidtraum/swt | game/src/server/WagonClass.py | WagonClass.py | py | 2,282 | python | en | code | 2 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.