seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
19167010026 | #!/usr/bin/env python3
"""OpenCV-based frame viewer that replays recordings and assign time-based labels"""
import argparse
from pathlib import Path
import time
import cv2
import numpy as np
import derp.util
class Labeler:
"""OpenCV-based frame viewer that replays recordings and assign time-based labels"""
def __init__(self, folder, scale=1, bhh=40):
"""Load the topics and existing labels from the folder, scaling up the frame"""
self.folder = folder
self.scale = scale
self.bhh = bhh
self.config_changed = False
self.quality = None
self.config_path = self.folder / "config.yaml"
self.window_name = "Labeler %s" % self.folder
self.config = derp.util.load_config(self.config_path)
self.quality_colors = [(0, 0, 255), (0, 128, 255), (0, 255, 0)]
self.topics = derp.util.load_topics(folder)
self.frame_id = 0
self.n_frames = len(self.topics["camera"])
self.seek(self.frame_id)
self.f_h = self.frame.shape[0]
self.f_w = self.frame.shape[1]
self.l_h = int(self.bhh // 5)
self.window = np.zeros(
[self.f_h + self.bhh * 2 + self.l_h + 2, self.f_w, 3], dtype=np.uint8
)
self.paused = True
self.show = False
# Prepare labels
self.autonomous_bar = np.ones((self.f_w, 3), dtype=np.uint8) * (255, 255, 255)
self.quality_bar = np.ones((self.f_w, 3), dtype=np.uint8) * (128, 128, 128)
if "quality" in self.topics and len(self.topics["quality"]) >= self.n_frames:
self.qualities = [str(msg.quality) for msg in self.topics["quality"]]
else:
self.qualities = ["junk" for _ in range(self.n_frames)]
for i, quality in enumerate(self.qualities):
self.update_quality(i, i, quality)
# Prepare state messages
self.camera_times = [msg.publishNS for msg in self.topics["camera"]]
self.camera_autos = []
auto = False
for timestamp, topic, msg in derp.util.replay(self.topics):
if topic == 'controller':
auto = msg.isAutonomous
elif topic == 'camera':
self.camera_autos.append(auto)
actions = derp.util.extract_car_actions(self.topics)
self.camera_speeds = derp.util.extract_latest(self.camera_times,
actions[:, 0], actions[:, 1])
self.camera_steers = derp.util.extract_latest(self.camera_times,
actions[:, 0], actions[:, 2])
window_Xs = np.linspace(self.camera_times[0], self.camera_times[-1], self.f_w)
self.window_speeds = np.array(np.interp(window_Xs, self.camera_times, self.camera_speeds)
* -self.bhh, dtype=int)
self.window_steers = np.array(np.interp(window_Xs, self.camera_times, self.camera_steers)
* -self.bhh, dtype=int)
self.autonomous_bar *= np.array(np.interp(window_Xs, self.camera_times,
self.camera_autos), dtype=np.uint8)[:, None]
self.window_steers[self.window_steers > self.bhh] = self.bhh
self.window_steers[self.window_steers < -self.bhh] = -self.bhh
cv2.namedWindow(self.window_name)
cv2.setMouseCallback(self.window_name, self.click_handler)
# Print some statistics
duration = (self.camera_times[-1] - self.camera_times[0]) / 1e9
fps = (len(self.camera_times) - 1) / duration
print("Duration of %.0f seconds at %.0f fps" % (duration, fps))
def __del__(self):
"""Deconstructor to close window"""
cv2.destroyAllWindows()
def click_handler(self, event, x, y, flags, param):
""" Handle clicks on the window """
if event == cv2.EVENT_LBUTTONDOWN:
if y > self.f_h:
frame_id = int((x / self.f_w) * self.n_frames)
self.seek(frame_id)
self.show = True
def update_quality(self, first_index, last_index, quality=None):
"""Update the label bar to the given quality"""
if quality is None:
return False
first_index, last_index = min(first_index, last_index), max(first_index, last_index)
for index in range(first_index, last_index + 1):
self.qualities[index] = quality
beg_pos = self.frame_pos(first_index)
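# extend the colored span by one frame when frames are sparser than bar
# pixels, so adjacent quality segments stay contiguous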
end_pos = self.frame_pos(last_index + (self.n_frames < len(self.quality_bar)))
self.quality_bar[beg_pos : end_pos + 1] = self.bar_color(quality)
return True
def seek(self, frame_id=None):
"""Update the current frame to the given frame_id, otherwise advances by 1 frame"""
if frame_id is None:
frame_id = self.frame_id + 1
if frame_id < 0:
frame_id = 0
self.paused = True
if frame_id >= self.n_frames:
frame_id = self.n_frames - 1
self.paused = True
self.update_quality(self.frame_id, frame_id, self.quality)
self.frame = cv2.resize(
derp.util.decode_jpg(self.topics["camera"][frame_id].jpg),
None,
fx=self.scale,
fy=self.scale,
interpolation=cv2.INTER_AREA,
)
self.frame_id = frame_id
return True
def bar_color(self, quality):
"""Figure out the color for the given quality"""
if quality is None:
return (128, 128, 128)
return self.quality_colors[derp.util.TOPICS["quality"].QualityEnum.__dict__[quality]]
def display(self):
"""Blit all the status on the screen"""
self.window[: self.frame.shape[0], :, :] = self.frame
horizon_percent = self.config["camera"]["pitch"] / self.config["camera"]["vfov"] + 0.5
# Horizon line
self.window[int(self.f_h * horizon_percent), :, :] = (255, 0, 255)
# Clear status buffer
self.window[self.f_h :, :, :] = 0
# Draw label bar
self.window[self.f_h : self.f_h + self.l_h // 2, :, :] = self.autonomous_bar
self.window[self.f_h + self.l_h // 2 : self.f_h + self.l_h, :, :] = self.quality_bar
# Draw current timestamp vertical line
current_x = self.frame_pos(self.frame_id)
self.window[self.f_h + self.l_h :, current_x, :] = self.bar_color(self.quality)
# Draw zero line
self.window[self.f_h + self.l_h + self.bhh, :, :] = (96, 96, 96)
offset = self.f_h + self.bhh + self.l_h
self.window[self.window_speeds + offset, np.arange(self.f_w), :] = (255, 64, 255)
self.window[self.window_steers + offset, np.arange(self.f_w), :] = (64, 255, 255)
text = "%05i %07.3f %06.3f %06.3f" % (self.frame_id,
(self.camera_times[self.frame_id] / 1E9) % 100,
self.camera_steers[self.frame_id],
self.camera_speeds[self.frame_id])
font = cv2.FONT_HERSHEY_SIMPLEX
pink = (255, 128, 255)
offset = (0, int(self.scale * 30))
cv2.putText(self.window, text, offset, font, self.scale, pink, 1, cv2.LINE_AA)
cv2.imshow(self.window_name, self.window)
def save_labels(self):
"""Write all of our labels to the folder as messages"""
with derp.util.topic_file_writer(self.folder, "quality") as quality_fd:
for quality_i, quality in enumerate(self.qualities):
msg = derp.util.TOPICS["quality"].new_message(
createNS=derp.util.get_timestamp(),
publishNS=self.topics["camera"][quality_i].publishNS - 1,
writeNS=derp.util.get_timestamp(),
quality=quality,
)
msg.write(quality_fd)
print("Saved quality labels in", self.folder)
if self.config_changed:
derp.util.dump_config(self.config, self.config_path)
print("Saved changes to config")
def handle_keyboard_input(self):
"""Fetch a new keyboard input if one exists"""
key = cv2.waitKey(1) & 0xFF
if key == 255:
return True
if key == 27:
return False # ESC
if key == ord(" "):
self.paused = not self.paused
elif key == ord("g"):
self.quality = "good"
elif key == ord("r"):
self.quality = "risk"
elif key == ord("t"):
self.quality = "junk"
elif key == ord("c"):
self.quality = None
elif key == ord("s"):
self.save_labels()
elif key == 82:
self.seek(self.frame_id + 10) # up
elif key == 84:
self.seek(self.frame_id - 10) # down
elif key == 81:
self.seek(self.frame_id - 1) # left
elif key == 83:
self.seek(self.frame_id + 1) # right
elif key == 85:
self.config["camera"]["pitch"] -= 0.1 # page up
self.config_changed = True
elif key == 86:
self.config["camera"]["pitch"] += 0.1 # page down
self.config_changed = True
elif ord("1") <= key <= ord("5"):
self.seek(int(self.n_frames * (key - ord("0") - 1) / 4))
elif key != 255:
print("Unknown key press: [%s]" % key)
self.show = True
return True
def frame_pos(self, frame_id):
"""Position of current camera frame on the horizontal status bars"""
return min(self.f_w - 1, int(frame_id / self.n_frames * self.f_w))
def run(self):
"""Run the labeling program in a forever loop until the user quits"""
self.display()
while True:
if not self.paused:
self.show = self.seek()
if self.show:
self.display()
self.show = False
if not self.handle_keyboard_input():
break
time.sleep(0.01)
def main():
"""Initialize the labeler based on user args and run it"""
print(
"""
This labeling tool interpolates the data based on camera frames and then lets you label each.
To exit press ESCAPE
To save press s
To navigate between frames:
Left/Right: move in 1 frame increments
Up/Down: move in 10 frame increments
1: goes to beginning
2: goes to 25% in
3: goes to 50% in
4: goes to 75% in
5: goes to end
To adjust horizon line press PAGE_UP or PAGE_DOWN
To change the quality label of this frame
g: good (use for training)
r: risk (advanced situation not suitable for classic training)
t: junk (don't use this part of the video, aka trash)
c: clear, as in don't change the quality label
"""
)
parser = argparse.ArgumentParser()
parser.add_argument("paths", type=Path, nargs="*", metavar="N", help="recording path location")
parser.add_argument("--scale", type=float, default=1.0, help="frame rescale ratio")
args = parser.parse_args()
if not args.paths:
recordings = (derp.util.DERP_ROOT / "recordings").glob("recording-*")
args.paths = [r for r in recordings if not (r / "quality.bin").exists()]
for path in args.paths:
print("Labeling", path)
labeler = Labeler(folder=path, scale=args.scale)
labeler.run()
if __name__ == "__main__":
main()
| notkarol/derplearning | bin/label.py | label.py | py | 11,483 | python | en | code | 40 | github-code | 6 | [
{
"api_name": "derp.util.util.load_config",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "derp.util.util",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "derp.util",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "derp.util.ut... |
10648619314 | # -*- coding: utf-8 -*-
"""
Created on Wed May 6 15:04:40 2020
@author: Rijk
Scipy signal sos filter toolbox test
"""
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
plt.close('all')
b, a = signal.butter(4, 100, 'low', analog=True)
w, h = signal.freqs(b, a)
plt.figure()
plt.semilogx(w, 20 * np.log10(abs(h)))
plt.title('Butterworth filter frequency response')
plt.xlabel('Frequency [radians / second]')
plt.ylabel('Amplitude [dB]')
plt.margins(0, 0.1)
plt.grid(which='both', axis='both')
plt.axvline(100, color='green') # cutoff frequency
#plt.show()
t = np.linspace(0, 1, 1000, False) # 1 second
sig = np.sin(2*np.pi*10*t) + np.sin(2*np.pi*20*t)
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
ax1.plot(t, sig)
ax1.set_title('10 Hz and 20 Hz sinusoids')
ax1.axis([0, 1, -2, 2])
sos = signal.butter(10, 15, 'hp', fs=1000, output='sos')
filtered = signal.sosfilt(sos, sig)
ax2.plot(t, filtered)
ax2.set_title('After 15 Hz high-pass filter')
ax2.axis([0, 1, -2, 2])
ax2.set_xlabel('Time [seconds]')
plt.tight_layout()
plt.show()
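# Note: 'sos' (second-order sections) output is used for the 10th-order filter
# above because high-order transfer-function (b, a) forms can be numerically
# unstable, while a cascade of biquads stays well-conditioned.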
# Compare FFT before and after
fft_before = np.fft.fft(sig)
fft_after = np.fft.fft(filtered)
sample_time = np.mean(np.diff(t))
f = np.fft.fftfreq(len(t), sample_time)
half = int(len(t)/2)
plt.figure()
plt.plot(f[1:half], np.abs(fft_before[1:half]), label='Original')
#plt.plot(f[1:half], np.abs(fft_after[1:half]), label='Filtered')
plt.legend()
## Measurement data filter
#f_axis = 1.12
#nyquist_f = f_axis
#
## Define filter
#lower_f = 0.025
#upper_f = 0.065
#bandpass_f = 2*np.pi * np.array([lower_f, upper_f]) / nyquist_f
#butter_low = signal.butter(2, lower_f, btype='lowpass', output='sos')
#butter_high = signal.butter(2, upper_f, btype='lowpass', output='sos')
#
#b, a = signal.butter(2, bandpass_f, btype='bandstop', output='ba')
#w, h = signal.freqz(b, a)
#
#plt.figure()
#plt.plot(w, 20 * np.log10(abs(h))) | rehogenbirk/MEP_control_software | Measurements/20200324 WO3196dev9/Hydrogen R_T correction/0324_1904_WO3196dev9_H2ToAir/signal_toolbox_test.py | signal_toolbox_test.py | py | 1,897 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.close",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "scipy.signal.butter",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "scipy.s... |
21689221352 | import json
import os
import subprocess
from collections import OrderedDict
from copy import deepcopy
from sys import platform
from tabulate import tabulate
from openwpm.config import ConfigEncoder
def parse_http_stack_trace_str(trace_str):
"""Parse a stacktrace string and return an array of dict."""
stack_trace = []
frames = trace_str.split("\n")
for frame in frames:
try:
func_name, rest = frame.split("@", 1)
rest, async_cause = rest.rsplit(";", 1)
filename, line_no, col_no = rest.rsplit(":", 2)
stack_trace.append(
{
"func_name": func_name,
"filename": filename,
"line_no": line_no,
"col_no": col_no,
"async_cause": async_cause,
}
)
except Exception as exc:
print("Exception parsing the stack frame %s %s" % (frame, exc))
return stack_trace
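# Illustrative frame format implied by the parsing above (values hypothetical):
# "onLoad@chrome://foo.js:10:4;asyncCause" parses to
#   {"func_name": "onLoad", "filename": "chrome://foo.js",
#    "line_no": "10", "col_no": "4", "async_cause": "asyncCause"}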
def get_firefox_binary_path():
"""
If ../../firefox-bin/firefox-bin or os.environ["FIREFOX_BINARY"] exists,
return it. Else, throw a RuntimeError.
"""
if "FIREFOX_BINARY" in os.environ:
firefox_binary_path = os.environ["FIREFOX_BINARY"]
if not os.path.isfile(firefox_binary_path):
raise RuntimeError(
"No file found at the path specified in "
"environment variable `FIREFOX_BINARY`."
"Current `FIREFOX_BINARY`: %s" % firefox_binary_path
)
return firefox_binary_path
root_dir = os.path.dirname(__file__) + "/../.."
if platform == "darwin":
firefox_binary_path = os.path.abspath(
root_dir + "/Nightly.app/Contents/MacOS/firefox-bin"
)
else:
firefox_binary_path = os.path.abspath(root_dir + "/firefox-bin/firefox-bin")
if not os.path.isfile(firefox_binary_path):
raise RuntimeError(
"The `firefox-bin/firefox-bin` binary is not found in the root "
"of the OpenWPM directory (did you run the install script "
"(`install.sh`)?). Alternatively, you can specify a binary "
"location using the OS environment variable FIREFOX_BINARY."
)
return firefox_binary_path
def get_version():
"""Return OpenWPM version tag/current commit and Firefox version"""
try:
openwpm = subprocess.check_output(
["git", "describe", "--tags", "--always"]
).strip()
except subprocess.CalledProcessError:
ver = os.path.join(os.path.dirname(__file__), "../../VERSION")
with open(ver, "r") as f:
openwpm = f.readline().strip()
firefox_binary_path = get_firefox_binary_path()
try:
firefox = subprocess.check_output([firefox_binary_path, "--version"])
except subprocess.CalledProcessError as e:
raise RuntimeError("Firefox not found. " " Did you run `./install.sh`?") from e
ff = firefox.split()[-1]
return openwpm, ff
def get_configuration_string(manager_params, browser_params, versions):
"""Construct a well-formatted string for {manager,browser}params
Constructs a pretty printed string of all parameters. The config
dictionaries are split to try to avoid line wrapping for reasonably
size terminal windows.
"""
config_str = "\n\nOpenWPM Version: %s\nFirefox Version: %s\n" % versions
config_str += "\n========== Manager Configuration ==========\n"
config_str += json.dumps(
manager_params.to_dict(),
sort_keys=True,
indent=2,
separators=(",", ": "),
cls=ConfigEncoder,
)
config_str += "\n\n========== Browser Configuration ==========\n"
print_params = [deepcopy(x.to_dict()) for x in browser_params]
table_input = list()
profile_dirs = OrderedDict()
archive_dirs = OrderedDict()
js_config = OrderedDict()
profile_all_none = archive_all_none = True
for item in print_params:
browser_id = item["browser_id"]
# Update print flags
if item["seed_tar"] is not None:
profile_all_none = False
if item["profile_archive_dir"] is not None:
archive_all_none = False
# Separate out long profile directory strings
profile_dirs[browser_id] = str(item.pop("seed_tar"))
archive_dirs[browser_id] = str(item.pop("profile_archive_dir"))
js_config[browser_id] = item.pop("cleaned_js_instrument_settings")
# Copy items in sorted order
dct = OrderedDict()
dct["browser_id"] = browser_id
for key in sorted(item.keys()):
dct[key] = item[key]
table_input.append(dct)
key_dict = OrderedDict()
counter = 0
for key in table_input[0].keys():
key_dict[key] = counter
counter += 1
config_str += "Keys:\n"
config_str += json.dumps(key_dict, indent=2, separators=(",", ": "))
config_str += "\n\n"
config_str += tabulate(table_input, headers=key_dict)
config_str += "\n\n========== JS Instrument Settings ==========\n"
config_str += json.dumps(js_config, indent=None, separators=(",", ":"))
config_str += "\n\n========== Input profile tar files ==========\n"
if profile_all_none:
config_str += " No profile tar files specified"
else:
config_str += json.dumps(profile_dirs, indent=2, separators=(",", ": "))
config_str += "\n\n========== Output (archive) profile dirs ==========\n"
if archive_all_none:
config_str += " No profile archive directories specified"
else:
config_str += json.dumps(archive_dirs, indent=2, separators=(",", ": "))
config_str += "\n\n"
return config_str
| openwpm/OpenWPM | openwpm/utilities/platform_utils.py | platform_utils.py | py | 5,733 | python | en | code | 1,286 | github-code | 6 | [
{
"api_name": "os.environ",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_nu... |
37950984200 | import pykka
import re
import json
from gpt_connection import GPT_Connection
from tools import ToolRunner
from frontend_utils import initial_request
from bots.Dispatcher import Dispatcher
from bots.Workflow import Workflow
from prompts import DISPATCHER_PROMPT_TEMPLATE, INITIAL_PROMPT_TEMPLATE, ITERATING_ACTION_PROMPT_TEMPLATE, FORCE_END_ITERATION_PROMPT, \
PLANT_HEALTH_BOT_DESCRIPTION, PRODUCTION_OUTPUT_BOT_DESCRIPTION, DISTRIBUTION_BOT_DESCRIPTION, \
ONT_PRODUCTION_PLANT, ONT_MACHINES, ONT_WORK_ORDERS, ONT_PRODUCTION_ALLOCATION_PLAN, ONT_DISTRIBUTION_WAREHOUSE, ONT_TRANSIT_ORDER, \
GET_OBJECTS, MODIFY_OBJECT, CREATE_OBJECT
from string import Template
"""
Main Execution Code
"""
gpt_connection = GPT_Connection()
tool_runner = ToolRunner()
plant_health_ref = Workflow.start(
name="Plant Health Bot",
id=2,
dispatcher_id=1,
bot_description=PLANT_HEALTH_BOT_DESCRIPTION,
initial_prompt_template=INITIAL_PROMPT_TEMPLATE,
iteration_prompt_template=ITERATING_ACTION_PROMPT_TEMPLATE,
force_end_prompt_template=FORCE_END_ITERATION_PROMPT,
information=[ONT_PRODUCTION_PLANT, ONT_MACHINES, ONT_WORK_ORDERS],
readtools=[GET_OBJECTS],
writetools=[MODIFY_OBJECT, CREATE_OBJECT],
gpt_connection=gpt_connection,
tool_runner=tool_runner
)
production_output_ref = Workflow.start(
name="Production Output Bot",
id=3,
dispatcher_id=1,
bot_description=PRODUCTION_OUTPUT_BOT_DESCRIPTION,
initial_prompt_template=INITIAL_PROMPT_TEMPLATE,
iteration_prompt_template=ITERATING_ACTION_PROMPT_TEMPLATE,
force_end_prompt_template=FORCE_END_ITERATION_PROMPT,
information=[ONT_PRODUCTION_PLANT, ONT_MACHINES, ONT_PRODUCTION_ALLOCATION_PLAN],
readtools=[GET_OBJECTS],
writetools=[MODIFY_OBJECT, CREATE_OBJECT],
gpt_connection=gpt_connection,
tool_runner=tool_runner
)
distribution_ref = Workflow.start(
name="Distribution Bot",
id=4,
dispatcher_id=1,
bot_description=DISTRIBUTION_BOT_DESCRIPTION,
initial_prompt_template=INITIAL_PROMPT_TEMPLATE,
iteration_prompt_template=ITERATING_ACTION_PROMPT_TEMPLATE,
force_end_prompt_template=FORCE_END_ITERATION_PROMPT,
information=[ONT_PRODUCTION_PLANT, ONT_DISTRIBUTION_WAREHOUSE, ONT_PRODUCTION_ALLOCATION_PLAN, ONT_TRANSIT_ORDER],
readtools=[GET_OBJECTS],
writetools=[MODIFY_OBJECT, CREATE_OBJECT],
gpt_connection=gpt_connection,
tool_runner=tool_runner
)
workflows = {
"Plant Health Bot": plant_health_ref,
"Production Output Bot": production_output_ref,
"Distribution Bot": distribution_ref
}
workflow_ids = {
"Plant Health Bot": 2,
"Production Output Bot": 3,
"Distribution Bot": 4
}
dispatcher_ref = Dispatcher.start(
id=1,
prompt_template=DISPATCHER_PROMPT_TEMPLATE,
workflows=workflows,
workflow_ids=workflow_ids,
gpt_connection=gpt_connection)
initial_ask = "We just got a message that the ocelot making machine has broken at Stuffed Animal Plant 8"
# initial_ask = "Our trucks carrying transit order T0030 got into an accident."
# initial_ask = "Alice has left the company"
initial_request(initial_ask)
dispatcher_ref.ask(initial_ask)
dispatcher_ref.stop()
| nhuang25/llm-composition | stuffed_animal_llc/stuffed_animal_llc.py | stuffed_animal_llc.py | py | 3,203 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "gpt_connection.GPT_Connection",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "tools.ToolRunner",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "bots.Workflow.Workflow.start",
"line_number": 29,
"usage_type": "call"
},
{
"api_n... |
3681349954 | import json
from dataclasses import dataclass, field
from typing import Union
import requests
@dataclass
class ManyChatAPI:
api_base_url = 'https://api.manychat.com/fb/'
api_key: str
psid: str
headers: dict = field(init=False)
def __post_init__(self):
self.headers = {
'Accept': 'application/json',
'Content-Type': 'application/json',
'Authorization': f'Bearer {self.api_key}',
}
def get_user_info(self) -> dict:
params = {
'subscriber_id': self.psid,
}
try:
response = requests.get(
url=f'{self.api_base_url}subscriber/getInfo',
headers=self.headers,
params=params,
timeout=5,
)
except Exception as e:
results = {
'status': 'error',
'message': e,
}
else:
results = json.loads(response.text)
return results
def send_content(self, messages: list) -> dict:
params = {
'subscriber_id': self.psid,
'data': {
'version': 'v2',
'content': {
'messages': [
{
'type': 'text',
'text': message,
} for message in messages
]
}
},
}
try:
response = requests.post(
url=f'{self.api_base_url}sending/sendContent',
headers=self.headers,
data=json.dumps(params),
timeout=5,
)
except Exception as e:
results = {
'status': 'error',
'message': e,
}
else:
results = json.loads(response.text)
return results
def send_flow(self, flow_ns: str) -> dict:
params = {
'subscriber_id': self.psid,
'flow_ns': flow_ns,
}
try:
response = requests.post(
url=f'{self.api_base_url}sending/sendFlow',
headers=self.headers,
data=json.dumps(params),
timeout=5,
)
except Exception as e:
results = {
'status': 'error',
'message': e,
}
else:
results = json.loads(response.text)
return results
def set_custom_field_by_name(self,
field_name: str,
field_value: Union[str, int, bool]) -> dict:
params = {
'subscriber_id': self.psid,
'field_name': field_name,
'field_value': field_value,
}
try:
response = requests.post(
url=f'{self.api_base_url}subscriber/setCustomFieldByName',
headers=self.headers,
data=json.dumps(params),
timeout=5,
)
except Exception as e:
results = {
'status': 'error',
'message': e,
}
else:
results = json.loads(response.text)
return results
| daiangan/manychat-dialogflow-connector | utils/manychat_helpers.py | manychat_helpers.py | py | 3,304 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "dataclasses.field",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_n... |
37568035732 | # import statements
import nltk
import sys
import pandas as pd
import re
from nltk import pos_tag
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import word_tokenize, sent_tokenize
from sqlalchemy import create_engine
# download necessary NLTK data
nltk.download(['punkt', 'wordnet'])
nltk.download('stopwords')
def load_data(messages_filepath, categories_filepath):
"""
Load the data containing the messages and the
categories, and merge on the common column 'id'
Returns a single pandas Dataframe.
Keyword arguments:
messages_filepath -- filepath (including file name)
of the file containing the messages
categories_filepath -- filepath (including file name)
of the file containing the message
categories
"""
messages = pd.read_csv(messages_filepath, encoding='UTF-8')
categories = pd.read_csv(categories_filepath, encoding='UTF-8')
return messages.merge(categories, on=['id'])
def clean_data(df):
"""
Parse the single 'categories' column into the 36 distinct
message category columns, name the resulting columns, and
clean the values, removing the category name from the cells
and leaving only the numeric categorical value.
Remove true duplicate rows.
Returns a cleaned Dataframe.
Keyword argument:
df -- Dataframe requiring cleaning.
"""
categories_new = df['categories'].str.split(pat=';', expand=True)
row = categories_new.iloc[0,:]
category_colnames = list(row.apply(lambda x: x[:-2]))
categories_new.columns = category_colnames
for column in categories_new:
# set each value to be the last character of the string
categories_new[column] = categories_new[column].str.slice(-1)
# convert column from string to numeric
categories_new[column] = categories_new[column].astype(int)
df.drop(columns=['categories'], inplace=True)
df = pd.concat([df, categories_new], axis=1)
# drop duplicates
df.drop_duplicates(inplace=True)
return df
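# Worked example of the split above (values illustrative): a raw cell such as
# "related-1;request-0;offer-0" becomes three columns; slicing the header row
# with [:-2] yields names ['related', 'request', 'offer'], and the last
# character of each cell gives the numeric labels 1, 0, 0.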
def save_data(df, database_filename):
"""
Save cleaned Dataframe to a SQL Database table.
Keyword arguments:
df -- Cleaned Dataframe for export
database_filename -- name of the database in which
table will be saved
"""
engine = create_engine('sqlite:///' + database_filename)
df.to_sql('messages_and_categories', engine, index=False)
def main():
"""
Executes following functions:
1) load_data(messages_filepath, categories_filepath)
2) clean_data(df)
3) save_data(df, database_filename)
"""
if len(sys.argv) == 4:
messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}'
.format(messages_filepath, categories_filepath))
df = load_data(messages_filepath, categories_filepath)
print('Cleaning data...')
df = clean_data(df)
print('Saving data...\n DATABASE: {}'.format(database_filepath))
save_data(df, database_filepath)
print('Cleaned data saved to database!')
else:
print('Please provide the filepaths of the messages and categories '\
'datasets as the first and second argument respectively, as '\
'well as the filepath of the database to save the cleaned data '\
'to as the third argument. \n\nExample: python process_data.py '\
'disaster_messages.csv disaster_categories.csv '\
'DisasterResponse.db')
if __name__ == '__main__':
main() | goitom/project_2_disaster_response | data/process_data.py | process_data.py | py | 3,643 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "nltk.download",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "nltk.download",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"li... |
74550779386 |
import torch
from torch import Tensor, nn
import torchvision
import os
import numpy as np
class Normalize:
def __init__(self, n_channels, expected_values, variance):
self.n_channels = n_channels
self.expected_values = expected_values
self.variance = variance
assert self.n_channels == len(self.expected_values)
def __call__(self, x):
x_clone = x.clone()
for channel in range(self.n_channels):
x_clone[:, channel] = (x[:, channel] - self.expected_values[channel]) / self.variance[channel]
return x_clone
class Denormalize:
def __init__(self, n_channels, expected_values, variance):
self.n_channels = n_channels
self.expected_values = expected_values
self.variance = variance
assert self.n_channels == len(self.expected_values)
def __call__(self, x):
x_clone = x.clone()
for channel in range(self.n_channels):
x_clone[:, channel] = x[:, channel] * self.variance[channel] + self.expected_values[channel]
return x_clone
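# Round-trip sanity check (a sketch; CIFAR-10 statistics assumed):
#
#   stats = ([0.4914, 0.4822, 0.4465], [0.247, 0.243, 0.261])
#   x = torch.rand(4, 3, 32, 32)
#   assert torch.allclose(Denormalize(3, *stats)(Normalize(3, *stats)(x)), x, atol=1e-6)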
class RegressionModel(nn.Module):
def __init__(self, task, model, init_mask, init_pattern):
self._EPSILON = 1e-7
super(RegressionModel, self).__init__()
self.mask_tanh = nn.Parameter(torch.tensor(init_mask))
self.pattern_tanh = nn.Parameter(torch.tensor(init_pattern))
self.classifier = self._get_classifier(model)
self.normalizer = self._get_normalize(task)
self.denormalizer = self._get_denormalize(task)
def forward(self, x):
mask = self.get_raw_mask()
pattern = self.get_raw_pattern()
if self.normalizer:
pattern = self.normalizer(self.get_raw_pattern())
x = (1 - mask) * x + mask * pattern
return self.classifier(x)
def get_raw_mask(self):
mask = nn.Tanh()(self.mask_tanh)
return mask / (2 + self._EPSILON) + 0.5
def get_raw_pattern(self):
pattern = nn.Tanh()(self.pattern_tanh)
return pattern / (2 + self._EPSILON) + 0.5
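# Note on the two mappings above: tanh keeps the raw parameters in (-1, 1);
# dividing by (2 + eps) and adding 0.5 squashes them into (0, 1), so the mask
# and pattern stay valid image intensities while remaining differentiable.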
def _get_classifier(self, model):
classifier = model
for param in classifier.parameters():
param.requires_grad = False
classifier.eval()
return classifier.to('cuda')
def _get_denormalize(self, task):
if task == 'cifar10':
denormalizer = Denormalize(3, [0.4914, 0.4822, 0.4465], [0.247, 0.243, 0.261])
elif task == 'mnist':
denormalizer = Denormalize(1, [0.5], [0.5])
elif task == 'imageNet':
denormalizer = Denormalize(3,[0.485,0.456,0.406],[0.229,0.224,0.225])
elif task == 'gtsrb':
denormalizer = None
else:
raise Exception("Invalid dataset")
return denormalizer
def _get_normalize(self, task):
if task == 'cifar10':
normalizer = Normalize(3, [0.4914, 0.4822, 0.4465], [0.247, 0.243, 0.261])
elif task == 'mnist':
normalizer = Normalize(1, [0.5], [0.5])
elif task == 'imageNet':
normalizer = Normalize(3, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
elif task == 'gtsrb':
normalizer = None
else:
raise Exception("Invalid dataset")
return normalizer
class Recorder:
def __init__(self,target_label,task):
super().__init__()
# Best optimization results
self.mask_best = None
self.pattern_best = None
self.reg_best = float('inf')
self.target_label = target_label
# Logs and counters for adjusting balance cost
self.logs = []
self.cost_set_counter = 0
self.cost_up_counter = 0
self.cost_down_counter = 0
self.cost_up_flag = False
self.cost_down_flag = False
# Counter for early stop
self.early_stop_counter = 0
self.early_stop_reg_best = self.reg_best
# Cost
self.cost = 1e-3
self.cost_multiplier_up = 2
self.cost_multiplier_down = 2 ** 1.5
self.task = task
def reset_state(self):
self.cost = 1e-3
self.cost_up_counter = 0
self.cost_down_counter = 0
self.cost_up_flag = False
self.cost_down_flag = False
print("Initialize cost to {:f}".format(self.cost))
def save_result_to_dir(self):
result_dir = '%s/u_t_%s'%(self.task,self.task)
if not os.path.exists(result_dir):
os.makedirs(result_dir)
result_dir = os.path.join(result_dir, str(self.target_label))
if not os.path.exists(result_dir):
os.makedirs(result_dir)
pattern_best = self.pattern_best
mask_best = self.mask_best
trigger = pattern_best * mask_best
path_mask = os.path.join(result_dir, 'mask.png')
path_pattern = os.path.join(result_dir, 'pattern.png')
path_trigger = os.path.join(result_dir, 'trigger.png')
torchvision.utils.save_image(mask_best, path_mask, normalize=True)
torchvision.utils.save_image(pattern_best, path_pattern, normalize=True)
torchvision.utils.save_image(trigger, path_trigger, normalize=True)
class UniversalTrigger:
def __init__(self):
self.universal_trigger_dict = {}
def train(task, model, target_label, init_mask, init_pattern, test_loader):
# Build regression model
regression_model = RegressionModel(task, model, init_mask, init_pattern).to('cuda')
# Set optimizer
optimizerR = torch.optim.Adam(regression_model.parameters(), lr=1e-1, betas=(0.5, 0.9))
# Set recorder (for recording best result)
recorder = Recorder(target_label,task)
for epoch in range(50):
early_stop = train_step(regression_model, optimizerR, test_loader, recorder, epoch, target_label)
if early_stop:
break
# Save result to dir
recorder.save_result_to_dir()
return recorder
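# Illustrative call (names and 32x32 RGB shapes are assumptions, e.g. CIFAR-10):
#
#   init_mask = np.random.randn(1, 32, 32).astype(np.float32)
#   init_pattern = np.random.randn(3, 32, 32).astype(np.float32)
#   recorder = train('cifar10', model, 0, init_mask, init_pattern, test_loader)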
def train_step(regression_model, optimizerR, dataloader, recorder, epoch, target_label,early_stop=True):
print("Epoch {} - Label: {}".format(epoch, target_label))
# Set losses
cross_entropy = nn.CrossEntropyLoss()
total_pred = 0
true_pred = 0
# Record loss for all mini-batches
loss_ce_list = []
loss_reg_list = []
loss_list = []
loss_acc_list = []
# Set inner early stop flag
inner_early_stop_flag = False
for batch_idx, (inputs, labels) in enumerate(dataloader):
# Forwarding and update model
optimizerR.zero_grad()
inputs = inputs.to('cuda')
sample_num = inputs.shape[0]
total_pred += sample_num
target_labels = torch.ones((sample_num), dtype=torch.int64).to('cuda') * target_label
predictions = regression_model(inputs)
loss_ce = cross_entropy(predictions, target_labels)
loss_reg = torch.norm(regression_model.get_raw_mask(), 2)
total_loss = loss_ce + recorder.cost * loss_reg
total_loss.backward()
optimizerR.step()
# Record minibatch information to list
minibatch_accuracy = torch.sum(torch.argmax(predictions, dim=1) == target_labels).detach() * 100. / sample_num
loss_ce_list.append(loss_ce.detach())
loss_reg_list.append(loss_reg.detach())
loss_list.append(total_loss.detach())
loss_acc_list.append(minibatch_accuracy)
true_pred += torch.sum(torch.argmax(predictions, dim=1) == target_labels).detach()
loss_ce_list = torch.stack(loss_ce_list)
loss_reg_list = torch.stack(loss_reg_list)
loss_list = torch.stack(loss_list)
loss_acc_list = torch.stack(loss_acc_list)
avg_loss_ce = torch.mean(loss_ce_list)
avg_loss_reg = torch.mean(loss_reg_list)
avg_loss = torch.mean(loss_list)
avg_loss_acc = torch.mean(loss_acc_list)
# Check to save best mask or not
if avg_loss_acc >= 99. and avg_loss_reg < recorder.reg_best:
recorder.mask_best = regression_model.get_raw_mask().detach()
recorder.pattern_best = regression_model.get_raw_pattern().detach()
recorder.reg_best = avg_loss_reg
recorder.save_result_to_dir()
print(" Updated !!!")
# Show information
print(' Result: Accuracy: {:.3f} | Cross Entropy Loss: {:.6f} | Reg Loss: {:.6f} | Reg best: {:.6f}'.format(
true_pred * 100. / total_pred,
avg_loss_ce,
avg_loss_reg,
recorder.reg_best))
# Check early stop
if early_stop:
if recorder.reg_best < float('inf'):
if recorder.reg_best >= 0.99 * recorder.early_stop_reg_best:  # 0.99: assumed near-1 threshold; the literal 99. would never trigger
recorder.early_stop_counter += 1
else:
recorder.early_stop_counter = 0
recorder.early_stop_reg_best = min(recorder.early_stop_reg_best, recorder.reg_best)
if (
recorder.cost_down_flag and recorder.cost_up_flag and recorder.early_stop_counter >= 25):
print('Early_stop !!!')
inner_early_stop_flag = True
if not inner_early_stop_flag:
# Check cost modification
if recorder.cost == 0 and avg_loss_acc >= 99.:
recorder.cost_set_counter += 1
if recorder.cost_set_counter >= 5:
recorder.reset_state()
else:
recorder.cost_set_counter = 0
if avg_loss_acc >= 99.:
recorder.cost_up_counter += 1
recorder.cost_down_counter = 0
else:
recorder.cost_up_counter = 0
recorder.cost_down_counter += 1
if recorder.cost_up_counter >= 5:
recorder.cost_up_counter = 0
print("Up cost from {} to {}".format(recorder.cost, recorder.cost * recorder.cost_multiplier_up))
recorder.cost *= recorder.cost_multiplier_up
recorder.cost_up_flag = True
elif recorder.cost_down_counter >= 5:
recorder.cost_down_counter = 0
print("Down cost from {} to {}".format(recorder.cost, recorder.cost / recorder.cost_multiplier_down))
recorder.cost /= recorder.cost_multiplier_down
recorder.cost_down_flag = True
# Save the final version
if recorder.mask_best is None:
recorder.mask_best = regression_model.get_raw_mask().detach()
recorder.pattern_best = regression_model.get_raw_pattern().detach()
return inner_early_stop_flag
| Mr-Ace-1997/SGBA-A-Stealthy-Scapegoat-Backdoor-Attack-against-Deep-Neural-Networks | utils_universal_trigger.py | utils_universal_trigger.py | py | 10,352 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "torch.nn.Parameter",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line... |
37520613786 | import pandas as pd
import redis
from redisgraph import Graph
from config import redisgraph_config
from pathlib import Path
def redisgraph_import_csv(output: Path) -> None:
r = redis.Redis(host=redisgraph_config['host'], port=redisgraph_config['port'])
graph_name = 'movie_graph'
redis_graph = Graph(graph_name, r)
import_csv = pd.read_csv(output / 'neo4j_export.csv')
break_column_name = '_start'
nodes = pd.DataFrame()
relations = pd.DataFrame()
active_df = nodes
for idx, col in enumerate(import_csv.columns):
if col == break_column_name:
active_df = relations
active_df[col] = import_csv[col]
nodes.dropna(how='all', subset=None, inplace=True)
relations.dropna(how='all', subset=None, inplace=True)
# relations.to_csv('../out/Relations.csv')
node_values = nodes['_labels'].unique()
rows_to_drop = [node_value for node_value in node_values if 'UNIQUE IMPORT LABEL' in node_value]
for row_to_drop in rows_to_drop:
nodes = nodes[nodes["_labels"].str.contains(row_to_drop) == False]
node_values = nodes['_labels'].unique()
for node_type in node_values:
node_data = nodes[nodes['_labels'] == node_type]
filename = f'../out/{node_type.replace(":", "")}.csv'
node_data.dropna(how='all', axis=1, inplace=True)
# node_data.to_csv(filename)
for node in node_data.iloc:
params = ''
for param in node_data.columns:
if not param.startswith('_'):
val = node[param]
if not pd.isna(val):
try:
val = float(val)
except:
val = f'"{val}"'
params = params + f', {param}: {val}'
query = f'MERGE ({node["_labels"]} {{id: {node["_id"]} {params} }})'
redis_graph.query(query)
for node in relations.iloc:
query = f"""
MATCH (a), (b) WHERE a.id = {node["_start"]} AND b.id = {node["_end"]} CREATE (a)-[:{node['_type']}]->(b)
"""
redis_graph.query(query)
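# For reference, the two query templates above expand to Cypher like
# (values illustrative):
#   MERGE (:Movie {id: 42, title: "Alien"})
#   MATCH (a), (b) WHERE a.id = 42 AND b.id = 7 CREATE (a)-[:ACTED_IN]->(b)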
| Wojaqqq/graph_data_exchange_tool | imports/redisgraph_import_csv.py | redisgraph_import_csv.py | py | 2,156 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pathlib.Path",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "redis.Redis",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "config.redisgraph_config",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "redisgraph.Graph",
... |
39227047564 | import os
import numpy as np
import scipy.io.wavfile as wavfile
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import normalize
from utils import read_textgrid
from numpy_operation import get_martix
from python_speech_features import mfcc
from python_speech_features import delta
#from python_speech_features import fbank
def to_one_hot(labels, dimension=5):
results = np.zeros((len(labels),dimension))
for i, label in enumerate(labels):
results[i, label] = 1.
return results
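# Doctest-style example (illustrative): to_one_hot([0, 2, 4]) returns
#   [[1., 0., 0., 0., 0.],
#    [0., 0., 1., 0., 0.],
#    [0., 0., 0., 0., 1.]]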
def read_wav(filename):
rate, data = wavfile.read(filename)
#only use the 1st channel if stereo
if len(data.shape) > 1:
data = data[:,0]
data = data.astype(np.float32)
data = data / 32768 #convert PCM int16 to float
return data, rate
def feature_extract(filename, wavpath, tgpath):
wav_filename = os.path.join(wavpath,filename+'.wav')
print(wav_filename)
tg_filename = os.path.join(tgpath,filename+'.textgrid')
y,sr = read_wav(wav_filename)
mfccs = mfcc(signal=y,samplerate=sr,winlen=0.02,winfunc=np.hamming)
delta1 = delta(mfccs,1)
delta2 = delta(mfccs,2)
_mfccs = np.concatenate((mfccs,delta1,delta2),1)
_mfccs = normalize(_mfccs)
_mfccs = get_martix(_mfccs,30,10)
_labels = None
if(os.path.exists(tg_filename)):
_labels = read_textgrid(tg_filename,len(_mfccs))
_labels = to_one_hot(_labels)
return _mfccs,_labels
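# Usage sketch (paths are hypothetical; expects 'sample.wav' plus an optional
# 'sample.textgrid'); get_martix(_mfccs, 30, 10) is assumed to stack frames
# into 30-frame windows with a 10-frame step:
#
#   feats, labels = feature_extract('sample', 'data/wav', 'data/textgrid')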
| MakerFace/voice-activation-system | feature_extractor.py | feature_extractor.py | py | 1,510 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.zeros",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "scipy.io.wavfile.read",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "scipy.io.wavfile",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "numpy.float32",
... |
2896601636 | from dotenv import load_dotenv
import discord
from discord.ext import commands
import os
import re
load_dotenv()
token = os.getenv('DISCORD_TOKEN')
commMark = os.getenv('COMMAND_MARKER')
description = 'Discord administration bot'
intents = discord.Intents.default()
intents.message_content = True
intents.members = True
bot = commands.Bot(command_prefix=commMark, description=description, intents=intents)
#posts log in message on log in
@bot.event
async def on_ready():
print(f'Logged on as {bot.user} (ID {bot.user.id})')
print('-----------')
#------------------------------------------------Functions---------------------------------------------------#
def get_rid(ctx, role_input):
print("get_rid starting") #debugging - prints once function is called
#cleans input and assigns to role_name
role_name = role_input.strip()
print(role_name)
#first trying regex to get the id from the message itself
role_id = re.search(r'\d{18}', role_name)
roles_list = [] #initializing return list
if role_id is not None: #checking if re found something
role_id = role_id.group(0) # getting readable id
roles_list.append(int(role_id)) #getting and appending role-id to list
else:
#iterating through roles, searching for name match
for g_role in ctx.guild.roles:
if role_name in str(g_role.name):
roles_list.append(int(g_role.id)) #appending to list
print(roles_list) #debugging - prints roles_list
roleLen = len(roles_list)
print('length: ' + str(roleLen)) #debugging - prints length of roles_list
print('get_rid finishing')
return roles_list, len(roles_list)
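# Note (illustrative): a role mention such as <@&123456789012345678> carries an
# 18-digit snowflake, which the regex above extracts; plain role names fall
# through to the name-matching loop instead.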
#similar function to get_rid, but for retrieving user ID
def getuid(ctx, user_input):
print("get_uid starting") #debugging - prints once function is called
#cleans input and assigns to role_name
users_list = []
user_name = user_input.strip()
print("uid start " + user_name)
for g_user in ctx.guild.members:
print( "uid for " + str(g_user))
if user_name in str(g_user):
users_list.append(int(g_user.id))
print("username match")
print("appended " + str(g_user.id)) #appending to list
print("get_uid users list " + str(users_list))
else:
print("Not a match")
print("get_uid list" + str(users_list)) #debugging - prints roles_list
userLen = len(users_list)
print(userLen)
print('get_uid finishing')
return users_list, len(users_list)
#----------------------------------------- Commands below, functions above-----------------------------------------------------------#
#------------------------------------------testing/troubleshooting commands----------------------------------------------------------#
#@bot.command()
#async def hello(ctx):
# await ctx.send(f'Hello {ctx.author.display_name}.')
#test command, just echoes the argument
#@bot.command()
#async def test(ctx, content):
# await ctx.send(content)
#-----------------------------------------------administrative commands---------------------------------------------------------------#
#command to get role ID
@bot.command()
async def roleid(ctx, role_name: str):
try:
role, le = get_rid(ctx, role_name)
print(role)
print(le)
if le == 1:
roleAdd = role[0]
await ctx.send(roleAdd)
except:
emby = discord.Embed(title="", color=discord.Color.red())
emby.add_field(name="Something went wrong", value="Please check your given argument")
await ctx.send(embed=emby)
#command to add role to user
@bot.command()
async def addrole(ctx, role_name: str, user):
try:
role, le = get_rid(ctx, role_name)
print(role)
print(le)
if le == 1:
roleAdd = role[0]
print(roleAdd)
getuid(ctx, user)
await ctx.send('Adding role %s to user %s' % (role_name, user))
except:
print("except")
#command to feth user id's
@bot.command()
async def userid(ctx, user):
print('User ID command called by %s, requesting UserID for %s' % (ctx.author, user))
try:
#calls function to get user ID by username, then prints variables for debugging/logging
userN, leU = getuid(ctx, user)
#outputs all user IDs to chat
if leU == 0:
await ctx.send("No user found with that name")
else:
for i in userN:
await ctx.send(i)
except:
emby = discord.Embed(title="", color=discord.Color.red())
emby.add_field(name="Something went wrong", value="Please check your given argument")
await ctx.send(embed=emby)
#starts the bot
bot.run(token)
| Srs2311/gene.py | gene.py | gene.py | py | 4,778 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "discord.Intents.default",
"l... |
33670502801 | import sqlite3
import sys
from PyQt6.QtWidgets import QApplication, QLabel, QWidget, QGridLayout, \
QLineEdit, QPushButton, QMainWindow, QTableWidget, QTableWidgetItem, QDialog, \
QVBoxLayout, QComboBox, QToolBar, QStatusBar, QMessageBox
from PyQt6.QtGui import QAction, QIcon
from PyQt6.QtCore import Qt
# Database connection class
class DatabaseConnection:
def __init__(self, database_file="database.db"):
self.database_file = database_file
def connect(self):
# Establish connection to database and create cursor
connection = sqlite3.connect(self.database_file)
cursor = connection.cursor()
# return connection and cursor, destructure variables in creation of instances
return connection, cursor
def close_connection(self, connection, cursor):
# Commit changes to db and close connections, refresh app table
return connection.commit(), cursor.close(), connection.close(), student_management_sys.load_data()
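# Typical call pattern for the helper class above (a sketch; assumes the
# default database.db file and an existing 'students' table):
#
#   connection, cursor = DatabaseConnection().connect()
#   cursor.execute("SELECT * FROM students")
#   DatabaseConnection().close_connection(connection, cursor)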
# App Main Window class
class MainWindow(QMainWindow):
def __init__(self):
super().__init__()
self.setWindowTitle("Student Management System")
self.setMinimumSize(600, 400)
# Menu items
file_menu_item = self.menuBar().addMenu("&File")
utility_menu_item = self.menuBar().addMenu("&Utility")
help_menu_item = self.menuBar().addMenu("&Help")
# Add student menu item and action with toolbar icon binding to action
add_student_action = QAction(QIcon("icons/add.png"), "Add Student", self)
add_student_action.triggered.connect(self.insert)
file_menu_item.addAction(add_student_action)
# About menu item and action
about_action = QAction("About", self)
help_menu_item.addAction(about_action)
about_action.triggered.connect(self.about)
# SEARCH item and action with toolbar icon binding to action
search_action = QAction(QIcon("icons/search.png"), "Search", self)
search_action.triggered.connect(self.search)
utility_menu_item.addAction(search_action)
# Toolbar widget and elements, toolbar is also movable
toolbar = QToolBar()
toolbar.setMovable(True)
self.addToolBar(toolbar)
toolbar.addAction(add_student_action)
toolbar.addAction(search_action)
# Statusbar widget and elements
self.statusbar = QStatusBar()
self.setStatusBar(self.statusbar)
# QTableWidget attributes
self.table = QTableWidget()
self.table.setColumnCount(4)
self.table.setHorizontalHeaderLabels(("Id", "Name", "Course", "Mobile"))
# To hide default vertical numbers not associated with SQL database
self.table.verticalHeader().setVisible(False)
# Detect if cell is clicked
self.table.cellClicked.connect(self.cell_clicked)
# Set a center layout widget to QTableWidget instance
self.setCentralWidget(self.table)
# Cell clicked method
def cell_clicked(self):
# Edit button
edit_button = QPushButton("Edit Record")
edit_button.clicked.connect(self.edit)
# Delete button
delete_button = QPushButton("Delete Record")
delete_button.clicked.connect(self.delete)
# Find children of statusbar widgets and remove appending children
# Prevent duplications of widgets for every cell click
children = self.findChildren(QPushButton)
if children:
for child in children:
self.statusbar.removeWidget(child)
# Add widgets after cell is clicked
self.statusbar.addWidget(edit_button)
self.statusbar.addWidget(delete_button)
# Load SQL Database data in PyQt
def load_data(self):
# Connect SQL database
connection, cursor = DatabaseConnection().connect()
results = connection.execute("SELECT * FROM students")
# Initialize table number to 0
self.table.setRowCount(0)
# Iterate through row numbers
for row_number, row_data in enumerate(results):
# Every index insert a row cell with a row number
self.table.insertRow(row_number)
# Iterate through column numbers
for column_number, column_data in enumerate(row_data):
# Every index of a row number and column number add column data
self.table.setItem(row_number, column_number, QTableWidgetItem(str(column_data)))
# Close the database connection
connection.close()
# Insert new data method call
def insert(self):
dialog = InsertDialog()
dialog.exec()
def search(self):
search_dialog = SearchDialog()
search_dialog.exec()
def edit(self):
edit_dialog = EditDialog()
edit_dialog.exec()
def delete(self):
delete_dialog = DeleteDialog()
delete_dialog.exec()
def about(self):
about_dialog = AboutDialog()
about_dialog.exec()
# Dialog Attributes for Insert
class InsertDialog(QDialog):
def __init__(self):
super().__init__()
# Set Window Attributes
self.setWindowTitle("Insert Student Data")
self.setFixedWidth(300)
self.setFixedHeight(300)
layout = QVBoxLayout()
# Add Student Name widget
self.student_name = QLineEdit()
self.student_name.setPlaceholderText("Name")
layout.addWidget(self.student_name)
# Add Course ComboBox widget
self.course_name = QComboBox()
courses = ["Biology", "Math", "Astronomy", "Physics"]
self.course_name.addItems(courses)
layout.addWidget(self.course_name)
# Add Mobile Number widget
self.mobile_number = QLineEdit()
self.mobile_number.setPlaceholderText("Mobile Number")
layout.addWidget(self.mobile_number)
# Submit button
submit_btn = QPushButton("Register")
submit_btn.clicked.connect(self.add_student)
layout.addWidget(submit_btn)
self.setLayout(layout)
# Add Student method
def add_student(self):
# Reference to field values stored in variables
name = self.student_name.text()
course = self.course_name.itemText(self.course_name.currentIndex())
mobile = self.mobile_number.text()
# Connect to database and create cursor
connection, cursor = DatabaseConnection().connect()
# Use the cursor to destructure and INSERT reference variables into related db columns
cursor.execute("INSERT INTO students (name, course, mobile) VALUES (?, ?, ?)",
(name, course, mobile))
# Commit changes, Close connection to database and cursor
DatabaseConnection().close_connection(connection, cursor)
# Close window after entry
self.close()
# Dialog Attributes for Search
class SearchDialog(QDialog):
def __init__(self):
super().__init__()
# Set Window Attributes
self.setWindowTitle("Search Student")
self.setFixedWidth(300)
self.setFixedHeight(300)
search_layout = QVBoxLayout()
# Search Student Name widget
self.search_student_name = QLineEdit()
self.search_student_name.setPlaceholderText("Name")
search_layout.addWidget(self.search_student_name)
# Search button
search_btn = QPushButton("Search")
search_btn.clicked.connect(self.search_student)
search_layout.addWidget(search_btn)
self.setLayout(search_layout)
# Search Student method
def search_student(self):
# Reference to field values stored in variables
name = self.search_student_name.text()
# Connect to database and create cursor
connection, cursor = DatabaseConnection().connect()
# Select all fields that contained query of student name in database
result = cursor.execute("SELECT * FROM students WHERE name = ?", (name, ))
rows = list(result)
print(rows)
# Select all fields in Main window table and find match of student name
items = student_management_sys.table.findItems(name, Qt.MatchFlag.MatchFixedString)
# Highlight all names that match query and print item row to console
for item in items:
print(item)
student_management_sys.table.item(item.row(), 1).setSelected(True)
# Close cursor and connection to db
cursor.close()
connection.close()
# Close dialog after search
self.close()
# Dialog Attributes for Edit
class EditDialog(QDialog):
def __init__(self):
super().__init__()
# Set Window Attributes
self.setWindowTitle("Update Student Data")
self.setFixedWidth(300)
self.setFixedHeight(300)
layout = QVBoxLayout()
# Get table row and column of student to edit
index = student_management_sys.table.currentRow()
# Get ID from selected Row
self.student_id = student_management_sys.table.item(index, 0).text()
# Get student name
student_name = student_management_sys.table.item(index, 1).text()
# Get Course name
course_name = student_management_sys.table.item(index, 2).text()
# Get Mobile number
mobile_number = student_management_sys.table.item(index, 3).text()
# Add Student Name widget
self.student_name = QLineEdit(student_name)
self.student_name.setPlaceholderText("Name")
layout.addWidget(self.student_name)
# Add Course ComboBox widget
self.course_name = QComboBox()
courses = ["Biology", "Math", "Astronomy", "Physics"]
self.course_name.addItems(courses)
self.course_name.setCurrentText(course_name)
layout.addWidget(self.course_name)
# Add Mobile Number widget
self.mobile_number = QLineEdit(mobile_number)
self.mobile_number.setPlaceholderText("Mobile Number")
layout.addWidget(self.mobile_number)
# Submit button
submit_btn = QPushButton("Update")
submit_btn.clicked.connect(self.update_student)
layout.addWidget(submit_btn)
self.setLayout(layout)
# Update method
def update_student(self):
connection, cursor = DatabaseConnection().connect()
# Destructure table rows and UPDATE with new values from references in edit fields
cursor.execute("UPDATE students SET name = ?, course = ?, mobile = ? WHERE id = ?",
(self.student_name.text(),
self.course_name.itemText(self.course_name.currentIndex()),
self.mobile_number.text(), self.student_id))
# Commit changes, Close connection to database and cursor
DatabaseConnection().close_connection(connection, cursor)
# Close dialog after update
self.close()
# Dialog Attributes for Delete
class DeleteDialog(QDialog):
def __init__(self):
super().__init__()
# Set Window Attributes
self.setWindowTitle("Delete Student Data")
layout = QGridLayout()
confirmation = QLabel("Are you sure you want to delete?")
yes = QPushButton("Yes")
no = QPushButton("No")
layout.addWidget(confirmation, 0, 0, 1, 2)
layout.addWidget(yes, 1, 0)
layout.addWidget(no, 1, 1)
self.setLayout(layout)
yes.clicked.connect(self.delete_student)
no.clicked.connect(self.close)
# Delete Method
def delete_student(self):
# Connect to database
connection, cursor = DatabaseConnection().connect()
# Get table row and column of student to edit
index = student_management_sys.table.currentRow()
# Get ID from selected Row
student_id = student_management_sys.table.item(index, 0).text()
# Execute SQL DELETE query using student ID
cursor.execute("DELETE FROM students WHERE id = ?", (student_id, ))
# Commit changes, Close connection to database and cursor
DatabaseConnection().close_connection(connection, cursor)
# Create a message box to relay deletion was successful
confirmation_widget = QMessageBox()
confirmation_widget.setWindowTitle("Success")
confirmation_widget.setText("The Record Deleted Successfully!")
confirmation_widget.exec()
# Close delete dialog window
self.close()
# About Inheriting from 'QMessageBox' simple child version of a QDialog
class AboutDialog(QMessageBox):
def __init__(self):
super().__init__()
self.setWindowTitle("About")
# Content for about section
content = "I built this academic management app as I learned PyQt6 and it's component libraries. " \
"I used object oriented architecture to keep my code organized and scalable." \
" A SQL database was used store records and 'CRUD' methods were used to managed it's contents."
# Use set text to content
self.setText(content)
if __name__ == "__main__":
app = QApplication(sys.argv)
student_management_sys = MainWindow()
student_management_sys.show()
student_management_sys.load_data()
sys.exit(app.exec())
| KelvinBrannonJr/Student_Mangement_System | main.py | main.py | py | 13,372 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sqlite3.connect",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "PyQt6.QtWidgets.QMainWindow",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "PyQt6.QtGui.QAction",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "PyQt6... |
30353175971 | import sys
import os
from os.path import splitext
import glob
from common import TestCase
def get_tests():
"""Get all the tests to run.
"""
files = glob.glob('test_*.py')
return files
def run_all(tests):
"""Run the given tests.
"""
args = ' '.join(sys.argv[1:])
success = []
fail = []
for test in tests:
cmd = 'python %s %s'%(test, args)
print(cmd)
status = os.system(cmd)
if status == 0:
print("OK")
success.append(test)
else:
print("FAIL: %s"%test)
fail.append(test)
print('-'*70)
print("%d successful tests, %d failures"%(len(success), len(fail)))
for test in fail:
print(test)
print('-'*70)
return len(fail) != 0
class RunAllTests(TestCase):
"""Runs all the tests in one go, instead of running each test
separately. This speeds up the testing.
"""
def get_tests(self):
tests = get_tests()
tests = [splitext(t)[0] for t in tests]
klasses = []
for test in tests:
# Find test.
m = __import__(test)
m.mayavi = self.script
m.application = self.application
for name in dir(m):
klass = getattr(m, name)
try:
if issubclass(klass, TestCase) and klass is not TestCase:
mod_name = '%s.%s'%(test, name)
klasses.append((mod_name, klass))
break
except TypeError:
continue
return klasses
def do(self):
klasses = self.get_tests()
for name, klass in klasses:
# Close existing scenes.
e = self.script.engine
for scene in e.scenes:
e.close_scene(scene)
print('*'*80)
print(name)
obj = klass()
obj.trait_set(script=self.script)
obj.test()
def main():
argv = ' '.join(sys.argv)
if '--one-shot' in argv:
argv = argv.replace('--one-shot', '')
sys.argv = argv.split()
t = RunAllTests()
t.main()
else:
tests = get_tests()
status = run_all(tests)
sys.exit(status)
if __name__ == "__main__":
main()
| enthought/mayavi | integrationtests/mayavi/run.py | run.py | py | 2,323 | python | en | code | 1,177 | github-code | 6 | [
{
"api_name": "glob.glob",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.system",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "common.TestCase",
"line_number"... |
29969829561 | """
MongoDB Interaction - A simple example for future developments
Fabio Bove | fabio.bove.dr@gmail.com
"""
#!/usr/bin/env python
# coding: utf-8
# Imports
from pymongo import MongoClient
class MongoUtils:
def __init__(self, auth_param: str, collection_name: str, database_name: str, data: dict) -> None:
self.mongo_client = None
self.last_op_status = None
self.database = None
self.collection = None
self.database_list = None
self.collections_list = None
self.auth_param = auth_param
self.collection_name = collection_name
self.database_name = database_name
self.data = data
def get_last_op_status(self) -> str:
"""
get_last_op_status, this method returns a string containing the status of the last operation made by this class
param: None
return: last_op_status: A string containing the status of the last operation made by this class
"""
return self.last_op_status
def connect_to_cluster(self) -> None:
"""
        connect_to_cluster, this method allows instantiating a new cluster connection using the pymongo lib
        param: None
return: None
"""
try:
self.mongo_client = MongoClient(self.auth_param)
self.last_op_status = "Successfully connected to Mongo Cluster"
except Exception as e:
self.last_op_status = f"Something went wrong during cluster connection: \n {e}"
self.mongo_client = None
def init_dabase(self, database_name:str) -> None:
"""
        init_dabase method, creates (if it doesn't exist yet) a new database named <database_name>
param: database_name: A string with the name of the new database
return: Nothing
"""
try: # Get the list of databases for the current cluster
self.database_list = self.mongo_client.list_database_names()
self.last_op_status = f"Got the list of active databases: \n {self.database_list}"
except Exception as e:
self.last_op_status = f"Can't get the list of databases: \n {e}"
self.database_list = None
try:
if self.database_list is not None and database_name in self.database_list:
self.last_op_status = f"Database {database_name} already exists."
self.database = self.mongo_client.get_database(database_name)
else:
self.database = self.mongo_client[database_name]
self.last_op_status = f"Database <{database_name}> created successfully."
except Exception as e:
self.last_op_status = f"Something went wrong during database creation: \n {e}"
self.database = None
def init_collection(self, collection_name:str):
"""
        init_collection method, initializes a collection if it doesn't exist already, otherwise returns the existing one
param: collection_name: The name of the collection
return: Nothing
"""
try:
self.collections_list = self.database.list_collection_names()
except Exception as e:
self.last_op_status = f"Can't get the list of collection: \n {e}"
self.collection = None
self.collections_list = None
try:
if self.collections_list is not None and collection_name in self.collections_list:
self.last_op_status = f"Collection already exists."
self.collection = self.database.get_collection(collection_name)
else:
self.collection = self.database[collection_name]
self.last_op_status = f"Collection <{collection_name}> created successfully."
except Exception as e:
self.last_op_status = f"Something went wrong during collection creation: \n {e}"
self.collection = None
def init_documents(self, data:dict) -> None:
"""
init_documents method, inserts the documents into our collection taken from the given data
param: data: a dict containing all the data to load in the collection
return: Nothing
"""
try:
self.collection.insert_many(data) # [self.collection.insert_one(elem) for elem in data]
self.last_op_status = f"Documents loaded successfully."
except Exception as e:
self.last_op_status = f"Something went wrong during document insertion: \n {e}"
def clean_collection(self, collection_name: str) -> None:
"""
clean_collection method, remove all the documents of a collection
param: collection_name: A string containing the name of the collection.
return: Nothing
"""
if collection_name is not None: # Load the desired collection, if collection_name is empty use the last collection connected to the class
self.init_collection(collection_name)
if self.collection is not None:
if self.collection.count_documents({}) > 0: # Remove the old documents
self.collection.delete_many({})
self.last_op_status = f"Removed old files from the collection."
def init_cluster(self):
self.connect_to_cluster()
self.init_dabase(self.database_name)
self.init_collection(self.collection_name)
self.init_documents(self.data) | fabiobove-dr/mongo-db-interaction-utils | src/MongoUtils.py | MongoUtils.py | py | 5,532 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 42,
"usage_type": "call"
}
] |
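A minimal usage sketch for the MongoUtils record above; the URI, database, and collection names are hypothetical placeholders, not values from the source.

docs = [{"name": "alpha"}, {"name": "beta"}]
utils = MongoUtils(
    auth_param="mongodb://localhost:27017",  # assumed local cluster URI
    collection_name="demo_collection",       # hypothetical collection name
    database_name="demo_db",                 # hypothetical database name
    data=docs,
)
utils.init_cluster()                   # connect, create db/collection, insert docs
print(utils.get_last_op_status())      # status string of the last operation
utils.clean_collection("demo_collection")  # remove the inserted documents again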
88489070 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
CS224N 2018-19: Homework 5
model_embeddings.py: Embeddings for the NMT model
Pencheng Yin <pcyin@cs.cmu.edu>
Sahil Chopra <schopra8@stanford.edu>
Anand Dhoot <anandd@stanford.edu>
Michael Hahn <mhahn2@stanford.edu>
"""
import torch.nn as nn
# Do not change these imports; your module names should be
# `CNN` in the file `cnn.py`
# `Highway` in the file `highway.py`
# Uncomment the following two imports once you're ready to run part 1(j)
from cnn import CNN
from highway import Highway
# End "do not change"
class ModelEmbeddings(nn.Module):
"""
Class that converts input words to their CNN-based embeddings.
"""
def __init__(self, embed_size, vocab):
"""
Init the Embedding layer for one language
@param embed_size (int): Embedding size (dimensionality) for the output
@param vocab (VocabEntry): VocabEntry object. See vocab.py for documentation.
"""
super(ModelEmbeddings, self).__init__()
## A4 code
# pad_token_idx = vocab.src['<pad>']
# self.embeddings = nn.Embedding(len(vocab.src), embed_size, padding_idx=pad_token_idx)
## End A4 code
### YOUR CODE HERE for part 1j
self.e_char = 50
self.w_word = 21 # same as max_word_length. same value (21) used in function pad_sents_char in utils.py
self.embed_size = embed_size # same as e_word
self.char_embedding = nn.Embedding(len(vocab.char2id), self.e_char, vocab.char2id['<pad>'])
self.cnn = CNN(self.e_char, self.embed_size, self.w_word)
self.highway = Highway(self.embed_size)
### END YOUR CODE
def forward(self, input):
"""
Looks up character-based CNN embeddings for the words in a batch of sentences.
@param input: Tensor of integers of shape (sentence_length, batch_size, max_word_length) where
each integer is an index into the character vocabulary
        @returns output: Tensor of shape (sentence_length, batch_size, embed_size), containing the
CNN-based embeddings for each word of the sentences in the batch
"""
## A4 code
# output = self.embeddings(input)
# return output
## End A4 code
### YOUR CODE HERE for part 1j
# x_padded has shape : (sentence_length, batch_size, max_word_length)
x_padded = input
# x_emb has shape : (sentence_length, batch_size, max_word_length, e_char)
x_emb = self.char_embedding(x_padded)
# x_reshape_4D has shape : (sentence_length, batch_size, e_char, max_word_length)
x_reshape_4D = x_emb.permute(0, 1, 3, 2)
sentence_length, batch_size, e_char, max_word_length = x_reshape_4D.shape
# x_reshape has shape : (-1, e_char, max_word_length)
x_reshape = x_reshape_4D.view(-1, e_char, max_word_length)
# x_conv_out has shape : (-1, e_word)
x_conv_out = self.cnn(x_reshape)
# x_word_embed has shape : (-1, e_word)
x_word_embed = self.highway(x_conv_out)
output = x_word_embed.view(sentence_length, batch_size, self.embed_size)
return output
### END YOUR CODE
| abgoswam/CS224N-Natural-Language-Processing-with-Deep-Learning | a5/model_embeddings.py | model_embeddings.py | py | 3,223 | python | en | code | 18 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "torch.nn.Embedding",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line... |
18211764194 | import argparse
import os
import unittest
from bin.get_alleles_from_srst2_mlst import get_mismatch_and_depth, get_new_and_existing_alleles, write_alleles_file, get_arguments
class TestProcessResults(unittest.TestCase):
TEST_OUTPUT_PREFIX = 'test'
TEST_FILE = 'tests/test_data/input/test__mlst__Streptococcus_agalactiae_MLST_alleles__results.txt'
TEST_OUT1 = 'tests/test_data/output/test_mlst_alleles.txt'
TEST_OUT2 = 'tests/test_data/output/test_mlst_alleles2.txt'
def test_get_mismatch_and_depth(self):
actual = get_mismatch_and_depth(self.TEST_FILE)
self.assertEqual(actual, ('adhP_1/1snp', 173.614142857, 'ST-1'))
def test_get_new_and_existing_alleles(self):
        get_new_and_existing_alleles(('adhP_1/1snp', 173.614142857, 'ST-1'), 30, self.TEST_OUTPUT_PREFIX)
f = open(self.TEST_OUTPUT_PREFIX + '_new_mlst_alleles.txt', "r")
actual = "".join(f.readlines())
self.assertEqual(actual, """Alleles found\nadhP_1\n""")
os.remove(self.TEST_OUTPUT_PREFIX + '_new_mlst_alleles.txt')
def test_get_new_and_existing_alleles_low_depth(self):
        get_new_and_existing_alleles(('adhP_1/1snp', 29.99, 'ST-1'), 30, self.TEST_OUTPUT_PREFIX)
f = open(self.TEST_OUTPUT_PREFIX + '_new_mlst_alleles.txt', "r")
actual = "".join(f.readlines())
self.assertEqual(actual, """test: No new MLST alleles were found with sufficient read depth above 30.\n""")
os.remove(self.TEST_OUTPUT_PREFIX + '_new_mlst_alleles.txt')
def test_get_new_and_existing_alleles_multi_alleles(self):
        get_new_and_existing_alleles(('adhP_1/1snp;pheS_1/1snp', 173.614142857, 'ST-1'), 30, self.TEST_OUTPUT_PREFIX)
f = open(self.TEST_OUTPUT_PREFIX + '_new_mlst_alleles.txt', "r")
actual = "".join(f.readlines())
self.assertEqual(actual, """Alleles found\nadhP_1\npheS_1\n""")
os.remove(self.TEST_OUTPUT_PREFIX + '_new_mlst_alleles.txt')
def test_get_new_and_existing_alleles_no_mismatches(self):
        get_new_and_existing_alleles(('0', 173.614142857, 'ST-1'), 30, self.TEST_OUTPUT_PREFIX)
f = open(self.TEST_OUTPUT_PREFIX + '_existing_mlst_alleles.txt', "r")
actual = "".join(f.readlines())
self.assertEqual(actual, """ID\tST\ntest\tST-1\n""")
os.remove(self.TEST_OUTPUT_PREFIX + '_existing_mlst_alleles.txt')
def test_alleles_file(self):
write_alleles_file(['Alleles found', 'adhP_1', 'pheS_1'], self.TEST_OUT1)
f = open(self.TEST_OUT1, "r")
actual = "".join(f.readlines())
self.assertEqual(actual, """Alleles found\nadhP_1\npheS_1\n""")
def test_alleles_file_without_alleles(self):
write_alleles_file(['No new MLST alleles were found.'], self.TEST_OUT2)
f = open(self.TEST_OUT2, "r")
actual = "".join(f.readlines())
self.assertEqual(actual, """No new MLST alleles were found.\n""")
def test_arguments(self):
actual = get_arguments().parse_args(
['--mlst_results_file', 'mlst_file', '--min_read_depth', '30',
'--output_prefix', 'out'])
self.assertEqual(actual,
argparse.Namespace(mlst='mlst_file', min_depth=30, output='out'))
def test_arguments_short_options(self):
actual = get_arguments().parse_args(
['-m', 'mlst_file', '-d', '30', '-o', 'out'])
self.assertEqual(actual,
argparse.Namespace(mlst='mlst_file', min_depth=30, output='out'))
| sanger-bentley-group/GBS-Typer-sanger-nf | tests/get_alleles_from_srst2_mlst_test.py | get_alleles_from_srst2_mlst_test.py | py | 3,532 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "unittest.TestCase",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "bin.get_alleles_from_srst2_mlst.get_mismatch_and_depth",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "bin.get_alleles_from_srst2_mlst.get_new_and_existing_alleles",
"... |
26040479196 | from __future__ import annotations
from dataclasses import dataclass
from typing import Iterable
from pants.backend.javascript import install_node_package
from pants.backend.javascript.install_node_package import (
InstalledNodePackageRequest,
InstalledNodePackageWithSource,
)
from pants.backend.javascript.nodejs_project_environment import NodeJsProjectEnvironmentProcess
from pants.backend.javascript.package_json import (
NodeBuildScriptEntryPointField,
NodeBuildScriptExtraEnvVarsField,
NodePackageDependenciesField,
)
from pants.core.goals.run import RunFieldSet, RunInSandboxBehavior, RunRequest
from pants.core.util_rules.environments import EnvironmentField
from pants.engine.env_vars import EnvironmentVars, EnvironmentVarsRequest
from pants.engine.internals.selectors import Get
from pants.engine.process import Process
from pants.engine.rules import Rule, collect_rules, rule
from pants.engine.unions import UnionRule
@dataclass(frozen=True)
class RunNodeBuildScriptFieldSet(RunFieldSet):
required_fields = (NodeBuildScriptEntryPointField, NodePackageDependenciesField)
run_in_sandbox_behavior = RunInSandboxBehavior.RUN_REQUEST_HERMETIC
entry_point: NodeBuildScriptEntryPointField
extra_env_vars: NodeBuildScriptExtraEnvVarsField
environment: EnvironmentField
@rule
async def run_node_build_script(
field_set: RunNodeBuildScriptFieldSet,
) -> RunRequest:
installation = await Get(
InstalledNodePackageWithSource, InstalledNodePackageRequest(field_set.address)
)
target_env_vars = await Get(
EnvironmentVars, EnvironmentVarsRequest(field_set.extra_env_vars.value or ())
)
process = await Get(
Process,
NodeJsProjectEnvironmentProcess(
installation.project_env,
args=("--prefix", "{chroot}", "run", str(field_set.entry_point.value)),
description=f"Running {str(field_set.entry_point.value)}.",
input_digest=installation.digest,
extra_env=target_env_vars,
),
)
return RunRequest(
digest=process.input_digest,
args=process.argv,
extra_env=process.env,
immutable_input_digests=process.immutable_input_digests,
)
def rules() -> Iterable[Rule | UnionRule]:
return [*collect_rules(), *install_node_package.rules(), *RunNodeBuildScriptFieldSet.rules()]
| pantsbuild/pants | src/python/pants/backend/javascript/run/rules.py | rules.py | py | 2,379 | python | en | code | 2,896 | github-code | 6 | [
{
"api_name": "pants.core.goals.run.RunFieldSet",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "pants.backend.javascript.package_json.NodeBuildScriptEntryPointField",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "pants.backend.javascript.package_json.NodeP... |
13255530705 | import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from math import sqrt
from scipy import stats
import warnings
warnings.filterwarnings("ignore")
from statsmodels.formula.api import ols
from sklearn.metrics import mean_squared_error, r2_score, explained_variance_score, mean_absolute_error
from sklearn.feature_selection import f_regression, SelectKBest, RFE
from sklearn.linear_model import LinearRegression, LassoLars, TweedieRegressor
from sklearn.preprocessing import PolynomialFeatures
from numpy import mean
from numpy import std, absolute
from sklearn.datasets import make_blobs
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import cross_val_score
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
def feature_ranking(X_train_scaled, y_train):
lm = LinearRegression()
    rfe = RFE(lm, n_features_to_select=1)
rfe.fit(X_train_scaled, y_train)
ranks = rfe.ranking_
names = X_train_scaled.columns.tolist()
rankdf = pd.DataFrame({'features': names, 'rank': ranks}).set_index('rank').sort_values('rank')
return rankdf
def cvLinearReg(X_train, y_train):
# create loocv procedure
cvLR = LeaveOneOut()
# create model
modelLR = LinearRegression()
# evaluate model
scoresLR = cross_val_score(modelLR, X_train, y_train, scoring='neg_mean_absolute_error', cv=cvLR, n_jobs=-1)
# force positive
scoresLR = absolute(scoresLR)
# report performance
print('MAE: %.3f (%.3f)' % (mean(scoresLR), std(scoresLR)))
meanMAE = mean(scoresLR)
stddevMAE = std(scoresLR)
return meanMAE
def cvLassoLars(X_train, y_train, x):
# LassoLars
# create loocv procedure
cvLL = LeaveOneOut()
# create model
modelLL = LassoLars(alpha=x)
# evaluate model
scoresLL = cross_val_score(modelLL, X_train, y_train, scoring='neg_mean_absolute_error', cv=cvLL, n_jobs=-1)
# force positive
scoresLL = absolute(scoresLL)
# report performance
print('MAE: %.3f (%.3f)' % (mean(scoresLL), std(scoresLL)))
meanMAE = mean(scoresLL)
stddevMAE = std(scoresLL)
return meanMAE
def cvTweedie(X_train, y_train, pwr, alf):
# Tweedie Regressor
# create loocv procedure
cvTW = LeaveOneOut()
# create model
modelTW = TweedieRegressor(power=pwr, alpha=alf) # 0 = normal distribution
# evaluate model
scoresTW = cross_val_score(modelTW, X_train, y_train, scoring='neg_mean_absolute_error', cv=cvTW, n_jobs=-1)
# force positive
scoresTW = absolute(scoresTW)
# report performance
print('MAE: %.3f (%.3f)' % (mean(scoresTW), std(scoresTW)))
meanMAE = mean(scoresTW)
stddevMAE = std(scoresTW)
return meanMAE
def cvRandomForest(X_train, y_train, x):
# Random Forest Regressor
# create loocv procedure
cvRF = LeaveOneOut()
# create model
modelRF = RandomForestRegressor(n_estimators=x, random_state = 123)
# evaluate model
scoresRF = cross_val_score(modelRF, X_train, y_train, scoring='neg_mean_absolute_error', cv=cvRF, n_jobs=-1)
# force positive
scoresRF = absolute(scoresRF)
# report performance
print('MAE: %.3f (%.3f)' % (mean(scoresRF), std(scoresRF)))
meanMAE = mean(scoresRF)
stddevMAE = std(scoresRF)
return meanMAE
def cvSVR(X_train, y_train, x):
# Support Vector Regressor
# create loocv procedure
cvSVR = LeaveOneOut()
# create model
modelSVR = SVR(kernel = x)
# evaluate model
scoresSVR = cross_val_score(modelSVR, X_train, y_train, scoring='neg_mean_absolute_error', cv=cvSVR, n_jobs=-1)
# force positive
scoresSVR = absolute(scoresSVR)
# report performance
print('MAE: %.3f (%.3f)' % (mean(scoresSVR), std(scoresSVR)))
meanMAE = mean(scoresSVR)
stddevMAE = std(scoresSVR)
return meanMAE
def get_baseline_mean(y_train):
'''
Using mean gets baseline for y dataframe
'''
# determine Baseline to beat
rows_needed = y_train.shape[0]
# create array of predictions of same size as y_train.logerror based on the mean
y_hat = np.full(rows_needed, np.mean(y_train))
    # calculate the MAE for these predictions, this is our baseline to beat
baseline = mean_absolute_error(y_train, y_hat)
print("Baseline MAE:", baseline)
return baseline, y_hat
def get_baseline_median(y_train):
'''
Using median gets baseline for y dataframe
'''
# determine Baseline to beat
rows_needed = y_train.shape[0]
# create array of predictions of same size as y_train.logerror based on the median
y_hat = np.full(rows_needed, np.median(y_train))
    # calculate the MAE for these predictions, this is our baseline to beat
baseline = mean_absolute_error(y_train, y_hat)
print("Baseline MAE:", baseline)
return baseline, y_hat
def linear_reg_train(x_scaleddf, target):
'''
runs linear regression algorithm
'''
lm = LinearRegression()
lm.fit(x_scaleddf, target)
y_hat = lm.predict(x_scaleddf)
LM_MAE = mean_absolute_error(target, y_hat)
return LM_MAE
def lasso_lars(x_scaleddf, target):
'''
runs Lasso Lars algorithm
'''
# Make a model
lars = LassoLars(alpha=1)
# Fit a model
lars.fit(x_scaleddf, target)
# Make Predictions
lars_pred = lars.predict(x_scaleddf)
    # Compute mean absolute error
lars_MAE = mean_absolute_error(target, lars_pred)
return lars_MAE
def polynomial2(X_trainsdf, target):
'''
runs polynomial algorithm
'''
# Make a model
pf = PolynomialFeatures(degree=2)
# Fit and Transform model to get a new set of features...which are the original features squared
X_train_squared = pf.fit_transform(X_trainsdf)
# Feed new features in to linear model.
    lm_squared = LinearRegression()  # note: the 'normalize' kwarg was removed in modern scikit-learn; scale inputs beforehand if needed
lm_squared.fit(X_train_squared, target)
# Make predictions
lm_squared_pred = lm_squared.predict(X_train_squared)
    # Compute mean absolute error
pf2_MAE = mean_absolute_error(target, lm_squared_pred)
return pf2_MAE
def tweedie05(X_train_scaled, y_train):
'''
runs tweedie algorithm
'''
# Make Model
tw = TweedieRegressor(power=0, alpha=.5) # 0 = normal distribution
# Fit Model
tw.fit(X_train_scaled, y_train)
# Make Predictions
tw_pred = tw.predict(X_train_scaled)
    # Compute mean absolute error
tw_MAE = mean_absolute_error(y_train, tw_pred)
return tw_MAE
def randomforest_test(x_scaleddf, target, X_test, y_test, est):
'''
runs random forest regressor
'''
# make model
regressor = RandomForestRegressor(n_estimators = est, random_state = 123)
# fit the model
regressor.fit(x_scaleddf, target)
# make predictions
y_pred = regressor.predict(X_test)
# calculate MAE
randMAE = mean_absolute_error(y_test, y_pred)
return randMAE, regressor
def lasso_lars_test(x_scaleddf, target, X_test, y_test):
'''
runs Lasso Lars algorithm
'''
# Make a model
lars = LassoLars(alpha=1)
# Fit a model
lars.fit(x_scaleddf, target)
# Make Predictions
lars_pred = lars.predict(X_test)
# calculate MAE
lars_MAE = mean_absolute_error(y_test, lars_pred)
return lars_MAE, lars, lars_pred
def linear_test(x_scaleddf, target, X_test, y_test):
'''
runs Lasso Lars algorithm
'''
# Make a model
lm = LinearRegression()
# Fit model on train dataset
lm.fit(x_scaleddf, target)
# Make Predictions on test dataset
y_hat = lm.predict(X_test)
# calculate MAE
LM_MAE = mean_absolute_error(y_test, y_hat)
return LM_MAE, lm, y_hat
def SVR_test(x_scaleddf, target, X_test, y_test, kern):
'''
runs Support Vector Regressor algorithm
'''
# Make a model
regressor = SVR(kernel = kern)
# Fit model on train dataset
regressor.fit(x_scaleddf, target)
# Make Predictions on test dataset
    y_hat = regressor.predict(X_test)  # inputs are assumed pre-scaled; the original sc_X/sc_y scalers were never defined
# calculate MAE
svr_MAE = mean_absolute_error(y_test, y_hat)
return svr_MAE, regressor
def tweedie_test(X_train, y_train, X_test, y_test, pwr, alf):
'''
runs tweedie algorithm
'''
# Make Model
tw = TweedieRegressor(power=pwr, alpha=alf) # 0 = normal distribution
# Fit Model
tw.fit(X_train, y_train)
# Make Predictions
tw_pred = tw.predict(X_test)
    # Compute mean absolute error
tw_MAE = mean_absolute_error(y_test, tw_pred)
return tw_MAE, tw, tw_pred
def create_visualdf(y_test, y_train, y_test_predLL, y_test_predLR, y_test_predTW, y_test_predrTW):
'''
creates dataframe for making visualizations
'''
visualdf = pd.DataFrame()
visualdf['actual'] = y_test.tract_cases_per_100k
visualdf['baseline'] = y_train.tract_cases_per_100k.mean()
visualdf['TWpred'] = y_test_predTW
visualdf['LRpred'] = y_test_predLR
visualdf['LLpred'] = y_test_predLL
visualdf['SVI_only'] = y_test_predrTW
return visualdf
def plot_actual_vs_predicted(visualdf):
'''
Produces subplots of actual VS predicted for the top models
'''
plt.figure(figsize=(16,8))
#plt.suptitle('Plotting Actual Cases per 100K vs Predicted Cases per 100K')
plt.plot(visualdf.actual, visualdf.baseline, alpha=.5, color="gray", label='_nolegend_')
#plt.annotate("Baseline: Predict Using Mean", (15, 8))
plt.plot(visualdf.actual, visualdf.actual, alpha=.5, color="blue", label='_nolegend_')
#plt.annotate("The Ideal Line: Predicted = Actual", (.5, 1), rotation=15.5)
#plt.subplot(1,3,1,)
plt.scatter(visualdf.actual, visualdf.SVI_only,
alpha=.5, color="blue", s=50, label="Model: TW SVI only")
#plt.subplot(1,3,2)
plt.scatter(visualdf.actual, visualdf.TWpred,
alpha=.5, color="green", s=100, label="Model: TW Top 4 Features")
#plt.subplot(1,3,3)
# plt.scatter(visualdf.actual, visualdf.LLpred,
# alpha=.5, color="orange", s=75, label="Model: LassoLars")
plt.legend()
plt.xlabel("Actual Cases per 100K")
plt.ylabel("Predicted Cases per 100K")
# plt.annotate("The polynomial model appears to overreact to noise", (2.0, -10))
# plt.annotate("The OLS model (LinearRegression)\n appears to be most consistent", (15.5, 3))
plt.show()
def plotting_errors(visualdf):
'''
Plots the errors of the top models with zero error line
'''
# plotting Errors in Predictions
plt.figure(figsize=(16,8))
plt.axhline(label="No Error")
plt.scatter(visualdf.actual, visualdf.SVI_only-visualdf.actual,
alpha=.5, color="blue", s=50, label="Model: TW SVI only")
plt.scatter(visualdf.actual, visualdf.TWpred-visualdf.actual,
alpha=.5, color="green", s=100, label="Model: TW Top 4 Features")
# plt.scatter(visualdf.actual, visualdf.LLpred-visualdf.actual,
# alpha=.5, color="orange", s=75, label="Model: LassoLars")
plt.legend()
plt.xlabel("Actual Cases per 100K")
plt.ylabel("Residual/Error: Predicted Cases per 100K - Actual Cases per 100K")
plt.title("Do the size of errors change as the actual value changes?")
# plt.annotate("The polynomial model appears to overreact to noise", (2.0, -10))
# plt.annotate("The OLS model (LinearRegression)\n appears to be most consistent", (15.5, 3))
plt.show()
def plotting_histograms(visualdf):
'''
Plots Histograms of top models, currently only shows actual vs best performing
'''
plt.figure(figsize=(16,8))
plt.hist(visualdf.actual, color='blue', alpha=.5, label="Actual Cases per 100K", histtype=u'step', linewidth=5)
plt.hist(visualdf.LRpred, color='orange', alpha=.5, label="Model: TW SVI only")
plt.hist(visualdf.TWpred, color='green', alpha=.5, label="Model: TW Top 4 Features")
#plt.hist(visualdf.LLpred, color='red', alpha=.5, label="Model: LassoLars")
plt.xlabel("Actual Cases per 100K")
plt.ylabel("predictions")
plt.title("SVI alone is not enough")
plt.legend()
plt.show() | RyvynYoung/COVID | svi_capstone/scripts_python/model_MAE.py | model_MAE.py | py | 12,076 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LinearRegression",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_selection.RFE",
"line_number": 27,
"usage_type": "call"
},... |
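A driver sketch comparing the LOOCV helpers above against the mean baseline; X_train/y_train are placeholders for data prepared elsewhere, and the hyperparameters are illustrative.

baseline, _ = get_baseline_mean(y_train)
results = {
    "linear": cvLinearReg(X_train, y_train),
    "lasso_lars": cvLassoLars(X_train, y_train, x=1.0),
    "tweedie": cvTweedie(X_train, y_train, pwr=0, alf=0.5),
    "random_forest": cvRandomForest(X_train, y_train, x=100),
}
for name, mae in results.items():
    verdict = "beats" if mae < baseline else "does not beat"
    print(f"{name}: MAE {mae:.3f} ({verdict} baseline {baseline:.3f})")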
from django.conf.urls import url  # 'include' can be imported here when needed
from django.contrib import admin
from .views import(
post_home,
post_delete,
post_update,
post_edit,
post_create,
post_save,
)
urlpatterns = [
url(r'^$',post_home),
url(r'^delete/$',post_delete),
url(r'^update/$',post_update),
url(r'^edit/$',post_edit),
url(r'^create/$',post_create),
url(r'^save/$',post_save),
]
| hmoshabbar/DjangoProject | posts/urls.py | urls.py | py | 442 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.conf.urls.url",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "views.post_home",
"line_number": 15,
"usage_type": "argument"
},
{
"api_name": "django.conf.urls.url",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "views.p... |
27009635088 | from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor, GradientBoostingRegressor
def run(x_train, y_train, x_test, y_test,
base_estimator, estimator_params, n_estimators, learning_rate, loss, random_state):
base_estimator = getEstimator(base_estimator, estimator_params)
reg = AdaBoostRegressor(base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
loss=loss,
random_state=random_state).fit(x_train, y_train)
return {'train_predict': reg.predict(x_train).tolist(),
'test_predict': reg.predict(x_test).tolist(),
'train_score': reg.score(x_train, y_train),
'test_score': reg.score(x_test, y_test),
'estimator_weights_': reg.estimator_weights_.tolist(),
'estimator_errors_': reg.estimator_errors_.tolist(),
'feature_importances_': reg.feature_importances_.tolist()
}
def getEstimator(base_estimator, estimator_params):
if base_estimator is None:
return base_estimator
base_estimator.replace("(", "").replace(")", "")
if estimator_params is None:
estimator_params = {}
    return {
        'GradientBoostingRegressor': GradientBoostingRegressor(**estimator_params),
        'ExtraTreesRegressor': ExtraTreesRegressor(**estimator_params),
        'RandomForestRegressor': RandomForestRegressor(**estimator_params)
    }.get(base_estimator, RandomForestRegressor(max_depth=3))
| lisunshine1234/mlp-algorithm-python | machine_learning/regression/Ensemble methods/AdaBoostRegressor/run.py | run.py | py | 1,615 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sklearn.ensemble.AdaBoostRegressor",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sklearn.ensemble.GradientBoostingRegressor",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "sklearn.ensemble.ExtraTreesRegressor",
"line_number": 32,
"u... |
20632320396 | import struct
import typing
from .base_bean import BaseBean
from .config import Config
from .ctrl_enum import EnumDevice, EnumCmdType, EnumFanDirection, EnumOutDoorRunCond, EnumFanVolume, EnumControl, \
EnumSensor, FreshAirHumidification, ThreeDFresh
from .dao import Room, AirCon, Geothermic, Ventilation, HD, Device, AirConStatus, get_device_by_aircon, Sensor, \
UNINITIALIZED_VALUE
from .param import GetRoomInfoParam, AirConRecommendedIndoorTempParam, AirConCapabilityQueryParam, \
AirConQueryStatusParam, Sensor2InfoParam
def decoder(b):
if b[0] != 2:
return None, None
length = struct.unpack('<H', b[1:3])[0]
if length == 0 or len(b) - 4 < length or struct.unpack('<B', b[length + 3:length + 4])[0] != 3:
if length == 0:
return HeartbeatResult(), None
else:
return None, None
return result_factory(struct.unpack('<BHBBBBIBIBH' + str(length - 16) + 'sB', b[:length + 4])), b[length + 4:]
def result_factory(data):
r1, length, r2, r3, subbody_ver, r4, cnt, dev_type, dev_id, need_ack, cmd_type, subbody, r5 = data
if dev_id == EnumDevice.SYSTEM.value[1]:
if cmd_type == EnumCmdType.SYS_ACK.value:
result = AckResult(cnt, EnumDevice.SYSTEM)
elif cmd_type == EnumCmdType.SYS_CMD_RSP.value:
result = CmdRspResult(cnt, EnumDevice.SYSTEM)
elif cmd_type == EnumCmdType.SYS_TIME_SYNC.value:
result = TimeSyncResult(cnt, EnumDevice.SYSTEM)
elif cmd_type == EnumCmdType.SYS_ERR_CODE.value:
result = ErrCodeResult(cnt, EnumDevice.SYSTEM)
elif cmd_type == EnumCmdType.SYS_GET_WEATHER.value:
result = GetWeatherResult(cnt, EnumDevice.SYSTEM)
elif cmd_type == EnumCmdType.SYS_LOGIN.value:
result = LoginResult(cnt, EnumDevice.SYSTEM)
elif cmd_type == EnumCmdType.SYS_CHANGE_PW.value:
result = ChangePWResult(cnt, EnumDevice.SYSTEM)
elif cmd_type == EnumCmdType.SYS_GET_ROOM_INFO.value:
result = GetRoomInfoResult(cnt, EnumDevice.SYSTEM)
elif cmd_type == EnumCmdType.SYS_QUERY_SCHEDULE_SETTING.value:
result = QueryScheduleSettingResult(cnt, EnumDevice.SYSTEM)
elif cmd_type == EnumCmdType.SYS_QUERY_SCHEDULE_ID.value:
result = QueryScheduleIDResult(cnt, EnumDevice.SYSTEM)
elif cmd_type == EnumCmdType.SYS_HAND_SHAKE.value:
result = HandShakeResult(cnt, EnumDevice.SYSTEM)
elif cmd_type == EnumCmdType.SYS_CMD_TRANSFER.value:
result = CmdTransferResult(cnt, EnumDevice.SYSTEM)
elif cmd_type == EnumCmdType.SYS_QUERY_SCHEDULE_FINISH.value:
result = QueryScheduleFinish(cnt, EnumDevice.SYSTEM)
        elif cmd_type == EnumCmdType.SYS_SCHEDULE_QUERY_VERSION_V3.value:
result = ScheduleQueryVersionV3Result(cnt, EnumDevice.SYSTEM)
        elif cmd_type == EnumCmdType.SENSOR2_INFO.value:
result = Sensor2InfoResult(cnt, EnumDevice.SYSTEM)
else:
result = UnknownResult(cnt, EnumDevice.SYSTEM, cmd_type)
elif dev_id == EnumDevice.NEWAIRCON.value[1] or dev_id == EnumDevice.AIRCON.value[1] \
or dev_id == EnumDevice.BATHROOM.value[1] or dev_id == EnumDevice.SENSOR.value[1]:
device = EnumDevice((8, dev_id))
if cmd_type == EnumCmdType.STATUS_CHANGED.value:
result = AirConStatusChangedResult(cnt, device)
elif cmd_type == EnumCmdType.QUERY_STATUS.value:
result = AirConQueryStatusResult(cnt, device)
elif cmd_type == EnumCmdType.AIR_RECOMMENDED_INDOOR_TEMP.value:
result = AirConRecommendedIndoorTempResult(cnt, device)
elif cmd_type == EnumCmdType.AIR_CAPABILITY_QUERY.value:
result = AirConCapabilityQueryResult(cnt, device)
elif cmd_type == EnumCmdType.QUERY_SCENARIO_SETTING.value:
result = AirConQueryScenarioSettingResult(cnt, device)
elif cmd_type == EnumCmdType.SENSOR2_INFO.value:
result = Sensor2InfoResult(cnt, device)
else:
result = UnknownResult(cnt, device, cmd_type)
else:
"""ignore other device"""
result = UnknownResult(cnt, EnumDevice.SYSTEM, cmd_type)
result.subbody_ver = subbody_ver
result.load_bytes(subbody)
return result
class Decode:
def __init__(self, b):
self._b = b
self._pos = 0
def read1(self):
pos = self._pos
s = struct.unpack('<B', self._b[pos:pos + 1])[0]
pos += 1
self._pos = pos
return s
def read2(self):
pos = self._pos
s = struct.unpack('<H', self._b[pos:pos + 2])[0]
pos += 2
self._pos = pos
return s
def read4(self):
pos = self._pos
s = struct.unpack('<I', self._b[pos:pos + 4])[0]
pos += 4
self._pos = pos
return s
def read(self, l):
pos = self._pos
s = self._b[pos:pos + l]
pos += l
self._pos = pos
return s
def read_utf(self, l):
pos = self._pos
try:
s = self._b[pos:pos + l].decode('utf-8')
except UnicodeDecodeError:
s = None
pos += l
self._pos = pos
return s
class BaseResult(BaseBean):
def __init__(self, cmd_id: int, targe: EnumDevice, cmd_type: EnumCmdType):
BaseBean.__init__(self, cmd_id, targe, cmd_type)
def load_bytes(self, b):
"""do nothing"""
def do(self):
"""do nothing"""
class HeartbeatResult(BaseResult):
def __init__(self):
BaseResult.__init__(self, 0, EnumDevice.SYSTEM, EnumCmdType.SYS_ACK)
class AckResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.SYS_ACK)
def load_bytes(self, b):
Config.is_new_version = struct.unpack('<B', b)[0] == 2
class ScheduleQueryVersionV3Result(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.SYS_ACK)
class Sensor2InfoResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.SENSOR2_INFO)
self._count = 0
self._mode = 0
self._room_id = 0
self._sensor_type = 0
self._sensors: typing.List[Sensor] = []
def load_bytes(self, b):
data = Decode(b)
self._mode = data.read1()
count = data.read1()
self._count = count
while count > 0:
self._room_id = data.read1()
d = Decode(data.read(data.read1()))
self._sensor_type = d.read1()
unit_id = d.read1()
sensor = Sensor()
sensor.mac = d.read(6).hex()
sensor.room_id = self._room_id
sensor.unit_id = unit_id
length = d.read1()
sensor.alias = d.read_utf(length)
sensor.name = sensor.alias
sensor.type1 = d.read1()
sensor.type2 = d.read1()
humidity = UNINITIALIZED_VALUE
hcho = UNINITIALIZED_VALUE
temp = UNINITIALIZED_VALUE
if (sensor.type1 & 1) == 1:
temp = d.read2()
if ((sensor.type1 >> 1) & 1) == 1:
humidity = d.read2()
pm25 = UNINITIALIZED_VALUE
if (sensor.type1 >> 2) & 1 == 1:
pm25 = d.read2()
co2 = UNINITIALIZED_VALUE
if (sensor.type1 >> 3) & 1 == 1:
co2 = d.read2()
voc = EnumSensor.Voc.STEP_UNUSE
if (sensor.type1 >> 4) & 1 == 1:
f = d.read1()
voc = EnumSensor.Voc(f)
tvoc = UNINITIALIZED_VALUE
if (sensor.type1 >> 5) & 1 == 1:
tvoc = d.read2()
if (sensor.type1 >> 6) & 1 == 1:
hcho = d.read2()
switch_on_off = d.read1() == 1
temp_upper = d.read2()
temp_lower = d.read2()
humidity_upper = d.read2()
humidity_lower = d.read2()
pm25_upper = d.read2()
pm25_lower = d.read2()
co2_upper = d.read2()
co2_lower = d.read2()
voc_lower = d.read1()
tvoc_upper = d.read2()
hcho_upper = d.read2()
connected = d.read1() == 1
sleep_mode_count = d.read1()
sleep_mode_enable = False
if sleep_mode_count > 0:
sleep_mode_enable = d.read1() == 1
sensor.sensor_type = self._sensor_type
sensor.temp = temp
sensor.humidity = humidity
sensor.pm25 = pm25
sensor.co2 = co2
sensor.voc = voc
if self._sensor_type == 3:
sensor.tvoc = tvoc
sensor.hcho = hcho
sensor.tvoc_upper = tvoc_upper
sensor.hcho_upper = hcho_upper
sensor.switch_on_off = switch_on_off
sensor.temp_upper = temp_upper
sensor.temp_lower = temp_lower
sensor.humidity_upper = humidity_upper
sensor.humidity_lower = humidity_lower
sensor.pm25_upper = pm25_upper
sensor.pm25_lower = pm25_lower
sensor.co2_upper = co2_upper
sensor.co2_lower = co2_lower
sensor.voc_lower = voc_lower
sensor.connected = connected
sensor.sleep_mode_count = sleep_mode_count
self._sensors.append(sensor)
count = count - 1
def do(self):
from .service import Service
Service.set_sensors_status(self._sensors)
@property
def count(self):
return self._count
@property
def mode(self):
return self._mode
@property
def room_id(self):
return self._room_id
@property
def sensor_type(self):
return self._sensor_type
@property
def sensors(self):
return self._sensors
class CmdRspResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.SYS_CMD_RSP)
self._cmdId = None
self._code = None
def load_bytes(self, b):
self._cmdId, self._code = struct.unpack('<IB', b)
@property
def cmd_id(self):
return self._cmdId
@property
def code(self):
return self._code
class TimeSyncResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.SYS_TIME_SYNC)
self._time = None
def load_bytes(self, b):
self._time = struct.unpack('<I', b)[0]
@property
def time(self):
return self._time
class ErrCodeResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.SYS_ERR_CODE)
self._code = None
self._device = None
self._room = None
self._unit = None
def load_bytes(self, b):
dev_id, room, unit = struct.unpack('<iBB', b[:6])
self._device = EnumDevice((8, dev_id))
self._room = room
self._unit = unit
self._code = b[6:].decode('ASCII')
@property
def code(self):
return self._code
@property
def device(self):
return self._device
@property
def room(self):
return self._room
@property
def unit(self):
return self._unit
class GetWeatherResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.SYS_GET_WEATHER)
self._condition = None
self._humidity = None
self._temp = None
self._wind_dire = None
self._wind_speed = None
def load_bytes(self, b):
self._condition, self._humidity, self._temp, self._wind_dire, self._wind_speed \
= struct.unpack('<BBHBB', b)
@property
def condition(self):
return self._condition
@property
def humidity(self):
return self._humidity
@property
def temp(self):
return self._temp
@property
def wind_dire(self):
return self._wind_dire
@property
def wind_speed(self):
return self._wind_speed
class LoginResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.SYS_LOGIN)
self._status = None
def load_bytes(self, b):
self._status = struct.unpack('<BB', b)[1]
@property
def status(self):
return self._status
class ChangePWResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.SYS_CHANGE_PW)
self._status = None
def load_bytes(self, b):
self._status = struct.unpack('<B', b)[0]
@property
def status(self):
return self._status
class GetRoomInfoResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.SYS_GET_ROOM_INFO)
self._count: int = 0
self._hds: typing.List[HD] = []
self._sensors: typing.List[Sensor] = []
self._rooms: typing.List[Room] = []
def load_bytes(self, b):
ver_flag = 1
d = Decode(b)
self._count = d.read2()
room_count = d.read1()
for i in range(room_count):
room = Room()
room.id = d.read2()
if self.subbody_ver == 1:
ver_flag = d.read1()
if ver_flag != 2:
length = d.read1()
room.name = d.read_utf(length)
length = d.read1()
room.alias = d.read_utf(length)
length = d.read1()
room.icon = d.read_utf(length)
unit_count = d.read2()
for j in range(unit_count):
device = EnumDevice((8, d.read4()))
device_count = d.read2()
for unit_id in range(device_count):
if EnumDevice.AIRCON == device or EnumDevice.NEWAIRCON == device or EnumDevice.BATHROOM == device:
dev = AirCon()
room.air_con = dev
dev.new_air_con = EnumDevice.NEWAIRCON == device
dev.bath_room = EnumDevice.BATHROOM == device
elif EnumDevice.GEOTHERMIC == device:
dev = Geothermic()
room.geothermic = dev
elif EnumDevice.HD == device:
dev = HD()
self.hds.append(dev)
room.hd_room = True
room.hd = dev
elif EnumDevice.SENSOR == device:
dev = Sensor()
self.sensors.append(dev)
room.sensor_room = True
elif EnumDevice.VENTILATION == device or EnumDevice.SMALL_VAM == device:
dev = Ventilation()
room.ventilation = dev
dev.is_small_vam = EnumDevice.SMALL_VAM == device
else:
dev = Device()
dev.room_id = room.id
dev.unit_id = unit_id
if ver_flag > 2:
length = d.read1()
dev.name = d.read_utf(length)
length = d.read1()
dev.alias = d.read_utf(length)
if dev.alias is None:
dev.alias = room.alias
self.rooms.append(room)
def do(self):
from .service import Service
Service.set_rooms(self.rooms)
Service.send_msg(AirConRecommendedIndoorTempParam())
Service.set_sensors(self.sensors)
aircons = []
new_aircons = []
bathrooms = []
for room in Service.get_rooms():
if room.air_con is not None:
room.air_con.alias = room.alias
if room.air_con.new_air_con:
new_aircons.append(room.air_con)
elif room.air_con.bath_room:
bathrooms.append(room.air_con)
else:
aircons.append(room.air_con)
p = AirConCapabilityQueryParam()
p.aircons = aircons
p.target = EnumDevice.AIRCON
Service.send_msg(p)
p = AirConCapabilityQueryParam()
p.aircons = new_aircons
p.target = EnumDevice.NEWAIRCON
Service.send_msg(p)
p = AirConCapabilityQueryParam()
p.aircons = bathrooms
p.target = EnumDevice.BATHROOM
Service.send_msg(p)
@property
def count(self):
return self._count
@property
def hds(self):
return self._hds
@property
def rooms(self):
return self._rooms
@property
def sensors(self):
return self._sensors
class QueryScheduleSettingResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.SYS_QUERY_SCHEDULE_SETTING)
def load_bytes(self, b):
"""todo"""
class QueryScheduleIDResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.SYS_QUERY_SCHEDULE_ID)
def load_bytes(self, b):
"""todo"""
class HandShakeResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.SYS_HAND_SHAKE)
self._time: str = ''
def load_bytes(self, b):
d = Decode(b)
self._time = d.read_utf(14)
def do(self):
p = GetRoomInfoParam()
p.room_ids.append(0xffff)
from .service import Service
Service.send_msg(p)
Service.send_msg(Sensor2InfoParam())
class GetGWInfoResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.SYS_HAND_SHAKE)
self._time: str = ''
def load_bytes(self, b):
"""todo"""
def do(self):
"""todo"""
class CmdTransferResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.SYS_CMD_TRANSFER)
def load_bytes(self, b):
"""todo"""
class QueryScheduleFinish(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.SYS_QUERY_SCHEDULE_FINISH)
def load_bytes(self, b):
"""todo"""
class AirConStatusChangedResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.STATUS_CHANGED)
self._room = 0 # type: int
self._unit = 0 # type: int
self._status = AirConStatus() # type: AirConStatus
def load_bytes(self, b):
d = Decode(b)
self._room = d.read1()
self._unit = d.read1()
status = self._status
flag = d.read1()
if flag & EnumControl.Type.SWITCH:
status.switch = EnumControl.Switch(d.read1())
if flag & EnumControl.Type.MODE:
status.mode = EnumControl.Mode(d.read1())
if flag & EnumControl.Type.AIR_FLOW:
status.air_flow = EnumControl.AirFlow(d.read1())
if flag & EnumControl.Type.CURRENT_TEMP:
status.current_temp = d.read2()
if flag & EnumControl.Type.SETTED_TEMP:
status.setted_temp = d.read2()
if Config.is_new_version:
if flag & EnumControl.Type.FAN_DIRECTION:
direction = d.read1()
status.fan_direction1 = EnumControl.FanDirection(direction & 0xF)
status.fan_direction2 = EnumControl.FanDirection((direction >> 4) & 0xF)
def do(self):
from .service import Service
Service.update_aircon(self.target, self._room, self._unit, status=self._status)
class AirConQueryStatusResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.QUERY_STATUS)
self.unit = 0
self.room = 0
self.current_temp = 0
self.setted_temp = 0
self.switch = EnumControl.Switch.OFF
self.air_flow = EnumControl.AirFlow.AUTO
self.breathe = EnumControl.Breathe.CLOSE
self.fan_direction1 = EnumControl.FanDirection.INVALID
self.fan_direction2 = EnumControl.FanDirection.INVALID
self.humidity = EnumControl.Humidity.CLOSE
self.mode = EnumControl.Mode.AUTO
self.hum_allow = False
self.fresh_air_allow = False
self.fresh_air_humidification = FreshAirHumidification.OFF
self.three_d_fresh = ThreeDFresh.CLOSE
def load_bytes(self, b):
d = Decode(b)
self.room = d.read1()
self.unit = d.read1()
flag = d.read1()
if flag & 1:
self.switch = EnumControl.Switch(d.read1())
if flag >> 1 & 1:
self.mode = EnumControl.Mode(d.read1())
if flag >> 2 & 1:
self.air_flow = EnumControl.AirFlow(d.read1())
if Config.is_c611:
if flag >> 3 & 1:
bt = d.read1()
self.hum_allow = bt & 8 == 8
self.fresh_air_allow = bt & 4 == 4
self.fresh_air_humidification = FreshAirHumidification(bt & 3)
if flag >> 4 & 1:
self.setted_temp = d.read2()
if Config.is_new_version:
if flag >> 5 & 1:
b = d.read1()
self.fan_direction1 = EnumControl.FanDirection(b & 0xf)
self.fan_direction2 = EnumControl.FanDirection(b >> 4 & 0xf)
if flag >> 6 & 1:
self.humidity = EnumControl.Humidity(d.read1())
if self.target == EnumDevice.BATHROOM:
if flag >> 7 & 1:
self.breathe = EnumControl.Breathe(d.read1())
elif self.target == EnumDevice.AIRCON:
if flag >> 7 & 1 == 1:
self.three_d_fresh = ThreeDFresh(d.read1())
else:
if flag >> 3 & 1:
self.current_temp = d.read2()
if flag >> 4 & 1:
self.setted_temp = d.read2()
if Config.is_new_version:
if flag >> 5 & 1:
b = d.read1()
self.fan_direction1 = EnumControl.FanDirection(b & 0xf)
self.fan_direction2 = EnumControl.FanDirection(b >> 4 & 0xf)
if self.target == EnumDevice.NEWAIRCON:
if flag >> 6 & 1:
self.humidity = EnumControl.Humidity(d.read1())
else:
if flag >> 7 & 1:
self.breathe = EnumControl.Breathe(d.read1())
def do(self):
from .service import Service
status = AirConStatus(self.current_temp, self.setted_temp, self.switch, self.air_flow, self.breathe,
self.fan_direction1, self.fan_direction2, self.humidity, self.mode)
Service.set_aircon_status(self.target, self.room, self.unit, status)
class AirConRecommendedIndoorTempResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.AIR_RECOMMENDED_INDOOR_TEMP)
self._temp: int = 0
self._outdoor_temp: int = 0
def load_bytes(self, b):
d = Decode(b)
self._temp = d.read2()
self._outdoor_temp = d.read2()
@property
def temp(self):
return self._temp
@property
def outdoor_temp(self):
return self._outdoor_temp
class AirConCapabilityQueryResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.AIR_CAPABILITY_QUERY)
self._air_cons: typing.List[AirCon] = []
def load_bytes(self, b):
d = Decode(b)
room_size = d.read1()
for i in range(room_size):
room_id = d.read1()
unit_size = d.read1()
for j in range(unit_size):
aircon = AirCon()
aircon.unit_id = d.read1()
aircon.room_id = room_id
aircon.new_air_con = self.target == EnumDevice.NEWAIRCON
aircon.bath_room = self.target == EnumDevice.BATHROOM
flag = d.read1()
aircon.fan_volume = EnumFanVolume(flag >> 5 & 0x7)
aircon.dry_mode = flag >> 4 & 1
aircon.auto_mode = flag >> 3 & 1
aircon.heat_mode = flag >> 2 & 1
aircon.cool_mode = flag >> 1 & 1
aircon.ventilation_mode = flag & 1
if Config.is_new_version:
flag = d.read1()
if flag & 1:
aircon.fan_direction1 = EnumFanDirection.STEP_5
else:
aircon.fan_direction1 = EnumFanDirection.FIX
if flag >> 1 & 1:
aircon.fan_direction2 = EnumFanDirection.STEP_5
else:
aircon.fan_direction2 = EnumFanDirection.FIX
aircon.fan_dire_auto = flag >> 2 & 1
aircon.fan_volume_auto = flag >> 3 & 1
aircon.temp_set = flag >> 4 & 1
aircon.hum_fresh_air_allow = (flag >> 5 & 1) & (flag >> 6 & 1)
aircon.three_d_fresh_allow = flag >> 7 & 1
flag = d.read1()
aircon.out_door_run_cond = EnumOutDoorRunCond(flag >> 6 & 3)
aircon.more_dry_mode = flag >> 4 & 1
aircon.pre_heat_mode = flag >> 3 & 1
aircon.auto_dry_mode = flag >> 2 & 1
aircon.relax_mode = flag >> 1 & 1
aircon.sleep_mode = flag & 1
else:
d.read1()
self._air_cons.append(aircon)
def do(self):
from .service import Service
if Service.is_ready():
if len(self._air_cons):
for i in self._air_cons:
Service.update_aircon(get_device_by_aircon(i), i.room_id, i.unit_id, aircon=i)
else:
for i in self._air_cons:
p = AirConQueryStatusParam()
p.target = self.target
p.device = i
from .service import Service
Service.send_msg(p)
Service.set_device(self.target, self._air_cons)
@property
def aircons(self):
return self._air_cons
class AirConQueryScenarioSettingResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.QUERY_SCENARIO_SETTING)
def load_bytes(self, b):
"""todo"""
class UnknownResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice, cmd_type: EnumCmdType):
BaseResult.__init__(self, cmd_id, target, cmd_type)
self._subbody = ''
def load_bytes(self, b):
self._subbody = struct.pack('<' + str(len(b)) + 's', b).hex()
@property
def subbody(self):
return self._subbody
| mypal/ha-dsair | custom_components/ds_air/ds_air_service/decoder.py | decoder.py | py | 27,763 | python | en | code | 65 | github-code | 6 | [
{
"api_name": "struct.unpack",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "ctrl_enum.EnumDevice.SYSTEM... |
36321645754 | import pygame
from dino_runner.components.dinosaur import Dinosaur
from dino_runner.components.obstacles.obstacle_manager import ObstacleManager
from dino_runner.components.player_hearts.player_heart_manager import PlayerHeartManager
from dino_runner.components.power_ups.power_up_manager import PowerUpManager
from dino_runner.utils.constants import BG, DEFAULT_TYPE, DIE_IMG, HAMMER_TYPE, ICON, RUNNING, SCREEN_HEIGHT, SCREEN_WIDTH, SHIELD_TYPE, TITLE, FPS
from .score import Score
from dino_runner.utils.constants import FONT_STYLE
class Game:
def __init__(self):
pygame.init()
pygame.display.set_caption(TITLE)
pygame.display.set_icon(ICON)
self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
self.clock = pygame.time.Clock()
self.playing = False
self.executing = False
self.game_speed = 15
self.x_pos_bg = 0
self.y_pos_bg = 380
self.player= Dinosaur()
self.obstacle_manager = ObstacleManager()
self.power_up_manager = PowerUpManager()
self.heart_manager = PlayerHeartManager()
self.death_count=0
self.score = Score()
def execute(self):
self.executing = True
while self.executing:
if not self.playing:
self.show_menu()
pygame.quit()
def run(self):
self.game_speed = 15
self.playing = True
self.obstacle_manager.reset_obstacles()
self.score.reset_score()
self.power_up_manager.reset_power_ups()
self.heart_manager.reset_hearts()
while self.playing:
self.events()
self.update()
self.draw()
def events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.playing = False
def update(self):
user_input = pygame.key.get_pressed()
self.player.update(user_input)
self.obstacle_manager.update(self.game_speed, self.player, self.on_death)
self.score.update(self)
self.power_up_manager.update(self.game_speed, self.player, self.score.score)
def draw(self):
self.clock.tick(FPS)
self.screen.fill((255, 255, 255))
self.draw_background()
self.player.draw(self.screen)
self.obstacle_manager.draw(self.screen)
self.score.draw(self.screen)
self.power_up_manager.draw(self.screen)
self.draw_power_up_active(self.screen)
self.heart_manager.draw(self.screen)
pygame.display.update()
pygame.display.flip()
def draw_background(self):
image_width = BG.get_width()
self.screen.blit(BG, (self.x_pos_bg, self.y_pos_bg))
self.screen.blit(BG, (image_width + self.x_pos_bg, self.y_pos_bg))
if self.x_pos_bg <= -image_width:
self.screen.blit(BG, (image_width + self.x_pos_bg, self.y_pos_bg))
self.x_pos_bg = 0
self.x_pos_bg -= self.game_speed
def show_menu(self):
        self.screen.fill((127,255,212))  # paint the window
        half_screen_height = SCREEN_HEIGHT //2
        half_screen_width = SCREEN_WIDTH //2
        font = pygame.font.SysFont(FONT_STYLE, 30)
        if self.death_count == 0:  # show the welcome message
            self.screen.blit(RUNNING[0],(half_screen_width -35, half_screen_height -140))  # show the icon
            text_component = font.render("Press any key to start", True, (20,51,51))
        else:
            self.screen.blit(DIE_IMG,(half_screen_width -35, half_screen_height -140))
            text_component = font.render(f"Number of deaths : {self.death_count}", True, (20,51,51))  # show the current number of deaths
            self.screen.blit(text_component, (half_screen_width -300 , half_screen_height +30))
            text_component = font.render("You died, press any key to restart", True, (20,51,51))  # show the play-again message
            self.score.show_score(self.screen)  # show the score
        text_rect = text_component.get_rect()
        text_rect.center = (half_screen_width, half_screen_height)
        self.screen.blit(text_component, text_rect)
        pygame.display.update()  # refresh the window
        self.handle_key_events_on_menu()  # listen for events
def handle_key_events_on_menu(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.executing = False
elif event.type == pygame.KEYDOWN:
self.run()
def on_death(self):
has_shield = self.player.type == SHIELD_TYPE
is_invencible = has_shield or self.heart_manager.heart_count > 0
has_hammer = self.player.type == HAMMER_TYPE
is_revitalizing = has_hammer or self.heart_manager.heart_count > 0
if has_hammer:
if self.heart_manager.heart_count < 6 :
self.heart_manager.increase_heart()
if not has_shield and not has_hammer:
self.heart_manager.reduce_heart()
if not is_invencible and not is_revitalizing:
pygame.time.delay(500)
self.playing = False
self.death_count += 1
return is_invencible and is_revitalizing
def draw_power_up_active(self, screen):
if self.player.has_power_up:
time_to_show = round ((self.player.power_up_time_up - pygame.time.get_ticks()) / 1000, 2)
if time_to_show >= 0:
font = pygame.font.SysFont(FONT_STYLE, 18)
text_component = font.render(f"{self.player.type.capitalize()} enabled for {time_to_show} seconds", True, (0,0,0))
text_rect = text_component.get_rect()
text_rect.center = (500, 40)
screen.blit(text_component, text_rect)
else:
self.player.has_power_up = False
self.player.type = DEFAULT_TYPE
| Shrinmi/JS-Dino-Runner-Grupo-1 | dino_runner/components/game.py | game.py | py | 5,940 | python | en | code | null | github-code | 6 | [
{
"api_name": "pygame.init",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_caption",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "dino_runner.utils.constants.TITLE",
"line_number": 14,
"usage_type": "argument"
},
{
"api_... |
15720162505 | from collections import deque
def solution(msg):
answer = []
ord_index = 64
cach = dict()
queue = deque()
cach_index = 27
for string in msg:
if len(queue) == 0:
queue.append(string)
        else:
queue.append(string)
queue_string = ''.join(queue)
if queue_string not in cach.keys():
cach[queue_string] = cach_index
cach_index += 1
tmp = queue.popleft()
if len(tmp) == 1:
answer.append(ord(tmp) - ord_index)
else:
answer.append(cach[tmp])
else:
queue.clear()
queue.append(queue_string)
tmp = queue.popleft()
if len(tmp) == 1:
answer.append(ord(tmp) - ord_index)
else:
answer.append(cach[tmp])
return answer
print(solution("KAKAO"))
print(solution("TOBEORNOTTOBEORTOBEORNOT"))
print(solution("ABABABABABABABAB"))
| grohong/Beajoon_Algorism | 프로그래머즈/[3차]압축/main.py | main.py | py | 1,011 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "collections.deque",
"line_number": 8,
"usage_type": "call"
}
] |
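The dictionary growth is easiest to see on a short input; a step-by-step trace of solution("KAKAO") above:

# Trace of solution("KAKAO"), with A=1..Z=26 and new entries indexed from 27:
#   read K, A -> "KA" unseen: cach["KA"] = 27, emit ord("K") - 64 = 11
#   read K    -> "AK" unseen: cach["AK"] = 28, emit ord("A") - 64 = 1
#   read A    -> "KA" already cached: keep "KA" queued
#   read O    -> "KAO" unseen: cach["KAO"] = 29, emit cach["KA"] = 27
#   flush tail "O" after the loop -> emit ord("O") - 64 = 15
# Result: [11, 1, 27, 15]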
32144143111 | from datetime import date
from twilio.rest import TwilioRestClient
# To find these visit https://www.twilio.com/user/account
ACCOUNT_SID = "AC937af250fc201a2c44aad667cf309fa4"
AUTH_TOKEN = "6a8accce5860c8f18391bf4ec809d84b"
client = TwilioRestClient(ACCOUNT_SID, AUTH_TOKEN)
for message in client.messages.list():
    print(message.body)
def filtering(to_txt):
messages = client.messages.list(
to=to_txt,
date_sent=date(2015, 1, 8),
)
# collect every matching message body instead of returning after the first one
return [message.body for message in messages]
#filtering(mbl_no)
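# Hypothetical usage sketch (the number below is Twilio's magic test number,
# not a value from this project):
# print(filtering("+15005550006"))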
| suparna-ghanvatkar/EBMP | show_text.py | show_text.py | py | 515 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "twilio.rest.TwilioRestClient",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 16,
"usage_type": "call"
}
] |
35848057912 | import matplotlib.pyplot as plt
def visualize(n, x, y, file_name):
plt.scatter(x, y)
plt.xlabel('x coordinate')
plt.ylabel('y coordinate')
plt.title(file_name + ': Number of points: %d' % n)
plt.show()
def read_from_file(file_name):
coord_x = []
coord_y = []
with open(file_name, 'r') as file:
n = int(file.readline())
for _ in range(n):
line = [float(value) for value in file.readline().split()]
coord_x.append(line[0])
coord_y.append(line[1])
visualize(n, coord_x, coord_y, file_name)
if __name__ == "__main__":
files = ['001.dat', '002.dat', '003.dat', '004.dat', '005.dat']
for file_name in files:
read_from_file('data_1tsk/' + file_name)
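# Expected .dat layout, inferred from the reader above:
# line 1: n, the number of points
# next n lines: whitespace-separated "x y" pairs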
| klauchek/Python_3sem | matplotlib_lab/task1.py | task1.py | py | 744 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "mat... |
40687860073 | import wsgiserver
from flask import Flask, jsonify
def check_quota_response(**kwargs):
response = kwargs['response']
return jsonify({
'version': 1,
'payload': {
'emptyWallet': not response,
},
})
def setup_flask_server(json_response):
app = Flask(__name__)
app.add_url_rule(
'/', 'index', check_quota_response,
defaults={'response': json_response},
)
app.add_url_rule(
'/<path:dummy>', 'index', check_quota_response,
defaults={'response': json_response},
)
return app
def run_flask(ip, port, response, exit_callback):
app = setup_flask_server(response)
server = wsgiserver.WSGIServer(app, host=ip, port=port)
try:
server.start()
finally:
# When the flask server finishes running, do any other cleanup
exit_callback()
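# Minimal usage sketch (hypothetical host/port, not part of the original module):
# run_flask("0.0.0.0", 8080, response=True, exit_callback=lambda: print("stopped"))
# -> every request then receives {"version": 1, "payload": {"emptyWallet": false}}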
| magma/magma | lte/gateway/python/magma/pipelined/check_quota_server.py | check_quota_server.py | py | 868 | python | en | code | 1,605 | github-code | 6 | [
{
"api_name": "flask.jsonify",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "wsgiserver.WSGIServer",
"line_number": 31,
"usage_type": "call"
}
] |
14037338740 | import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['font.family']='serif'
plt.rcParams['font.serif']=['Times New Roman'] + plt.rcParams['font.serif']
plt.rcParams['mathtext.fontset']='stix'
plt.rcParams['font.size']=10
cm = 1/2.54
Bubble0 = np.genfromtxt("Bubble_0/KellerMiksis_R5.000e-06_fa1.570e+04_pa-1.200e+05.txt", delimiter=" ")
Bubble1 = np.genfromtxt("Bubble_1/KellerMiksis_R1.000e-05_fa1.570e+04_pa-1.200e+05.txt", delimiter=" ")
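# Column layout inferred from the plotting below:
# column 1 = t [s], 3 = R(t) [m], 4 = dR/dt [m/s], 5 = gas pressure p_G [Pa]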
fig1 = plt.figure(figsize=(17*cm,5*cm))
ax1 = plt.subplot2grid((1,3),(0,0),colspan=1)
ax2 = plt.subplot2grid((1,3),(0,1),colspan=1)
ax3 = plt.subplot2grid((1,3),(0,2),colspan=1)
plt.subplots_adjust(wspace=1.2*cm,hspace=1.2*cm)
ax1.set(xlabel=r'$t$ [$\mu$s]',ylabel=r'$R(t)$ [$\mu$m]')
ax1.set_xlim(xmin=600,xmax=750)
ax1.set_ylim(ymin=0,ymax=60)
ax1.grid(color='gainsboro', linestyle='-', linewidth=0.5)
ax1.plot(Bubble0[:, 1]*1e6, Bubble0[:, 3]*1e6, linestyle='solid', linewidth=1,color='steelblue', label=r'$R_0 = 5 \ \mu \mathrm{m}$')
ax1.plot(Bubble1[:, 1]*1e6, Bubble1[:, 3]*1e6, linestyle='solid', linewidth=1,color='goldenrod', label=r'$R_0 = 10 \ \mu \mathrm{m}$')
ax1.legend(ncol=1,labelspacing=0.2,markerfirst=True,loc='upper right',fontsize='x-small',facecolor='None',edgecolor='None',framealpha=1,frameon=True,bbox_to_anchor=(1, 1))
ax2.set(xlabel=r'$t$ [$\mu$s]',ylabel=r'$\dot{R}(t)$[m/s]')
ax2.set_xlim(xmin=600,xmax=750)
ax2.set_ylim(ymin=-400,ymax=300)
ax2.grid(color='gainsboro', linestyle='-', linewidth=0.5)
ax2.plot(Bubble0[:, 1]*1e6, Bubble0[:, 4], linestyle='solid', linewidth=1,color='steelblue')
ax2.plot(Bubble1[:, 1]*1e6, Bubble1[:, 4], linestyle='solid', linewidth=1,color='goldenrod')
ax3.set_yscale('log')
ax3.set(xlabel=r'$t$ [$\mu$s]',ylabel=r'$p_\mathrm{G}(t)$ [Pa]')
ax3.set_xlim(xmin=600,xmax=750)
ax3.set_ylim(ymin=1e1,ymax=1e10)
ax3.grid(color='gainsboro', linestyle='-', linewidth=0.5)
ax3.plot(Bubble0[:, 1]*1e6, Bubble0[:, 5], linestyle='solid', linewidth=1.0,color='steelblue')
ax3.plot(Bubble1[:, 1]*1e6, Bubble1[:, 5], linestyle='solid', linewidth=1.0,color='goldenrod')
ax1.xaxis.set_label_coords(0.5,-0.24)
ax2.xaxis.set_label_coords(0.5,-0.24)
ax3.xaxis.set_label_coords(0.5,-0.24)
ax1.yaxis.set_label_coords(-0.25, 0.5)
ax2.yaxis.set_label_coords(-0.25, 0.5)
ax3.yaxis.set_label_coords(-0.25, 0.5)
fig1.savefig('binaryinteraction.pdf', bbox_inches='tight',pad_inches=0.035)
| polycfd/apecss | examples/binaryinteraction/plot_result.py | plot_result.py | py | 2,384 | python | en | code | 13 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 5,
"usage_type": "attribute"
},
{
"ap... |
32839631812 | import torch
from diffusers import DiffusionPipeline
def torch_device():
if torch.cuda.is_available():
return "cuda"
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
return "mps"
return "cpu"
def pipeline(model="stabilityai/stable-diffusion-xl-base-0.9", device=torch_device()):
torch_dtype = torch.float16
variant = "fp16"
# MacOS can only use fp32
if device == "mps":
torch_dtype = torch.float32
variant = "fp32"
pipe = DiffusionPipeline.from_pretrained(
model,
torch_dtype=torch_dtype,
use_safetensors=True,
variant=variant,
)
if device == "cpu":
pipe.enable_model_cpu_offload()
else:
pipe.to(device)
# 20-30% inference speed up for torch >= 2.0
pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
return pipe
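# Hypothetical usage sketch (prompt invented for illustration):
# pipe = pipeline()
# image = pipe(prompt="an astronaut riding a horse on the moon").images[0]
# image.save("astronaut.png")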
| 2132660698/sdxl-demos | utils.py | utils.py | py | 906 | python | en | code | null | github-code | 6 | [
{
"api_name": "torch.cuda.is_available",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "torch.backends.mps.is_available",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "... |
19271968449 |
import MDAnalysis
import satoshi_pca as SAS
path = "/lustre7/home/lustre3/satoshi/MED"
TRR = ["/aff4/test_all.trr", "/eaf1/test_all.trr", "/taf7/test_all.trr",
"/aff4_kai/run_all.trr", "/eaf1_kai/run_all.trr",
"/taf7_kai/run_all.trr"]
PDB = ["/aff4/HEN.pdb", "/eaf1/HEN.pdb", "/taf7/HEN.pdb",
"/aff4_kai/aff4kai.pdb", "/eaf1_kai/eaf1kai.pdb",
"/taf7_kai/taf7kai.pdb"]
PROB = ["/aff4/prob.txt", "/eaf1/prob.txt", "/taf7/prob.txt",
"/aff4_kai/prob.dat", "/eaf1_kai/prob.dat", "/taf7_kai/prob.dat"]
def PDB_cal(num1):
num_pdb = []
RESIDUE = ["N", "C"]
for i in open(path + PDB[num1]):
f = i.split()
if f[2] in RESIDUE:
num_pdb.append(int(f[1]))
print(len(num_pdb))
return num_pdb
def PROB_cal(num1):
num_prob = []
num2 = 0
for i in open(path + PROB[num1], "r"):
if float(i) != 0:
num_prob.append(num2)
num2 += 1
return num_prob
def TRR_cal():
kai_zahyou = []
for trr in range(6):
num_pdb = PDB_cal(trr)
num_prob = PROB_cal(trr)
u = MDAnalysis.Universe(path + TRR[trr])
frm = u.trajectory
frm_itr = iter(frm)
del frm, u
print(len(num_prob))
"""
for i in num_prob:
kai = []
x = float(frm[i][0][0])
y = float(frm[i][0][1])
z = float(frm[i][0][2])
for j in num_pdb:
kai.append(str(float(frm[i][j][0]) - x))
kai.append(str(float(frm[i][j][1]) - y))
kai.append(str(float(frm[i][j][2]) - z))
kai_zahyou.append(kai)
print("kai", len(kai), " kai_zahyou", len(kai_zahyou), "/",
len(num_prob), " num_pdb", len(num_pdb))
num2 = 0
while True:
try:
kai = []
FRM = next(frm_itr)
if num2 in num_prob:
x = float(FRM[0][0])
y = float(FRM[0][1])
z = float(FRM[0][2])
for j in num_pdb:
kai.append(str(float(FRM[j][0]) - x))
kai.append(str(float(FRM[j][1]) - y))
kai.append(str(float(FRM[j][2]) - z))
kai_zahyou.append(kai)
print("kai", len(kai), " kai_zahyou", len(kai_zahyou), "/",
len(num_prob), " num_pdb", len(num_pdb))
del x, y, z, FRM
except StopIteration:
break
num2 += 1
"""
for i in safe_mem(frm_itr, num_prob, num_pdb):
kai_zahyou.append(i)
print("kai_zahyou", len(kai_zahyou), "/",
len(num_prob), " num_pdb", len(num_pdb))
del frm_itr, num_prob, num_pdb
return kai_zahyou
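# safe_mem streams frames through the trajectory iterator instead of indexing
# frm directly, so only one frame's coordinates are held in memory at a time.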
def safe_mem(frm_itr, num_prob, num_pdb):
num2 = 0
while True:
try:
kai = []
FRM = next(frm_itr)
if num2 in num_prob:
x = float(FRM[0][0])
y = float(FRM[0][1])
z = float(FRM[0][2])
for j in num_pdb:
kai.append(str(float(FRM[j][0]) - x))
kai.append(str(float(FRM[j][1]) - y))
kai.append(str(float(FRM[j][2]) - z))
yield kai
# del x, y, z, FRM, kai
except StopIteration:
del kai
break
num2 += 1
def PPP():
kai = [0]
num1 = 0
for idx in range(6):
for line in open(path + PROB[idx], "r"):
if float(line) != 0:
num1 += 1
kai.append(num1)
return kai
if __name__ == '__main__':
kai = SAS.pca(TRR_cal())
kai = kai.tolist()
path1 = "/lustre7/home/lustre3/satoshi/ALL_PCA/txt/"
ttt = "_20201207_2.txt"
ligands = ["aff", "eaf", "taf", "affkai", "eafkai", "tafkai"]
num = PPP()
for i in range(6):
f = open(path1+ligands[i]+ttt, "w")
for j in range(int(num[i]), int(num[i+1])):
f.write(str(kai[j][0]))
f.write(" ")
f.write(str(kai[j][1]))
f.write("\n")
f.close()
| satoshi-python/Desktop | all_pca_kai.py | all_pca_kai.py | py | 4,342 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "MDAnalysis.Universe",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "satoshi_pca.pca",
"line_number": 122,
"usage_type": "call"
}
] |
34321388716 | from datetime import datetime, timedelta
import os
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators import (StageToRedshiftOperator,
LoadFactOperator,
DataQualityOperator)
from airflow.operators.subdag_operator import SubDagOperator
from subdags.subdag_for_dimensions import load_dimension_subdag
from helpers import SqlQueries
from quality_checks.sql_queries import QualityChecks
AWS_KEY = os.environ.get('AWS_KEY')
AWS_SECRET = os.environ.get('AWS_SECRET')
# set default args
default_args = {
'owner': 'udacity',
'start_date': datetime(2018, 1, 1),
'end_date': datetime(2018, 12, 1),
'email_on_retry': False,
'retries': 3,
'retry_delay': timedelta(minutes=5),
'depends_on_past': False,
'wait_for_downstream': True
}
# define the DAG; catchup is a DAG-level argument, so it belongs here rather than in default_args
dag = DAG('udac_example_dag',
default_args=default_args,
description='Load and transform data in Redshift with Airflow',
schedule_interval='@hourly',
catchup=False
)
# dummy for node 0
start_operator = DummyOperator(task_id='Begin_execution', dag=dag)
# stage events
stage_events_to_redshift = StageToRedshiftOperator(
task_id='Stage_events',
dag=dag,
redshift_conn_id="redshift",
aws_credentials_id="aws_default",
table="staging_events",
s3_bucket="udacity-dend",
s3_key="log_data",
sql_stmt=SqlQueries.log_copy_command,
provide_context=True,
json_format="s3://udacity-dend/log_json_path.json"
)
# stage songs
stage_songs_to_redshift = StageToRedshiftOperator(
task_id='Stage_songs',
dag=dag,
redshift_conn_id="redshift",
aws_credentials_id="aws_default",
table="staging_songs",
s3_bucket="udacity-dend",
s3_key="song_data",
sql_stmt=SqlQueries.song_copy_command,
json_format="auto"
)
# load dimensions
load_dimension_subdag_task = SubDagOperator(
subdag=load_dimension_subdag(
parent_dag_name="udac_example_dag",
task_id="load_dimensions",
redshift_conn_id="redshift",
start_date=datetime(2018, 1, 1)
),
task_id="load_dimensions",
dag=dag,
)
# load fact
load_songplays_table = LoadFactOperator(
task_id='Load_songplays_fact_table',
dag=dag,
redshift_conn_id="redshift",
table="songplays",
sql_stmt=SqlQueries.songplay_table_insert
)
# run quality check
run_quality_checks = DataQualityOperator(
task_id='Run_data_quality_checks',
dag=dag,
redshift_conn_id="redshift",
sql_stmt=QualityChecks.count_check,
tables=['songs', 'time', 'users', 'artists', 'songplays'],
)
# dummy for node end
end_operator = DummyOperator(task_id='Stop_execution', dag=dag)
"""
An Overview of the implemented dag:
--> stage_events -->
// \\
start --> --> load_facts --> load_dimensions --> quality_check --> end
\\ //
--> stage_songs -->
"""
# sequence of airflow operations
start_operator >> stage_events_to_redshift
start_operator >> stage_songs_to_redshift
stage_events_to_redshift >> load_songplays_table
stage_songs_to_redshift >> load_songplays_table
load_songplays_table >> load_dimension_subdag_task
load_dimension_subdag_task >> run_quality_checks
run_quality_checks >> end_operator
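# A more compact way to declare the same dependency graph (untested sketch):
# start_operator >> [stage_events_to_redshift, stage_songs_to_redshift] >> load_songplays_table
# load_songplays_table >> load_dimension_subdag_task >> run_quality_checks >> end_operator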
| supratim94336/SparkifyDataPipelineWithAirflow | airflow/dags/udacity_dag.py | udacity_dag.py | py | 3,401 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.environ.get",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_... |
39227052944 | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import os
import numpy as np
import scipy.io.wavfile as wav
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.constraints import maxnorm
from sklearn.model_selection import StratifiedKFold
from python_speech_features import mfcc
from python_speech_features import delta
from prepare_data import serialize_data
from prepare_data import load_data_from_npy
npy_path = './Audio Data/npy data'
train_wav_npy_filename = 'train_wav.npy'
train_tg_npy_filename = 'train_label.npy'
test_wav_npy_filename = 'test_wav.npy'
test_tg_npy_filename = 'test_label.npy'
val_wav_npy_filename = 'val_wav.npy'
val_tg_npy_filename = 'val_label.npy'
x_train = load_data_from_npy(os.path.join(npy_path,train_wav_npy_filename))
y_train = load_data_from_npy(os.path.join(npy_path,train_tg_npy_filename))
x_test = load_data_from_npy(os.path.join(npy_path,test_wav_npy_filename))
y_test = load_data_from_npy(os.path.join(npy_path,test_tg_npy_filename))
x_val = load_data_from_npy(os.path.join(npy_path,val_wav_npy_filename))
y_val = load_data_from_npy(os.path.join(npy_path,val_tg_npy_filename))
model = Sequential()
model.add(Dropout(0.2, input_shape=(39*41,)))
model.add(Dense(39*41, kernel_initializer='normal', activation='relu', kernel_constraint=maxnorm(3)))
model.add(Dropout(0.2))
model.add(Dense(128, activation='relu', kernel_constraint=maxnorm(3)))
model.add(Dropout(0.2))
model.add(Dense(128, activation='relu', kernel_constraint=maxnorm(3)))
model.add(Dropout(0.2))
model.add(Dense(5, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy']) # Fit the model
history = model.fit(x_train, y_train, epochs=10, batch_size=512,validation_data=(x_val,y_val))
with open('./log_history.txt', 'w') as log:
log.write(str(history.history))
model.save('./Audio Data/model/model-test1.h5')
#plot train and validation loss
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, 'bo', label = 'Training loss')
plt.plot(epochs, val_loss, 'b', label = 'Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.savefig('./Training and validation loss.png')
plt.clf()
acc = history.history['acc']
val_acc = history.history['val_acc']
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.savefig('Training and validation accuracy.png')
results = model.evaluate(x_test, y_test)
print(results)
| MakerFace/voice-activation-system | mfcc-model.py | mfcc-model.py | py | 2,823 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "prepare_data.load_data_from_npy",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "prepare_dat... |
9000442932 | # test script
# created: 2022/8/5 9:26
from thop import profile
# import torchvision.models as models
# import torch
from ptflops import get_model_complexity_info
from models.basicblock import DRB, PALayer, CALayer, CCALayer, SRB
from models.SwinT import SwinT
# from .FCVit import fcvit_block
# from .FCA import MultiSpectralAttentionLayer
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.transforms import functional as trans_fn
from torchvision.transforms import InterpolationMode
from models.fusion import iAFF ,AFF, MS_CAM
# bicubic upsampling example:
# img = trans_fn.resize(img, size, InterpolationMode.BICUBIC)
def channel_shuffle(x, groups=4):
batchsize, num_channels, height, width = x.data.size()
channels_per_group = num_channels // groups# reshape
x = x.view(batchsize, groups,channels_per_group, height, width)
x = torch.transpose(x, 1, 2).contiguous()
# flatten
x = x.view(batchsize, -1, height, width)
return x
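# Illustrative sanity check (assumes channels divisible by groups; not in the original file):
# x = torch.randn(2, 8, 4, 4)
# assert channel_shuffle(x, groups=4).shape == x.shape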
def conv_layer(in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1, bias=True):
padding = int((kernel_size - 1) / 2) * dilation
return nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=padding, bias=bias, dilation=dilation,
groups=groups)
class Upsample(nn.Sequential):
"""Upsample module.
Args:
scale (int): Scale factor. Supported scales: 2^n and 3.
num_feat (int): Channel number of intermediate features.
"""
def __init__(self, scale, num_feat):
m = []
if (scale & (scale - 1)) == 0: # scale = 2^n
for _ in range(int(math.log(scale, 2))):
m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))
m.append(nn.PixelShuffle(2))
elif scale == 3:
m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))
m.append(nn.PixelShuffle(3))
else:
raise ValueError(f'scale {scale} is not supported. ' 'Supported scales: 2^n and 3.')
super(Upsample, self).__init__(*m)
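# e.g. scale=4 unrolls to two (Conv2d -> PixelShuffle(2)) stages, since 4 = 2**2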
class UpsampleOneStep(nn.Sequential):
"""UpsampleOneStep module (the difference with Upsample is that it always only has 1conv + 1pixelshuffle)
Used in lightweight SR to save parameters.
Args:
scale (int): Scale factor. Supported scales: 2^n and 3.
num_feat (int): Channel number of intermediate features.
"""
def __init__(self, scale, num_feat, num_out_ch, input_resolution=None):
self.num_feat = num_feat
self.input_resolution = input_resolution
m = []
m.append(nn.Conv2d(num_feat, (scale ** 2) * num_out_ch, 3, 1, 1))
m.append(nn.PixelShuffle(scale))
super(UpsampleOneStep, self).__init__(*m)
def flops(self):
H, W = self.input_resolution
flops = H * W * self.num_feat * 3 * 9
return flops
class myModel(nn.Module):
def __init__(self, img_size=64, num_heads=8, upscale=4, window_size=8, num_in_ch=3, nf=64, embed_dim=64,
depth=4, upsampler='pixelshuffledirect', img_range=1.):
super(myModel, self).__init__()
num_feat = 64
num_out_ch = 3
self.upsampler = upsampler
self.window_size = window_size
self.img_range = img_range
self.upscale = upscale
if num_in_ch == 3:
rgb_mean = (0.4488, 0.4371, 0.4040)
self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)
else:
self.mean = torch.zeros(1, 1, 1, 1)
#####################################################################################################
################################### 1, shallow feature extraction ###################################
self.conv_first = nn.Conv2d(num_in_ch, embed_dim, 3, 1, 1)
#####################################################################################################
################################### 2, deep feature extraction ######################################
self.num_layers = depth
self.layers = nn.ModuleList()  # holds the stacked HRBCT blocks
for i_layer in range(self.num_layers):
layer = HRBCT(embed_dim, nf, num_heads)
self.layers.append(layer)
#####################################################################################################
################################### 2.2, deep feature fusion module ######################################
self.conv1 = nn.Conv2d(depth*embed_dim, embed_dim, kernel_size=1) # depth*embed_dim
self.conv3 = nn.Conv2d(embed_dim, embed_dim, 3, 1, 1, bias=True)
self.PA = PALayer(embed_dim)  # pixel attention
#####################################################################################################
################################ 3, high quality image reconstruction ################################
if self.upsampler == 'pixelshuffle':
# for classical SR
self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
nn.LeakyReLU(inplace=True))
self.upsample = Upsample(upscale, num_feat)
self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
elif self.upsampler == 'pixelshuffledirect':
# for lightweight SR (to save parameters)
self.upsample = UpsampleOneStep(upscale, embed_dim, num_out_ch,
(img_size, img_size))
def check_image_size(self, x):
_, _, h, w = x.size()
mod_pad_h = (self.window_size - h % self.window_size) % self.window_size
mod_pad_w = (self.window_size - w % self.window_size) % self.window_size
x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h), 'reflect')
return x
def forward_shallow_features(self, x):
x1 = self.RRDB(x)
x1 = self.firstUp(x1)
x1 = self.conv_end1(x1)
return x1
def forward_features(self, x):
retainV = []
for layer in self.layers:
x = layer(x)
retainV.append(x)
x1 = torch.cat(retainV, 1).contiguous()  # concatenate the outputs of every HRBCT block (works for any depth)
return x1
def DFF(self, x):
x1 = self.conv1(x)
x1 = self.conv3(x1)
x1 = self.PA(x1)
return x1
def forward(self, x):
H, W = x.shape[2:]
self.mean = self.mean.type_as(x)
x = (x - self.mean) * self.img_range
if self.upsampler == 'pixelshuffle':
x = self.conv_first(x)
x = self.conv_before_upsample(x)
x = self.conv_last(self.upsample(x))
elif self.upsampler == 'pixelshuffledirect':
x = self.conv_first(x)  # shallow feature extraction
x = self.DFF(self.forward_features(x)) + x  # deep feature extraction and fusion
x = self.upsample(x)  # upsampling and reconstruction
x = x / self.img_range + self.mean
return x[:, :, :H * self.upscale, :W * self.upscale]
class HRBCT(nn.Module):
def __init__(self, embed_dim=64, nf=64, num_heads=8,distillation_rate=0.50):
super(HRBCT, self).__init__()
# knowledge distillation: split channels into a distilled part and a remaining part
self.distilled_channels = int(embed_dim * distillation_rate)
self.remaining_channels = int(embed_dim - self.distilled_channels)
self.distillation_rate = distillation_rate
self.Conv3_D1 = nn.Conv2d(self.distilled_channels, self.distilled_channels, 3, 1, 1)
self.Conv3_D2 = nn.Conv2d(int(self.remaining_channels * self.distillation_rate), int(self.remaining_channels * self.distillation_rate), 3, 1, 1)
self.ST = SwinT(embed_dim=self.remaining_channels, heads=num_heads)
self.SRB = SRB(int(nf*(1-distillation_rate)**2))
# self.BSRB = BSConvURB( int(nf*(1-distillation_rate)**2), int(nf*(1-distillation_rate)**2), kernel_size=3)
# DRB
# self.DRB = DRB(int(nf*(1-distillation_rate)**2))
# ESA
# self.ESA = ESA(n_feats=nf, conv=nn.Conv2d)  # (out channels, in channels)
self.CCA = CCALayer(nf)
def forward(self, x):
distilled_c1, remaining_c1 = torch.split(x, (self.distilled_channels, self.remaining_channels), dim=1)
distilled_c1 = self.Conv3_D1(distilled_c1)
out1 = self.ST(remaining_c1)
distilled_c2, remaining_c2 = torch.split(out1, (int(self.remaining_channels*self.distillation_rate), int(self.remaining_channels*(1-self.distillation_rate))), dim=1)
distilled_c2 = self.Conv3_D2(distilled_c2)
out2 = self.SRB(remaining_c2)
out = torch.cat([distilled_c1, distilled_c2, out2], dim=1)
x1 = self.CCA(out)  # contrast-aware channel attention
x_4 = x + x1
return x_4
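# Channel bookkeeping with the defaults (embed_dim=nf=64, distillation_rate=0.5):
# split 1 -> 32 distilled + 32 remaining; split 2 -> 16 + 16;
# the concatenation gives 32 + 16 + 16 = 64 channels, matching the residual add.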
if __name__ == '__main__':
x = torch.randn((1, 3, 64, 64))
model = myModel()
y = model(x)
print(y.shape)
device = torch.device('cuda:0')
input = x.to(device)
model.eval()
model = model.to(device)
macs, params = get_model_complexity_info(model, (3, 64, 64), as_strings=True,
print_per_layer_stat=True, verbose=True)
print('{:<30} {:<8}'.format('Computational complexity: ', macs))
print('{:<30} {:<8}'.format('Number of parameters: ', params))
| sad192/LISN-Infrared-Image-SR | models/network_hybrid.py | network_hybrid.py | py | 9,368 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.transpose",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"l... |
8804328521 | import pytest
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from datetime import datetime
from BaseTest import BaseTest
from Helper import Helper
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
from Utilities.config import Utilities
from selenium.webdriver.support.color import Color
import time
@pytest.mark.usefixtures("user")
@pytest.mark.usefixtures("setup")
@pytest.mark.usefixtures("env")
@pytest.mark.migration
@pytest.mark.singapore
@pytest.mark.uk
@pytest.mark.usa
@pytest.mark.sandoz
@pytest.mark.eg
@pytest.mark.global_site
@pytest.mark.de
@pytest.mark.at
@pytest.mark.ph
@pytest.mark.ch
@pytest.mark.ch_fr
@pytest.mark.fr
@pytest.mark.za
@pytest.mark.malaysia
@pytest.mark.es
@pytest.mark.pt
@pytest.mark.tw
@pytest.mark.jp
@pytest.mark.lv
@pytest.mark.it
@pytest.mark.ar
@pytest.mark.fi
@pytest.mark.kr
@pytest.mark.br
@pytest.mark.cn
@pytest.mark.scn
@pytest.mark.hu
@pytest.mark.biome
@pytest.mark.foundation
@pytest.mark.ie
@pytest.mark.gr
@pytest.mark.dk
# @pytest.mark.no
@pytest.mark.ca
# @pytest.mark.se
@pytest.mark.tr
# @pytest.mark.cz
@pytest.mark.ru
# @pytest.mark.rs
@pytest.mark.ro
@pytest.mark.co
@pytest.mark.sk
@pytest.mark.ve
#@pytest.mark.id
@pytest.mark.bd
@pytest.mark.be
@pytest.mark.au
@pytest.mark.pl
class Test_NewsArchive(BaseTest):
# @pytest.mark.malaysia
# def test_ARC_2586_NewsArchive_get_newsArchive(self):
# self.driver.get(self.env)
# self.newsPage.launch_newsArchive(self.env_name)
@pytest.mark.malaysia
def test_ARC_2586_NewsArchive_bannerImage(self):
"""
Checks banner image in news archive page.
"""
self.driver.get(self.env)
if self.env_name == "france":
self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
self.basePage.press_button(self.newsRoomPage.link_btn_css)
else:
self.newsPage.launch_newsArchive(self.env_name)
if len(self.driver.find_elements(*self.newsPage.banner_img_css)) > 0:
assert len(self.basePage.get_elemet_attribute(self.newsPage.banner_img_css, "src")) > 0
@pytest.mark.malaysia
def test_ARC_2586_NewsArchive_pattern(self):
"""
Checks pattern in news archive page.
"""
self.driver.get(self.env)
if self.env_name == "france":
self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
self.basePage.press_button(self.newsRoomPage.link_btn_css)
else:
self.newsPage.launch_newsArchive(self.env_name)
assert "patterns" in self.basePage.get_css_property(self.newsPage.pattern_css, "background-image")
# Already covered in test_breadcrumb
# def test_ARC_2586_NewsArchive_breadcrumbs(self):
# self.driver.get(self.env)
# if self.env_name == "france":
# self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
# self.basePage.press_button(self.newsRoomPage.link_btn_css)
# else:
# self.newsPage.launch_newsArchive(self.env_name)
# breadcrumb_items, breadcrumb_first_arrow_element, breadcrumb_second_arrow_element= self.newsPage.breadcrumb_ele()
# assert len(breadcrumb_items) == 3
# assert ">" in breadcrumb_first_arrow_element
# assert ">" in breadcrumb_second_arrow_element
# assert "#656565" == self.basePage.get_css_color(self.newsPage.news_archive_last_child_breadcrumb_color_css, "color")
@pytest.mark.malaysia
def test_ARC_2586_NewsArchive_search_bar_button(self):
"""
Checks search bar and button in news archive page.
"""
self.driver.get(self.env)
if self.env_name == "france":
self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
self.basePage.press_button(self.newsRoomPage.link_btn_css)
else:
self.newsPage.launch_newsArchive(self.env_name)
search_bar, search_btn = self.newsPage.search_bar_and_btn()
assert search_bar == True
assert search_btn == True
@pytest.mark.malaysia
def test_ARC_2586_NewsArchive_default_view(self):
"""
Checks default view - grid view and list.svg icon in news archive page.
"""
self.driver.get(self.env)
if self.env_name == "france":
self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
self.basePage.press_button(self.newsRoomPage.link_btn_css)
else:
self.newsPage.launch_newsArchive(self.env_name)
assert "arctic_grid_view" in self.basePage.get_elemet_attribute(self.newsPage.news_page_view_css, "class")
assert "list.svg" in self.basePage.get_css_property(self.newsPage.view_toggle_icon, "background")
# This is disabled in all sites now
# def test_ARC_2586_NewsArchive_calendar_from_to(self):
# if self.env_name == "france":
# self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
# self.basePage.press_button(self.newsRoomPage.link_btn_css)
# else:
# self.newsPage.launch_newsArchive(self.env_name)
# # assert len(self.basePage.get_element_text(self.newsPage.from_date_xpath)) > 0
# assert len(self.basePage.get_element_text(self.newsPage.to_date_xpath)) > 0
@pytest.mark.malaysia
def test_ARC_2586_NewsArchive_all_topics_grey_color(self):
"""
Checks All topics is greyed out by default in news archive page.
"""
self.driver.get(self.env)
if self.env_name == "france":
self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
self.basePage.press_button(self.newsRoomPage.link_btn_css)
else:
self.newsPage.launch_newsArchive(self.env_name)
all_topics_text_status, hex_code= self.newsPage.all_topics_grey_color()
assert all_topics_text_status == True
assert "#f1f1f1" in hex_code
@pytest.mark.malaysia
def test_ARC_2586_NewsArchive_pagination_num(self):
"""
Checks pagination numbers are correctly displayed.
"""
self.driver.get(self.env)
if self.env_name == "france":
self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
self.basePage.press_button(self.newsRoomPage.link_btn_css)
else:
self.newsPage.launch_newsArchive(self.env_name)
current_page_num , page_num = self.newsPage.pagination_number()
assert current_page_num == page_num
@pytest.mark.malaysia
def test_ARC_2586_NewsArchive_right_hand_rail(self):
"""
Checks if there is no right hand rail.
"""
self.driver.get(self.env)
if self.env_name == "france":
self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
self.basePage.press_button(self.newsRoomPage.link_btn_css)
else:
self.newsPage.launch_newsArchive(self.env_name)
assert len(self.driver.find_elements(*self.newsPage.right_hand_rail_xpath)) == 0
# Already covered in test_breadcrumb
# def test_ARC_2586_NewsArchive_first_second_level_breadcrumbs(self):
# self.driver.get(self.env)
# if self.env_name == "france":
# self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
# self.basePage.press_button(self.newsRoomPage.link_btn_css)
# else:
# self.newsPage.launch_newsArchive(self.env_name)
# breadcumb_anchor_url_list, breadcumb_anchor_current_url_list = self.newsPage.check_all_breadcrumb_url()
# for breadcumb_anchor_url, breadcumb_anchor_current_url in zip(breadcumb_anchor_url_list, breadcumb_anchor_current_url_list):
# assert breadcumb_anchor_url in breadcumb_anchor_current_url
@pytest.mark.malaysia
def test_ARC_2586_NewsArchive_pagination(self):
"""
Checks that pagination works as expected.
"""
self.driver.get(self.env)
if self.env_name == "france":
self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
self.basePage.press_button(self.newsRoomPage.link_btn_css)
else:
self.newsPage.launch_newsArchive(self.env_name)
if len(self.driver.find_elements(self.newsPage.pagination_heading_xpath[0], self.newsPage.pagination_heading_xpath[1])) > 0:
num_content_page,pagination_heading, page_content_list = self.newsPage.pagination_validation()
if num_content_page == 12 and pagination_heading > 0 :
page_count_len = len(page_content_list)
while page_count_len > 0:
assert len(page_content_list) > 0
page_count_len = page_count_len - 1
@pytest.mark.malaysia
def test_ARC_2586_NewsArchive_menuTabs_viewEmpty(self):
"""
Checks that menu tabs are greyed out when selected, and that a tab with no
content displays "No Result Found".
"""
empty_list = []
self.driver.get(self.env)
if self.env_name == "france":
self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
self.basePage.press_button(self.newsRoomPage.link_btn_css)
else:
self.newsPage.launch_newsArchive(self.env_name)
hex_color_list, empty_list = self.newsPage.grey_menu_verify()
hex_color_list_len = len(hex_color_list)
while hex_color_list_len > 0:
assert "#f1f1f1" in hex_color_list[hex_color_list_len-1]
hex_color_list_len = hex_color_list_len - 1
# def test_ARC_2586_NewsArchive_viewEmpty(self):
# self.driver.get(self.env)
if self.env_name == "france":
self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
self.basePage.press_button(self.newsRoomPage.link_btn_css)
else:
self.newsPage.launch_newsArchive(self.env_name)
empty_list_len = len(empty_list)
if empty_list_len > 0:
if "view-empty" in empty_list[empty_list_len-1]:
assert "view-empty" in empty_list[empty_list_len-1]
empty_list_len = empty_list_len - 1
@pytest.mark.malaysia
def test_ARC_2586_NewsArchive_listview(self):
"""
Checks the following :-
1. list.svg icon.
2. list view button.
3. Default view.
"""
self.driver.get(self.env)
if self.env_name == "france":
self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
self.basePage.press_button(self.newsRoomPage.link_btn_css)
else:
self.newsPage.launch_newsArchive(self.env_name)
list_icon,list_view_btn,grid_view = self.newsPage.list_view()
assert "list.svg" in list_icon
assert list_view_btn == True
assert "arctic_grid_view" in grid_view
@pytest.mark.malaysia
def test_ARC_2586_NewsArchive_gridView(self):
"""
Checks the following :-
1. grid.svg icon.
2. grid view button.
3. view is in list view.
"""
self.driver.get(self.env)
if self.env_name == "france":
self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
self.basePage.press_button(self.newsRoomPage.link_btn_css)
else:
self.newsPage.launch_newsArchive(self.env_name)
grid_icon,grid_view_btn,list_view = self.newsPage.grid_view()
assert "grid.svg" in grid_icon
assert grid_view_btn == True
assert "arctic_list_view" in list_view
@pytest.mark.malaysia
def test_ARC_2586_NewsArchive_randomText_search_url(self):
"""
Checks that the searched text appears in the URL.
"""
self.driver.get(self.env)
if self.env_name == "france":
self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
self.basePage.press_button(self.newsRoomPage.link_btn_css)
else:
self.newsPage.launch_newsArchive(self.env_name)
searched_keyword = self.newsPage.verify_search_result()
current_url = self.driver.current_url
assert searched_keyword in current_url
@pytest.mark.malaysia
def test_ARC_2586_NewsArchive_randomText_search_keyword(self):
"""
Checks that the searched keyword stays in the text box while switching tabs.
"""
self.driver.get(self.env)
if self.env_name == "france":
self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
self.basePage.press_button(self.newsRoomPage.link_btn_css)
else:
self.newsPage.launch_newsArchive(self.env_name)
searched_keyword_list = self.newsPage.verify_searchText_with_Tabs()
for search_keyword in searched_keyword_list:
assert search_keyword == self.newsPage.test_data
@pytest.mark.malaysia
def test_ARC_2586_NewsArchive_randomText_greyMenu_verify(self):
"""
After searching a keyword, checks that the tabs are greyed out correctly.
"""
self.driver.get(self.env)
if self.env_name == "france":
self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
self.basePage.press_button(self.newsRoomPage.link_btn_css)
else:
self.newsPage.launch_newsArchive(self.env_name)
hex_color_list = self.newsPage.grey_menu_verify_with_Tabs()
for hex_color in hex_color_list:
assert "#f1f1f1" == hex_color
@pytest.mark.malaysia
def test_ARC_2586_NewsArchive_randomText_menuTab_url(self):
"""
Checks that the menu tab name appears in the URL.
"""
self.driver.get(self.env)
if self.env_name == "france":
self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
self.basePage.press_button(self.newsRoomPage.link_btn_css)
else:
self.newsPage.launch_newsArchive(self.env_name)
tab_href_list, selected_tab_url_list = self.newsPage.verify_menuTab_url()
for tab_href, selected_tab_url in zip(tab_href_list, selected_tab_url_list):
assert tab_href in selected_tab_url
@pytest.mark.malaysia
def test_ARC_2586_NewsArchive_randomText_searchKeyword_url(self):
"""
Checks that the searched keyword stays in the URL while switching tabs.
"""
self.driver.get(self.env)
if self.env_name == "france":
self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
self.basePage.press_button(self.newsRoomPage.link_btn_css)
else:
self.newsPage.launch_newsArchive(self.env_name)
searched_keyword,selected_tab_url_list = self.newsPage.verify_searchText_with_url()
for url in selected_tab_url_list:
assert searched_keyword in url
@pytest.mark.malaysia
def test_ARC_2586_NewsArchive_pagination_front_arrow_back_arrow(self):
"""
Checks that the forward and back pagination arrows work.
"""
self.driver.get(self.env)
if self.env_name == "france":
self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
self.basePage.press_button(self.newsRoomPage.link_btn_css)
else:
self.newsPage.launch_newsArchive(self.env_name)
if len(self.driver.find_elements(self.newsPage.pagination_heading_xpath[0], self.newsPage.pagination_heading_xpath[1])) > 0:
num_content_page,pagination_heading,page_one_contents, page_zero_contents = self.newsPage.pagination_front_arrow_back_arrow_validation()
if num_content_page == 12 and pagination_heading > 0 :
assert len(page_one_contents) > 0
assert len(page_zero_contents) > 0
# @pytest.mark.malaysia
# def test_ARC_2586_NewsArchive_key_release_language_tab(self):
# """
# Checks language dropdown is coming under key release dropdown.
# """
# self.driver.get(self.env)
# if self.env_name == "france":
# self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
# self.basePage.press_button(self.newsRoomPage.link_btn_css)
# else:
# self.newsPage.launch_newsArchive(self.env_name)
# if self.env_name == "global":
# language_tab_txt = self.newsPage.key_releases(self.env_name)
# assert "Language" in language_tab_txt
@pytest.mark.malaysia
def test_ARC_2586_NewsArchive_media_release_language_tab(self):
"""
Checks that the language dropdown appears under the media releases dropdown and that dates are sorted in descending order.
"""
self.driver.get(self.env)
if self.env_name == "france":
self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
self.basePage.press_button(self.newsRoomPage.link_btn_css)
else:
self.newsPage.launch_newsArchive(self.env_name)
desc_sort = self.newsPage.media_release(self.env_name)
assert desc_sort == True
# if self.env_name == "global":
# assert "Language" in language_tab_txt
@pytest.mark.malaysia
def test_ARC_2586_NewsArchive_randomText_verification(self):
"""
Checks that, after searching a keyword, the keyword appears in the search box and "All topics" is greyed out.
"""
self.driver.get(self.env)
if self.env_name == "france":
self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
self.basePage.press_button(self.newsRoomPage.link_btn_css)
else:
self.newsPage.launch_newsArchive(self.env_name)
searched_keyword = self.newsPage.verify_search_result()
assert searched_keyword == self.newsPage.test_data
all_topics_text_status, hex_code = self.newsPage.all_topics_grey_color()
assert all_topics_text_status == True
assert "#f1f1f1" in hex_code
@pytest.mark.malaysia
def test_ARC_2586_NewsArchive_content_validation(self):
"""
Checks that content is present on the news archive page.
"""
self.driver.get(self.env)
if self.env_name == "france":
self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
self.basePage.press_button(self.newsRoomPage.link_btn_css)
else:
self.newsPage.launch_newsArchive(self.env_name)
contents = self.basePage.get_elements(self.newsPage.content_pages_css)
assert len(contents) > 0
@pytest.mark.malaysia
def test_ARC_2586_NewsArchive_search_results(self):
"""
Checks that the searched input appears in the search results.
"""
self.driver.get(self.env)
if self.env_name == "france":
self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
self.basePage.press_button(self.newsRoomPage.link_btn_css)
else:
self.newsPage.launch_newsArchive(self.env_name)
content_title = self.newsPage.search_results()
for title in content_title:
if self.newsPage.test_data_novartis in title:
assert self.newsPage.test_data_novartis in title
break
@pytest.mark.malaysia
def test_ARC_2586_NewsArchive_verify_filters(self):
"""
Checks that the topic filters work.
"""
media_release_list = ['key release', 'media release']
featured_news_list = ['pulse update', 'statement','featured news']
self.driver.get(self.env)
if self.env_name == "france":
self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
self.basePage.press_button(self.newsRoomPage.link_btn_css)
else:
self.newsPage.launch_newsArchive(self.env_name)
menu_tab_list , tab_label_list = self.newsPage.verify_filters()
for filter_name,tab_label in zip(menu_tab_list , tab_label_list):
if filter_name == "media releases" and self.env_name == 'global':
for tab in tab_label:
assert tab in media_release_list
elif filter_name == "key releases" and self.env_name == 'global':
for tab in tab_label:
assert tab in media_release_list
elif filter_name == "featured news" and self.env_name == 'global':
for tab in tab_label:
assert tab in featured_news_list
elif filter_name == "statements" and self.env_name == 'global':
for tab in tab_label:
assert tab in featured_news_list
elif filter_name == "pulse updates" and self.env_name == 'global':
for tab in tab_label:
assert tab in featured_news_list
elif filter_name == "statement" and self.env_name == 'biome':
for tab in tab_label:
assert tab in featured_news_list
elif filter_name == "statements" and self.env_name == 'usa':
for tab in tab_label:
assert tab in featured_news_list
elif filter_name == "statement" and self.env_name == 'foundation':
for tab in tab_label:
assert tab in featured_news_list
else :
for label in tab_label :
assert label in filter_name
@pytest.mark.malaysia
def test_ARC_2586_NewsArchive_banner_title(self):
"""
Checks that the banner title is displayed.
"""
self.driver.get(self.env)
if self.env_name == "france":
self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
self.basePage.press_button(self.newsRoomPage.link_btn_css)
else:
self.newsPage.launch_newsArchive(self.env_name)
if self.env_name == "global":
assert self.basePage.is_displayed(self.newsPage.banner_text_css) == True
@pytest.mark.malaysia
def test_ARC_2586_NewsArchive_key_release_text(self):
"""
Checks that the key releases text appears above the search bar.
"""
self.driver.get(self.env)
if self.env_name == "france":
self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
self.basePage.press_button(self.newsRoomPage.link_btn_css)
else:
self.newsPage.launch_newsArchive(self.env_name)
if self.env_name == "global":
assert self.basePage.is_displayed(self.newsPage.key_releases_text_xpath) == True
assert "Key Releases" in self.basePage.get_element_text(self.newsPage.key_releases_text_xpath)
@pytest.mark.malaysia
def test_ARC_2586_NewsArchive_menu_ellipse(self):
"""
Checks that the ellipsis menu is displayed when too many tabs are present.
"""
self.driver.get(self.env)
if self.env_name == "france":
self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
self.basePage.press_button(self.newsRoomPage.link_btn_css)
else:
self.newsPage.launch_newsArchive(self.env_name)
if self.env_name == "global":
visible_element = self.driver.find_elements(*self.newsPage.visible_element_xpath)
if len(visible_element) == 7 :
assert self.basePage.is_displayed(self.newsPage.ellipses_xpath) == True
@pytest.mark.malaysia
def test_ARC_2586_NewsArchive_ellipses_validation(self):
"""
Checks that elements are present inside the ellipsis menu.
"""
self.driver.get(self.env)
if self.env_name == "france":
self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
self.basePage.press_button(self.newsRoomPage.link_btn_css)
else:
self.newsPage.launch_newsArchive(self.env_name)
if self.env_name == "global":
if self.basePage.is_displayed(self.newsPage.ellipses_xpath):
self.basePage.press_button(self.newsPage.ellipses_xpath)
elements = self.driver.find_elements(*self.newsPage.non_visible_ellipses_xpath)
assert len(elements) > 0
@pytest.mark.malaysia
def test_ARC_2586_NewsArchive_ellipses_element_greyed_out(self):
"""
Checks that clicking an element in the ellipsis menu greys that element out.
"""
self.driver.get(self.env)
if self.env_name == "france":
self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
self.basePage.press_button(self.newsRoomPage.link_btn_css)
else:
self.newsPage.launch_newsArchive(self.env_name)
if self.env_name == "global":
if self.basePage.is_displayed(self.newsPage.ellipses_xpath):
self.basePage.press_button(self.newsPage.ellipses_xpath)
non_visible_element_text = self.basePage.get_element_text(self.newsPage.non_visible_ellipses_xpath)
self.basePage.press_button(self.newsPage.non_visible_ellipses_xpath)
length = len(self.driver.find_elements(*self.newsPage.visible_element_xpath))
element = self.driver.find_element(By.CSS_SELECTOR,f'ul#block-newsarchivenavigation>li:nth-child({length}) > a')
value_of_css = element.value_of_css_property('background-color')
hex = Color.from_string(value_of_css).hex
assert '#f1f1f1' == hex
assert non_visible_element_text == element.text
def test_ARC_2586_NewsArchive_grey_bg_only_one_tab(self):
"""
Checks that only one tab is greyed out at a time.
"""
self.driver.get(self.env)
if self.env_name == "france":
self.homePage.mega_menu_landing_page(self.env_name,self.homePage.mega_menu_news_xpath,'news')
self.basePage.press_button(self.newsRoomPage.link_btn_css)
else:
self.newsPage.launch_newsArchive(self.env_name)
first_tab_color, second_tab_color = self.newsPage.menu_tab_one_grey_bg()
assert "#000000" == first_tab_color
assert "#f1f1f1" == second_tab_color
| Shreyasi2205/MyPOMProject | tests/test_NewsArchive.py | test_NewsArchive.py | py | 27,544 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "BaseTest.BaseTest",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "pytest.mark",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark",
... |
30650525881 | # Matrix Game experiment
# Author: Lucas Cassano
# Paper: "Logical Team Q-learning"
# ===================================
# Import necessary packages
from absl import app
from absl import flags
import numpy as np
import matplotlib.pyplot as plt
import q_mix
flags.DEFINE_string('save_path', '/tmp/', 'directory to save results.')
def main(argv):
"""Run simple 2 agent matrix game."""
nmbr_games = 500
seed = 1
mu = 1e-1
nmbr_agents = 2
qmix_extra_iters = 100
np.random.seed(seed)
payoff = np.array([[0, 2, 0], [0, 1, 2]]) #np.array([[8, -12, -12], [-12, 0, 0], [-12, 0, 0]]) #
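# With this payoff matrix the optimal return is 2, achieved at the joint
# actions (a1=0, a2=1) and (a1=1, a2=2).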
q_joint = np.zeros_like(payoff)
nmbr_actions_1, nmbr_actions_2 = payoff.shape
q_logic_b = {0: np.zeros([nmbr_games, nmbr_actions_1]), 1: np.zeros([nmbr_games, nmbr_actions_2])}
q_logic_u = {0: np.zeros([nmbr_games, nmbr_actions_1]), 1: np.zeros([nmbr_games, nmbr_actions_2])}
q_dist = {0: np.zeros([nmbr_games, nmbr_actions_1]), 1: np.zeros([nmbr_games, nmbr_actions_2])}
q_ind = {0: np.zeros([nmbr_games, nmbr_actions_1]), 1: np.zeros([nmbr_games, nmbr_actions_2])}
q_tran = {0: np.zeros([nmbr_games, nmbr_actions_1]), 1: np.zeros([nmbr_games, nmbr_actions_2])}
q_mix_class = q_mix.Qmix(payoff.shape, mu/2)
q_mix_out = {0: np.zeros([nmbr_games, nmbr_actions_1]), 1: np.zeros([nmbr_games, nmbr_actions_2])}
for n in range(nmbr_games - 1):
actions = np.array([np.random.randint(nmbr_actions_1), np.random.randint(nmbr_actions_2)]) #Pick actions uniformly
r = payoff[actions[0]][actions[1]]
# Logic Team Q-learning
for agent in range(nmbr_agents):
q_logic_b[agent][n + 1] = q_logic_b[agent][n]
q_logic_u[agent][n + 1] = q_logic_u[agent][n]
chosen_action = actions[agent]
if actions[nmbr_agents - 1 - agent] == np.argmax(q_logic_b[nmbr_agents - 1 - agent][n]):
q_logic_b[agent][n + 1][chosen_action] += mu * (r - q_logic_b[agent][n][chosen_action])
q_logic_u[agent][n + 1][chosen_action] += mu * (r - q_logic_u[agent][n][chosen_action])
elif r > q_logic_b[agent][n][chosen_action]:
q_logic_b[agent][n + 1][chosen_action] += mu * (r - q_logic_b[agent][n][chosen_action])
# Independent Q-learning
for agent in range(nmbr_agents):
q_ind[agent][n + 1] = q_ind[agent][n]
chosen_action = actions[agent]
q_ind[agent][n + 1][chosen_action] += mu * (r - q_ind[agent][n][chosen_action])
# Distributed Q-learning
for agent in range(nmbr_agents):
q_dist[agent][n + 1] = q_dist[agent][n]
chosen_action = actions[agent]
if r > q_dist[agent][n][chosen_action]:
q_dist[agent][n + 1][chosen_action] += mu * (r - q_dist[agent][n][chosen_action])
# Qtran-base
q_joint[actions[0], actions[1]] -= (q_joint[actions[0], actions[1]] - r)
q_j = q_joint[actions[0], actions[1]]
q_tilde = q_tran[0][n][actions[0]] + q_tran[1][n][actions[1]]
for agent in range(nmbr_agents):
q_tran[agent][n + 1] = q_tran[agent][n]
chosen_action = actions[agent]
if q_tran[0][n][actions[0]] == np.max(q_tran[0][n]) and q_tran[1][n][actions[1]] == np.max(q_tran[1][n]):
q_tran[agent][n + 1][chosen_action] -= mu * (q_tilde - q_j)
else:
q_tran[agent][n + 1][chosen_action] -= mu * np.minimum(q_tilde - q_j, 0)
# Qmix
q_mix_out[0][n + 1] = q_mix_out[0][n]
q_mix_out[1][n + 1] = q_mix_out[1][n]
for _ in range(qmix_extra_iters):  # QMIX needs many extra iterations per game to converge
actions = np.array([np.random.randint(nmbr_actions_1), np.random.randint(nmbr_actions_2)])
r = payoff[actions[0]][actions[1]]
q1, q2, qmix = q_mix_class.learn(actions, r)
q_mix_out[0][n + 1][actions[0]] = q1
q_mix_out[1][n + 1][actions[1]] = q2
# Print final Qmix matrices
qmix1 = np.zeros([nmbr_actions_1])
qmix2 = np.zeros([nmbr_actions_2])
qmix_total = np.zeros([nmbr_actions_1, nmbr_actions_2])
for a1 in range(nmbr_actions_1):
for a2 in range(nmbr_actions_2):
qmix1[a1], qmix2[a2], qmix_total[a1, a2] = q_mix_class.obtain_q([a1, a2])
print(qmix1)
print(qmix2)
print(qmix_total)
# Plot results
fig1, ax1 = plt.subplots()
plt.xlabel('Games', fontsize=25)
plt.ylabel('Q-values', fontsize=25)
ax1.plot(np.arange(start=0, stop=nmbr_games), q_logic_b[0], 'b')
ax1.plot(np.arange(start=0, stop=nmbr_games), q_logic_b[1], 'r')
ax1.set_yticks(np.arange(0, 2.01, step=0.5))
ax1.tick_params(axis='both', which='major', labelsize=15)
plt.grid()
fig1.savefig(fname='biased_logic_matrix_game_1', bbox_inches='tight')
fig2, ax2 = plt.subplots()
plt.xlabel('Games', fontsize=25)
plt.ylabel('Q-values', fontsize=25)
ax2.plot(np.arange(start=0, stop=nmbr_games), q_logic_u[0], 'b')
ax2.plot(np.arange(start=0, stop=nmbr_games), q_logic_u[1], 'r')
ax2.set_yticks(np.arange(0, 2.01, step=0.5))
ax2.tick_params(axis='both', which='major', labelsize=15)
plt.grid()
fig2.savefig(fname='unbiased_logic_matrix_game_1', bbox_inches='tight')
fig3, ax3 = plt.subplots()
plt.xlabel('Games', fontsize=25)
plt.ylabel('Q-values', fontsize=25)
ax3.plot(np.arange(start=0, stop=nmbr_games), q_dist[0], 'b')
ax3.plot(np.arange(start=0, stop=nmbr_games), q_dist[1], 'r')
ax3.set_yticks(np.arange(0, 2.01, step=0.5))
ax3.tick_params(axis='both', which='major', labelsize=15)
plt.grid()
fig3.savefig(fname='q_dist_matrix_game_1', bbox_inches='tight')
fig4, ax4 = plt.subplots()
plt.xlabel('Games', fontsize=25)
plt.ylabel('Q-values', fontsize=25)
ax4.plot(np.arange(start=0, stop=nmbr_games * qmix_extra_iters, step=qmix_extra_iters), q_mix_out[0], 'b')
ax4.plot(np.arange(start=0, stop=nmbr_games * qmix_extra_iters, step=qmix_extra_iters), q_mix_out[1], 'r')
ax4.tick_params(axis='both', which='major', labelsize=15)
plt.grid()
fig4.savefig(fname='q_mix_matrix_game_1', bbox_inches='tight')
fig5, ax5 = plt.subplots()
plt.xlabel('Games', fontsize=25)
plt.ylabel('Q-values', fontsize=25)
ax5.plot(np.arange(start=0, stop=nmbr_games), q_ind[0], 'b')
ax5.plot(np.arange(start=0, stop=nmbr_games), q_ind[1], 'r')
ax5.set_yticks(np.arange(0, 2.01, step=0.5))
ax5.tick_params(axis='both', which='major', labelsize=15)
plt.grid()
fig5.savefig(fname='ind_q_matrix_game_1', bbox_inches='tight')
fig6, ax6 = plt.subplots()
plt.xlabel('Games', fontsize=25)
plt.ylabel('Q-values', fontsize=25)
ax6.plot(np.arange(start=0, stop=nmbr_games), q_tran[0], 'b')
ax6.plot(np.arange(start=0, stop=nmbr_games), q_tran[1], 'r')
ax6.set_yticks(np.arange(0, 2.01, step=0.5))
ax6.tick_params(axis='both', which='major', labelsize=15)
plt.grid()
fig6.savefig(fname='q_tran_matrix_game_1', bbox_inches='tight')
print(np.expand_dims(q_tran[0][-1], axis=1))
print(np.expand_dims(q_tran[1][-1], axis=0))
print(np.expand_dims(q_tran[0][-1], axis=1) + np.expand_dims(q_tran[1][-1], axis=0))
return 1
if __name__ == '__main__':
app.run(main)
| lcassano/Logical_Team_Q_Learning_paper | matrix_game/run_matrix_exp.py | run_matrix_exp.py | py | 6,910 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "absl.flags.DEFINE_string",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "absl.flags",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "numpy.random.seed",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.random",
... |
648716471 | import glob
import logging
import os
import cv2
import numpy as np
import torch
from torch.utils.data import Dataset
from tqdm import tqdm
logfilepath = "" # 따로 지정하지 않으면 terminal에 뜸
if os.path.isfile(logfilepath):
os.remove(logfilepath)
logging.basicConfig(filename=logfilepath, level=logging.INFO)
class DetectionDataset(Dataset):
"""
Parameters
----------
path : str(jpg)
Path to input image directory.
transform : object
"""
CLASSES = ['ng', 'ok']
def __init__(self, path='Dataset/train', transform=None, sequence_number=1, test=False):
super(DetectionDataset, self).__init__()
if sequence_number < 1 or not isinstance(sequence_number, int):
logging.error(f"{sequence_number} must be a positive integer")
return
self._name = os.path.basename(path)
self._sequence_number = sequence_number
self._class_path_List = sorted(glob.glob(os.path.join(path, "*")), key=self.key_func)
self._transform = transform
self._items = []
self._itemname = []
self._test = test
self._make_item_list()
def key_func(self, path):
return path
def _make_item_list(self):
if self._class_path_List:
for path in self._class_path_List:
class_name = os.path.basename(path)
image_path_list = sorted(glob.glob(os.path.join(path, "*")), key=self.key_func)
for i in tqdm(range(len(image_path_list) - (self._sequence_number - 1))):
image_path = image_path_list[i:i + self._sequence_number]
self._items.append((image_path, class_name))
base_image = os.path.basename(image_path[-1])
self._itemname.append(base_image)
else:
logging.info("The dataset does not exist")
def __getitem__(self, idx):
images = []
image_sequence_path, label = self._items[idx]
for image_path in image_sequence_path:
image = cv2.imread(image_path, flags=-1)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
images.append(image)
images = np.concatenate(images, axis=-1)
origin_images = images.copy()
if self._transform:
one_hot_label = self._one_hot(label)
result = self._transform(images, one_hot_label, self._itemname[idx])
if self._test:
return result[0], result[1], result[2], torch.as_tensor(origin_images)
else:
return result[0], result[1], result[2]
else:
return origin_images, label, self._itemname[idx]
def _one_hot(self, label):
unit_matrix = np.eye(len(self.CLASSES))
if label == 'ng':
label=unit_matrix[0]
elif label == 'ok':
label=unit_matrix[1]
return label
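# e.g. with CLASSES = ['ng', 'ok']: 'ng' -> [1., 0.] and 'ok' -> [0., 1.]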
@property
def classes(self):
return self.CLASSES
@property
def num_class(self):
"""Number of categories."""
return len(self.CLASSES)
def __str__(self):
return self._name + " " + "dataset"
def __len__(self):
return len(self._items)
# test
if __name__ == "__main__":
import random
from core.utils.util.utils import plot_bbox
sequence_number = 1
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
dataset = DetectionDataset(path=os.path.join(root, 'Dataset', 'train'), sequence_number=sequence_number)
length = len(dataset)
sequence_image, label, file_name = dataset[random.randint(0, length - 1)]
print('images length:', length)
print('sequence image shape:', sequence_image.shape)
if sequence_number > 1:
sequence_image = sequence_image[:,:,3*(sequence_number-1):]
file_name = file_name[-1]
plot_bbox(sequence_image, score=None, label=label,
class_names=dataset.classes, colors=None, reverse_rgb=True,
image_show=True, image_save=False, image_save_path="result", image_name=os.path.basename(file_name), gt=True)
'''
images length: 1499
sequence image shape: (720, 1280, 9)
'''
| DeepFocuser/PyTorch-Detector-alpha | classification/core/utils/dataprocessing/dataset.py | dataset.py | py | 4,393 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "os.path.isfile",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line... |
43589412425 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
df = None
# In[2]:
from reaktoro import *
import numpy as np
from natsort import natsorted
from tqdm.notebook import tqdm
import os
from bokeh.io import show, output_notebook
from bokeh.layouts import column
from bokeh.plotting import figure
from bokeh.models import Range1d, ColumnDataSource
from bokeh.layouts import gridplot
# In[3]:
second = 1
minute = 60
hour = 60 * minute
day = 24 * hour
week = 7 * day
year = 365 * day
# In[4]:
xl = 0.0
xr = 0.5
ncells = 100
nsteps = 200
dx = (xr - xl) / ncells
dt = 60 * minute
# per-element diffusivities (Cl, H, N, Na, O, Z) could replace the scalar D below:
# D = [2.032e-9, 9.311e-8, 1.902e-9, 1.334e-9, 1.0e-9, 1.0e-9]
D = 1.0e-09
v = 1.0 / week
T = 60.0 + 273.15
P = 100 * 1e5
phi = 0.1
# In[5]:
xcells = np.linspace(xl, xr, ncells + 1)
# In[6]:
dirichlet = False
# In[7]:
output_quantities = """
pH
speciesMolality(H+)
speciesMolality(Cl-)
speciesMolality(Na+)
speciesMolality(NO3-)
speciesMolarity(OH-)
""".split()
# In[8]:
column_quantities = """
pH
Hcation
Clanion
Nacation
NO3anion
OHanion
""".split()
# In[9]:
# Create the list of columns stored in dataframes
columns = ['step', 'x'] + column_quantities
import pandas as pd
# In[10]:
# Initialize dataframes with above defined columns
df = pd.DataFrame(columns=columns)
# In[11]:
folder_results = 'results-rt-calcite-dolomite'
def make_results_folders():
os.system('mkdir -p ' + folder_results)
# In[12]:
def simulate():
# Construct the chemical system with its phases and species
system = define_chemical_system()
# Define the initial condition of the reactive transport modeling problem
state_ic = define_initial_condition(system)
# Define the boundary condition of the reactive transport modeling problem
state_bc = define_boundary_condition(system)
# Generate indices of partitioning fluid and solid species
nelems, ifluid_species, isolid_species = partition_indices(system)
# Partitioning fluid and solid species
b, bfluid, bsolid, b_bc = partition_elements_in_mesh_cell(ncells, nelems, state_ic, state_bc)
# Create a list of chemical states for the mesh cells (one for each cell, initialized to state_ic)
states = [state_ic.clone() for _ in range(ncells + 1)]
# Create the equilibrium solver object for the repeated equilibrium calculation
solver = EquilibriumSolver(system)
# Running the reactive transport simulation loop
step = 0 # the current step number
t = 0.0 # the current time (in seconds)
# Output the initial state of the reactive transport calculation
outputstate_df(step, system, states)
with tqdm(total=nsteps, desc="Reactive transport simulations") as pbar:
while step <= nsteps:
# Perform transport calculations
bfluid, bsolid, b = transport(states, bfluid, bsolid, b, b_bc, nelems, ifluid_species, isolid_species)
# Perform reactive chemical calculations
states = reactive_chemistry(solver, states, b)
# Increment time step and number of time steps
t += dt
step += 1
# Output the current state of the reactive transport calculation
outputstate_df(step, system, states)
# Update a progress bar
pbar.update(1)
# In[ ]:
# In[13]:
def define_chemical_system():
# Construct the chemical system with its phases and species
db = Database('supcrt98.xml')
editor = ChemicalEditor(db)
    editor.addAqueousPhaseWithElements('H N O Na Cl') \
        .setChemicalModelPitzerHMW() \
        .setActivityModelDrummondCO2()
system = ChemicalSystem(editor)
return system
# In[14]:
def define_initial_condition(system):
problem_ic = EquilibriumProblem(system)
problem_ic.setTemperature(T)
problem_ic.setPressure(P)
problem_ic.add('H2O', 0.001, 'kg')
problem_ic.add('NaCl', 1e-4, 'mol')
problem_ic.add('HNO3', 1e-4, 'mol')
# Calculate the equilibrium states for the initial conditions
state_ic = equilibrate(problem_ic)
# Scale the volumes of the phases in the initial condition
state_ic.scalePhaseVolume('Aqueous', 1.0, 'm3')
return state_ic
# In[15]:
def define_boundary_condition(system):
# Define the boundary condition of the reactive transport modeling problem
problem_bc = EquilibriumProblem(system)
problem_bc.setTemperature(T)
problem_bc.setPressure(P)
problem_bc.add('H2O', 0.001, 'kg')
problem_bc.add('NaCl', 1e-4, 'mol')
problem_bc.add('HNO3', 1e-6, 'mol')
# Calculate the equilibrium states for the boundary conditions
state_bc = equilibrate(problem_bc)
# Scale the boundary condition state to 1 m3
state_bc.scaleVolume(1.0, 'm3')
return state_bc
# In[16]:
def partition_indices(system):
nelems = system.numElements()
els = system.elements()
for el in els:
print('elements', el.name())
ifluid_species = system.indicesFluidSpecies()
isolid_species = system.indicesSolidSpecies()
return nelems, ifluid_species, isolid_species
# In[17]:
def partition_elements_in_mesh_cell(ncells, nelems, state_ic, state_bc):
# The concentrations of each element in each mesh cell (in the current time step)
b = np.zeros((ncells, nelems))
# Initialize the concentrations (mol/m3) of the elements in each mesh cell
b[:] = state_ic.elementAmounts()
# The concentrations (mol/m3) of each element in the fluid partition, in each mesh cell
bfluid = np.zeros((ncells, nelems))
# The concentrations (mol/m3) of each element in the solid partition, in each mesh cell
bsolid = np.zeros((ncells, nelems))
# Initialize the concentrations (mol/m3) of each element on the boundary
b_bc = state_bc.elementAmounts()
return b, bfluid, bsolid, b_bc
# In[18]:
def transport(states, bfluid, bsolid, b, b_bc, nelems, ifluid_species, isolid_species):
# Collect the amounts of elements from fluid and solid partitions
for icell in range(ncells):
bfluid[icell] = states[icell].elementAmountsInSpecies(ifluid_species)
bsolid[icell] = states[icell].elementAmountsInSpecies(isolid_species)
# Get the porosity of the boundary cell
bc_cell = 0
phi_bc = states[bc_cell].properties().fluidVolume().val / states[bc_cell].properties().volume().val
# print(nelems)
# Transport each element in the fluid phase
for j in range(nelems):
transport_fullimplicit(bfluid[:, j], dt, dx, v, D, phi_bc * b_bc[j])
# Update the amounts of elements in both fluid and solid partitions
b[:] = bsolid + bfluid
return bfluid, bsolid, b
# In[19]:
def transport_fullimplicit(u, dt, dx, v, D, ul):
# Number of DOFs
n = len(u)
alpha = D * dt / dx ** 2
beta = v * dt / dx
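    # note: this fully implicit (backward Euler) discretization is
    # unconditionally stable, so dt is not limited by a CFL-type condition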
# Upwind finite volume scheme
a = np.full(n, -beta - alpha)
b = np.full(n, 1 + beta + 2 * alpha)
c = np.full(n, -alpha)
# Set the boundary condition on the left cell
if dirichlet:
# Use Dirichlet BC boundary conditions
b[0] = 1.0
c[0] = 0.0
u[0] = ul
else:
# Flux boundary conditions (implicit scheme for the advection)
# Left boundary
b[0] = 1 + alpha + beta
c[0] = -alpha # stays the same as it is defined -alpha
u[0] += beta * ul # = dt/dx * v * g, flux that we prescribe is equal v * ul
# Right boundary is free
a[-1] = - beta
b[-1] = 1 + beta
# Solve a tridiagonal matrix equation
thomas(a, b, c, u)
# In[20]:
def thomas(a, b, c, d):
n = len(d)
c[0] /= b[0]
for i in range(1, n - 1):
c[i] /= b[i] - a[i] * c[i - 1]
d[0] /= b[0]
for i in range(1, n):
d[i] = (d[i] - a[i] * d[i - 1]) / (b[i] - a[i] * c[i - 1])
x = d
for i in reversed(range(0, n - 1)):
x[i] -= c[i] * x[i + 1]
return x
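# Quick sanity check for thomas() (hypothetical values, kept commented out so
# the notebook flow is unchanged):
# a = np.full(3, -1.0); b = np.full(3, 2.0); c = np.full(3, -1.0)
# d = np.array([1.0, 0.0, 1.0])
# print(thomas(a, b, c, d))  # should match np.linalg.solve on the dense matrix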
# In[21]:
def reactive_chemistry(solver, states, b):
for icell in range(ncells):
solver.solve(states[icell], T, P, b[icell])
return states
# In[22]:
def outputstate_df(step, system, states):
quantity = ChemicalQuantity(system)
values = [None] * len(columns)
for state, x in zip(states, xcells):
values[0] = step
values[1] = x
quantity.update(state)
        for quantity_name, i in zip(output_quantities, range(2, len(columns))):
values[i] = quantity.value(quantity_name) * (100 / (1 - phi) if "phaseVolume" in quantity_name else 1)
df.loc[len(df)] = values
# In[23]:
def titlestr(t):
t = t / minute
h = int(t) / 60
m = int(t) % 60
return 'Time: %2dh %2dm' % (h, m)
# In[24]:
def plot_figures_ph(steps, files):
plots = []
for i in steps:
t = i * dt
source = ColumnDataSource(df[df['step'] == i])
p = figure(plot_width=600, plot_height=250)
p.line(source.data['x'], source.data['pH'], color='teal', line_width=2, legend_label='pH')
p.x_range = Range1d(-0.001, 1.001)
p.y_range = Range1d(2.5, 12.0)
p.xaxis.axis_label = 'Distance [m]'
p.yaxis.axis_label = 'pH'
p.legend.location = 'bottom_right'
p.title.text = titlestr(t)
plots.append([p])
grid = gridplot(plots)
show(grid)
# In[25]:
def plot_figures_aqueous_species(steps, files):
plots = []
for i in steps:
t = i * dt
source = ColumnDataSource(df[df['step'] == i])
p = figure(plot_width=600, plot_height=300)
p.line(source.data['x'], source.data['Nacation'], color='orange', line_width=2, legend_label='Na')
p.line(source.data['x'], source.data['NO3anion'], color='green', line_width=2, legend_label='NO3-')
p.line(source.data['x'], source.data['Clanion'], color='red', line_width=2, legend_label='Cl')
p.line(source.data['x'], source.data['Hcation'], color='darkviolet', line_width=2, legend_label='H+')
# p.x_range = Range1d(-0.001, 1.0)
# p.y_range = Range1d(1e-9, 1e-2)
p.xaxis.axis_label = 'Distance [m]'
p.yaxis.axis_label = 'Concentration [molal]'
p.legend.location = 'top_right'
p.title.text = titlestr(t)
p.legend.click_policy = 'mute'
plots.append([p])
grid = gridplot(plots)
show(grid)
# In[26]:
make_results_folders()
# In[27]:
simulate()
# In[28]:
step = 0
df_step = df[df['step'] == step].loc[:, ['x'] + column_quantities]
df_step
# In[29]:
df.shape
# In[30]:
selected_steps_to_plot = [10, 20]
assert all(step <= nsteps for step in selected_steps_to_plot), f"Make sure that selected steps are less than the total number of steps {nsteps}"
# In[31]:
print("Collecting files...")
files = [file for file in natsorted(os.listdir(folder_results))]
# In[32]:
output_notebook()
# In[33]:
plot_figures_ph(selected_steps_to_plot, files)
# In[34]:
plot_figures_aqueous_species(selected_steps_to_plot, files)
| nimaamp/Reactive-transport | Nima Benchmark!.py | Nima Benchmark!.py | py | 11,252 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.linspace",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "tqdm.notebook.tqdm",
... |
25793507679 | # -*-coding:utf-8-*-
__author__ = 'BING'
from django.http import HttpResponse
from django.shortcuts import render,render_to_response
from zhihupaper import apiUse,singelNews,latestNews,beforeNews
import re
from getPic import GetPic
def home(request):
api = apiUse()
news = latestNews(api)
count = news.getnum()
stories = news.getstories()
return render_to_response('home.html',{'stories':stories})
def mobile(request):
api = apiUse()
news = latestNews(api)
topstories = news.gettopstories()
stories = news.getmobilestories()
return render_to_response('mobile.html',{'topstories':topstories, 'stories':stories})
def story(request, id):
api = apiUse()
singelnews = singelNews(api, int(id))
title = singelnews.gettitle()
body = singelnews.getbody()
image = singelnews.getimage()
source = singelnews.getsource()
body = replaceUrl(body)
body = replaceImg(body, image, title, source)
return render_to_response('story.html', {'title': title, 'body': body})
def ajax_morestory(request, date):
api = apiUse()
beforenews = beforeNews(api, date)
stories = beforenews.getstories()
return render_to_response('ajax_morestory.html', { 'stories': stories})
def m_ajax_morestory(request, date):
api = apiUse()
beforenews = beforeNews(api, date)
stories = beforenews.getmobilestories()
return render_to_response('m_ajax_morestory.html', { 'stories': stories})
def replaceImg(body, image, title, source):
pattern = re.compile('<div class=\"img-place-holder\"><\/div>',re.DOTALL)
replaceStr = r'<div class="img-wrap"><h1 class="headline-title">%s</h1><span class="img-source">%s</span><img src="/imgurl/url=%s" alt></div>' % (title, source, image)
return pattern.sub(replaceStr, body)
def replaceUrl(body):
pattern = re.compile(r'src=\"', re.DOTALL)
replaceStr = r'src="/imgurl/url='
return pattern.sub(replaceStr, body)
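# Hypothetical example of the rewrite performed above:
#   replaceUrl('<img src="http://pic.example/x.jpg">')
#   -> '<img src="/imgurl/url=http://pic.example/x.jpg">'
# so every image request is proxied through the get_pic view below.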
def get_pic(request, url):
url = url[4:]
getpic = GetPic(url)
req = getpic.get_pic()
pic = req.read()
return HttpResponse(pic) | codeBing/zhihudaily | paper/views.py | views.py | py | 2,087 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "zhihupaper.apiUse",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "zhihupaper.latestNews",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render_to_response",
"line_number": 14,
"usage_type": "call"
},
{
"api_n... |
import sys
import pytest
import pdb
from typing import List
class Solution:
def fullJustify(self, words: List[str], maxWidth: int) -> List[str]:
"""
"""
def intertwine(ws, sps):
# one word + one sps appended
t = []
for w, s in zip(ws, sps):
t.extend([w, s])
t.append(ws[-1])
return ''.join(t)
# 1. Greedy pick words for each line
cur_count = 0
buffer = []
lines = []
for w in words:
if (cur_count + len(w) + (1 if cur_count else 0)) <= maxWidth:
cur_count += len(w) + (1 if cur_count else 0)
else:
lines.append(buffer)
cur_count = len(w)
buffer = []
buffer.append(w)
if buffer:
lines.append(buffer)
# 2. Justify spaces using divmod
for i in range(len(lines)):
remaining = maxWidth - (sum(len(w)+1 for w in lines[i]) - 1)
# Left justify last line
if i == len(lines)-1:
lines[i] = ' '.join(lines[i]) + ' '*remaining
# Full justify
else:
if len(lines[i]) > 1:
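                # divmod spreads `remaining` over the gaps: `interval` extra
                # spaces per gap, plus one more in the first `left` gaps, so
                # the leftmost gaps end up wider, per the justification rule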
interval, left = divmod(remaining, len(lines[i])-1)
spaces = [' ' *(1+interval+ (1 if j < left else 0)) for j, w in enumerate(lines[i])][:-1]
lines[i] = intertwine(lines[i], spaces)
else:
# Left justfiy if only 1 word per line
lines[i] = ' '.join(lines[i]) + ' '*remaining
return lines
@pytest.mark.parametrize('input, length, output', [
    (["This", "is", "an", "example", "of", "text", "justification."], 16,
     ["This    is    an", "example  of text", "justification.  "]),
    (["Listen", "to", "many,", "speak", "to", "a", "few."], 6,
     ["Listen", "to    ", "many, ", "speak ", "to   a", "few.  "])
])
def test(input, length, output):
    assert Solution().fullJustify(input, length) == output
if __name__ == '__main__':
sys.exit(pytest.main(['-s', '-v'] + sys.argv))
| naubull2/codingtests | leetcode/quick-prep/68_Text_Justification/solution.py | solution.py | py | 2,224 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.List",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "pytest.main",
... |
18882639437 | #! /usr/bin/env python3
import sys
import json
from flask import Flask, request
app = Flask(__name__)
def is_browser(ua_string):
return ua_string.split('/')[0].lower() == 'mozilla'
@app.route("/")
def hello():
msg_content = "Hello World!"
if is_browser(request.headers['User-Agent']):
return "<html><body><h1>{}</body></html>".format(msg_content)
else:
response = dict()
response["msg"] = msg_content
return json.dumps(response)
@app.route("/name", methods=["POST"])
def greeting():
print(request.data, file=sys.stdout)
req = json.loads(request.data)
req["msg"] = "Hi, {}".format(req["name"])
return json.dumps(req)
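# Hypothetical check from a shell (assuming the server listens on port 5000):
#   curl -X POST -d '{"name": "Ada"}' http://localhost:5000/name
#   -> {"name": "Ada", "msg": "Hi, Ada"}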
if __name__ == "__main__":
app.run(host="0.0.0.0", port=int(sys.argv[1]))
| glennneiger/estate-backend | example/simple.py | simple.py | py | 770 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.request.headers",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "json.dumps",
... |
from typing import List
from Domain.BuildEnumMethods import BuildEnumMethods
from .UtilityScriptBase import UtilityScriptBase
import logging
from .Exceptions.ArgNotFoundException import ArgNotFoundException
import json
from mongoengine import connect
import pandas
from Domain.EquityCorporateData import EquityCorporateData
class LoadNasdaqTickers(UtilityScriptBase):
def __init__(self):
UtilityScriptBase.__init__( self )
logging.debug("In Nasdaq Utility Script")
##Change Description
self.description = "Loads tickers from nasdaq csv"
##Init args
self.args["DB_CONNECTION"] = None
self.args["DB_HOST"] = None
self.args["DB_PORT"] = None
self.args["pathToCsv"] = None
def run(self):
logging.debug("Prompting for value")
self.queryArg("pathToCsv", self.args, "What is the path to the nasdaq csv??\nValue: \t")
self.queryArg("DB_CONNECTION", self.args, "What DB Connection?\nValue: \t")
self.queryArg("DB_HOST", self.args, "What DB Host?\nValue: \t")
self.queryArg("DB_PORT", self.args, "What DB Port?\nValue: \t")
connect(self.args["DB_CONNECTION"], host=self.args["DB_HOST"], port=int(self.args["DB_PORT"]))
nasdaqDF = self.fileToDf(self.args["pathToCsv"])
equities = EquityCorporateData.build(BuildEnumMethods.DF, DF=nasdaqDF)
equitiesInSystem = self.getEquityObjects()
for i in equities:
i.save()
def runWithArgFile(self, argFile):
self.parseArgFile(argFile)
self.validateArgs()
self.run()
def parseArgFile(self, argFile):
f = open(argFile)
data = json.load(f)
for i in data:
self.args[i] = data[i]
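    # Example arg file (hypothetical values):
    # {"pathToCsv": "data/nasdaq.csv", "DB_CONNECTION": "equities",
    #  "DB_HOST": "localhost", "DB_PORT": "27017"}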
    def validateArgs(self):
        if self.args["pathToCsv"] is None:
            raise ArgNotFoundException("pathToCsv")
        if self.args["DB_CONNECTION"] is None:
            raise ArgNotFoundException("DB_CONNECTION")
        if self.args["DB_PORT"] is None:
            raise ArgNotFoundException("DB_PORT")
        if self.args["DB_HOST"] is None:
            raise ArgNotFoundException("DB_HOST")
def fileToDf(self, filePath : str) -> pandas.DataFrame:
return pandas.read_csv(filePath)
    def getEquityObjects(self) -> List[EquityCorporateData]:
        return EquityCorporateData.objects
{
"api_name": "UtilityScriptBase.UtilityScriptBase",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "UtilityScriptBase.UtilityScriptBase.__init__",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "UtilityScriptBase.UtilityScriptBase",
"line_number": 13,
... |
16866363316 | import pytorch_lightning as pl
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch
from torchmetrics import Accuracy
from loss import create_criterion
class Backbone(nn.Module):
def __init__(self):
super(Backbone, self).__init__()
self.feature = nn.Sequential(
nn.Conv2d(1, 32, (3, 3), (1, 1)),
nn.BatchNorm2d(32),
nn.Dropout(0.5),
nn.ReLU(),
nn.Conv2d(32, 64, (3, 3), (1, 1)),
nn.BatchNorm2d(64),
nn.Dropout(0.5),
nn.ReLU(),
)
self.avg = nn.AdaptiveAvgPool2d((1, 1))
self.classifier = nn.Linear(64, 10)
def forward(self, x):
x = self.feature(x)
x = self.avg(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
class MNISTModel(pl.LightningModule):
def __init__(self, loss, lr):
super(MNISTModel, self).__init__()
self.net = Backbone()
self._criterion = create_criterion(loss)
self.acc = Accuracy()
self.learning_rate = lr
self.save_hyperparameters(ignore="model")
def forward(self, x):
x = self.net(x)
x = F.softmax(x, dim=1)
return x
def training_step(self, batch, batch_idx):
preds, loss, acc, labels = self.__share_step(batch, 'train')
self.log("train_loss", loss)
self.log("train_accuracy", acc)
return {"loss": loss, "pred": preds.detach(), 'labels': labels.detach()}
def validation_step(self, batch, batch_idx):
preds, loss, acc, labels = self.__share_step(batch, 'val')
self.log("val_loss", loss)
self.log("val_accuracy", acc)
return {"loss": loss, "pred": preds, 'labels': labels}
def __share_step(self, batch, mode):
x, y = batch
y_hat = self.net(x)
loss = self._criterion(y_hat, y)
acc = self.acc(y_hat, y)
return y_hat, loss, acc, y
def configure_optimizers(self):
optimizer = optim.Adam(
self.parameters(), lr=self.learning_rate)
scheduler = lr_scheduler.StepLR(
optimizer,
step_size=10, gamma=0.5
)
return [optimizer], [scheduler]
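# Minimal usage sketch (the DataModule and the loss name accepted by
# create_criterion are hypothetical here), kept commented out:
# trainer = pl.Trainer(max_epochs=10)
# trainer.fit(MNISTModel(loss="cross_entropy", lr=1e-3), datamodule=mnist_dm)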
| KyubumShin/MNIST_pl | model.py | model.py | py | 2,306 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"lin... |
21998644026 | from typing import List
class Trie:
def __init__(self):
self.L = 30
self.left = None
self.right = None
def insert(self, val: int):
node = self
for i in range(self.L, -1, -1):
bit = (val >> i) & 1
if bit == 0:
if not node.left:
node.left = Trie()
node = node.left
else:
if not node.right:
node.right = Trie()
node = node.right
def get_max_xor(self, val: int) -> int:
ans, node = 0, self
for i in range(self.L, -1, -1):
bit = (val >> i) & 1
check = False
if bit == 0:
if node.right:
node = node.right
check = True
else:
node = node.left
else:
if node.left:
node = node.left
check = True
else:
node = node.right
if check:
ans |= 1 << i
return ans
class Solution:
def maximizeXor(self, nums: List[int], queries: List[List[int]]) -> List[int]:
        n, q = len(nums), len(queries)
nums.sort()
queries = [(x, m, i) for i, (x, m) in enumerate(queries)]
queries.sort(key=lambda query: query[1])
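        # offline trick: answer queries in increasing order of m so each num
        # is inserted into the trie at most once across all queries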
ans = [0] * q
t = Trie()
idx = 0
for x, m, qid in queries:
while idx < n and nums[idx] <= m:
t.insert(nums[idx])
idx += 1
if idx == 0:
ans[qid] = -1
else:
ans[qid] = t.get_max_xor(x)
return ans
| hangwudy/leetcode | 1700-1799/1707. 与数组中元素的最大异或值.py | 1707. 与数组中元素的最大异或值.py | py | 1,735 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.List",
"line_number": 46,
"usage_type": "name"
}
] |
38447711574 | import sys
import os
import json
from parse import validate_file
if __name__ == '__main__':
outputs_dir = sys.argv[1]
submission_name = sys.argv[2]
submission = {}
for input_path in os.listdir("inputs"):
graph_name = input_path.split('.')[0]
output_file = f'{outputs_dir}/{graph_name}.out'
if os.path.exists(output_file) and validate_file(output_file):
output = open(f'{outputs_dir}/{graph_name}.out').read()
submission[input_path] = output
with open(submission_name, 'w') as f:
f.write(json.dumps(submission))
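# Hypothetical invocation: python prepare_submission.py outputs my_submission.json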
| Sea-Snell/170project | prepare_submission.py | prepare_submission.py | py | 588 | python | en | code | 7 | github-code | 6 | [
{
"api_name": "sys.argv",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_numbe... |
10972428124 | #
# train.py
# @author amangupta0044@gmail.com
# @description
# @created 2020-12-09T16:35:56.524Z+05:30
# @last-modified 2020-12-11T20:05:30.671Z+05:30
#
########### Help ###########
'''
python train.py \
--data_dir /Users/aman.gupta/Documents/eagleview/utilities/onsite_data_fetch/fetched_images/annotated_combined_thumbnail_after_may_2020/splitted_letterbox_training_data \
--log_dir ./logs \
--epochs 1 \
--save_interval 5 \
--print_interval 1 \
--batch_size 64 \
--name exp0
'''
#############################
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from torchvision import datasets, transforms, models
import argparse
import os
from utils import (load_split_train_test,
plot_classes_preds,
save_checkpoint)
from torch.utils.tensorboard import SummaryWriter
import time
from model import Model
import sys
import configs
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="this script trains the classification model")
parser.add_argument("--data_dir",required = True,help="training data path")
parser.add_argument("--log_dir",required=False,default="./logs",type=str,help="dir to save logs")
parser.add_argument("--epochs",default=10,type =int, help="number of epochs to train a model")
parser.add_argument("--save_interval",default=100,type = int,help="interval to save model")
parser.add_argument("--print_interval",default=10,type = int,help="interval to print log")
parser.add_argument("--lr",default=0.003,type = float,help="learning rate")
parser.add_argument("--batch_size",default=4,type = int,help="batch size")
parser.add_argument("--test_split",default=0.2,type = float,help="test split out of 1.0")
parser.add_argument("--name",default="exp0",type = str,help="experiment name")
args = parser.parse_args()
os.makedirs(args.log_dir,exist_ok=True)
    # TensorBoard writer
# default `log_dir` is "runs" - we'll be more specific here
writer = SummaryWriter(args.log_dir)
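    # logs can be inspected with: tensorboard --logdir <log_dir>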
##load data
data_dir = args.data_dir
trainloader, testloader = load_split_train_test(data_dir, args.batch_size)
print(trainloader.dataset.classes)
# sys.exit()
##load model
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
output_layers = len(configs.CLASSES)
model_obj = Model(output_layers,device,args.lr)
model,optimizer,criterion = model_obj.model,model_obj.optimizer,model_obj.criterion
## training loop
epochs = args.epochs
steps = 0
running_loss = 0
print_every = args.print_interval
train_losses, test_losses = [], []
try:
print("Training Started")
for epoch in range(epochs):
for inputs, labels in trainloader:
steps += 1
inputs, labels = inputs.to(device), labels.to(device)
optimizer.zero_grad()
logps = model.forward(inputs)
loss = criterion(logps, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if steps % print_every == 0:
test_loss = 0
accuracy = 0
model.eval()
with torch.no_grad():
for inputs, labels in testloader:
inputs, labels = inputs.to(device), labels.to(device)
logps = model.forward(inputs)
batch_loss = criterion(logps, labels)
test_loss += batch_loss.item()
ps = torch.exp(logps)
top_p, top_class = ps.topk(1, dim=1)
equals = top_class == labels.view(*top_class.shape)
accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
train_losses.append(running_loss/len(trainloader))
test_losses.append(test_loss/len(testloader))
# ...log the running loss
writer.add_scalar('loss/training_loss',
running_loss / print_every,
global_step=epoch * len(trainloader) + steps)
# ...log the test loss
writer.add_scalar('loss/test_loss',
test_loss/len(testloader),
global_step=epoch * len(trainloader) + steps)
# ...log the test Accuracy
writer.add_scalar('test Accuracy',
accuracy/len(testloader),
global_step=epoch * len(trainloader) + steps)
# ...log a Matplotlib Figure showing the model's predictions on a
# random mini-batch
writer.add_figure('predictions vs. actuals',
plot_classes_preds(model, inputs, labels),
global_step=epoch * len(trainloader) + steps)
print(f"Epoch {epoch+1}/{epochs}.. "
f"Step :{steps}.. "
f"Train loss: {running_loss/print_every:.3f}.. "
f"Test loss: {test_loss/len(testloader):.3f}.. "
f"Test accuracy: {accuracy/len(testloader):.3f}")
running_loss = 0
model.train()
if steps % args.save_interval==0:
path = os.path.join(args.log_dir,"checkpoints",args.name,f"epochs-{epochs}-steps-{steps}")
save_checkpoint(path,epoch,model,optimizer,train_losses)
print(f"checkpoint saved at :{path}")
path = os.path.join(args.log_dir,"checkpoints",args.name,"last")
save_checkpoint(path,epoch,model,optimizer,train_losses)
print(f"checkpoint saved at :{path}")
except KeyboardInterrupt:
path = os.path.join(args.log_dir,"checkpoints",args.name,"last")
save_checkpoint(path,epoch,model,optimizer,train_losses)
print(f"Training interrupted checkpoint saved at :{path}") | aman0044/resnet-classifier | train.py | train.py | py | 6,456 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "torch.utils.tensorboard.SummaryWriter",
"line_number": 64,
"usage_type": "call"
},
{
"api_nam... |
2140106274 | import numpy as np
from pyqtgraph.Qt import QtGui, QtCore
import init_multi as im
import func_multi as fm
'''
BEGIN PROGRAM
'''
# image-drawing function; uses convert() to fetch data from the fifo
def showTrack(i):
x1, y1 = fm.ellipseCreate(20+5*i, 30, 50, 70-3*i, 0.05*i)
im.ellipse1.setData(x1, y1)
x2, y2 = fm.ellipseCreate(40-5*i, 20, 40, 80+3*i, 0.07*i)
im.ellipse2.setData(x2, y2)
tx, ty = fm.ellipseCross(x1, y1, x2, y2)
'''
tx = np.zeros((1))
ty = np.zeros((1))
tx[0] = 10 + i*5
ty[0] = 20 + i*6
'''
fm.plot2track(tx, ty)
im.cross.setData(tx, ty)
def showCfar(i):
data_target = fm.getFifoCfar()
print("\tdata_target", data_target.shape)
data_target[:, im.FNSAMPLES/2] = 1
im.img1.setImage(data_target)
im.img2.setImage(data_target)
def showAf(i):
data_af = fm.getFifoAf(2*im.AF_SIZE)
print("\tdata_af", data_af.shape)
#Z = np.sin(i*im.d_gl) / im.d2_gl
#im.plot_gl.setData(z=Z)
im.plot_gl.setData(z=data_af)
def updateData():
global i
showTrack(i)
#showCfar(i)
if im.AF_UDP == 1:
print("[%d]" % (i))
showAf(i)
    # timer programming:
    # schedule this same function to run again after 1 s
#im.QtCore.QTimer.singleShot(0.1, updateData)
if i < im.frames - 1:
i += 1
t.start(100)
i = 0
t = QtCore.QTimer()
t.timeout.connect(updateData)
t.setSingleShot(True)
t.start(0)
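# a single-shot timer re-armed inside updateData() gives a simple frame loop
# without blocking the Qt event loop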
if __name__ == '__main__':
print("Start")
    # first run to pick up the timer
#updateData()
## Start Qt event loop unless running in interactive mode.
import sys
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
im.QtGui.QApplication.instance().exec_()
im.fifo.close()
exit(0)
'''
'''
| NazimAliev/public | embedded/passive-radar-fpga-dsp-arm/multix86/main_multi.py | main_multi.py | py | 1,879 | python | ru | code | 0 | github-code | 6 | [
{
"api_name": "func_multi.ellipseCreate",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "init_multi.ellipse1.setData",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "init_multi.ellipse1",
"line_number": 14,
"usage_type": "attribute"
},
{
"api... |
22094537095 | import logging
import datetime
import sqlite3
import voluptuous as vol
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'hasentinel'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
'entities': vol.All(cv.ensure_list, [{
vol.Required('entity_id'): cv.entity_id,
vol.Required('urgency'): cv.string
}])
})
}, extra=vol.ALLOW_EXTRA)
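# Example configuration.yaml entry (hypothetical entity ids):
# hasentinel:
#   entities:
#     - entity_id: sensor.front_door_battery
#       urgency: high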
def setup(hass, config):
"""Set up the HASentinel component."""
conf = config[DOMAIN]
entities = conf.get('entities')
urgency_to_minutes = {
'low': 7*24*60, # 1 week
'medium': 48*60, # 48 hours
'high': 60 # 1 hour (For testing purposes, change this back to 24*60 for production)
}
conn = sqlite3.connect('/config/hasentinel.db')
cursor = conn.cursor()
cursor.execute('''
CREATE TABLE IF NOT EXISTS entity_states (
entity_id TEXT PRIMARY KEY,
device_id TEXT,
last_seen TEXT,
urgency_level TEXT,
reported INTEGER
)
''')
def check_entities(now):
"""Routine to check entities' availability."""
for entity in entities:
entity_id = entity['entity_id']
urgency = entity['urgency']
state = hass.states.get(entity_id)
if not state:
_LOGGER.error(f"Error fetching state for {entity_id}")
continue
last_seen_attribute = state.attributes.get("last_seen")
current_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
cursor.execute("SELECT last_seen, reported FROM entity_states WHERE entity_id = ?", (entity_id,))
record = cursor.fetchone()
if last_seen_attribute:
last_seen_dt = datetime.datetime.fromisoformat(last_seen_attribute.replace('Z', '+00:00'))
elif record:
last_seen_dt = datetime.datetime.strptime(record[0], '%Y-%m-%d %H:%M:%S')
else:
last_seen_dt = datetime.datetime.now()
delta = datetime.datetime.now() - last_seen_dt
if state.state != "unavailable" or (last_seen_attribute and delta.total_seconds() <= urgency_to_minutes[urgency] * 60):
if record:
cursor.execute("UPDATE entity_states SET last_seen = ?, reported = 0 WHERE entity_id = ?", (current_time, entity_id))
else:
cursor.execute("INSERT INTO entity_states (entity_id, device_id, last_seen, urgency_level, reported) VALUES (?, ?, ?, ?, 0)",
(entity_id, state.attributes.get("device_id", ""), current_time, urgency))
else:
if record and delta.total_seconds() > urgency_to_minutes[urgency] * 60 and record[1] == 0:
cursor.execute("UPDATE entity_states SET reported = 1 WHERE entity_id = ?", (entity_id,))
conn.commit()
# Set up the routine to run every minute
async_track_time_interval(hass, check_entities, datetime.timedelta(minutes=1))
return True
| dennis-bell/HASentinel | custom_components/hasentinel/__init__.py | __init__.py | py | 3,269 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "voluptuous.Schema",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "voluptuous.Schema",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "voluptuous.All",... |
72532883709 | import os
from pathlib import Path
import s4l_v1
import s4l_v1.analysis.viewers as viewers
import s4l_v1.document as document
import s4l_v1.model as model
import s4l_v1.simulation.emfdtd as fdtd
import s4l_v1.units as units
from dotenv import load_dotenv
from osparc_isolve_api import run_simulation
from s4l_v1._api.application import get_app_safe, run_application
from s4l_v1._api.simwrappers import ApiSimulation
from s4l_v1.model import Vec3
load_dotenv()
HOST = os.environ.get("OSPARC_API_URL", "http://127.0.0.1:8006")
KEY = os.environ["OSPARC_API_KEY"]
SECRET = os.environ["OSPARC_API_SECRET"]
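# Expected .env contents (placeholder values):
# OSPARC_API_URL=http://127.0.0.1:8006
# OSPARC_API_KEY=<your key>
# OSPARC_API_SECRET=<your secret>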
def create_model():
wire = model.CreateWireBlock(
p0=Vec3(0, 0, 0), p1=Vec3(100, 100, 100), parametrized=True
)
wire.Name = "Plane Wave Source"
def create_simulation() -> ApiSimulation:
# retrieve needed entities from model
entities = model.AllEntities()
source_box = entities["Plane Wave Source"]
sim = fdtd.Simulation()
sim.Name = "Plane Wave Simulation"
sim.SetupSettings.SimulationTime = 10.0, units.Periods
# Materials:
# No materials
# Sources
planesrc_settings = sim.AddPlaneWaveSourceSettings(source_box)
options = planesrc_settings.ExcitationType.enum
planesrc_settings.ExcitationType = options.Harmonic
planesrc_settings.CenterFrequency = 1.0, units.GHz
# Sensors
# Only using overall field sensor
# Boundary Conditions
options = sim.GlobalBoundarySettings.GlobalBoundaryType.enum
sim.GlobalBoundarySettings.GlobalBoundaryType = options.UpmlCpml
# Grid
manual_grid_settings = sim.AddManualGridSettings([source_box])
manual_grid_settings.MaxStep = (9.0,) * 3 # model units
manual_grid_settings.Resolution = (2.0,) * 3 # model units
# Voxels
auto_voxel_settings = sim.AddAutomaticVoxelerSettings(source_box)
# Solver settings
options = sim.SolverSettings.Kernel.enum
# sim.SolverSettings.Kernel = options.Software
sim.SolverSettings.Kernel = options.Cuda
# FIXME: This does not work. WHY??? sim.SolverSettings.Kernel = options.AXware
return sim
def analyze_simulation(sim):
# Create extractor for a given simulation output file
results = sim.Results()
# overall field sensor
overall_field_sensor = results["Overall Field"]
# Create a slice viewer for the E field
slice_field_viewer_efield = viewers.SliceFieldViewer()
slice_field_viewer_efield.Inputs[0].Connect(overall_field_sensor["EM E(x,y,z,f0)"])
slice_field_viewer_efield.Data.Mode = (
slice_field_viewer_efield.Data.Mode.enum.QuantityRealPart
)
slice_field_viewer_efield.Data.Component = (
slice_field_viewer_efield.Data.Component.enum.Component0
)
slice_field_viewer_efield.Slice.Plane = (
slice_field_viewer_efield.Slice.Plane.enum.YZ
)
slice_field_viewer_efield.Update(0)
slice_field_viewer_efield.GotoMaxSlice()
document.AllAlgorithms.Add(slice_field_viewer_efield)
def setup_simulation(smash_path: Path) -> ApiSimulation:
s4l_v1.document.New()
create_model()
sim = create_simulation()
s4l_v1.document.AllSimulations.Add(sim)
sim.UpdateGrid()
sim.CreateVoxels(str(smash_path))
sim.WriteInputFile()
return sim
def run(smash_path: Path):
sim = setup_simulation(smash_path)
# run using specific version
# run_simulation(sim, isolve_version="2.0.79", host=HOST, api_key=KEY, api_secret=SECRET)
# run using latest version
run_simulation(sim, host=HOST, api_key=KEY, api_secret=SECRET)
analyze_simulation(sim)
def main():
if get_app_safe() is None:
run_application()
project_dir = Path()
filename = "em_fdtd_simulation.smash"
smash_path = project_dir / filename
run(smash_path)
if __name__ == "__main__":
main()
| ITISFoundation/osparc-simcore | tests/public-api/examples/s4l_tutorial.py | s4l_tutorial.py | py | 3,834 | python | en | code | 35 | github-code | 6 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"l... |
8665336110 | from selenium import webdriver
import time
from selenium.webdriver.common.action_chains import ActionChains
browser = webdriver.Chrome()
browser.get("http://www.baidu.com/")
browser.maximize_window()
time.sleep(3)
browser.find_element_by_id("kw").send_keys("哈哈")
# locate the Baidu Search button
name = browser.find_element_by_id("su")
# right-click
# ActionChains(browser).context_click(name).perform()
# double-click
ActionChains(browser).double_click(name).perform()
time.sleep(3)
| Zshuangshuang/Reload | 2021_03_12自动化学习/src2021_03_12/testDemo12.py | testDemo12.py | py | 472 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "selenium.webdriv... |
27061966352 | __all__ = [
"InvalidPaddingError",
"find_potential_ecb",
"pad_pkcs_7",
"strip_pkcs_7",
"detect_potential_repeating_ecb_blocks",
"ecb_encrypt",
"cbc_encrypt_prepadded",
"ecb_decrypt",
"cbc_encrypt",
"cbc_decrypt",
"ctr_transcrypt"
]
# noinspection PyPackageRequirements
# false alert, is in requirements as pycryptodome
from Crypto.Cipher import AES
from bitfiddle import brake_into_keysize_blocks
from primitive_crypt import xor_buffers
def detect_potential_repeating_ecb_blocks(ciphertext, blocksize=16):
seen = set()
for block in brake_into_keysize_blocks(ciphertext, blocksize):
if block in seen:
return True
else:
seen.add(block)
return False
def find_potential_ecb(cyphertexts):
for cyphertext in cyphertexts:
if detect_potential_repeating_ecb_blocks(cyphertext):
return cyphertext
return None
def pad_pkcs_7(blob, blocksize):
num_pad_bytes = blocksize - (len(blob) % blocksize)
padding = bytes([num_pad_bytes] * num_pad_bytes)
return blob + padding
class InvalidPaddingError(ValueError):
pass
def strip_pkcs_7(blob):
length = len(blob)
if length == 0:
raise InvalidPaddingError()
num_padding = blob[-1]
if num_padding == 0 or length < num_padding:
raise InvalidPaddingError()
for byte in blob[-num_padding:]:
if byte != num_padding:
raise InvalidPaddingError()
return blob[:-num_padding]
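# e.g. strip_pkcs_7(pad_pkcs_7(b'abc', 8)) == b'abc'; tampered padding such as
# b'abc\x05\x05' raises InvalidPaddingError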
def ecb_encrypt(key, plaintext):
cipher = AES.new(key, AES.MODE_ECB)
input_blob = pad_pkcs_7(plaintext, 16)
return cipher.encrypt(input_blob)
def ecb_decrypt(key, ciphertext):
cipher = AES.new(key, AES.MODE_ECB)
decrypted = cipher.decrypt(ciphertext)
return strip_pkcs_7(decrypted)
def cbc_encrypt_prepadded(key, iv, plaintext):
blocks = brake_into_keysize_blocks(plaintext, 16)
cipher = AES.new(key, AES.MODE_ECB)
def cryptoblocks():
last_block = iv
for block in blocks:
chained = xor_buffers(last_block, block)
last_block = cipher.encrypt(chained)
yield last_block
return b''.join([cb for cb in cryptoblocks()])
def cbc_encrypt(key, iv, plaintext):
return cbc_encrypt_prepadded(key, iv, pad_pkcs_7(plaintext, 16))
def cbc_decrypt(key, iv, ciphertext):
assert len(ciphertext) % 16 == 0
blocks = brake_into_keysize_blocks(ciphertext, 16)
cipher = AES.new(key, AES.MODE_ECB)
def plainblocks():
last_block = iv
for block in blocks:
decrypted_block = cipher.decrypt(block)
plain_block = xor_buffers(last_block, decrypted_block)
last_block = block
yield plain_block
return strip_pkcs_7(b''.join(pb for pb in plainblocks()))
def ctr_keystream(key, nonce, block_count):
    if nonce < 0 or nonce >= 2**64 or block_count < 0 or block_count >= 2**64:
raise ValueError()
plain_nonce = nonce.to_bytes(8, byteorder="little", signed=False)
plain_count = block_count.to_bytes(8, byteorder="little", signed=False)
plain = plain_nonce + plain_count
cipher = AES.new(key, AES.MODE_ECB)
return cipher.encrypt(plain)
def ctr_transcrypt(key, nonce, data):
instream = brake_into_keysize_blocks(data, 16)
num_blocks = len(instream)
if num_blocks == 0:
return b''
keystream = [ctr_keystream(key, nonce, i) for i in range(num_blocks)]
keystream[-1] = keystream[-1][:len(instream[-1])]
outstream = [xor_buffers(instream[i], keystream[i])
for i in range(num_blocks)]
return b''.join(outstream)
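# CTR mode is its own inverse:
# ctr_transcrypt(key, nonce, ctr_transcrypt(key, nonce, msg)) == msg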
| BrendanCoughlan/cryptopals | block_crypt.py | block_crypt.py | py | 3,652 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "bitfiddle.brake_into_keysize_blocks",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "Crypto.Cipher.AES.new",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "Crypto.Cipher.AES",
"line_number": 64,
"usage_type": "name"
},
{
"api_n... |
34839802828 | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 21 01:32:51 2021
@author: vidhy
"""
from fastapi import FastAPI, File, UploadFile, Request
import uvicorn
import numpy as np
from io import BytesIO
from PIL import Image
import requests
from starlette.middleware.cors import CORSMiddleware
from starlette.responses import RedirectResponse
app = FastAPI()
origins = [
"http://localhost",
"http://localhost:3000",
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# uvicorn main:app --reload
# install docker
# docker pull tensorflow/serving
# cd D:/deep-learning/
# docker run -t --rm -p 8501:8501 -v D:/deep-learning/potato-disease-classification:/potato-disease-classification tensorflow/serving --rest_api_port=8501 --model_config_file=/potato-disease-classification/models.config
endpoint = "http://localhost:8501/v1/models/potatoes_model:predict"
CLASS_NAMES = ['Early Blight', 'Late Blight', 'Healthy']
@app.get("/ping")
async def ping():
return "Hello, I'm alive"
@app.get("/", include_in_schema=False)
async def index():
return RedirectResponse(url="/docs")
def read_file_as_image(data) -> np.ndarray:
# bytes = await file.read()
image = np.array(Image.open(BytesIO(data)))
return image
@app.post("/predict")
async def predict(
file: UploadFile = File(...)
):
image = read_file_as_image(await file.read())
img_batch = np.expand_dims(image, 0)
json_data = {
"instances": img_batch.tolist()
}
response = requests.post(endpoint, json=json_data)
prediction = response.json()["predictions"][0]
predicted_class = CLASS_NAMES[np.argmax(prediction)]
confidence = np.max(prediction)
return {
'class': predicted_class,
'confidence': float(confidence)
}
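# Hypothetical smoke test once uvicorn and TF Serving are both running:
#   curl -X POST -F "file=@leaf.jpg" http://localhost:8000/predict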
if __name__ == "__main__":
uvicorn.run(app, host='localhost', port=8000)
| VidhyaGupta/Potato-Disease-Classification | api/main-tf-serving.py | main-tf-serving.py | py | 1,938 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "fastapi.FastAPI",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "starlette.middleware.cors.CORSMiddleware",
"line_number": 24,
"usage_type": "argument"
},
{
"api_name": "starlette.responses.RedirectResponse",
"line_number": 49,
"usage_type": "cal... |
37857057401 | #! /usr/bin/env python3
from uuid import uuid4 as uuid
import argparse
import pika
from protos import events
if __name__ == '__main__':
p = argparse.ArgumentParser()
p.add_argument("--exchange", required=True)
p.add_argument("--pretty-print", default=False, action='store_true')
args = p.parse_args()
conn = pika.BlockingConnection(
pika.ConnectionParameters('localhost')
)
channel = conn.channel()
channel.exchange_declare(exchange=args.exchange, exchange_type='fanout')
queue_name = f"manual_{uuid().hex}"
queue = channel.queue_declare(queue=queue_name, exclusive=True)
channel.queue_bind(exchange=args.exchange, queue=queue.method.queue)
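    # an exclusive, uniquely named queue bound to a fanout exchange gives this
    # reader its own private copy of every published event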
def print_message(ch, method, properties, body):
event: events.Event = events.Event().FromString(body)
print(event)
channel.basic_consume(queue=queue_name, auto_ack=True, on_message_callback=print_message)
channel.start_consuming()
| cjmcgraw/studious-carnival | rabbitmq/read-from-exchange.py | read-from-exchange.py | py | 959 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pika.BlockingConnection",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pika.ConnectionParameters",
"line_number": 16,
"usage_type": "call"
},
{
"api_nam... |
25231399833 | #! /usr/bin/env python
# encoding: utf-8
# vim: ai ts=4 sts=4 et sw=4
##
##
## @author Nadia
## nadia@gmail.com/joel@gmail.com
##
from coreapp.appmodel.models import CrecheParent, CrecheChild, PARENT_CHILD_RELATION
from coreapp.service.base_service import BaseService
from coreapp.service.child_service import ChildService, GENDER, CHILD_CLASSES
from coreapp.exception.critical_error import CriticalError
from django.db.models import Q, Max
from django.utils.datetime_safe import datetime
class ParentService(BaseService):
def __init__(self):
BaseService.__init__(self)
def list(self, params):
sortLimitParams = self.setSortLimitParameters(params)
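        # typical params (hypothetical): {'searchName': 'ann', 'start': 0,
        # 'limit': 25, 'sort': 'date_created', 'dir': '-'}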
filterObj = Q()
if params.get('searchName'):
filterObj = filterObj & Q(names__icontains=params.get('searchName'))
if params.get('searchParentId'):
            filterObj = filterObj & Q(id=params.get('searchParentId'))
if params.get('searchDateCreated'):
filterObj = filterObj & Q(
date_created__gte=datetime.strptime(params.get('searchDateCreated') + ' 00:00:59',
'%Y-%m-%d %H:%M:%S'))
filterObj = filterObj & Q(
date_created__lte=datetime.strptime(params.get('searchDateCreated') + ' 23:59:59',
'%Y-%m-%d %H:%M:%S'))
if params.get('searchTelephone'):
filterObj = filterObj & Q(telephone = params.get('searchTelephone'))
if params.get('searchIDNO'):
filterObj = filterObj & Q(identity_document = params.get('searchIDNO'))
if params.get('searchEmail'):
filterObj = filterObj & Q(email = params.get('searchEmail'))
result = CrecheParent.objects.filter(filterObj).order_by(sortLimitParams['dir'] + sortLimitParams['sort'])[
sortLimitParams['start']: sortLimitParams['limit']]
count = CrecheParent.objects.filter(filterObj).count()
records = []
for item in result:
record = {}
record['id'] = item.id
record['telephone'] = item.telephone.encode('utf-8')
record['id_number'] = item.identity_document.encode('utf-8')
record['date_created'] = item.date_created.isoformat()
record['children'] = [ {"names": ch.names, "regno": ch.regno, "id": ch.id} for ch in item.children.all()]
record['address'] = item.full_address.encode('utf-8')
record['email'] = item.email.encode('utf-8')
record['names'] = item.names
record['relationship'] = item.relationship.encode('utf-8')
records.append(record)
return {'totalCount': count, 'records': records}
def listExport(self, params):
"""Export the applicant data"""
records = self.list(params)
return self.decodeDataToExport(records, params.get('exportColumns'))
def save_parent(self, postValues):
"""
we assume we will not register a child without a parent, and a parent without a child
:param postValues:
:return:
"""
parent = None
params = postValues.copy()
if params.get('parent_names'):
try:
                parent = CrecheParent.objects.get(identity_document=params.get('id_number'))
                parent.names = params.get('parent_names')
                parent.telephone = params.get('telephone')
                parent.identity_document = params.get('id_number')
                parent.relationship = params.get('relationship')
                parent.full_address = params.get('full_address')
                parent.email = params.get('email')
                parent.last_updated = datetime.now()
except CrecheParent.DoesNotExist:
                parent = CrecheParent(names=params.get('parent_names'),
                                      telephone=params.get('telephone'),
                                      identity_document=params.get('id_number'),
                                      relationship=params.get('relationship'),
                                      full_address=params.get('full_address'),
                                      email=params.get('email'),
                                      date_created=datetime.now(),
                                      last_updated=datetime.now()
                                      )
try:
parent.save()
except Exception:
raise CriticalError({'message': "Unkwon Error while saving parent '" + params.get("parent_names") + "'. Try again or contact system admin "})
return parent
def save_parent_child(self, postValues):
"""
we assume we will not register a child without a parent, and a parent without a child
:param postValues:
:return:
"""
parent = None
child = None
params = postValues.copy()
if params.get('parent_names'):
try:
                parent = CrecheParent.objects.get(identity_document=params.get('id_number'))
                parent.names = params.get('parent_names')
                parent.telephone = params.get('telephone')
                parent.identity_document = params.get('id_number')
                parent.relationship = params.get('relationship')
                parent.full_address = params.get('full_address')
                parent.email = params.get('email')
                parent.last_updated = datetime.now()
except CrecheParent.DoesNotExist:
parent = CrecheParent( names = params.get('parent_names'),
telephone = params.get('telephone'),
identity_document = params.get('id_number'),
relationship=params.get('relationship'),
full_address=params.get('full_address'),
email=params.get('email'),
date_created=datetime.now(),
last_updated = datetime.now()
)
try:
child_service = ChildService()
child = child_service.save_child(postValues)
print("CHILD : ", child.__dict__)
if child:
parent.save()
parent.children.add(child)
#parent.save()
else:
raise CriticalError({'message': "The child '" + params.get(
'child_names') + "' of parent '" + params.get("parent_names") + "' was not saved. Try again "})
except Exception as e:
try:
child.delete()
parent.delete()
except Exception as ex:
print("ERROR ROLLING BACK", ex)
print("PARENT CHILD ERROR ", e)
raise CriticalError({'message': "Unkwon Error while saving child '" + params.get(
'child_names') + "' of parent '" + params.get("parent_names") + "'. Try again or contact system admin "})
return parent, child
def add_child(self, parentObj, child_id = None, regno = None):
if child_id:
parentObj.children.add(CrecheChild.objects.get(id= child_id))
if regno:
parentObj.children.add(CrecheChild.objects.get(regno=regno))
parentObj.save()
return parentObj
| projet2019/Creche_Parentale | creche/coreapp/service/parent_service.py | parent_service.py | py | 7,616 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "coreapp.service.base_service.BaseService",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "coreapp.service.base_service.BaseService.__init__",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "coreapp.service.base_service.BaseService",
"line_n... |
29210359176 | import streamlit as st
from PIL import Image
import numpy as np
def app():
display = Image.open('./Attendance-management.jpg')
display = np.array(display)
st.image(display)
st.markdown(""" <style> .font {
font-size:20px ; font-family: 'Cooper Black'; text-align: center; color: #000000;}
</style> """, unsafe_allow_html=True)
st.markdown('<h1 class="font">Made With ❤️ By Debasish</h1>', unsafe_allow_html=True)
| dest-royer02/Attendance_Application | pages/welcomePage.py | welcomePage.py | py | 468 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "PIL.Image.open",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "streamlit.image",
"line_number"... |
2026763119 | import hashlib
import os
import shutil
import zipfile
import numpy as np
def extract_aab(aab_file, extract_dir):
"""
    Extract an .aab archive into the given directory.
    :param aab_file: path to the .aab file
    :param extract_dir: directory to extract into
"""
with zipfile.ZipFile(aab_file, 'r') as z:
print(extract_dir)
z.extractall(extract_dir)
def get_aab_feature(aab_dir):
"""
    Extract a feature from an unpacked .aab directory.
    :param aab_dir: directory the .aab was extracted into
    :return: feature matrix (first 16 bytes of every file)
"""
feature = []
print(aab_dir)
for root, dirs, files in os.walk(aab_dir):
for file in files:
file_path = os.path.join(root, file)
with open(file_path, 'rb') as f:
                # use the first 16 bytes of the file as its feature
feature.append(f.read(16))
if feature:
feature = np.vstack(feature)
print(feature)
else:
feature = np.zeros((0,), dtype=np.uint8)
return feature
def compare_aab_features(feature1, feature2):
"""
    Compare the features of two .aab files.
    :param feature1: feature matrix of the first .aab
    :param feature2: feature matrix of the second .aab
    :return: similarity score between 0 and 1
"""
    # compute a hash of each feature matrix
hash1 = hash(feature1.tobytes())
hash2 = hash(feature2.tobytes())
print("hash1: ", hash1)
print("hash2: ", hash2)
    # Hamming distance between the two hashes, converted to a similarity score
hamming_distance = bin(hash1 ^ hash2).count('1')
print("hamming distance: ", hamming_distance)
similarity = 1 - hamming_distance / max(feature1.size * 8, feature2.size * 8)
return similarity
def compare_aab_files(aab_file_path1, aab_file_path2):
"""
    Compare the similarity of two .aab files.
    :param aab_file_path1: path to the first .aab
    :param aab_file_path2: path to the second .aab
    :return: similarity score between 0 and 1
"""
try:
        # extract the first .aab into a temporary directory
aab_dir1 = 'tmp1'
extract_aab(aab_file_path1, aab_dir1)
feature1 = get_aab_feature(aab_dir1)
shutil.rmtree(aab_dir1)
        # extract the second .aab into a temporary directory
aab_dir2 = 'tmp2'
extract_aab(aab_file_path2, aab_dir2)
feature2 = get_aab_feature(aab_dir2)
shutil.rmtree(aab_dir2)
        # compare the features of the two .aab files
similarity = compare_aab_features(feature1, feature2)
return similarity
except (IOError, zipfile.BadZipFile, KeyError) as e:
        # handle file I/O errors, malformed archives, and similar failures
print(f"Error: {str(e)}")
if __name__ == '__main__':
print(compare_aab_files("a.aab","b.aab"))
| Nienter/mypy | personal/aabcom.py | aabcom.py | py | 2,731 | python | zh | code | 0 | github-code | 6 | [
{
"api_name": "zipfile.ZipFile",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 29,... |
from django.http import HttpResponse
import requests
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
import uuid
def login(request):
    # generate a fresh order id per request (a module-level uuid would be
    # reused for every order until the process restarts)
    reference_id = uuid.uuid4()
    url = "https://test.cashfree.com/api/v1/order/create"
payload = {
"appid": "TEST3931154d6e90b54bfbc3b4946d511393",
"secretKey": "TEST701a10a8d7389d719903c77dda9fa993fbc0db63",
"orderId": reference_id,
"orderAmount": "1",
"orderCurrency": "INR",
"oderNote": "pay",
"customerName": "mohan",
"customerEmail": "abcd@gmail.com",
"customerPhone": "8494863493",
# "returnUrl": "https://cashfree.com",
}
    headers = {
        # requests form-encodes dict payloads passed via `data=`
        'Content-Type': 'application/x-www-form-urlencoded'
    }
response = requests.request("POST", url, data=payload,headers=headers)
print(response.text)
return render(request,'home.html',{"response":response.text})
def payment_info(request):
    print(request.POST)  # request.data is a DRF attribute; plain Django exposes request.POST/body
if request.method == 'POST':
# Fetch the payment response details from the request
order_id = request.POST.get('order_id')
payment_status = request.POST.get('payment_status')
print(order_id)
print(payment_status)
    return HttpResponse(status=200)  # a Django view must return an HttpResponse, not None
| Gunarjith/verceldeploy | masterlink/views.py | views.py | py | 1,350 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "uuid.uuid4",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "requests.request",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 35,
"usage_type": "call"
}
] |
36895861215 | from __future__ import print_function
from builtins import chr
from builtins import zip
from builtins import map
from builtins import str
from builtins import filter
from builtins import range
from builtins import object
import getopt
import gzip
import locale
import operator
import os
import re
import shutil
import stat
import string
import sys
import tempfile
import time
import traceback
import zlib
from functools import reduce
from glob import glob
import cfvtest
if hasattr(locale, 'getpreferredencoding'):
preferredencoding = locale.getpreferredencoding() or 'ascii'
else:
preferredencoding = 'ascii'
def is_encodable(s, enc=preferredencoding):
try:
s.encode(enc)
return True
except UnicodeError:
return False
fmt_info = {
# name:
# (hascrc, hassize, cancreate, available, istext, preferredencoding, iscoreutils)
'sha512':
(1, 0, 1, 1, 1, preferredencoding, 1),
'sha384':
(1, 0, 1, 1, 1, preferredencoding, 1),
'sha256':
(1, 0, 1, 1, 1, preferredencoding, 1),
'sha224':
(1, 0, 1, 1, 1, preferredencoding, 1),
'sha1':
(1, 0, 1, 1, 1, preferredencoding, 1),
'md5':
(1, 0, 1, 1, 1, preferredencoding, 1),
'bsdmd5':
(1, 0, 1, 1, 1, preferredencoding, 0),
'sfv':
(1, 0, 1, 1, 1, preferredencoding, 0),
'sfvmd5':
(1, 0, 1, 1, 1, preferredencoding, 0),
'csv':
(1, 1, 1, 1, 1, preferredencoding, 0),
'csv2':
(0, 1, 1, 1, 1, preferredencoding, 0),
'csv4':
(1, 1, 1, 1, 1, preferredencoding, 0),
'crc':
(1, 1, 1, 1, 1, preferredencoding, 0),
'par':
(1, 1, 0, 1, 0, 'utf-16-le', 0),
'par2':
(1, 1, 0, 1, 0, preferredencoding, 0),
'torrent':
(1, 1, 1, 1, 0, 'utf-8', 0),
}
def fmt_hascrc(f):
return fmt_info[f][0]
def fmt_hassize(f):
return fmt_info[f][1]
def fmt_cancreate(f):
return fmt_info[f][2]
def fmt_available(f):
return fmt_info[f][3]
def fmt_istext(f):
return fmt_info[f][4]
def fmt_preferredencoding(f):
return fmt_info[f][5]
def fmt_iscoreutils(f):
return fmt_info[f][6]
def allfmts():
return list(fmt_info.keys())
def allavailablefmts():
return list(filter(fmt_available, allfmts()))
def allcreatablefmts():
return list(filter(fmt_cancreate, allavailablefmts()))
def coreutilsfmts():
return list(filter(fmt_iscoreutils, allfmts()))
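# Quick illustration of the accessors above (values read straight from
# fmt_info; not part of the original test suite):
#
#   fmt_hassize('csv')               # -> 1 (csv records file sizes)
#   fmt_hassize('sfv')               # -> 0 (sfv is crc-only)
#   fmt_istext('torrent')            # -> 0 (binary format)
#   fmt_preferredencoding('torrent') # -> 'utf-8'
#   coreutilsfmts()                  # -> ['sha512', ..., 'sha1', 'md5']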
class rcurry(object):
def __init__(self, func, *args, **kw):
self.curry_func = func
self.curry_args = args
self.curry_kw = kw
def __call__(self, *_args, **_kwargs):
kw = self.curry_kw.copy()
kw.update(_kwargs)
return self.curry_func(*(_args + self.curry_args), **kw)
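# rcurry appends its stored args *after* the call-time args, so
#
#   check = rcurry(cfv_test, operator.eq, 0)
#   check(s, o)   # same as cfv_test(s, o, operator.eq, 0)
#
# which is how the tests below build specialized result checkers.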
def pathfind(p, path=os.environ.get('PATH', os.defpath).split(os.pathsep)):
for d in path:
if os.path.exists(os.path.join(d, p)):
return 1
def pathjoin_and_mkdir(*components):
"""Join components of a filename together and create directory to contain the file, if needed.
"""
result = os.path.join(*components)
path = os.path.split(result)[0]
if not os.path.exists(path):
os.makedirs(path)
return result
def readfile(fn, textmode=False):
if textmode:
mode = 't'
else:
mode = 'b'
with open(fn, 'r' + mode) as f:
d = f.read()
return d
def writefile(fn, data):
with open(fn, 'wb') as f:
if data:
f.write(data)
def writefile_and_reopen(fn, data):
"""Write data to file, close, and then reopen readonly, and return the fd.
This is for the benefit of windows, where you need to close and reopen the
file as readonly in order for it to be openable simultaneously.
"""
writefile(fn, data)
f = open(fn, 'rb')
return f
class stats(object):
ok = 0
failed = 0
def logr(text):
logfile.write(text)
def log(text):
logr(text + '\n')
def test_log_start(cmd, kw):
log('*** testing ' + cmd + (kw and ' ' + str(kw) or ''))
def test_log_finish(cmd, s, r, output, kw):
if r:
stats.failed += 1
print('\n>>> failed test:', cmd, (kw and ' ' + str(kw) or ''))
if output is not None:
print(output)
result = 'FAILED'
if not isinstance(r, int) or r != 1:
result += ' (%s)' % r
else:
stats.ok += 1
sys.stdout.write('.')
sys.stdout.flush()
result = 'OK'
result_str = '%s (%s)' % (result, s)
log(result_str)
if r:
print(result_str)
traceback_str = '\n'.join(traceback.format_stack())
log(traceback_str)
print(traceback_str)
if run_exit_early:
sys.exit(1)
log('')
def test_log_results(cmd, s, o, r, kw):
"""
cmd=command being tested (info only)
s=return status
o=output
r=result (false=ok, anything else=fail (anything other than 1 will be printed))
"""
test_log_start(cmd, kw)
log(o)
test_log_finish(cmd, s, r, o, kw)
def test_external(cmd, test):
    # TODO: replace getstatusoutput with subprocess.run for better error handling
    from subprocess import getstatusoutput
s, o = getstatusoutput(cmd)
r = test(s, o)
test_log_results(cmd, s, o, r, None)
def test_generic(cmd, test, **kw):
# s, o = cfvtest.runcfv(cmd)
s, o = cfvtest.runcfv(*(cmd,), **kw)
r = test(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd, s, o, r, kw)
class cst_err(Exception):
pass
def cfv_stdin_test(cmd, file):
s1 = s2 = None
o1 = o2 = ''
r = 0
try:
s1, o1 = cfvtest.runcfv(cmd + ' ' + file)
if s1:
raise cst_err(2)
s2, o2 = cfvtest.runcfv(cmd + ' -', stdin=file)
if s2:
raise cst_err(3)
x = re.search(r'^([^\r\n]*)' + re.escape(file) + r'(.*)$[\r\n]{0,2}^-: (\d+) files, (\d+) OK. [\d.]+ seconds, [\d.]+K(/s)?$', o1, re.M | re.DOTALL)
if not x:
raise cst_err(4)
x2 = re.search(r'^' + re.escape(x.group(1)) + r'[\t ]*' + re.escape(x.group(2)) + r'$[\r\n]{0,2}^-: (\d+) files, (\d+) OK. [\d.]+ seconds, [\d.]+K(/s)?$', o2, re.M)
if not x2:
raise cst_err(5)
except cst_err as er:
r = er
test_log_results('stdin/out of ' + cmd + ' with file ' + file, (s1, s2), o1 + '\n' + o2, r, None)
def cfv_stdin_progress_test(t, file):
s1 = s2 = None
o1 = o2 = c1 = c2 = ''
r = 0
dir = tempfile.mkdtemp()
try:
try:
cf1 = os.path.join(dir, 'cf1.' + t)
cf2 = os.path.join(dir, 'cf2.' + t)
s1, o1 = cfvtest.runcfv('%s --progress=yes -C -t %s -f %s %s' % (cfvcmd, t, cf1, file))
if s1:
raise cst_err(2)
s2, o2 = cfvtest.runcfv('%s --progress=yes -C -t %s -f %s -' % (cfvcmd, t, cf2), stdin=file)
if s2:
raise cst_err(3)
if t != 'csv2': # csv2 has only filesize, hence checksum never happens, so no progress
x = re.match(re.escape(file) + r' : (\.{20}[-\b.#\\|/]*)[ \r\n]+' + '\x1b\\[K' + re.escape(cf1) + r': (\d+) files, (\d+) OK. [\d.]+ seconds, [\d.]+K(/s)?$', o1, re.M | re.DOTALL)
if not x:
raise cst_err(4)
x2 = re.match(r' : (\.[-\b.#/|\\]*)[\t ]*[ \r\n]+' + '\x1b\\[K' + re.escape(cf2) + r': (\d+) files, (\d+) OK. [\d.]+ seconds, [\d.]+K(/s)?$', o2, re.M)
if not x2:
raise cst_err(5)
if t == 'crc':
c1 = readfile(cf1, textmode=True).replace(file, ' ' * len(file))
else:
c1 = readfile(cf1, textmode=True).replace(file, '')
c2 = readfile(cf2, textmode=True)
c1 = remove_varying_comments(t, c1)
c2 = remove_varying_comments(t, c2)
if c1 != c2:
raise cst_err(6)
except cst_err as er:
r = er
test_log_results('progress=yes stdin/out of ' + t + ' with file ' + file, (s1, s2), o1 + '\n' + o2 + '\n--\n' + c1 + '\n' + c2, r, None)
finally:
shutil.rmtree(dir)
def rx_test(pat, str):
if re.search(pat, str):
return 0
return 1
def status_test(s, o, expected=0):
if s == expected:
return 0
return 1
rx_Begin = r'^(?:.* )?(\d+) files, (\d+) OK'
rx_unv = r', (\d+) unverified'
rx_notfound = r', (\d+) not found'
rx_ferror = r', (\d+) file errors'
rx_bad = r', (\d+) bad(crc|size)'
rx_badcrc = r', (\d+) badcrc'
rx_badsize = r', (\d+) badsize'
rx_cferror = r', (\d+) chksum file errors'
rx_misnamed = r', (\d+) misnamed'
rx_End = r'(, \d+ differing cases)?(, \d+ quoted filenames)?. [\d.]+ seconds, [\d.]+K(/s)?$'
rxo_TestingFrom = re.compile(r'^testing from .* \((.+?)\b.*\)[\n\r]*$', re.M)
def optionalize(s):
return '(?:%s)?' % s
rx_StatusLine = rx_Begin + ''.join(map(optionalize, [rx_badcrc, rx_badsize, rx_notfound, rx_ferror, rx_unv, rx_cferror, rx_misnamed])) + rx_End
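# A status line these patterns target looks like (illustrative values):
#
#   -: 4 files, 3 OK, 1 badcrc. 0.013 seconds, 311.2K/s
#
# rx_StatusLine captures, in order: files, ok, badcrc, badsize, notfound,
# ferror, unv, cferror, misnamed; optional groups are None when absent.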
class OneOf(object):
def __init__(self, *possibilities):
self.possible = possibilities
def __eq__(self, a):
return a in self.possible
def __repr__(self):
return 'OneOf' + repr(self.possible)
def intize(s):
return s and int(s) or 0
def icomp(foo):
exp, act = foo
if exp == -1:
return False
return exp != act
def tail(s):
# the last line might not be what we want, since stdout and stderr can get mixed up in some cases.
# return string.split(s,'\n')[-1]
lines = s.splitlines()
lines.reverse()
for line in lines:
if re.search(rx_StatusLine, line):
return line
return ''
re_sfv_comment = re.compile('^; Generated by .* on .*$', re.M | re.I)
re_crc_comment = re.compile('^Generated at: .*$', re.M | re.I)
def remove_varying_comments(t, text):
if t in ('sfv', 'sfvmd5'):
text = re_sfv_comment.sub('', text, 1)
elif t == 'crc':
text = re_crc_comment.sub('', text, 1)
return text
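# e.g. an sfv header line such as '; Generated by cfv v3.0 on 2024-01-01'
# varies from run to run, so it is stripped before two checksum files are
# compared for byte equality in the tests below.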
def cfv_test(s, o, op=operator.gt, opval=0):
x = re.search(rx_Begin + rx_End, tail(o))
if s == 0 and x and x.group(1) == x.group(2) and op(int(x.group(1)), opval):
return 0
return 1
def cfv_substatus_test(s, o, unv=0, notfound=0, badcrc=0, badsize=0, cferror=0, ferror=0):
expected_status = (badcrc and 2) | (badsize and 4) | (notfound and 8) | (ferror and 16) | (unv and 32) | (cferror and 64)
if s & expected_status == expected_status and not s & 1:
return 0
return 'bad status expected %s got %s' % (expected_status, s)
def cfv_status_test(s, o, unv=0, notfound=0, badcrc=0, badsize=0, cferror=0, ferror=0):
expected_status = (badcrc and 2) | (badsize and 4) | (notfound and 8) | (ferror and 16) | (unv and 32) | (cferror and 64)
if s == expected_status:
return 0
return 'bad status expected %s got %s' % (expected_status, s)
def cfv_all_test(s, o, files=-2, ok=0, unv=0, notfound=0, badcrc=0, badsize=0, cferror=0, ferror=0, misnamed=0):
x = re.search(rx_StatusLine, tail(o))
if x:
if files == -2:
files = reduce(operator.add, [ok, badcrc, badsize, notfound, ferror])
expected = [files, ok, badcrc, badsize, notfound, ferror, unv, cferror, misnamed]
actual = list(map(intize, x.groups()[:9]))
if not list(filter(icomp, zip(expected, actual))):
sresult = cfv_status_test(s, o, unv=unv, notfound=notfound, badcrc=badcrc, badsize=badsize, cferror=cferror, ferror=ferror)
if sresult:
return sresult
return 0
return 'expected %s got %s' % (expected, actual)
return 'status line not found in output'
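# Typical use curries the expected counts and lets test_generic supply the
# (status, output) pair, e.g.:
#
#   test_generic(cmd + ' -v -T -f test.sfv', rcurry(cfv_all_test, ok=4))
#
# which verifies both the parsed status line and the process exit bits.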
def cfv_unv_test(s, o, unv=1):
x = re.search(rx_Begin + rx_unv + rx_End, tail(o))
if s != 0 and x and x.group(1) == x.group(2) and int(x.group(1)) > 0:
if unv and int(x.group(3)) != unv:
return 1
return 0
return 1
def cfv_unvonly_test(s, o, unv=1):
x = re.search(rx_Begin + rx_unv + rx_End, tail(o))
if s != 0 and x and int(x.group(3)) == unv:
return 0
return 1
def cfv_notfound_test(s, o, unv=1):
x = re.search(rx_Begin + rx_notfound + rx_End, tail(o))
if s != 0 and x and int(x.group(2)) == 0 and int(x.group(1)) > 0:
if int(x.group(3)) != unv:
return 1
return 0
return 1
def cfv_cferror_test(s, o, bad=1):
x = re.search(rx_Begin + rx_cferror + rx_End, tail(o))
if s != 0 and x and int(x.group(3)) > 0:
if bad > 0 and int(x.group(3)) != bad:
return 1
return 0
return 1
def cfv_bad_test(s, o, bad=-1):
x = re.search(rx_Begin + rx_bad + rx_End, tail(o))
if s != 0 and x and int(x.group(1)) > 0 and int(x.group(3)) > 0:
if bad > 0 and int(x.group(3)) != bad:
return 1
return 0
return 1
def cfv_typerestrict_test(s, o, t):
matches = rxo_TestingFrom.findall(o)
if not matches:
return 1
for match in matches:
if match != t:
return 1
return 0
def cfv_listdata_test(s, o):
if s == 0 and re.search('^data1\0data2\0data3\0data4\0$', o, re.I):
return 0
return 1
def joincurpath(f):
return os.path.join(os.getcwd(), f)
def cfv_listdata_abs_test(s, o):
if s == 0 and re.search('^' + re.escape('\0'.join(map(joincurpath, ['data1', 'data2', 'data3', 'data4']))) + '\0$', o, re.I):
return 0
return 1
def cfv_listdata_unv_test(s, o):
if s == 32 and re.search('^testfix.csv\0unchecked.dat\0$', o, re.I):
return 0
return 1
def cfv_listdata_bad_test(s, o):
if s & 6 and not s & ~6 and re.search('^(d2.)?test4.foo\0test.ext.end\0test2.foo\0test3\0$', o, re.I):
return 0
return 1
def cfv_version_test(s, o):
x = re.search(r'cfv v([\d.]+(?:\.dev\d+)?) -', o)
with open(os.path.join(cfvtest.testpath, os.pardir, 'Changelog'), 'rt') as f:
x3 = re.search(r' v([\d.]+(?:\.dev\d+)?):', f.readline())
if x:
log('cfv: ' + x.group(1))
if x3:
log('Changelog: ' + x3.group(1))
# if os.path.isdir(os.path.join(os.pardir, 'debian')):
# with open(os.path.join(os.pardir, 'debian', 'changelog'), 'rt') as f:
# x4 = re.search(r'cfv \(([\d.]+)-\d+\) ', f.readline())
# if x4:
# log('deb changelog: ' + x4.group(1))
# if not x or not x4 or x4.group(1) != x.group(1):
# return 1
if x and x3 and x.group(1) == x3.group(1):
return 0
return 1
def cfv_cftypehelp_test(s, o, expected):
if s != expected:
return 1
for tname in allfmts() + ['auto']:
if o.count(tname) < 1:
return 'type %s not found in output' % tname
return 0
def cfv_nooutput_test(s, o, expected=0):
if s != expected:
return 1
if o:
return 'output: %s' % (repr(o),)
return 0
def T_test(f, extra=None):
cmd = cfvcmd
if extra:
cmd += ' ' + extra
test_generic(cmd + ' -T -f test' + f, cfv_test)
test_generic(cmd + ' -i -T -f test' + f, cfv_test) # all tests should work with -i
test_generic(cmd + ' -m -T -f test' + f, cfv_test) # all tests should work with -m
test_generic(cmd + ' -T --list0=ok -f test' + f, cfv_listdata_test, stderr='/dev/null')
test_generic(cmd + ' -T --showpaths=n-r --list0=ok -f test' + f, cfv_listdata_test, stderr='/dev/null')
test_generic(cmd + ' -T --showpaths=n-a --list0=ok -f test' + f, cfv_listdata_test, stderr='/dev/null')
test_generic(cmd + ' -T --showpaths=a-a --list0=ok -f test' + f, cfv_listdata_test, stderr='/dev/null')
test_generic(cmd + ' -T --showpaths=2-a --list0=ok -f test' + f, cfv_listdata_test, stderr='/dev/null')
test_generic(cmd + ' -T --showpaths=y-r --list0=ok -f test' + f, cfv_listdata_test, stderr='/dev/null')
test_generic(cmd + ' -T --showpaths=y-a --list0=ok -f test' + f, cfv_listdata_abs_test, stderr='/dev/null')
test_generic(cmd + ' -T --showpaths=1-a --list0=ok -f test' + f, cfv_listdata_abs_test, stderr='/dev/null')
# ensure all verbose stuff goes to stderr:
test_generic(cmd + ' -v -T --list0=ok -f test' + f, cfv_listdata_test, stderr='/dev/null')
test_generic(cmd + ' -v -T --list0=unverified -f test' + f + ' unchecked.dat testfix.csv data1', cfv_listdata_unv_test, stderr='/dev/null')
# test progress stuff.
def progress_test(s, o):
if cfv_test(s, o):
return 1
if o.find('.' * 10) < 0:
return 2
return 0
def noprogress_test(s, o):
if cfv_test(s, o):
return 1
if o.find('.' * 10) >= 0:
return 2
return 0
if f.endswith('.csv2'): # csv2 has only filesize, hence checksum never happens, so no progress
test_generic(cmd + ' -T --progress=yes -f test' + f, noprogress_test)
else:
# test handling of COLUMNS env var #TODO: should actually check that the value is being respected...
os.environ['COLUMNS'] = '40'
try:
test_generic(cmd + ' -T --progress=yes -f test' + f, progress_test)
os.environ['COLUMNS'] = 'foobar'
test_generic(cmd + ' -T --progress=yes -f test' + f, progress_test)
finally:
del os.environ['COLUMNS']
test_generic(cmd + ' -T --progress=yes -f test' + f, progress_test)
test_generic(cmd + ' -T --progress=auto -f test' + f, noprogress_test)
test_generic(cmd + ' -T --progress=no -f test' + f, noprogress_test)
def gzC_test(f, extra=None, verify=None, t=None, d=None):
cmd = cfvcmd
if not t:
t = f
tmpd = tempfile.mkdtemp()
try:
f2 = os.path.join(tmpd, 'test.C.' + f + '.tmp.gz')
f = os.path.join(tmpd, 'test.C.' + f + '.gz')
if extra:
cmd += ' ' + extra
test_generic('%s -q -C -t %s -zz -f - %s' % (cmd, t, d), status_test, stdout=f2)
test_generic('%s -C -f %s %s' % (cmd, f, d), cfv_test)
try:
with gzip.open(f, 'rt') as ifd1:
if1 = ifd1.read()
except (IOError, zlib.error) as e:
if1 = '%s: %s' % (f, e)
try:
with gzip.open(f2, 'rt') as ifd2:
if2 = ifd2.read()
except (IOError, zlib.error) as e:
if2 = '%s: %s' % (f2, e)
if1 = remove_varying_comments(t, if1)
if2 = remove_varying_comments(t, if2)
r = if1 != if2
if r:
o = 'FILE1 %s:\n%s\nFILE2 %s:\n%s\n' % (f, if1, f2, if2)
else:
o = ''
test_log_results('zcompare %s %s' % (f, f2), r, o, r, None)
test_generic('%s -T -f %s' % (cmd, f), cfv_test)
test_generic('%s -zz -T -f -' % cmd, cfv_test, stdin=f)
if verify:
verify(f)
finally:
shutil.rmtree(tmpd)
def C_test(f, extra=None, verify=None, t=None, d='data?'):
gzC_test(f, extra=extra, t=t, d=d)
cmd = cfvcmd
if not t:
t = f
cfv_stdin_test(cmd + ' -t' + f + ' -C -f-', 'data4')
cfv_stdin_progress_test(f, 'data4')
tmpd = tempfile.mkdtemp()
try:
f = os.path.join(tmpd, 'test.C.' + f)
fgz = os.path.join(tmpd, f + '.gz')
if extra:
cmd += ' ' + extra
test_generic('%s -C -f %s %s' % (cmd, f, d), cfv_test)
test_generic('%s -T -f %s' % (cmd, f), cfv_test)
test_generic('%s -T -f -' % cmd, cfv_test, stdin=f)
with gzip.open(fgz, mode='wb') as of:
with open(f, 'rb') as in_file:
of.write(in_file.read())
test_generic('%s -zz -t%s -T -f -' % (cmd, t), cfv_test, stdin=fgz)
if verify:
verify(f)
finally:
shutil.rmtree(tmpd)
tmpd = tempfile.mkdtemp()
try:
test_generic('%s -p %s -C -f %s' % (cmd, tmpd, f), rcurry(cfv_test, operator.eq, 0))
finally:
os.rmdir(tmpd)
def C_test_encoding(enc):
d = tempfile.mkdtemp()
try:
with open(os.path.join(d, 'aoeu'), 'wt') as f2:
f2.write('a')
with open(os.path.join(d, 'kakexe'), 'wt') as f2:
f2.write('ba')
with open(os.path.join(d, 'foo bar.baz'), 'wt') as f2:
f2.write('baz')
test_generic(cfvcmd + ' --encoding=%s -v -C -p %s -t %s' % (enc, d, t), rcurry(cfv_all_test, ok=3))
test_generic(cfvcmd + ' --encoding=%s -v -T -p %s' % (enc, d,), rcurry(cfv_all_test, ok=3))
finally:
shutil.rmtree(d)
C_test_encoding('cp500')
C_test_encoding('utf-16be')
C_test_encoding('utf-16')
def create_funkynames(t, d, chr, deep):
num = 0
for i in range(1, 256):
n = chr(i)
if n in (os.sep, os.altsep):
continue
if fmt_istext(t) and len(('a' + n + 'a').splitlines()) > 1: # if n is a line separator (note that in unicode, this is more than just \r and \n)
continue
if t == 'torrent' and n in ('/', '\\'):
continue # 'ValueError: path \ disallowed for security reasons'
# if t == 'torrent' and n in ('~',): n = 'foo'+n; #same
# if n == os.curdir: n = 'foo'+n # can't create a file of name '.', but 'foo.' is ok.
# if t in ('sfv','sfvmd5') and n==';': n = 'foo'+n; # ';' is comment character in sfv files, filename cannot start with it.
if t == 'crc' and n.isspace():
n += 'foo' # crc format can't handle trailing whitespace in filenames
n = '%02x' % i + n
try:
if deep:
os.mkdir(os.path.join(d, n))
try:
f = open(os.path.join(d, n, n), 'wb')
except Exception:
# if making the dir succeeded but making the file fails, remove the dir so it won't confuse the tests which count the number of items in the top dir.
os.rmdir(os.path.join(d, n))
raise
else:
f = open(os.path.join(d, n), 'wb')
# important that all the funky files be two bytes long,
# since that is the torrent piece size needed in order for
# the undecodable filenames without raw test to work.
# (If the piece size doesn't match the file size, then some
# files that it can find will still be marked bad since it
# can't find the rest of the piece.)
f.write(b'%02x' % i)
f.close()
except (EnvironmentError, UnicodeError):
pass # stupid filesystem doesn't allow the character we wanted, oh well.
else:
num += 1
return num
def C_funkynames_test(t):
def fschr(i):
return os.fsdecode(b'%c' % i)
def is_fmtencodable(s, enc=fmt_preferredencoding(t)):
return is_encodable(s, enc)
def is_fmtokfn(s):
if fmt_istext(t):
return len(('a' + s + 'a').splitlines()) == 1
return True
for deep in (0, 1):
d = tempfile.mkdtemp()
try:
num = create_funkynames(t, d, chr, deep=deep)
# numencodable = len(filter(lambda fn: os.path.exists(os.path.join(d,fn)), os.listdir(d)))
numencodable = len(list(filter(is_fmtencodable, os.listdir(d))))
# cfv -C, unencodable filenames on disk, ferror on unencodable filename and ignore it
numunencodable = num - numencodable
cfn = os.path.join(d, 'funky%s.%s' % (deep and 'deep' or '', t))
test_generic(cfvcmd + '%s -v -C -p %s -t %s -f %s' % (deep and ' -rr' or '', d, t, cfn), rcurry(cfv_all_test, files=num, ok=numencodable, ferror=numunencodable))
test_generic(cfvcmd + ' -v -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, files=numencodable, ok=numencodable))
test_generic(cfvcmd + ' -v -u -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, files=numencodable, ok=numencodable, unv=numunencodable))
os.unlink(cfn)
# cfv -C, unencodable filenames on disk, with --encoding=<something else> (eg, utf8), should work.
cfn = os.path.join(d, 'funky%s.%s' % (deep and 'deep' or '', t))
test_generic(cfvcmd + '%s --encoding=utf-8 -v -C -p %s -t %s -f %s' % (deep and ' -rr' or '', d, t, cfn), rcurry(cfv_all_test, files=num, ok=num))
test_generic(cfvcmd + ' -v --encoding=utf-8 -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, files=num, ok=num))
test_generic(cfvcmd + ' -v --encoding=utf-8 -u -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, files=num, ok=num, unv=0))
finally:
shutil.rmtree(d)
d3 = tempfile.mkdtemp()
try:
cnum = create_funkynames(t, d3, fschr, deep=deep)
ulist = os.listdir(d3)
numundecodable = 0 # listdir always returns filenames of type str if we use a path of type str (and this is what we do)
okcnum = len(ulist) - numundecodable
dcfn = os.path.join(d3, 'funky3%s.%s' % (deep and 'deep' or '', t))
# cfv -C, undecodable filenames on disk, with --encoding=raw just put everything in like before
test_generic(cfvcmd + '%s --encoding=raw -v --piece_size_pow2=1 -C -p %s -t %s -f %s' % (deep and ' -rr' or '', d3, t, dcfn), rcurry(cfv_all_test, files=cnum, ok=cnum))
# cfv -T, undecodable filenames on disk and in CF (same names), with --encoding=raw, read CF as raw strings and be happy
test_generic(cfvcmd + ' --encoding=raw -v -T -p %s -f %s' % (d3, dcfn), rcurry(cfv_all_test, files=cnum, ok=cnum))
test_generic(cfvcmd + ' --encoding=raw -v -u -T -p %s -f %s' % (d3, dcfn), rcurry(cfv_all_test, files=cnum, ok=cnum, unv=0))
# cfv -T, undecodable filenames on disk and in CF (same names), without raw, cferrors
test_generic(cfvcmd + ' -v -T -p %s -f %s' % (d3, dcfn), rcurry(cfv_substatus_test, cferror=1)) # rcurry(cfv_all_test,ok=okcnum,cferror=numundecodable))
test_generic(cfvcmd + ' -v -u -T -p %s -f %s' % (d3, dcfn), rcurry(cfv_substatus_test, cferror=1, unv=1)) # rcurry(cfv_all_test,ok=okcnum,cferror=numundecodable,unv=numundecodable))
test_generic(cfvcmd + ' -v -m -T -p %s -f %s' % (d3, dcfn), rcurry(cfv_substatus_test, cferror=1)) # rcurry(cfv_all_test,ok=okcnum,cferror=numundecodable))
test_generic(cfvcmd + ' -v -m -u -T -p %s -f %s' % (d3, dcfn), rcurry(cfv_substatus_test, cferror=1, unv=1)) # rcurry(cfv_all_test,ok=okcnum,cferror=numundecodable,unv=numundecodable))
# TODO: needs "deep" -s
if not deep:
renamelist = []
numrenamed = 0
for fn in os.listdir(d3):
if os.path.join(d3, fn) == dcfn:
continue
newfn = 'ren%3s' % numrenamed
renamelist.append((fn, newfn))
os.rename(os.path.join(d3, fn), os.path.join(d3, newfn))
if deep:
os.rename(os.path.join(d3, newfn, fn), os.path.join(d3, newfn, newfn))
numrenamed += 1
# cfv -T, correct filenames on disk, undecodable filenames in CF: check with -s, with --encoding=raw, read CF as raw strings and be happy
if t != 'torrent':
test_generic(cfvcmd + ' --encoding=raw -v -s -T -p %s -f %s' % (d3, dcfn), rcurry(cfv_all_test, ok=cnum, misnamed=numrenamed))
if fmt_hassize(t):
test_generic(cfvcmd + ' --encoding=raw -v -m -s -T -p %s -f %s' % (d3, dcfn), rcurry(cfv_all_test, ok=cnum, misnamed=numrenamed))
cnum += 1
# okcnum += 1
ulist = os.listdir(d3)
okcnum = len(list(filter(is_fmtencodable, ulist)))
numerr = len(ulist) - okcnum
dcfn = os.path.join(d3, 'funky3%s2.%s' % (deep and 'deep' or '', t))
test_generic(cfvcmd + '%s -v -C -p %s -t %s -f %s' % (deep and ' -rr' or '', d3, t, dcfn), rcurry(cfv_all_test, ok=okcnum, ferror=numerr))
for fn, newfn in renamelist:
if deep:
os.rename(os.path.join(d3, newfn, newfn), os.path.join(d3, newfn, fn))
os.rename(os.path.join(d3, newfn), os.path.join(d3, fn))
# cfv -T, undecodable filenames on disk, correct filenames in chksum file. want to check with -s, fix with -sn
if fmt_hassize(t):
test_generic(cfvcmd + ' -v -m -s -T -p %s -f %s' % (d3, dcfn), rcurry(cfv_all_test, ok=okcnum, misnamed=numrenamed))
if t != 'torrent': # needs -s support on torrents
test_generic(cfvcmd + ' -v -s -T -p %s -f %s' % (d3, dcfn), rcurry(cfv_all_test, ok=okcnum, misnamed=numrenamed))
if fmt_hascrc(t):
test_generic(cfvcmd + ' -v -s -n -T -p %s -f %s' % (d3, dcfn), rcurry(cfv_all_test, ok=okcnum, misnamed=numrenamed))
test_generic(cfvcmd + ' -v -T -p %s -f %s' % (d3, dcfn), rcurry(cfv_all_test, ok=okcnum))
finally:
shutil.rmtree(d3)
d3 = tempfile.mkdtemp()
try:
cnum = create_funkynames(t, d3, fschr, deep=deep)
ulist = os.listdir(d3)
okcnum = len(list(filter(is_fmtokfn, list(filter(is_fmtencodable, ulist)))))
numerr = len(ulist) - okcnum
dcfn = os.path.join(d3, 'funky3%s3.%s' % (deep and 'deep' or '', t))
# cfv -C, undecodable(and/or unencodable) filenames on disk: without raw, ferror on undecodable filename and ignore it
test_generic(cfvcmd + '%s -v -C -p %s -t %s -f %s' % (deep and ' -rr' or '', d3, t, dcfn), rcurry(cfv_all_test, files=cnum, ok=okcnum, ferror=numerr))
test_generic(cfvcmd + ' -v -T -p %s -f %s' % (d3, dcfn), rcurry(cfv_all_test, ok=okcnum))
test_generic(cfvcmd + ' -v -u -T -p %s -f %s' % (d3, dcfn), rcurry(cfv_all_test, ok=okcnum, unv=numerr))
finally:
shutil.rmtree(d3)
def ren_test(f, extra=None, verify=None, t=None):
join = os.path.join
dir = tempfile.mkdtemp()
try:
dir2 = join(dir, 'd2')
basecmd = cfvcmd + ' -r -p ' + dir
if extra:
basecmd += ' ' + extra
cmd = basecmd + ' --renameformat="%(name)s-%(count)i%(ext)s"'
os.mkdir(dir2)
fls = [join(dir, 'test.ext.end'),
join(dir, 'test2.foo'),
join(dir, 'test3'),
join(dir2, 'test4.foo')]
flsf = [join(dir, 'test.ext-%i.end'),
join(dir, 'test2-%i.foo'),
join(dir, 'test3-%i'),
join(dir2, 'test4-%i.foo')]
flsf_1 = [join(dir, 'test.ext.end-%i'),
join(dir, 'test2.foo-%i'),
join(dir2, 'test4.foo-%i')]
flsf_2 = [join(dir, 'test3-%i')]
def flsw(t):
for fl in fls:
with open(fl, 'wb') as f2:
f2.write(t)
def flscmp(t, n, fls):
for fl in fls:
fn = n is not None and fl % n or fl
try:
with open(fn, 'rb') as f2:
d = f2.read()
r = d != t
o = repr(d)
except IOError as e:
r = 1
o = str(e)
test_log_results('cmp %s for %s' % (fn, t.decode('ascii')), r, o, r, None)
flsw(b'hello')
test_generic('%s -C -t %s' % (cmd, f), cfv_test)
flsw(b'1')
test_generic(basecmd + ' --showpaths=0 -v -T --list0=bad', cfv_listdata_bad_test, stderr='/dev/null')
test_generic(basecmd + ' --showpaths=0 -q -T --list0=bad', cfv_listdata_bad_test)
test_generic('%s -Tn' % cmd, cfv_bad_test)
flsw(b'11')
test_generic('%s -Tn' % cmd, cfv_bad_test)
flsw(b'123')
test_generic('%s -Tn' % cmd, cfv_bad_test)
flsw(b'63')
test_generic(cmd + ' --renameformat="%(fullname)s" -Tn', cfv_bad_test) # test for formats without count too
flsw(b'hello')
test_generic('%s -Tn' % cmd, cfv_test)
flscmp(b'1', 0, flsf)
flscmp(b'11', 1, flsf)
flscmp(b'123', 2, flsf)
flscmp(b'63', 1, flsf_1)
flscmp(b'63', 3, flsf_2)
flscmp(b'hello', None, fls)
finally:
shutil.rmtree(dir)
def search_test(t, test_nocrc=0, extra=None):
cfn = os.path.join(os.getcwd(), 'test.' + t)
hassize = fmt_hassize(t)
if test_nocrc:
hascrc = 0
cmd = cfvcmd + ' -m'
else:
hascrc = fmt_hascrc(t)
cmd = cfvcmd
if extra:
cmd += ' ' + extra
if not hascrc and not hassize:
# if using -m and type doesn't have size, make sure -s doesn't do anything silly
d = tempfile.mkdtemp()
try:
for n, n2 in zip(list(range(1, 5)), list(range(4, 0, -1))):
shutil.copyfile('data%s' % n, os.path.join(d, 'fOoO%s' % n2))
test_generic(cmd + ' -v -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, notfound=4))
test_generic(cmd + ' -v -s -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, notfound=4))
test_generic(cmd + ' -v -s -n -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, notfound=4))
test_generic(cmd + ' -v -s -u -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, notfound=4, unv=4))
finally:
shutil.rmtree(d)
# then return, since all the following tests would be impossible.
return
d = tempfile.mkdtemp()
try:
def dont_find_same_file_twice_test(s, o):
if not (o.count('fOoO3') == 1 and o.count('fOoO4') == 1):
return str((o.count('fOoO3'), o.count('fOoO4')))
return cfv_all_test(s, o, ok=4, misnamed=4)
test_generic(cmd + ' -v -s -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, notfound=4))
for n, n2 in zip(list(range(1, 5)), list(range(4, 0, -1))):
shutil.copyfile('data%s' % n, os.path.join(d, 'fOoO%s' % n2))
test_generic(cmd + ' -v -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, notfound=4))
test_generic(cmd + ' -v -s -T -p %s -f %s' % (d, cfn), dont_find_same_file_twice_test)
test_generic(cmd + ' -v -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, notfound=4))
test_generic(cmd + ' -v -n -s -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, ok=4, misnamed=4))
test_generic(cmd + ' -v -u -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, ok=4))
finally:
shutil.rmtree(d)
# the following tests two things:
# 1) that it will copy/link to a file that is already OK rather than just renaming it again
# 2) that it doesn't use the old cached value of a file's checksum before it got renamed out of the way.
d = tempfile.mkdtemp()
try:
misnamed1 = misnamed2 = 4
if hassize and hascrc:
experrs = {'badcrc': 1, 'badsize': 2}
elif hassize:
experrs = {'badsize': 2, 'ok': 1}
misnamed1 = 3
misnamed2 = OneOf(3, 4) # this depends on what order os.listdir finds stuff. (could be 3 or 4)
else: # if hascrc:
experrs = {'badcrc': 3}
test_generic(cmd + ' -v -s -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, notfound=4))
for n, n2 in zip([1, 3, 4], [4, 2, 1]):
shutil.copyfile('data%s' % n, os.path.join(d, 'data%s' % n2))
test_generic(cmd + ' -v -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, notfound=1, **experrs))
test_generic(cmd + ' -v -s -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, ok=4, misnamed=misnamed1))
test_generic(cmd + ' -v -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, notfound=1, **experrs))
test_generic(cmd + ' -v -n -s -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, ok=4, misnamed=misnamed2))
test_generic(cmd + ' -v -u -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, ok=4))
finally:
shutil.rmtree(d)
# test whether ferrors during searching are ignored
if hasattr(os, 'symlink'):
d = tempfile.mkdtemp()
try:
for n, n2 in zip([4], [2]):
shutil.copyfile('data%s' % n, os.path.join(d, 'foo%s' % n2))
for n in string.ascii_lowercase:
os.symlink('noexist', os.path.join(d, n))
test_generic(cmd + ' -v -s -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, ok=1, misnamed=1, notfound=3))
test_generic(cmd + ' -v -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, notfound=4))
test_generic(cmd + ' -v -n -s -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, ok=1, misnamed=1, notfound=3))
test_generic(cmd + ' -v -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, ok=1, notfound=3))
finally:
shutil.rmtree(d)
# test if an error while renaming a misnamed file is properly handled
d = tempfile.mkdtemp()
ffoo = fdata4 = None
try:
with open('data4', 'rb') as f:
ffoo = writefile_and_reopen(os.path.join(d, 'foo'), f.read())
# note that we leave the file open. This is because windows
# allows renaming of files in a readonly dir, but doesn't allow
# renaming of open files. So if we do both the test will work
# on both nix and win.
os.chmod(d, stat.S_IRUSR | stat.S_IXUSR)
try:
os.rename(os.path.join(d, 'foo'), os.path.join(d, 'foo2'))
print('rename of open file in read-only dir worked? skipping this test.')
except EnvironmentError:
# if the rename failed, then we're good to go for these tests..
test_generic(cmd + ' -v -n -s -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, files=4, ok=1, misnamed=1, ferror=1, notfound=3))
os.chmod(d, stat.S_IRWXU)
fdata4 = writefile_and_reopen(os.path.join(d, 'data4'), '')
os.chmod(d, stat.S_IRUSR | stat.S_IXUSR)
test_generic(cmd + ' -v -n -s -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, files=4, ok=1, misnamed=1, ferror=2, notfound=3))
finally:
os.chmod(d, stat.S_IRWXU)
if ffoo:
ffoo.close()
if fdata4:
fdata4.close()
shutil.rmtree(d)
# test if misnamed stuff and/or renaming stuff doesn't screw up the unverified file checking
d = tempfile.mkdtemp()
try:
shutil.copyfile('data4', os.path.join(d, 'foo'))
test_generic(cmd + ' -v -uu -s -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, files=4, ok=1, misnamed=1, notfound=3, unv=0))
test_generic(cmd + ' -v -uu -s -n -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, files=4, ok=1, misnamed=1, notfound=3, unv=0))
open(os.path.join(d, 'data1'), 'wb').close()
if hassize:
experrs = {'badsize': 1}
else:
experrs = {'badcrc': 1}
test_generic(cmd + ' -v -uu -s -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, files=4, ok=1, misnamed=0, notfound=2, unv=0, **experrs))
test_generic(cmd + ' -v -uu -s -n -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, files=4, ok=1, misnamed=0, notfound=2, unv=1, **experrs))
finally:
shutil.rmtree(d)
if fmt_cancreate(t):
# test deep handling
d = tempfile.mkdtemp()
try:
dcfn = os.path.join(d, 'deep.' + t)
os.mkdir(os.path.join(d, 'aOeU.AoEu'))
os.mkdir(os.path.join(d, 'aOeU.AoEu', 'boO.FaRr'))
shutil.copyfile('data1', os.path.join(d, 'aOeU.AoEu', 'boO.FaRr', 'DaTa1'))
test_generic(cmd + ' -v -rr -C -p %s -t %s -f %s' % (d, t, dcfn), rcurry(cfv_all_test, files=1, ok=1))
os.rename(os.path.join(d, 'aOeU.AoEu', 'boO.FaRr', 'DaTa1'), os.path.join(d, 'aOeU.AoEu', 'boO.FaRr', 'Foo1'))
shutil.copyfile('data4', os.path.join(d, 'aOeU.AoEu', 'boO.FaRr', 'DaTa1'))
test_generic(cmd + ' -v -s -T -p %s -f %s' % (d, dcfn), rcurry(cfv_all_test, files=1, ok=1, misnamed=1))
shutil.rmtree(os.path.join(d, 'aOeU.AoEu'))
os.mkdir(os.path.join(d, 'AoEu.aOeU'))
os.mkdir(os.path.join(d, 'AoEu.aOeU', 'BOo.fArR'))
shutil.copyfile('data4', os.path.join(d, 'AoEu.aOeU', 'BOo.fArR', 'dAtA1'))
shutil.copyfile('data1', os.path.join(d, 'AoEu.aOeU', 'BOo.fArR', 'Foo1'))
test_generic(cmd + ' -i -v -s -T -p %s -f %s' % (d, dcfn), rcurry(cfv_all_test, files=1, ok=1, misnamed=1))
if hassize:
experrs = {'badsize': 1}
else:
experrs = {'badcrc': 1}
test_generic(cmd + ' -i -v -T -p %s -f %s' % (d, dcfn), rcurry(cfv_all_test, files=1, ok=0, **experrs))
test_generic(cmd + ' -i -v -s -n -T -p %s -f %s' % (d, dcfn), rcurry(cfv_all_test, files=1, ok=1, misnamed=1))
test_generic(cmd + ' -i -v -T -p %s -f %s' % (d, dcfn), rcurry(cfv_all_test, files=1, ok=1))
finally:
shutil.rmtree(d)
if fmt_cancreate(t) and hassize:
d = tempfile.mkdtemp()
try:
dcfn = os.path.join(d, 'foo.' + t)
os.mkdir(os.path.join(d, 'aoeu'))
dirsize = os.path.getsize(os.path.join(d, 'aoeu'))
with open(os.path.join(d, 'idth'), 'wb') as f:
f.write(b'a' * dirsize)
test_generic(cmd + ' -v -C -p %s -t %s -f %s' % (d, t, dcfn), rcurry(cfv_all_test, files=1, ok=1))
os.remove(os.path.join(d, 'idth'))
os.rename(os.path.join(d, 'aoeu'), os.path.join(d, 'idth'))
def dont_find_dir_test(s, o):
if not o.count('idth') == 1:
return str((o.count('idth'),))
return cfv_all_test(s, o, ok=0, notfound=1)
test_generic(cmd + ' -v -m -T -p %s -f %s' % (d, dcfn), dont_find_dir_test) # test not finding non-file things in normal mode
test_generic(cmd + ' -v -m -s -T -p %s -f %s' % (d, dcfn), dont_find_dir_test) # test not finding non-file things in search mode
finally:
shutil.rmtree(d)
def quoted_search_test():
d = tempfile.mkdtemp()
try:
join = os.path.join
with open(join(d, 'foo.sfv'), 'w') as f:
f.write(r""""data1" B2A9E441
"/data4" FA323C6D
"aa1/data1" B2A9E441
"c:/aa1/data4" FA323C6D
"aa3/data3" 841ADFA2
"\aa3\data4" FA323C6D
"c:\aa4\bb4\data1" B2A9E441
"aa4/bb4/data4" FA323C6D""")
shutil.copyfile('data1', pathjoin_and_mkdir(d, 'foo1'))
shutil.copyfile('data4', pathjoin_and_mkdir(d, 'foo4'))
shutil.copyfile('data1', pathjoin_and_mkdir(d, 'aa1', 'foo1'))
shutil.copyfile('data4', pathjoin_and_mkdir(d, 'aa1', 'foo4'))
shutil.copyfile('data3', pathjoin_and_mkdir(d, 'aa3', 'foo3'))
shutil.copyfile('data4', pathjoin_and_mkdir(d, 'aa3', 'foo4'))
shutil.copyfile('data1', pathjoin_and_mkdir(d, 'aa4', 'bb4', 'foo1'))
shutil.copyfile('data4', pathjoin_and_mkdir(d, 'aa4', 'bb4', 'foo4'))
test_generic(cfvcmd + r' -v --unquote=yes --strippaths=0 --fixpaths \\/ -s -T -p ' + d, rcurry(cfv_all_test, ok=8, misnamed=8))
finally:
shutil.rmtree(d)
def symlink_test():
dir = tempfile.mkdtemp()
dir1 = 'd1'
dir2 = 'd2'
try:
os.mkdir(os.path.join(dir, dir1))
os.mkdir(os.path.join(dir, dir2))
if hasattr(os, 'symlink'):
os.symlink(os.path.join(os.pardir, dir2), os.path.join(dir, dir1, 'l2'))
os.symlink(os.path.join(os.pardir, dir1), os.path.join(dir, dir2, 'l1'))
test_generic(cfvcmd + ' -l -r -p ' + dir, rcurry(cfv_test, operator.eq, 0))
test_generic(cfvcmd + ' -L -r -p ' + dir, rcurry(cfv_test, operator.eq, 0))
test_generic(cfvcmd + ' -l -r -C -p ' + dir, rcurry(cfv_test, operator.eq, 0))
test_generic(cfvcmd + ' -L -r -C -p ' + dir, rcurry(cfv_test, operator.eq, 0))
open(os.path.join(dir, dir1, 'foo'), 'wb').close()
open(os.path.join(dir, dir2, 'bar'), 'wb').close()
def r_unv_test(s, o):
if cfv_unvonly_test(s, o, 2):
return 1
if o.count('not verified') != 1:
return 1
return 0
test_generic(cfvcmd + ' -l -r -u -p ' + dir, r_unv_test)
test_generic(cfvcmd + ' -L -r -u -p ' + dir, r_unv_test)
test_generic(cfvcmd + ' -l -u -p ' + dir, r_unv_test)
test_generic(cfvcmd + ' -L -u -p ' + dir, r_unv_test)
def r_unv_verbose_test(s, o):
if cfv_unvonly_test(s, o, 2):
return 1
if o.count('not verified') != 2:
return 1
return 0
test_generic(cfvcmd + ' -l -uu -p ' + dir, r_unv_verbose_test)
test_generic(cfvcmd + ' -L -uu -p ' + dir, r_unv_verbose_test)
test_generic(cfvcmd + ' -l -r -uu -p ' + dir, r_unv_verbose_test)
test_generic(cfvcmd + ' -L -r -uu -p ' + dir, r_unv_verbose_test)
finally:
shutil.rmtree(dir)
def deep_unverified_test():
dir = tempfile.mkdtemp()
try:
join = os.path.join
a = 'a'
a_C = join(a, 'C')
B = 'B'
B_ushallow = join(B, 'ushallow')
B_ushallow_d = join(B_ushallow, 'd')
u = 'u'
u_u2 = join(u, 'u2')
e = 'e'
e_es = join(e, 'es')
e2 = 'e2'
e2_e2s = join(e2, 'e2s')
e2_e2u = join(e2, 'e2u')
for d in a, a_C, B, B_ushallow, B_ushallow_d, u, u_u2, e, e_es, e2, e2_e2s, e2_e2u:
os.mkdir(join(dir, d))
datafns = ('DATa1', 'UnV1',
join(a, 'dAta2'), join(a, 'Unv2'), join(a_C, 'dATa4'), join(a_C, 'unV4'),
join(B, 'daTA3'), join(B, 'uNv3'),
join(B_ushallow, 'uNvs'), join(B_ushallow_d, 'unvP'), join(B_ushallow_d, 'datA5'),
join(u, 'uNVu'), join(u, 'UnvY'), join(u_u2, 'UNVX'),
join(e2_e2s, 'DaTaE'), join(e2_e2u, 'unVe2'),)
lower_datafns = list(map(lambda s: s.lower(), datafns))
for fn in datafns:
open(join(dir, fn), 'wb').close()
with open(join(dir, 'deep.md5'), 'wt') as f:
s = ('d41d8cd98f00b204e9800998ecf8427e *%s\n' * 6) % (
os.path.join('b', 'DaTa3'),
os.path.join('B', 'ushAllOw', 'D', 'daTa5'),
os.path.join('a', 'c', 'DatA4'),
os.path.join('A', 'dATA2'),
os.path.join('E2', 'e2S', 'DAtae'),
'daTA1')
f.write(s)
def r_test(s, o):
if cfv_test(s, o, operator.eq, 6):
return 1
if o.count('not verified') != 0:
return 1
return 0
def r_unv_test(s, o):
if cfv_unvonly_test(s, o, 10):
return 1
if o.count('not verified') != 8:
return 1
if o.find(os.path.join('e', '*')) >= 0:
return 1
if o.find(os.path.join('e2', '*')) >= 0:
return 1
return 0
def r_unv_verbose_test(s, o):
if cfv_unvonly_test(s, o, 10):
return 1
if o.count('not verified') != 10:
return 1
if o.find('*') >= 0:
return 1
return 0
test_generic(cfvcmd + ' -i -U -p ' + dir, r_test)
test_generic(cfvcmd + ' -i -u -p ' + dir, r_unv_test)
test_generic(cfvcmd + ' -i -uu -p ' + dir, r_unv_verbose_test)
test_generic(cfvcmd + ' -i -U -p ' + dir + ' ' + ' '.join(lower_datafns), r_test)
test_generic(cfvcmd + ' -i -u -p ' + dir + ' ' + ' '.join(lower_datafns), r_unv_verbose_test)
test_generic(cfvcmd + ' -i -uu -p ' + dir + ' ' + ' '.join(lower_datafns), r_unv_verbose_test)
finally:
shutil.rmtree(dir)
def test_encoding_detection():
datad = tempfile.mkdtemp()
d = tempfile.mkdtemp()
try:
datafns = ['data1', 'data3', 'data4']
destfns = [
'\u0061', # LATIN SMALL LETTER A
'\u00c4', # LATIN CAPITAL LETTER A WITH DIAERESIS
'\u03a0', # GREEK CAPITAL LETTER PI
'\u0470', # CYRILLIC CAPITAL LETTER PSI
'\u2605', # BLACK STAR
'\u3052', # HIRAGANA LETTER GE
'\u6708', # CJK UNIFIED IDEOGRAPH-6708
]
BOM = '\uFEFF'
utfencodings = ['utf-8', 'utf-16le', 'utf-16be', 'utf-32le', 'utf-32be', ]
fnerrs = fnok = 0
for i, destfn in enumerate(destfns):
srcfn = datafns[i % len(datafns)]
try:
shutil.copyfile(srcfn, os.path.join(datad, destfn))
except (EnvironmentError, UnicodeError):
fnerrs += 1
else:
fnok += 1
for t in allcreatablefmts():
if fmt_istext(t):
utf8cfn = os.path.join(d, 'utf8nobom.' + t)
test_generic(cfvcmd + ' -C --encoding=utf-8 -p %s -t %s -f %s' % (datad, t, utf8cfn), rcurry(cfv_all_test, ok=fnok))
chksumdata = readfile(utf8cfn).decode('utf-8')
for enc in utfencodings:
bommedcfn = os.path.join(d, enc + '.' + t)
try:
writefile(bommedcfn, (BOM + chksumdata).encode(enc))
except LookupError:
pass
else:
test_generic(cfvcmd + ' -T -p %s -t %s -f %s' % (datad, t, bommedcfn), rcurry(cfv_all_test, ok=fnok))
test_generic(cfvcmd + ' -T -p %s -f %s' % (datad, bommedcfn), rcurry(cfv_all_test, ok=fnok))
finally:
shutil.rmtree(d)
shutil.rmtree(datad)
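# test_encoding_detection() above exercises cfv's BOM sniffing: the same
# utf-8 checksum data is re-encoded with a leading U+FEFF in each utf
# variant, and cfv is expected to auto-detect the encoding both with an
# explicit -t and with type autodetection.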
def test_encoding2():
"""Non-trivial (actual non-ascii characters) encoding test.
These tests will probably always fail unless you use a unicode locale and python 2.3+.
"""
d = tempfile.mkdtemp()
d2 = tempfile.mkdtemp()
try:
cfn = os.path.join(d, '\u3070\u304B.torrent')
shutil.copyfile('testencoding2.torrent.foo', cfn)
datafns = [
('data1', '\u2605'),
('data2', '\u2606'),
('data3', '\u262E'),
('data4', '\u2600'),
]
fnerrs = fnok = 0
for srcfn, destfn in datafns:
try:
shutil.copyfile(srcfn, os.path.join(d2, destfn))
except (EnvironmentError, UnicodeError):
fnerrs += 1
else:
fnok += 1
test_generic(cfvcmd + ' -q -T -p ' + d, rcurry(cfv_status_test, notfound=fnok, ferror=fnerrs))
test_generic(cfvcmd + ' -v -T -p ' + d, rcurry(cfv_all_test, ok=0, notfound=fnok, ferror=fnerrs))
bakad = os.path.join(d, '\u3070\u304B')
os.mkdir(bakad)
for srcfn, destfn in datafns:
try:
shutil.copyfile(srcfn, os.path.join(bakad, destfn))
except (EnvironmentError, UnicodeError):
pass
test_generic(cfvcmd + ' -q -m -T -p ' + d, rcurry(cfv_status_test, ferror=fnerrs))
test_generic(cfvcmd + ' -v -m -T -p ' + d, rcurry(cfv_all_test, ok=fnok, ferror=fnerrs))
test_generic(cfvcmd + ' -v -m -u -T -p ' + d, rcurry(cfv_all_test, ok=fnok, ferror=fnerrs, unv=0))
if not fnerrs:
# if some of the files can't be found, checking of remaining files will fail due to missing pieces
test_generic(cfvcmd + ' -q -T -p ' + d, rcurry(cfv_status_test))
test_generic(cfvcmd + ' -v -T -p ' + d, rcurry(cfv_all_test, ok=4))
test_generic(cfvcmd + ' -v -u -T -p ' + d, rcurry(cfv_all_test, ok=4, unv=0))
raw_fnok = 0
files_fnok = files_fnerrs = 0
raw_files_fnok = raw_files_fnerrs = 0
dirn = list(filter(lambda s: not s.endswith('torrent'), os.listdir(d)))[0]
try:
files = [os.path.join(dirn, s) for s in os.listdir(os.path.join(d, dirn))]
except EnvironmentError:
files = []
else:
for fn in files:
flag_ok_raw = flag_ok_files = False
for srcfn, destfn in datafns:
if os.path.join('\u3070\u304B', destfn) == fn:
raw_fnok += 1
flag_ok_raw = True
try:
open(os.path.join(d, fn), 'rb')
except (EnvironmentError, UnicodeError):
files_fnerrs += 1
else:
files_fnok += 1
flag_ok_files = True
if flag_ok_files and flag_ok_raw:
raw_files_fnok += 1
else:
raw_files_fnerrs += 1
raw_fnerrs = len(datafns) - raw_fnok
# print(len(files), files)
# print('raw', raw_fnok, raw_fnerrs)
# print('files', files_fnok, files_fnerrs)
# print('raw_files', raw_files_fnok, raw_files_fnerrs)
if files:
test_generic(cfvcmd + ' -v -m -T -p ' + d + ' ' + ' '.join(files), rcurry(cfv_all_test, ok=files_fnok, notfound=files_fnerrs))
if files_fnok == len(datafns):
test_generic(cfvcmd + ' -v -T -p ' + d + ' ' + ' '.join(files), rcurry(cfv_all_test, ok=files_fnok, notfound=files_fnerrs))
test_generic(cfvcmd + ' --encoding=raw -v -m -T -p ' + d + ' ' + ' '.join(files), rcurry(cfv_all_test, ok=raw_files_fnok))
if raw_files_fnok == len(datafns):
test_generic(cfvcmd + ' --encoding=raw -v -T -p ' + d + ' ' + ' '.join(files), rcurry(cfv_all_test, ok=raw_files_fnok))
test_generic(cfvcmd + ' --encoding=raw -m -v -T -p ' + d, rcurry(cfv_all_test, ok=raw_fnok, notfound=raw_fnerrs))
test_generic(cfvcmd + ' --encoding=raw -m -v -u -T -p ' + d, rcurry(cfv_all_test, ok=raw_fnok, unv=fnok - raw_fnok, notfound=raw_fnerrs))
if raw_fnok == len(datafns):
test_generic(cfvcmd + ' --encoding=raw -v -T -p ' + d, rcurry(cfv_all_test, ok=raw_fnok, notfound=raw_fnerrs))
test_generic(cfvcmd + ' --encoding=raw -v -u -T -p ' + d, rcurry(cfv_all_test, ok=raw_fnok, unv=fnok - raw_fnok, notfound=raw_fnerrs))
except Exception:
test_log_results('test_encoding2', 'foobar', ''.join(traceback.format_exception(*sys.exc_info())), 'foobar', {}) # yuck. I really should switch this crap all to unittest ...
# finally:
shutil.rmtree(d2)
shutil.rmtree(d)
def largefile2GB_test():
# hope you have sparse file support ;)
fn = os.path.join('bigfile2', 'bigfile')
f = open(fn, 'wb')
try:
f.write(b'hi')
f.seek(2 ** 30)
f.write(b'foo')
f.seek(2 ** 31)
f.write(b'bar')
f.close()
test_generic(cfvcmd + ' -v -T -p %s' % 'bigfile2', rcurry(cfv_all_test, ok=6))
finally:
os.unlink(fn)
def largefile4GB_test():
# hope you have sparse file support ;)
fn = os.path.join('bigfile', 'bigfile')
f = open(fn, 'wb')
try:
f.write(b'hi')
f.seek(2 ** 30)
f.write(b'foo')
f.seek(2 ** 31)
f.write(b'bar')
f.seek(2 ** 32)
f.write(b'baz')
f.close()
test_generic(cfvcmd + ' -v -T -p %s' % 'bigfile', rcurry(cfv_all_test, ok=10))
finally:
os.unlink(fn)
def manyfiles_test(t):
try:
max_open = os.sysconf('SC_OPEN_MAX')
except (AttributeError, ValueError, OSError):
max_open = 1024
if not run_long_tests and max_open > 4096:
print('max open files is big (%i)' % max_open, end=' ')
max_open = 4096
print('clipping to %i. Use --long to try the real value' % max_open)
num = max_open + 1
d = tempfile.mkdtemp()
try:
for i in range(0, num):
n = '%04i' % i
with open(os.path.join(d, n), 'wt') as f:
f.write(n)
cfn = os.path.join(d, 'manyfiles.' + t)
test_generic(cfvcmd + ' -C -p %s -t %s -f %s' % (d, t, cfn), rcurry(cfv_all_test, ok=num))
test_generic(cfvcmd + ' -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, ok=num))
finally:
shutil.rmtree(d)
def specialfile_test(cfpath):
try:
import threading
except ImportError:
return
d = tempfile.mkdtemp()
cfn = os.path.split(cfpath)[1]
try:
fpath = os.path.join(d, 'foo.bar')
try:
os.mkfifo(fpath)
except (AttributeError, EnvironmentError):
return
shutil.copyfile(cfpath, os.path.join(d, cfn))
def pusher(fpath):
with open(fpath, 'wb') as f:
f.write(b'a' * 0x4000)
f.flush()
time.sleep(0.1)
f.write(b'b' * 0x4000)
f.flush()
time.sleep(0.1)
f.write(b'c' * 0x4000)
t = threading.Thread(target=pusher, args=(fpath,))
t.start()
s, o = cfvtest.runcfv('%s --progress=yes -T -p %s -f %s' % (cfvcmd, d, cfn))
t.join()
r = 0
if s:
r = 1
elif o.count('#') > 1:
r = 'count(#) = %s' % (o.count('#'))
elif o.count('..'):
r = 3
test_log_results('specialfile_test(%s)' % cfn, s, o, r, None)
finally:
shutil.rmtree(d)
def unrecognized_cf_test():
def cfv_unrectype(s, o):
r = cfv_all_test(s, o, cferror=1)
if r:
return r
if not o.count('type'):
return "'type' not found in output"
if o.count('encoding'):
return "'encoding' found in output"
return 0
def cfv_unrecenc(s, o):
r = cfv_all_test(s, o, cferror=1)
if r:
return r
if not o.count('type'):
return "'type' not found in output"
if not o.count('encoding'):
return "'encoding' not found in output"
return 0
# data1 is not a valid checksum file, but it is valid latin1, so it should only generate an unrecognized type error
test_generic(cfvcmd + ' -T --encoding=latin1 -f data1', cfv_unrectype)
# data1 is not a valid checksum file, nor is it valid utf-16 (no bom, odd number of bytes), so it should generate an unrecognized type or encoding error
test_generic(cfvcmd + ' -T --encoding=utf-16 -f data1', cfv_unrecenc)
def private_torrent_test():
cmd = cfvcmd
tmpd = tempfile.mkdtemp()
try:
needle = b'7:privatei1'
f = os.path.join(tmpd, 'test.torrent')
test_generic('%s -C -f %s data1' % (cmd, f), cfv_test)
data = readfile(f)
test_log_results('should not contain private flag', 0, repr(data), needle in data, None)
f = os.path.join(tmpd, 'test2.torrent')
test_generic('%s --private_torrent -C -f %s data1' % (cmd, f), cfv_test)
data = readfile(f)
test_log_results('should contain private flag', 0, repr(data), needle not in data, None)
finally:
shutil.rmtree(tmpd)
def all_unittest_tests():
if not run_internal:
return 0
test_log_start('all_unittests_suite', None)
from unittest import TextTestRunner
suite = cfvtest.all_unittests_suite()
runner = TextTestRunner(stream=logfile, descriptions=1, verbosity=2)
result = runner.run(suite)
if not result.wasSuccessful():
r = '%i failures, %i errors' % tuple(map(len, (result.failures, result.errors)))
else:
r = 0
test_log_finish('all_unittests_suite', not result.wasSuccessful(), r, None, None)
return len(result.failures) + len(result.errors)
run_internal = 1
run_long_tests = 0
run_unittests_only = 0
run_exit_early = 0
def show_help_and_exit(err=None):
if err:
print('error:', err)
print()
print('usage: test.py [-i|-e] [--long] [--unit] [--exit-early] [cfv]')
print(' -i run tests internally')
    print(' -e launch separate cfv process for each test')
print(' --long include tests that may use large amounts of CPU or disk')
print(' --unit run only unittests, no integration tests')
print(' --exit-early exit after first error')
print(' --help show this help')
print()
print('default [cfv] is:', cfvtest.cfvfn)
print('default run mode is:', run_internal and 'internal' or 'external')
sys.exit(1)
try:
optlist, args = getopt.getopt(sys.argv[1:], 'ie', ['long', 'help', 'unit', 'exit-early'])
except getopt.error as e:
show_help_and_exit(e)
if len(args) > 1:
show_help_and_exit('too many arguments')
for o, a in optlist:
if o == '--help':
show_help_and_exit()
elif o == '--long':
run_long_tests = 1
elif o == '--unit':
run_unittests_only = 1
elif o == '--exit-early':
run_exit_early = 1
elif o == '-i':
run_internal = 1
elif o == '-e':
run_internal = 0
else:
show_help_and_exit('bad opt %r' % o)
cfvtest.setcfv(fn=args and args[0] or None, internal=run_internal)
if run_unittests_only:
logfile = sys.stdout
all_unittest_tests()
sys.exit()
# set everything to default in case user has different in config file
cfvcmd = '-ZNVRMUI --unquote=no --fixpaths="" --strippaths=0 --showpaths=auto-relative --progress=no --announceurl=url --noprivate_torrent'
logfile = open(os.path.join(tempfile.gettempdir(), 'cfv_%s_test-%s.log' % (cfvtest.ver_cfv, time.strftime('%Y%m%dT%H%M%S'))), 'wt')
def all_tests():
stats.ok = stats.failed = 0
symlink_test()
deep_unverified_test()
for fmt in coreutilsfmts():
ren_test(fmt)
ren_test('md5', extra='-rr')
ren_test('bsdmd5')
ren_test('sfv')
ren_test('sfvmd5')
ren_test('csv')
ren_test('csv2')
ren_test('csv4')
ren_test('crc')
ren_test('torrent')
for t in allavailablefmts():
if t != 'torrent':
search_test(t)
search_test(t, test_nocrc=1)
# search_test('torrent',test_nocrc=1,extra='--strip=1')
quoted_search_test()
for fmt in coreutilsfmts():
T_test('.' + fmt)
T_test('.md5.gz')
T_test('comments.md5')
T_test('.bsdmd5')
# test par spec 1.0 files:
T_test('.par')
T_test('.p01')
# test par spec 0.9 files:
T_test('v09.par')
T_test('v09.p01')
T_test('.par2')
T_test('.vol0+1.par2')
T_test('.csv')
T_test('.sfv')
T_test('noheader.sfv')
T_test('.sfvmd5')
T_test('.csv2')
T_test('.csv4')
T_test('.crc')
T_test('nosize.crc')
T_test('nodims.crc')
T_test('nosizenodimsnodesc.crc')
for fmt in coreutilsfmts():
T_test('crlf.' + fmt)
T_test('crlf.bsdmd5')
T_test('crlf.csv')
T_test('crlf.csv2')
T_test('crlf.csv4')
T_test('crlf.sfv')
T_test('noheadercrlf.sfv')
T_test('crlf.crc')
for fmt in coreutilsfmts():
T_test('crcrlf.' + fmt)
T_test('crcrlf.bsdmd5')
T_test('crcrlf.csv')
T_test('crcrlf.csv2')
T_test('crcrlf.csv4')
T_test('crcrlf.sfv')
T_test('noheadercrcrlf.sfv')
T_test('crcrlf.crc')
for strip in (0, 1):
T_test('.torrent', extra='--strip=%s' % strip)
T_test('smallpiece.torrent', extra='--strip=%s' % strip)
T_test('encoding.torrent', extra='--strip=%s' % strip)
def cfv_torrentcommentencoding_test(s, o):
r = cfv_all_test(s, o, ok=1)
if r:
return r
tcount = o.count('Test_Comment-Text.')
if tcount != 1:
return 'encoded text count: %s' % tcount
return 0
test_generic(cfvcmd + ' -T -v -f testencodingcomment.torrent', cfv_torrentcommentencoding_test)
test_encoding2()
test_encoding_detection()
unrecognized_cf_test()
# test handling of directory args in recursive testmode. (Disabled since this isn't implemented, and I'm not sure if it should be. It would change the meaning of cfv *)
# test_generic(cfvcmd + ' -r a', cfv_test)
# test_generic(cfvcmd + ' -ri a', cfv_test)
# test_generic(cfvcmd + ' -ri A', cfv_test)
# test_generic(cfvcmd + ' -rm a', cfv_test)
# test_generic(cfvcmd + ' -rim a', cfv_test)
# test_generic(cfvcmd + ' -r a/C', cfv_test)
# test_generic(cfvcmd + ' -ri A/c', cfv_test)
# test handling of testfile args in recursive testmode
test_generic(cfvcmd + ' -r -p a ' + os.path.join('C', 'foo.bar'), cfv_test)
test_generic(cfvcmd + ' -ri -p a ' + os.path.join('c', 'fOo.BaR'), cfv_test)
test_generic(cfvcmd + ' -r -u -p a ' + os.path.join('C', 'foo.bar'), cfv_test)
test_generic(cfvcmd + ' -ri -u -p a ' + os.path.join('c', 'fOo.BaR'), cfv_test)
def cfv_notfound_or_bad_test(path):
if os.path.exists(path):
return cfv_bad_test
else:
return cfv_notfound_test
test_generic(cfvcmd + ' --strippaths=0 -T -f teststrip0.csv4', cfv_test)
test_generic(cfvcmd + ' --strippaths=1 -T -f teststrip1.csv4', cfv_test)
test_generic(cfvcmd + ' --strippaths=2 -T -f teststrip2.csv4', cfv_test)
test_generic(cfvcmd + ' --strippaths=all -T -f teststrip-1.csv4', cfv_test)
test_generic(cfvcmd + ' --strippaths=none -T -f teststrip-none.csv4', cfv_notfound_or_bad_test('/data1'))
test_generic(cfvcmd + r' --strippaths=0 --fixpaths \\/ -T -f testdrivestrip.md5', rcurry(cfv_all_test, ok=4))
test_generic(cfvcmd + r' --strippaths=0 --unquote=yes --fixpaths \\/ -T -f testdrivestripquoted.md5', rcurry(cfv_all_test, ok=4))
test_generic(cfvcmd + r' --strippaths=0 --unquote=yes --fixpaths \\/ -T -f testdrivestripquoted.md5 data1 data3 data4', rcurry(cfv_all_test, ok=3))
test_generic(cfvcmd + ' -i -T -f testcase.csv', cfv_test)
test_generic(cfvcmd + ' -T --unquote=yes -f testquoted.sfv', cfv_test)
test_generic(cfvcmd + ' -i --unquote=yes -T -f testquotedcase.sfv', cfv_test)
test_generic(cfvcmd + ' -i --unquote=yes -T -f testquotedcase.sfv DaTa1 ' + os.path.join('a', 'C', 'Foo.bar'), rcurry(cfv_all_test, ok=2))
test_generic(cfvcmd + ' -i -T -f testquoted.csv4', cfv_test)
test_generic(cfvcmd + r' --fixpaths \\/ -T -f testfix.csv', cfv_test)
test_generic(cfvcmd + r' --fixpaths \\/ -T -f testfix.csv4', cfv_test)
test_generic(cfvcmd + r' -i --fixpaths \\/ -T -f testfix.csv4', cfv_test)
C_test('bsdmd5', '-t bsdmd5') # ,verify=lambda f: test_generic('md5 -c ' + f, status_test)) #bsd md5 seems to have no way to check, only create
for fmt in coreutilsfmts():
if pathfind(fmt + 'sum'): # don't report pointless errors on systems that don't have e.g. sha1sum
def coreutils_verify(f):
test_external(fmt + 'sum -c ' + f, status_test)
else:
print('skipping %s verify using external tool %ssum, as it is not installed.' % (fmt, fmt))
coreutils_verify = None
C_test(fmt, verify=coreutils_verify)
C_test('csv')
if pathfind('cksfv'): # don't report pointless errors on systems that don't have cksfv
def sfvverify(f):
test_external('cksfv -f ' + f, status_test)
else:
print('skipping sfv verify using external tool cksfv, as it is not installed.')
sfvverify = None
C_test('sfv', verify=sfvverify)
C_test('sfvmd5', '-t sfvmd5')
C_test('csv2', '-t csv2')
C_test('csv4', '-t csv4')
C_test('crc')
private_torrent_test()
# test_generic('../cfv -V -T -f test.md5', cfv_test)
# test_generic('../cfv -V -tcsv -T -f test.md5', cfv_test)
for t in allavailablefmts():
if fmt_istext(t):
test_generic(cfvcmd + ' --encoding=cp500 -T -f test.' + t, rcurry(cfv_all_test, cferror=1))
else:
if t == 'par':
try:
open('data1'.encode('utf-16le').decode('utf-16be'), 'rb')
except UnicodeError:
nf = 0
err = 4
except Exception:
nf = 4
err = 0
test_generic(cfvcmd + ' --encoding=utf-16be -T -f test.' + t, rcurry(cfv_all_test, notfound=nf, ferror=err))
test_generic(cfvcmd + ' --encoding=cp500 -T -f test.' + t, rcurry(cfv_all_test, cferror=4))
test_generic(cfvcmd + ' --encoding=cp500 -i -T -f test.' + t, rcurry(cfv_all_test, cferror=4))
else:
try:
open(b'data1'.decode('cp500'), 'rb')
except UnicodeError:
nf = 0
err = 4
except Exception:
nf = 4
err = 0
test_generic(cfvcmd + ' --encoding=cp500 -T -f test.' + t, rcurry(cfv_all_test, notfound=nf, ferror=err))
test_generic(cfvcmd + ' --encoding=cp500 -i -T -f test.' + t, rcurry(cfv_all_test, notfound=nf, ferror=err))
if fmt_cancreate(t):
C_funkynames_test(t)
manyfiles_test(t)
for fn in glob(os.path.join('fifotest', 'fifo.*')):
specialfile_test(fn)
test_generic(cfvcmd + ' -m -v -T -t sfv', lambda s, o: cfv_typerestrict_test(s, o, 'sfv'))
test_generic(cfvcmd + ' -m -v -T -t sfvmd5', lambda s, o: cfv_typerestrict_test(s, o, 'sfvmd5'))
test_generic(cfvcmd + ' -m -v -T -t bsdmd5', lambda s, o: cfv_typerestrict_test(s, o, 'bsdmd5'))
for fmt in coreutilsfmts():
test_generic(cfvcmd + ' -m -v -T -t ' + fmt, lambda s, o: cfv_typerestrict_test(s, o, fmt))
test_generic(cfvcmd + ' -m -v -T -t csv', lambda s, o: cfv_typerestrict_test(s, o, 'csv'))
test_generic(cfvcmd + ' -m -v -T -t par', lambda s, o: cfv_typerestrict_test(s, o, 'par'))
test_generic(cfvcmd + ' -m -v -T -t par2', lambda s, o: cfv_typerestrict_test(s, o, 'par2'))
test_generic(cfvcmd + ' -u -t md5 -f test.md5 data* unchecked.dat test.md5', cfv_unv_test)
test_generic(cfvcmd + ' -u -f test.md5 data* unchecked.dat', cfv_unv_test)
test_generic(cfvcmd + ' -u -f test.md5 data* unchecked.dat test.md5', cfv_unv_test)
test_generic(cfvcmd + r' -i -tcsv --fixpaths \\/ -Tu', lambda s, o: cfv_unv_test(s, o, None))
test_generic(cfvcmd + ' -T -t md5 -f non_existant_file', cfv_cferror_test)
test_generic(cfvcmd + ' -T -f ' + os.path.join('corrupt', 'missingfiledesc.par2'), cfv_cferror_test)
test_generic(cfvcmd + ' -T -f ' + os.path.join('corrupt', 'missingmain.par2'), cfv_cferror_test)
test_generic(cfvcmd + ' -T -m -f ' + os.path.join('corrupt', 'missingfiledesc.par2'), cfv_cferror_test)
test_generic(cfvcmd + ' -T -m -f ' + os.path.join('corrupt', 'missingmain.par2'), cfv_cferror_test)
test_generic(cfvcmd + ' -T -f foo.torrent', cfv_test)
test_generic(cfvcmd + ' -T --strip=none -p foo -f ../foo.torrent', rcurry(cfv_all_test, notfound=7))
for strip in (0, 1):
test_generic(cfvcmd + ' -T --strippaths=%s -p foo -f %s' % (strip, os.path.join(os.pardir, 'foo.torrent')), rcurry(cfv_all_test, ok=7))
test_generic(cfvcmd + ' -T --strippaths=%s -p foo2err -f %s' % (strip, os.path.join(os.pardir, 'foo.torrent')), rcurry(cfv_all_test, ok=4, badcrc=3))
test_generic(cfvcmd + ' -T --strippaths=%s -p foo2err -f %s foo1 foo4' % (strip, os.path.join(os.pardir, 'foo.torrent')), rcurry(cfv_all_test, ok=0, badcrc=2))
test_generic(cfvcmd + ' -T --strippaths=%s -p foo2err1 -f %s' % (strip, os.path.join(os.pardir, 'foo.torrent')), rcurry(cfv_all_test, ok=6, badcrc=1))
test_generic(cfvcmd + ' -T --strippaths=%s -p foo2err1 -f %s foo1 foo4' % (strip, os.path.join(os.pardir, 'foo.torrent')), rcurry(cfv_all_test, ok=2))
test_generic(cfvcmd + ' -T --strippaths=%s -p foo2badsize -f %s' % (strip, os.path.join(os.pardir, 'foo.torrent')), rcurry(cfv_all_test, ok=5, badsize=1, badcrc=1))
test_generic(cfvcmd + ' -T --strippaths=%s -p foo2badsize -f %s foo1 foo4' % (strip, os.path.join(os.pardir, 'foo.torrent')), rcurry(cfv_all_test, ok=1, badcrc=1))
test_generic(cfvcmd + ' -T --strippaths=%s -p foo2missing -f %s' % (strip, os.path.join(os.pardir, 'foo.torrent')), rcurry(cfv_all_test, ok=4, badcrc=2, notfound=1))
test_generic(cfvcmd + ' -T --strippaths=%s -p foo2missing -f %s foo1 foo4' % (strip, os.path.join(os.pardir, 'foo.torrent')), rcurry(cfv_all_test, ok=0, badcrc=2))
d = tempfile.mkdtemp()
try:
open(os.path.join(d, 'foo'), 'wb').close()
cmd = cfvcmd.replace(' --announceurl=url', '')
test_generic(cmd + ' -C -p %s -f foo.torrent' % d, rcurry(cfv_all_test, files=1, cferror=1))
test_log_results('non-creation of empty torrent on missing announceurl?', '', repr(os.listdir(d)), len(os.listdir(d)) > 1, {})
finally:
shutil.rmtree(d)
if run_long_tests:
largefile2GB_test()
largefile4GB_test()
test_generic(cfvcmd + ' -t aoeu', rcurry(cfv_cftypehelp_test, 1), stdout='/dev/null')
test_generic(cfvcmd + ' -t aoeu', rcurry(cfv_nooutput_test, 1), stderr='/dev/null')
test_generic(cfvcmd + ' -t help', rcurry(cfv_cftypehelp_test, 0), stderr='/dev/null')
test_generic(cfvcmd + ' -t help', rcurry(cfv_nooutput_test, 0), stdout='/dev/null')
test_generic(cfvcmd + ' -h', cfv_nooutput_test, stdout='/dev/null')
test_generic(cfvcmd + ' -h', cfv_version_test, stderr='/dev/null')
donestr = '\n>>> tests finished: ok: %i failed: %i' % (stats.ok, stats.failed)
log(donestr)
print(donestr)
return stats.failed
def copytree(src, dst, ignore=None):
if ignore is None:
ignore = []
for name in os.listdir(src):
if name in ignore:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
if os.path.islink(srcname):
continue
elif os.path.isfile(srcname):
shutil.copy(srcname, dstname)
elif os.path.isdir(srcname):
os.mkdir(dstname)
copytree(srcname, dstname, ignore)
else:
print('huh?', srcname)
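# Note (sketch): on Python >= 3.8, shutil.copytree(src, dst, dirs_exist_ok=True,
# ignore=shutil.ignore_patterns('.svn')) covers most of the helper above; the
# hand-rolled version is kept because it skips symlinks entirely, whereas
# shutil.copytree would copy the files they point to by default.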
# copy the testdata into a temp dir in order to avoid .svn dirs breaking some tests
tmpdatapath = tempfile.mkdtemp()
try:
copytree(cfvtest.datapath, tmpdatapath, ignore=['.svn'])
os.chdir(tmpdatapath) # do this after the setcfv, since the user may have specified a relative path
failed = 0
print('>>> testing...')
failed += all_unittest_tests()
failed += all_tests()
if cfvtest.ver_mmap:
print('>>> testing without mmap...')
cfvtest.setenv('CFV_NOMMAP', 'x')
assert not cfvtest.ver_mmap
failed += all_tests()
sys.exit(failed)
finally:
shutil.rmtree(tmpdatapath)
| cfv-project/cfv | test/test.py | test.py | py | 74,606 | python | en | code | 46 | github-code | 6 | [
{
"api_name": "locale.getpreferredencoding",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "builtins.filter",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "builtins.filter",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "builtin... |
20194791845 | # -*- coding: utf-8 -*-
import datetime
import json
import sys
from threading import Thread
from resources.lib.common import tools
from resources.lib.indexers.trakt import TraktAPI
from resources.lib.modules import database
from resources.lib.modules.trakt_sync.shows import TraktSyncDatabase
from resources.lib.modules.trakt_sync.hidden import TraktSyncDatabase as HiddenDatabase
try:
    from Queue import Queue
except ImportError:
    from queue import Queue
sysaddon = sys.argv[0]
try:
    syshandle = int(sys.argv[1])
except (IndexError, ValueError):
    syshandle = ''
trakt = TraktAPI()
language_code = tools.get_language_code()
trakt_database = TraktSyncDatabase()
hidden_database = HiddenDatabase()
class Menus:
def __init__(self):
self.itemList = []
self.threadList = []
self.direct_episode_threads = []
self.title_appends = tools.getSetting('general.appendtitles')
self.task_queue = Queue(40)
######################################################
# MENUS
######################################################
def onDeckShows(self):
hidden_shows = hidden_database.get_hidden_items('progress_watched', 'shows')
trakt_list = trakt.json_response('sync/playback/episodes', limit=True)
if trakt_list is None:
return
trakt_list = [i for i in trakt_list if i['show']['ids']['trakt'] not in hidden_shows]
trakt_list = sorted(trakt_list, key=lambda i: tools.datetime_workaround(i['paused_at'][:19],
format="%Y-%m-%dT%H:%M:%S",
date_only=False), reverse=True)
filter_list = []
showList = []
sort_list = []
for i in trakt_list:
if i['show']['ids']['trakt'] not in filter_list:
if int(i['progress']) != 0:
showList.append(i)
filter_list.append(i['show']['ids']['trakt'])
sort_list.append(i['show']['ids']['trakt'])
sort = {'type': 'showInfo', 'id_list': sort_list}
self.mixedEpisodeBuilder(showList, sort=sort)
tools.closeDirectory('tvshows')
def discoverShows(self):
tools.addDirectoryItem(tools.lang(32007), 'showsPopular&page=1', '', '')
        if tools.getSetting('trakt.auth') != '':
tools.addDirectoryItem(tools.lang(32008), 'showsRecommended', '', '')
# tools.addDirectoryItem('This Years Most Popular', '', '', '')
tools.addDirectoryItem(tools.lang(32009), 'showsTrending&page=1', '', '')
tools.addDirectoryItem(tools.lang(32067), 'showsNew', '', '')
tools.addDirectoryItem(tools.lang(32010), 'showsPlayed&page=1', '', '')
tools.addDirectoryItem(tools.lang(32011), 'showsWatched&page=1', '', '')
tools.addDirectoryItem(tools.lang(32012), 'showsCollected&page=1', '', '')
tools.addDirectoryItem(tools.lang(32013), 'showsAnticipated&page=1', '', '')
tools.addDirectoryItem(tools.lang(32014), 'showsUpdated&page=1', '', '')
tools.addDirectoryItem(tools.lang(40121), 'showsNetworks', '', '')
tools.addDirectoryItem(tools.lang(40123), 'showYears', '', '')
tools.addDirectoryItem(tools.lang(32062), 'tvGenres', '', '')
tools.addDirectoryItem(tools.lang(40151), 'showsByActor', '', '')
# show genres is now labeled as tvGenres to support genre icons in skins
if tools.getSetting('searchHistory') == 'false':
tools.addDirectoryItem(tools.lang(32016), 'showsSearch', '', '')
else:
tools.addDirectoryItem(tools.lang(32016), 'showsSearchHistory', '', '')
tools.closeDirectory('addons')
def myShows(self):
tools.addDirectoryItem(tools.lang(32063), 'onDeckShows', None, None)
tools.addDirectoryItem(tools.lang(32017), 'showsMyCollection', '', '')
tools.addDirectoryItem(tools.lang(32018), 'showsMyWatchlist', '', '')
tools.addDirectoryItem('Next Up', 'showsNextUp', '', '')
tools.addDirectoryItem('Upcoming Episodes', 'myUpcomingEpisodes', '', '')
tools.addDirectoryItem('Unfinished Shows in Collection', 'showsMyProgress', '', '')
tools.addDirectoryItem('Recent Episodes', 'showsMyRecentEpisodes', '', '')
tools.addDirectoryItem('My Show Lists', 'myTraktLists&actionArgs=shows', '', '')
tools.closeDirectory('addons')
def myShowCollection(self):
trakt_list = trakt_database.get_collected_episodes()
trakt_list = [i for i in trakt_list if i is not None]
trakt_list = list(set([i['show_id'] for i in trakt_list]))
trakt_list = [{'ids': {'trakt': i}} for i in trakt_list]
trakt_list = [i for i in trakt_list if i is not None]
if trakt_list is None:
return
self.showListBuilder(trakt_list)
tools.closeDirectory('tvshows', sort='title')
def myShowWatchlist(self):
trakt_list = trakt.json_response('users/me/watchlist/shows', limit=False)
if trakt_list is None:
return
try:
sort_by = trakt.response_headers['X-Sort-By']
sort_how = trakt.response_headers['X-Sort-How']
trakt_list = trakt.sort_list(sort_by, sort_how, trakt_list, 'show')
except:
tools.log('Failed to sort trakt list by response headers', 'error')
pass
self.showListBuilder(trakt_list)
tools.closeDirectory('tvshows')
def myProgress(self):
collected_episodes = trakt_database.get_collected_episodes()
collection = list(set([i['show_id'] for i in collected_episodes]))
if len(collection) == 0:
return
show_dicts = []
for i in collection:
show_dicts.append({'show': {'ids': {'trakt': i}}})
show_meta_list = trakt_database.get_show_list(show_dicts)
unfinished = []
for show in show_meta_list:
if show['info']['playcount'] == 0:
unfinished.append(show)
self.showListBuilder(unfinished)
tools.closeDirectory('tvshows', sort='title')
def newShows(self):
hidden = hidden_database.get_hidden_items('recommendations', 'shows')
datestring = datetime.datetime.today() - datetime.timedelta(days=29)
trakt_list = database.get(trakt.json_response, 12, 'calendars/all/shows/new/%s/30?languages=%s' %
(datestring.strftime('%d-%m-%Y'), language_code))
if trakt_list is None:
return
# For some reason trakt messes up their list and spits out tons of duplicates so we filter it
duplicate_filter = []
temp_list = []
for i in trakt_list:
if not i['show']['ids']['tvdb'] in duplicate_filter:
duplicate_filter.append(i['show']['ids']['tvdb'])
temp_list.append(i)
trakt_list = temp_list
trakt_list = [i for i in trakt_list if i['show']['ids']['trakt'] not in hidden]
if len(trakt_list) > 40:
trakt_list = trakt_list[:40]
self.showListBuilder(trakt_list)
tools.closeDirectory('tvshows')
    def myNextUp(self):
watched_shows = trakt_database.get_watched_shows()
hidden_shows = hidden_database.get_hidden_items('progress_watched', 'shows')
watched_shows = [i for i in watched_shows if i['trakt_id'] not in hidden_shows]
watched_episodes = trakt_database.get_watched_episodes()
self._start_queue_workers()
for show in watched_shows:
self.task_queue.put((self._get_next_episode_to_watch, (show, watched_episodes)), block=True)
self._finish_queue_workers()
if tools.getSetting('nextup.sort') == '1':
watched_list = trakt.json_response('users/me/watched/shows')
watched_list = sorted(watched_list, key=lambda i: i['last_watched_at'], reverse=True)
watched_list = [i['show']['ids']['trakt'] for i in watched_list]
sort = {'type': 'showInfo', 'id_list': watched_list}
else:
sort = None
episodes = self.itemList
self.itemList = []
self.mixedEpisodeBuilder(episodes, sort=sort, hide_watched=True)
tools.closeDirectory('tvshows')
def _get_next_episode_to_watch(self, show_db_dict, watched_episodes):
try:
show_id = show_db_dict['trakt_id']
if show_db_dict['kodi_meta'] == {}:
show_db_dict['kodi_meta'] = trakt_database.get_single_show(show_id)
watched_episodes = [i for i in watched_episodes if i['show_id'] == show_id]
watched_episodes = sorted(watched_episodes, key=lambda episode: episode['season'], reverse=True)
season = watched_episodes[0]['season']
season_meta = trakt_database.get_single_season(show_id, season)
watched_episodes = [i for i in watched_episodes if i['season'] == season]
watched_episodes = sorted(watched_episodes, key=lambda episode: episode['number'], reverse=True)
last_watched_episode = watched_episodes[0]['number']
next_episode = int(watched_episodes[0]['number']) + 1
if season_meta is None:
tools.log('Could not acquire season meta information for %s Season %s' % (show_id, season), 'error')
return
if season_meta['info']['episode_count'] == len(watched_episodes) \
or season_meta['info']['episode_count'] == last_watched_episode:
if int(show_db_dict['kodi_meta']['info']['season_count']) > season:
season += 1
next_episode = 1
episode_dict = {'show': {'ids': {'trakt': show_id}},
'episode': {'season': season, 'number': next_episode}}
self.itemList.append(episode_dict)
        except KeyError:
            import traceback
            traceback.print_exc()
except:
import traceback
traceback.print_exc()
def myRecentEpisodes(self):
hidden_shows = hidden_database.get_hidden_items('calendar', 'shows')
datestring = datetime.datetime.today() - datetime.timedelta(days=13)
trakt_list = database.get(trakt.json_response, 12, 'calendars/my/shows/%s/14' %
datestring.strftime('%d-%m-%Y'))
if trakt_list is None:
return
trakt_list = [i for i in trakt_list if i['show']['ids']['trakt'] not in hidden_shows]
self.mixedEpisodeBuilder(trakt_list)
tools.closeDirectory('episodes')
def myUpcomingEpisodes(self):
tomorrow = (datetime.date.today() + datetime.timedelta(days=1)).strftime('%Y-%m-%d')
upcoming_episodes = database.get(trakt.json_response, 24, 'calendars/my/shows/%s/30' % tomorrow)
sort = sorted(upcoming_episodes, key=lambda i: i['first_aired'])
sort = [i['episode']['ids']['trakt'] for i in sort]
sort = {'type': None, 'id_list': sort}
self.mixedEpisodeBuilder(upcoming_episodes, sort=sort, hide_watched=False, hide_unaired=False,
prepend_date=True)
tools.closeDirectory('episodes')
def showsNetworks(self):
trakt_list = database.get(trakt.json_response, 24, 'networks')
if trakt_list is None:
return
list_items = []
for i in trakt_list:
list_items.append(tools.addDirectoryItem(i['name'], 'showsNetworkShows&actionArgs=%s&page=1' % i['name'],
'', '', bulk_add=True))
tools.addMenuItems(syshandle, list_items, len(list_items))
tools.closeDirectory('addons')
def showsNetworkShows(self, network, page):
trakt_list = database.get(trakt.json_response, 24, 'shows/popular?networks=%s&page=%s' % (network, page))
if trakt_list is None:
return
self.showListBuilder(trakt_list)
if len(trakt_list) == int(tools.getSetting('item.limit')):
tools.addDirectoryItem(tools.lang(32019), 'showsNetworkShows&actionArgs=%s&page=%s' %
(network, int(page) + 1), '', '')
tools.closeDirectory('tvshows')
def showsPopular(self, page):
trakt_list = database.get(trakt.json_response, 12, 'shows/popular?page=%s' % page)
if trakt_list is None:
return
self.showListBuilder(trakt_list)
tools.addDirectoryItem(tools.lang(32019), 'showsPopular&page=%s' % (int(page) + 1), '', '')
tools.closeDirectory('tvshows')
def showsRecommended(self):
trakt_list = database.get(trakt.json_response, 12, 'recommendations/shows?ignore_collected=true',
limit=True, limitOverride=100)
if trakt_list is None:
return
self.showListBuilder(trakt_list)
tools.closeDirectory('tvshows')
def showsTrending(self, page):
trakt_list = database.get(trakt.json_response, 12, 'shows/trending?page=%s' % page)
if trakt_list is None:
return
self.showListBuilder(trakt_list)
tools.addDirectoryItem(tools.lang(32019), 'showsTrending&page=%s' % (int(page) + 1), '', '')
tools.closeDirectory('tvshows')
def showsPlayed(self, page):
trakt_list = database.get(trakt.json_response, 12, 'shows/played?page=%s' % page)
if trakt_list is None:
return
self.showListBuilder(trakt_list)
tools.addDirectoryItem(tools.lang(32019), 'showsPlayed&page=%s' % (int(page) + 1), '', '')
tools.closeDirectory('tvshows')
def showsWatched(self, page):
trakt_list = database.get(trakt.json_response, 12, 'shows/watched?page=%s' % page)
if trakt_list is None:
return
self.showListBuilder(trakt_list)
tools.addDirectoryItem(tools.lang(32019), 'showsWatched&page=%s' % (int(page) + 1), '', '')
tools.closeDirectory('tvshows')
def showsCollected(self, page):
trakt_list = database.get(trakt.json_response, 12, 'shows/collected?page=%s' % page)
if trakt_list is None:
return
self.showListBuilder(trakt_list)
tools.addDirectoryItem(tools.lang(32019), 'showsCollected&page=%s' % (int(page) + 1), '', '')
tools.closeDirectory('tvshows')
def showsAnticipated(self, page):
trakt_list = database.get(trakt.json_response, 12, 'shows/anticipated?page=%s&language=%s'
% (page, language_code))
if trakt_list is None:
return
self.showListBuilder(trakt_list)
tools.addDirectoryItem(tools.lang(32019), 'showsAnticipated&page=%s' % (int(page) + 1), '', '')
tools.closeDirectory('tvshows')
def showsUpdated(self, page):
import datetime
date = datetime.date.today() - datetime.timedelta(days=31)
date = date.strftime('%Y-%m-%d')
trakt_list = database.get(trakt.json_response, 12, 'shows/updates/%s?page=%s' % (date, page))
if trakt_list is None:
return
self.showListBuilder(trakt_list)
tools.addDirectoryItem(tools.lang(32019), 'showsUpdated&page=%s' % (int(page) + 1), '', '')
tools.closeDirectory('tvshows')
def showSearchHistory(self):
history = database.getSearchHistory('show')
tools.addDirectoryItem(tools.lang(40142), 'showsSearch', '', '')
tools.addDirectoryItem(tools.lang(40140), 'clearSearchHistory', '', '', isFolder=False)
for i in history:
tools.addDirectoryItem(i, 'showsSearch&actionArgs=%s' % tools.quote(i), '', '')
tools.closeDirectory('addon')
    def showsSearch(self, actionArgs=None):
        if actionArgs is None:
            k = tools.showKeyboard('', tools.lang(32016))
            k.doModal()
            query = (k.getText() if k.isConfirmed() else None)
            if query is None or query == '':
                return
        else:
            query = actionArgs
        database.addSearchHistory(query, 'show')
        query = tools.deaccentString(tools.display_string(query))
        query = tools.quote(query)
        tools.closeAllDialogs()
        tools.closeDirectory('tvshows')
        tools.execute("Container.Update(%s?action=showsSearchResults&actionArgs=%s, replace)" % (sysaddon, query))
def showsSearchResults(self, query):
query = tools.quote_plus(tools.unquote(query))
trakt_list = trakt.json_response('search/show?query=%s&extended=full&type=show&field=title' % query)
if trakt_list is None:
return
self.showListBuilder(trakt_list)
tools.closeDirectory('tvshows')
def showsByActor(self, actionArgs):
        if actionArgs is None:
            k = tools.showKeyboard('', tools.lang(32016))
            k.doModal()
            query = (k.getText() if k.isConfirmed() else None)
            if query is None or query == '':
return
else:
query = tools.unquote(actionArgs)
database.addSearchHistory(query, 'showActor')
query = tools.deaccentString(query)
query = query.replace(' ', '-')
query = tools.quote_plus(query)
trakt_list = trakt.json_response('people/%s/shows' % query, limit=True)
try:
trakt_list = trakt_list['cast']
except:
import traceback
traceback.print_exc()
trakt_list = []
trakt_list = [i['show'] for i in trakt_list]
self.showListBuilder(trakt_list)
tools.closeDirectory('tvshows')
def showSeasons(self, args):
args = tools.get_item_information(args)
self.seasonListBuilder(args['ids']['trakt'])
tools.closeDirectory('seasons')
def seasonEpisodes(self, args):
args = tools.get_item_information(args)
show_id = args['showInfo']['ids']['trakt']
if 'seasonInfo' in args:
season_number = args['seasonInfo']['info']['season']
else:
season_number = args['info']['season']
self.episodeListBuilder(show_id, season_number)
tools.closeDirectory('episodes', sort='episode')
def showGenres(self):
tools.addDirectoryItem(tools.lang(32065), 'showGenresGet', '', '', isFolder=True)
genres = database.get(trakt.json_response, 24, 'genres/shows')
if genres is None:
return
for i in genres:
tools.addDirectoryItem(i['name'], 'showGenresGet&actionArgs=%s' % i['slug'], '', '', isFolder=True)
tools.closeDirectory('addons')
def showGenreList(self, args, page):
if page is None:
page = 1
if args is None:
genre_display_list = []
genre_string = ''
genres = database.get(trakt.json_response, 24, 'genres/shows')
for genre in genres:
genre_display_list.append(genre['name'])
genre_multiselect = tools.showDialog.multiselect(tools.addonName + ": Genre Selection", genre_display_list)
if genre_multiselect is None: return
for selection in genre_multiselect:
genre_string += ', %s' % genres[selection]['slug']
genre_string = genre_string[2:]
else:
genre_string = args
page = int(page)
trakt_list = database.get(trakt.json_response, 12,
'shows/popular?genres=%s&page=%s' % (genre_string, page))
if trakt_list is None:
return
self.showListBuilder(trakt_list)
tools.addDirectoryItem(tools.lang(32019),
'showGenresGet&actionArgs=%s&page=%s' % (genre_string, page + 1), None, None)
tools.closeDirectory('tvshows')
def showsRelated(self, args):
trakt_list = database.get(trakt.json_response, 12, 'shows/%s/related' % args)
if trakt_list is None:
return
self.showListBuilder(trakt_list)
tools.closeDirectory('tvshows')
def showYears(self, year=None, page=None):
if year is None:
current_year = int(tools.datetime_workaround(datetime.datetime.today().strftime('%Y-%m-%d')).year)
all_years = reversed([year for year in range(1900, current_year+1)])
menu_items = []
for year in all_years:
menu_items.append(tools.addDirectoryItem(str(year), 'showYears&actionArgs=%s' % year, '', '',
bulk_add=True))
tools.addMenuItems(syshandle, menu_items, len(menu_items))
tools.closeDirectory('tvshows')
else:
if page is None:
page = 1
trakt_list = trakt.json_response('shows/popular?years=%s&page=%s' % (year, page))
self.showListBuilder(trakt_list)
tools.addDirectoryItem(tools.lang(32019),
'showYears&actionArgs=%s&page=%s' % (year, int(page) + 1), None, None)
tools.closeDirectory('tvshows')
######################################################
# MENU TOOLS
######################################################
def seasonListBuilder(self, show_id, smartPlay=False):
self.itemList = trakt_database.get_season_list(show_id)
self.itemList = [x for x in self.itemList if x is not None and 'info' in x]
self.itemList = sorted(self.itemList, key=lambda k: k['info']['season'])
if len(self.itemList) == 0:
tools.log('We received no titles to build a list', 'error')
return
hide_specials = False
if tools.getSetting('general.hideSpecials') == 'true':
hide_specials = True
item_list = []
for item in self.itemList:
try:
if hide_specials and int(item['info']['season']) == 0:
continue
action = 'seasonEpisodes'
args = {'trakt_id': item['showInfo']['ids']['trakt'],
'season': item['info']['season'],
'item_type': 'season'}
args = tools.quote(json.dumps(args, sort_keys=True))
item['trakt_object']['show_id'] = item['showInfo']['ids']['trakt']
name = item['info']['season_title']
if not self.is_aired(item['info']) or 'aired' not in item['info']:
if tools.getSetting('general.hideUnAired') == 'true':
continue
name = tools.colorString(name, 'red')
name = tools.italic_string(name)
item['info']['title'] = name
item['info'] = tools.clean_air_dates(item['info'])
except:
import traceback
traceback.print_exc()
continue
if smartPlay is True:
return args
cm = []
if tools.getSetting('trakt.auth') != '':
cm.append(('Trakt Manager', 'RunPlugin(%s?action=traktManager&actionArgs=%s)' % (sysaddon, args)))
if tools.context_addon():
cm = []
item_list.append(tools.addDirectoryItem(name, action, item['info'], item['art'], cm=cm, isFolder=True,
isPlayable=False, actionArgs=args, set_ids=item['ids'],
bulk_add=True))
tools.addMenuItems(syshandle, item_list, len(item_list))
def episodeListBuilder(self, show_id, season_number, smartPlay=False, hide_unaired=False):
try:
item_list = []
self.itemList = trakt_database.get_season_episodes(show_id, season_number)
self.itemList = [x for x in self.itemList if x is not None and 'info' in x]
if len(self.itemList) == 0:
tools.log('We received no titles to build a list', 'error')
return
try:
self.itemList = sorted(self.itemList, key=lambda k: k['info']['episode'])
except:
pass
for item in self.itemList:
cm = []
try:
if tools.getSetting('smartplay.playlistcreate') == 'true' and smartPlay is False:
action = 'smartPlay'
playable = False
else:
playable = True
action = 'getSources'
args = {'trakt_id': item['showInfo']['ids']['trakt'],
'season': item['info']['season'],
'episode': item['info']['episode'],
'item_type': 'episode'}
args = tools.quote(json.dumps(args, sort_keys=True))
name = item['info']['title']
if not self.is_aired(item['info']):
if tools.getSetting('general.hideUnAired') == 'true' or hide_unaired:
continue
else:
name = tools.colorString(name, 'red')
name = tools.italic_string(name)
item['info']['title'] = name
item['info'] = tools.clean_air_dates(item['info'])
except:
import traceback
traceback.print_exc()
continue
cm.append((tools.lang(32070),
'XBMC.PlayMedia(%s?action=shufflePlay&actionArgs=%s)' % (sysaddon, args)))
cm.append(('Browse Season',
'XBMC.Container.Update(%s?action=seasonEpisodes&actionArgs=%s)' %
(sysaddon,
tools.quote(json.dumps({'trakt_id': item['showInfo']['ids']['trakt'],
'season': item['info']['season'],
'item_type': 'season'})))))
cm.append((tools.lang(33022),
'PlayMedia(%s?action=getSources&seren_reload=true&actionArgs=%s)' % (sysaddon, args)))
cm.append((tools.lang(32066),
'PlayMedia(%s?action=getSources&source_select=true&actionArgs=%s)' % (sysaddon, args)))
if tools.getSetting('trakt.auth') != '':
cm.append(('Trakt Manager', 'RunPlugin(%s?action=traktManager&actionArgs=%s)' % (sysaddon, args)))
if tools.context_addon():
cm = []
if tools.getSetting('premiumize.enabled') == 'true' and tools.getSetting('premiumize.pin') != '':
cm.append((tools.lang(32068),
'XBMC.RunPlugin(%s?action=filePicker&actionArgs=%s)' % (sysaddon, args)))
item_list.append(tools.addDirectoryItem(name, action, item['info'], item['art'], isFolder=False,
isPlayable=playable, actionArgs=args, bulk_add=True,
set_ids=item['ids'], cm=cm))
if smartPlay is True:
return item_list
else:
tools.addMenuItems(syshandle, item_list, len(item_list))
except:
import traceback
traceback.print_exc()
def mixedEpisodeBuilder(self, trakt_list, sort=None, hide_watched=False, smartPlay=False, hide_unaired=True,
prepend_date=False):
self.threadList = []
try:
if len(trakt_list) == 0:
tools.log('We received no titles to build a list', 'error')
return
self.itemList = trakt_database.get_episode_list(trakt_list)
            self.itemList = [x for x in self.itemList if x is not None and 'info' in x
                             and x['info'].get('premiered') is not None]
if sort is None:
self.itemList = sorted(self.itemList,
key=lambda i: tools.datetime_workaround(i['info']['premiered'],
tools.trakt_gmt_format, False),
reverse=True)
elif sort is not False:
sort_list = []
for trakt_id in sort['id_list']:
try:
if not sort['type']:
item = [i for i in self.itemList if i['ids']['trakt'] == trakt_id][0]
else:
item = [i for i in self.itemList if i[sort['type']]['ids']['trakt'] == trakt_id][0]
sort_list.append(item)
except IndexError:
continue
except:
import traceback
traceback.print_exc()
self.itemList = sort_list
item_list = []
for item in self.itemList:
if item is None:
continue
if item['info'].get('title', '') == '':
continue
if hide_watched and item['info']['playcount'] != 0:
continue
cm = []
try:
name = tools.display_string(item['info']['title'])
if not self.is_aired(item['info']) and hide_unaired is True:
continue
elif not self.is_aired(item['info']):
name = tools.colorString(name, 'red')
name = tools.italic_string(name)
item['info']['title'] = name
item['info'] = tools.clean_air_dates(item['info'])
args = {'trakt_id': item['showInfo']['ids']['trakt'],
'season': item['info']['season'],
'episode': item['info']['episode'],
'item_type': 'episode'}
args = tools.quote(json.dumps(args, sort_keys=True))
if tools.getSetting('smartplay.playlistcreate') == 'true' and smartPlay is False:
action = 'smartPlay'
playable = False
else:
playable = True
action = 'getSources'
if self.title_appends == 'true':
name = "%s: %sx%s %s" % (tools.colorString(item['showInfo']['info']['tvshowtitle']),
tools.display_string(item['info']['season']).zfill(2),
tools.display_string(item['info']['episode']).zfill(2),
tools.display_string(item['info']['title']))
if prepend_date:
release_day = tools.datetime_workaround(item['info']['aired'])
release_day = release_day.strftime('%d %b')
name = '[%s] %s' % (release_day, name)
cm.append((tools.lang(32069),
'XBMC.Container.Update(%s?action=showSeasons&actionArgs=%s)' %
(sysaddon, tools.quote(json.dumps({'trakt_id': item['showInfo']['ids']['trakt'],
'item_type': 'show'})))))
cm.append(('Browse Season',
'XBMC.Container.Update(%s?action=seasonEpisodes&actionArgs=%s)' %
(sysaddon,
tools.quote(json.dumps({'trakt_id': item['showInfo']['ids']['trakt'],
'season': item['info']['season'],
'item_type': 'season'})))))
cm.append((tools.lang(32070),
'XBMC.PlayMedia(%s?action=shufflePlay&actionArgs=%s)' % (sysaddon, args)))
cm.append((tools.lang(32066),
'PlayMedia(%s?action=getSources&source_select=true&actionArgs=%s)' % (sysaddon, args)))
cm.append((tools.lang(33022),
'PlayMedia(%s?action=getSources&seren_reload=true&actionArgs=%s)' % (sysaddon, args)))
if tools.getSetting('trakt.auth') != '':
cm.append(('Trakt Manager', 'RunPlugin(%s?action=traktManager&actionArgs=%s)'
% (sysaddon, tools.quote(json.dumps(item['trakt_object'])))))
if tools.context_addon():
cm = []
if tools.getSetting('premiumize.enabled') == 'true' and tools.getSetting('premiumize.pin') != '':
cm.append((tools.lang(32068),
'XBMC.RunPlugin(%s?action=filePicker&actionArgs=%s)' % (sysaddon, args)))
item['info']['title'] = item['info']['originaltitle'] = name
item_list.append(tools.addDirectoryItem(name, action, item['info'], item['art'], isFolder=False,
isPlayable=playable, actionArgs=args, bulk_add=True,
set_ids=item['ids'], cm=cm))
except:
import traceback
traceback.print_exc()
continue
if smartPlay is True:
return item_list
else:
tools.addMenuItems(syshandle, item_list, len(item_list))
except:
import traceback
traceback.print_exc()
def showListBuilder(self, trakt_list, forceResume=False, info_only=False):
try:
if len(trakt_list) == 0:
tools.log('We received no titles to build a list', 'error')
return
except:
import traceback
traceback.print_exc()
return
if 'show' in trakt_list[0]:
trakt_list = [i['show'] for i in trakt_list]
show_ids = [i['ids']['trakt'] for i in trakt_list]
self.itemList = trakt_database.get_show_list(show_ids)
self.itemList = [x for x in self.itemList if x is not None and 'info' in x]
self.itemList = tools.sort_list_items(self.itemList, trakt_list)
item_list = []
for item in self.itemList:
try:
# Add Arguments to pass with items
args = {'trakt_id': item['ids']['trakt'], 'item_type': 'show'}
args = tools.quote(json.dumps(args, sort_keys=True))
cm = []
name = tools.display_string(item['info']['tvshowtitle'])
                if info_only:
return args
if not self.is_aired(item['info']):
if tools.getSetting('general.hideUnAired') == 'true':
continue
name = tools.colorString(name, 'red')
name = tools.italic_string(name)
item['info'] = tools.clean_air_dates(item['info'])
if 'setCast' in item:
set_cast = item['setCast']
else:
set_cast = False
if tools.getSetting('smartplay.clickresume') == 'true' or forceResume is True:
action = 'playbackResume'
else:
action = 'showSeasons'
# Context Menu Items
cm.append((tools.lang(32070),
'XBMC.PlayMedia(%s?action=shufflePlay&actionArgs=%s)' % (sysaddon, args)))
cm.append((tools.lang(32020),
'Container.Update(%s?action=showsRelated&actionArgs=%s)' % (sysaddon, item['ids']['trakt'])))
cm.append((tools.lang(32069),
'XBMC.Container.Update(%s?action=showSeasons&actionArgs=%s)' % (sysaddon, args)))
if tools.getSetting('trakt.auth') != '':
cm.append(('Trakt Manager', 'RunPlugin(%s?action=traktManager&actionArgs=%s)' % (sysaddon, args)))
                cm.append((tools.lang(40153),
                           'XBMC.PlayMedia(%s?action=playFromRandomPoint&actionArgs=%s)' % (sysaddon, args)))
if tools.context_addon():
cm = []
except:
import traceback
traceback.print_exc()
continue
item_list.append(tools.addDirectoryItem(name, action, item['info'], item['art'], cm=cm, isFolder=True,
isPlayable=False, actionArgs=args, bulk_add=True, set_cast=set_cast,
set_ids=item['ids']))
tools.addMenuItems(syshandle, item_list, len(item_list))
def runThreads(self, join=True):
for thread in self.threadList:
thread.start()
        if join:
for thread in self.threadList:
thread.join()
def _start_queue_workers(self):
self.queue_finished = False
for i in range(40):
self.threadList.append(Thread(target=self._queue_worker))
for i in self.threadList:
i.start()
def _finish_queue_workers(self):
self.queue_finished = True
for i in self.threadList:
i.join()
self.threadList = []
def _queue_worker(self):
while not self.task_queue.empty() or not self.queue_finished:
try:
target = self.task_queue.get(timeout=3)
except:
continue
try:
target[0](*target[1])
except:
import traceback
traceback.print_exc()
pass
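    # The three methods above form a small bounded producer/consumer pool:
    # _start_queue_workers launches 40 worker threads, the caller put()s
    # (callable, args) tuples onto self.task_queue (which blocks once 40 tasks
    # are pending, see Queue(40) in __init__), and _finish_queue_workers sets
    # queue_finished so the workers drain the queue and exit. Usage sketch
    # (process_show is a hypothetical target, mirroring myNextUp above):
    #
    #   menus = Menus()
    #   menus._start_queue_workers()
    #   for show in shows:
    #       menus.task_queue.put((process_show, (show,)), block=True)
    #   menus._finish_queue_workers()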
def is_aired(self, info):
try:
        try:
            air_date = info['aired']
        except KeyError:
            air_date = info.get('premiered')
if air_date == '' or air_date is None:
return False
if int(air_date[:4]) < 1970:
return True
time_format = tools.trakt_gmt_format
if len(air_date) == 10:
time_format = '%Y-%m-%d'
air_date = tools.gmt_to_local(air_date, format=time_format)
if tools.getSetting('general.datedelay') == 'true':
air_date = tools.datetime_workaround(air_date, time_format, False)
air_date += datetime.timedelta(days=1)
else:
air_date = tools.datetime_workaround(air_date, time_format, False)
if air_date > datetime.datetime.now():
return False
else:
return True
except:
import traceback
traceback.print_exc()
            # Assume an item is not aired if we have no information on it or fail to parse its air date
return False
| Ed57/plugin.video.seren | resources/lib/gui/tvshowMenus.py | tvshowMenus.py | py | 39,641 | python | en | code | null | github-code | 6 | [
{
"api_name": "sys.argv",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "resources.lib.indexers.trakt.TraktAPI",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "res... |
2000640644 | import webapp2
import jinja2
from google.appengine.api import users
from google.appengine.ext import ndb
import os
from snippets import Words
# from Tkinter import *
JINJA_ENVIRONMENT=jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True
)
class Add(webapp2.RequestHandler):
def split(self, word):
return [char for char in word]
def merge(self, word):
new=''
for x in word:
new += x
return new
def sort(self, word):
split_data =self.split(word)
# print(split_data)
sorted_alphabets = sorted(split_data)
# print(sorted_alphabets)
merged_word = self.merge(sorted_alphabets)
return merged_word
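    # Worked example (sketch): these helpers canonicalise a word so that all
    # anagrams share one key, e.g.
    #   self.split('listen')  ->  ['l', 'i', 's', 't', 'e', 'n']
    #   self.sort('listen')   ->  'eilnst'
    #   self.sort('silent')   ->  'eilnst'   # same key -> same anagram bucket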
# def incrementCounter(self, user)
def get(self):
self.response.headers['Content-Type'] = 'text/html'
display_message='Add words in the Anagram Engine'
Template_values ={
'display_message': display_message
}
template = JINJA_ENVIRONMENT.get_template('add.html')
self.response.write(template.render(Template_values))
def post(self):
action = self.request.get('add_button')
# print (action)
user = users.get_current_user()
# print(myuser)
add_string=self.request.get('word_input')
sorted_alphabets = sorted(self.split(add_string.lower()))
keyword=user.user_id() + self.merge(sorted_alphabets)
if action == 'Add':
key = ndb.Key('Words', keyword)
word = key.get()
myuser_key= ndb.Key('MyUser', user.user_id())
myuser=myuser_key.get()
if word == None:
word = Words(id=keyword)
word.count_of_words=0
word.put()
myuser.uniqueAnagramCounter=myuser.uniqueAnagramCounter+1
myuser.put()
string = keyword
if string == '' or string == None or len(string)<3:
self.redirect('/add')
return
word_doesnt_exists = True
List = []
for i in word.wordsList:
print(i)
List.append(i)
print(List.count(add_string))
if List.count(add_string.lower())>0:
word_doesnt_exists=False
print('word exists')
if(word_doesnt_exists):
word.wordsList.append(add_string.lower())
word.count_of_words=word.count_of_words+1
word.alphabet_no_List.append(len(add_string))
word.put()
myuser.wordCounter= myuser.wordCounter+1
myuser.put()
# Code to read from text document
# root = Tk()
# root.fileName = filedialog.askopenfilename(filetypes =(("All text file", "*.txt")))
        word_list = []
        with open("words.txt", "r") as f:
            for line in f.readlines():
                # sorted_word_from_text = self.merge(sorted(self.split(line.strip())))
                if line.rstrip():
                    # print (sorted_word_from_text)
                    word_list.append(line.rstrip())
# user = users.get_current_user()
file_action = self.request.get('add_from_files')
if file_action=='Add':
            print(len(word_list))
            for i in word_list:
if len(i)>0:
keyword1=user.user_id() + self.sort(i)
key = ndb.Key('Words', keyword1)
word = key.get()
# print(word)
new_word = False
if word!=None:
if word.wordsList.count(i)==0:
word.wordsList.append(i)
word.count_of_words=word.count_of_words+1
word.alphabet_no_List.append(len(i))
word.put()
# word.wordsList.append(i)
# word.count_of_words=word.count_of_words+1
# word.alphabet_no_List.append(len(i))
# word.put()
else:
new_word=True
if(new_word):
word = Words(id=keyword1)
print(i + " word is added")
word.wordsList.append(i)
word.count_of_words=1
word.alphabet_no_List.append(len(i))
word.put()
print(i)
self.redirect('/add')
self.redirect('/add')
# raw_word =self.request.get('word_input')
# sorted_alphabets = sorted(self.split(raw_word.lower()))
# user = users.get_current_user()
# # used as a key to display only certain content to certain
# keyword =user.user_id()+merge(sorted_alphabets)
# print(keyword)
# # if action =='Add':
#
#
# # use user_id()+ keyword as the key.
# word_key = ndb.Key('Words', keyword)
# word = word_key.get()
#
# if word==None:
# # word = Word(id=keyword)
# word.word = raw_word
# word.count_of_alphabets = len(raw_word)
# word.count_of_words = 1
# word.put()
# word.word.append(raw_word)
# word.put()
# self.redirect('/add')
#
# else:
# word.word.append(raw_word)
# word.count_of_alphabets = len(raw_word)
# countOfWords = word.count_of_words
# word.count_of_words=countOfWords+1
# word.put()
# self.redirect('/add')
| yolo117/Anagram-Checker | add.py | add.py | py | 5,784 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "jinja2.Environment",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "jinja2.FileSystemLoader",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
... |
8372518963 | import pycosat
from pprint import pprint
# number of cells in sudoku
NUM_CELLS = 81
# these indices mark the top-left row/column of each 3x3 subsquare and can be used to iterate them
# V V V
# 1 2 3 4 5 6 7 8 9
#> 1 |0 0 0| 0 0 0| 0 0 0|
# 2 |0 0 0| 0 0 0| 0 0 0|
# 3 |0 0 0| 0 0 0| 0 0 0|
# ---------------------
#> 4 |0 0 0| 0 0 0| 0 0 0|
# 5 |0 0 0| 0 0 0| 0 0 0|
# 6 |0 0 0| 0 0 0| 0 0 0|
# --------------------
#> 7 |0 0 0| 0 0 0| 0 0 0|
# 8 |0 0 0| 0 0 0| 0 0 0|
# 9 |0 0 0| 0 0 0| 0 0 0|
SUBSQUARE_BOUNDARIES = [1, 4, 7]
# highest digit used in sudoku
NUM_DIGITS = 9
def get_cell_value(row, column, digit):
    """
    Make a unique id for a cell.
    The first digit is the row, the second the column and the third the
    digit value, so the id is row*100 + column*10 + digit.
    Ex: cell (1,3) and cell (3,1) with digit 7 get different ids:
    cell (1,3) = 137
    cell (3,1) = 317
    """
    return row * 100 + column * 10 + digit
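# Quick check (sketch): because row, column and digit are each in 1..9, the id
# is a three-digit number with no carries, e.g.
#   get_cell_value(1, 3, 7) == 137 != 317 == get_cell_value(3, 1, 7)
# so the mapping from (row, column, digit) to variable ids is injective.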
def get_base_clauses():
"""
make all NUM_VARIABLES used in sudoku board, named by id
Ex:
for cell (1,1) and digit 7 can be named: 117
for cell (2,3) and digit 5 can be named: 235
"""
base_clauses = []
for row in range(1, NUM_DIGITS+1):
for column in range(1, NUM_DIGITS+1):
clauses = []
for digit in range(1, NUM_DIGITS+1):
clauses.append(get_cell_value(row,column,digit))
base_clauses.append(clauses)
return base_clauses
def get_unique_cells_clauses():
"""
make clauses guarantee that a cell can just appear once for sudoku board.
to make this each cell will have the next cell with the clause:
~current_digit or ~next_digit
Example:
to cell 111 there the clauses:
(-111,-112),(-111,-113),(-111,-114),...,(-111,-999)
"""
unique_digits_clauses = []
for row in range(1, NUM_DIGITS+1):
for column in range(1,NUM_DIGITS+1):
for digit in range(1, NUM_DIGITS+1):
for next_digit in range(digit+1, NUM_DIGITS+1):
cell_id = -get_cell_value(row,column,digit)
next_cell_id = -get_cell_value(row,column,next_digit)
unique_digits_clauses.append([cell_id,next_cell_id])
return unique_digits_clauses
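# Counting check (sketch): each of the 81 cells yields C(9, 2) == 36 pairwise
# clauses, i.e. 81 * 36 == 2916 clauses in total. Combined with the
# "at least one digit" clauses from get_base_clauses, every cell is assigned
# exactly one digit.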
def get_unique_subset_clauses(board_subset):
"""
this guarantee that a cell appear only once in the board subset
"""
subset_clauses = []
subset = enumerate(board_subset)
for index, first_tuple in enumerate(board_subset):
for n_index, n_tuple in enumerate(board_subset):
if index < n_index:
for digit in range(1, NUM_DIGITS + 1):
clause = [-get_cell_value(
first_tuple[0], first_tuple[1], digit),
-get_cell_value(
n_tuple[0], n_tuple[1], digit)]
subset_clauses.append(clause)
return subset_clauses
def get_row_unique_clauses():
"""
    Guarantee that each digit appears only once in every row
"""
unique_clauses = []
for row in range(1,NUM_DIGITS +1):
subset = []
for column in range(1, NUM_DIGITS+1):
subset.append((row,column))
unique_clauses.extend(get_unique_subset_clauses(subset))
return unique_clauses
def get_columns_unique_clauses():
"""
    Guarantee that each digit appears only once in every column
"""
unique_clauses = []
for row in range(1,NUM_DIGITS +1):
subset = []
for column in range(1, NUM_DIGITS+1):
subset.append((column,row))
unique_clauses.extend(get_unique_subset_clauses(subset))
return unique_clauses
def get_square_unique_clauses():
"""
    Guarantee that each digit appears only once in every 3x3 square
"""
subset_clauses = []
    for row in SUBSQUARE_BOUNDARIES:
        for column in SUBSQUARE_BOUNDARIES:
subset = [] # make subset 3x3
for k in range(9):
subset.append((row+k%3,column+k//3))
subset_clauses.extend(get_unique_subset_clauses(subset))
return subset_clauses
def get_sudoku_clauses():
"""
    Combine all clause groups that together encode a valid sudoku
"""
sudoku_clauses = []
sudoku_clauses.extend(get_base_clauses())
sudoku_clauses.extend(get_unique_cells_clauses())
sudoku_clauses.extend(get_row_unique_clauses())
sudoku_clauses.extend(get_columns_unique_clauses())
sudoku_clauses.extend(get_square_unique_clauses())
return sudoku_clauses
def get_single_clauses(sudoku_board):
"""
    Emit a unit clause for every pre-filled cell of the board so the
    solver is forced to keep those digits in the solution
"""
single_clauses = []
for row in range(1, NUM_DIGITS+1):
for column in range(1,NUM_DIGITS+1):
cell_value = sudoku_board[row-1][column-1]
if cell_value:
single_clauses.append(
[get_cell_value(row,column,cell_value)])
return single_clauses
def get_cell_solution(sudoku_solution, row, column):
"""
    Look up which digit the solution assigns to cell (row, column)
"""
for digit in range(1, NUM_DIGITS+1):
if get_cell_value(row,column,digit) in sudoku_solution:
return digit
return -1
def solve_sudoku(sudoku_board):
"""
    Generate the sudoku clauses, feed them to pycosat and decode the solution
"""
sudoku_clauses = get_sudoku_clauses()
single_clauses = get_single_clauses(sudoku_board)
sudoku_clauses.extend(single_clauses)
    solution = pycosat.solve(sudoku_clauses)
    if not isinstance(solution, list):
        # pycosat returns the string 'UNSAT' or 'UNKNOWN' instead of a list
        raise ValueError('no solution found: %s' % solution)
    sudoku_solution = set(solution)
for row in range(1, NUM_DIGITS+1):
for column in range(1, NUM_DIGITS+1):
sudoku_board[row-1][column-1] = get_cell_solution(
sudoku_solution, row, column)
return sudoku_board
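# Rough size of the full encoding (sketch): 81 at-least-one clauses,
# 2916 per-cell at-most-one clauses, and 3 * 2916 row/column/square
# uniqueness clauses -- about 1.2e4 clauses overall, which pycosat
# typically solves in well under a second.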
def main():
print ("Sudoku problem:")
sudoku_problem = [[0, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 6, 0, 0, 0, 0, 3],
[0, 7, 4, 0, 8, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 3, 0, 0, 2],
[0, 8, 0, 0, 4, 0, 0, 1, 0],
[6, 0, 0, 5, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 7, 8, 0],
[5, 0, 0, 0, 0, 9, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 4, 0]]
pprint(sudoku_problem)
print('\nGenerating solution:')
sudoku_solution = solve_sudoku(sudoku_problem)
pprint(sudoku_solution)
if __name__ == '__main__':
main()
| macartur/programming_ai | sudoku.py | sudoku.py | py | 6,456 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pycosat.solve",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "pprint.pprint",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "pprint.pprint",
"line_number": 208,
"usage_type": "call"
}
] |
43111552824 | from typing import Any, Dict, Iterable
import pandas as pd
from fugue import DataFrame, FugueWorkflow, PandasDataFrame, out_transform, transform
from fugue.constants import FUGUE_CONF_WORKFLOW_CHECKPOINT_PATH
def test_transform():
pdf = pd.DataFrame([[1, 10], [0, 0], [1, 1], [0, 20]], columns=["a", "b"])
def f1(df: pd.DataFrame) -> pd.DataFrame:
return df.sort_values("b").head(1)
result = transform(pdf, f1, schema="*")
assert isinstance(result, pd.DataFrame)
assert result.values.tolist() == [[0, 0]]
# schema: *
def f2(df: pd.DataFrame) -> pd.DataFrame:
return df.sort_values("b").head(1)
result = transform(pdf, f2)
assert isinstance(result, pd.DataFrame)
assert result.values.tolist() == [[0, 0]]
result = transform(pdf, f2, partition=dict(by=["a"]))
assert isinstance(result, pd.DataFrame)
assert sorted(result.values.tolist(), key=lambda x: x[0]) == [[0, 0], [1, 1]]
result = transform(
pdf, f2, partition=dict(by=["a"]), force_output_fugue_dataframe=True
)
assert isinstance(result, DataFrame)
ppdf = PandasDataFrame(pdf)
assert isinstance(transform(ppdf, f2), DataFrame)
# schema: *
def f3(df: pd.DataFrame, called: callable) -> pd.DataFrame:
called()
return df
cb = Callback()
result = transform(pdf, f3, callback=cb.called)
assert 1 == cb.ct
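# Sketch (not one of the tests): transform() also accepts an execution engine,
# so the same pandas function can be scaled out without code changes, e.g.
#
#   result = transform(pdf, f2, engine="spark")
#
# "spark" assumes pyspark is installed; with no engine given, fugue runs
# locally on pandas as in the tests above.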
def test_transform_from_yield(tmpdir):
# schema: *,x:int
def f(df: pd.DataFrame) -> pd.DataFrame:
return df.assign(x=1)
dag = FugueWorkflow()
dag.df([[0]], "a:int").yield_dataframe_as("x1")
dag.df([[1]], "b:int").yield_dataframe_as("x2")
dag.run("", {FUGUE_CONF_WORKFLOW_CHECKPOINT_PATH: str(tmpdir)})
result = transform(dag.yields["x1"], f)
assert isinstance(result, DataFrame)
assert result.as_array(type_safe=True) == [[0, 1]]
result = transform(
dag.yields["x2"],
f,
engine_conf={FUGUE_CONF_WORKFLOW_CHECKPOINT_PATH: str(tmpdir)},
)
assert isinstance(result, DataFrame)
assert result.as_array(type_safe=True) == [[1, 1]]
def test_out_transform(tmpdir):
pdf = pd.DataFrame([[1, 10], [0, 0], [1, 1], [0, 20]], columns=["a", "b"])
class T:
def __init__(self):
self.n = 0
def f(self, df: Iterable[Dict[str, Any]]) -> None:
self.n += 1
t = T()
out_transform(pdf, t.f)
assert 1 == t.n
t = T()
out_transform(pdf, t.f, partition=dict(by=["a"]))
assert 2 == t.n
dag = FugueWorkflow()
dag.df(pdf).yield_dataframe_as("x1")
dag.df(pdf).yield_dataframe_as("x2")
dag.run("", {FUGUE_CONF_WORKFLOW_CHECKPOINT_PATH: str(tmpdir)})
t = T()
out_transform(dag.yields["x1"], t.f)
assert 1 == t.n
t = T()
out_transform(
dag.yields["x2"],
t.f,
partition=dict(by=["a"]),
engine_conf={FUGUE_CONF_WORKFLOW_CHECKPOINT_PATH: str(tmpdir)},
)
assert 2 == t.n
# schema: *
def f3(df: pd.DataFrame, called: callable) -> pd.DataFrame:
called()
return df
cb = Callback()
result = out_transform(pdf, f3, callback=cb.called)
assert 1 == cb.ct
class Callback:
def __init__(self):
self.ct = 0
def called(self) -> None:
self.ct += 1
| ofili/Wrangle-and-Analyze-Data | venv/Lib/site-packages/tests/fugue/test_interfaceless.py | test_interfaceless.py | py | 3,306 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.DataFrame",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "fugue.transform",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pandas.DataFra... |
38416857272 | from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag, Ingredient, Recipe
from recipe.serializers import RecipeSerializer, RecipeDetailSerializer
RECIPE_URL = reverse('recipe:recipe-list')
# /api/recipe/recipes
# /api/recipe/recipes/1/
def detail_url(recipe_id):
"""return recipe detail url"""
return reverse('recipe:recipe-detail', args=[recipe_id])
def sample_tag(user, name='Main Course'):
"""Create and return sample tag"""
return Tag.objects.create(user=user, name=name)
def sample_ingredient(user, name='Cinnamon'):
"""Create and return sample ingredient"""
return Ingredient.objects.create(user=user, name=name)
def sample_recipe(user,**params):
"""Create and return sample recipe"""
test_recipe = {
'title': 'Mushroom Chicken',
'time_minutes': 10,
'price': 5.00
}
# update will create/update keys in dictionary
test_recipe.update(params)
return Recipe.objects.create(user=user, **test_recipe)
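# Example: **params lets callers override any default, e.g.
#   sample_recipe(user=self.user, title='Cake', price=15.00)
# keeps the default time_minutes but swaps in a custom title and price.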
class PublicRecipeTests(TestCase):
"""Test publicly avaialble tags API"""
    def setUp(self):
self.client = APIClient()
def test_auth_required(self):
res = self.client.get(RECIPE_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeTests(TestCase):
    """Test the authenticated recipe API"""
def setUp(self):
self.user = get_user_model().objects.create_user(
'test12@test.com',
'testpass',
)
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_retrieve_recipe(self):
"""Test retrieving a list of recipes"""
sample_recipe(user=self.user)
sample_recipe(user=self.user)
res = self.client.get(RECIPE_URL)
recipes = Recipe.objects.all().order_by('-id')
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_recipes_limited(self):
"""Test that recipes are retrieved for user"""
user2 = get_user_model().objects.create_user(
'new@test.com',
'testpass'
)
sample_recipe(user=user2)
sample_recipe(user=self.user)
res = self.client.get(RECIPE_URL)
recipes = Recipe.objects.filter(user=self.user)
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data, serializer.data)
def test_view_recipe_detail(self):
"""TEst viewing recipe detail"""
recipe=sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
recipe.ingredients.add(sample_ingredient(user=self.user))
url = detail_url(recipe.id)
res = self.client.get(url)
serializer = RecipeDetailSerializer(recipe)
self.assertEqual(res.data, serializer.data)
def test_create_recipe(self):
"""Test creating recipe"""
new_recipe = {
'title': 'Cake',
'time_minutes': 30,
'price': 15.00
}
res = self.client.post(RECIPE_URL, new_recipe)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
for key in new_recipe.keys():
self.assertEqual(new_recipe[key], getattr(recipe, key))
def test_create_recipe_with_tags(self):
"""Creating recipe with tags"""
tag1 = sample_tag(user=self.user, name='Vegan')
tag2 = sample_tag(user=self.user, name='Dessert')
new_recipe = {
'title': 'CheeseCake',
'tags': [tag1.id, tag2.id],
'time_minutes': 30,
'price': 15.00
}
res = self.client.post(RECIPE_URL, new_recipe)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
tags = recipe.tags.all()
self.assertEqual(tags.count(), 2)
self.assertIn(tag1, tags)
self.assertIn(tag2, tags)
def test_create_recipe_with_ingredients(self):
"""Creating recipe with ingredients"""
ingredient1 = sample_ingredient(user=self.user, name='Shrimp')
ingredient2 = sample_ingredient(user=self.user, name='Ginger')
new_recipe = {
'title': 'Prawn curry',
'ingredients': [ingredient1.id, ingredient2.id],
'time_minutes': 25,
'price': 20.00
}
res = self.client.post(RECIPE_URL, new_recipe)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
ingredients = recipe.ingredients.all()
self.assertEqual(ingredients.count(), 2)
self.assertIn(ingredient1, ingredients)
self.assertIn(ingredient2, ingredients)
def test_partial_update_recipe(self):
"""test updating recipe with patch"""
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
new_tag = sample_tag(user=self.user, name='Curry')
payload = {'title': 'Chicken tikka', 'tags': [new_tag.id]}
url = detail_url(recipe.id)
self.client.patch(url,payload)
recipe.refresh_from_db()
self.assertEqual(recipe.title, payload['title'])
tags = recipe.tags.all()
self.assertEqual(len(tags), 1)
self.assertIn(new_tag, tags)
    def test_full_update_recipe(self):
"""Test updating a recipe with put"""
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
payload = {
'title': 'Spaghetti',
'time_minutes': 15,
'price': 10.00
}
url = detail_url(recipe.id)
self.client.put(url,payload)
recipe.refresh_from_db()
self.assertEqual(recipe.title, payload['title'])
self.assertEqual(recipe.time_minutes, payload['time_minutes'])
self.assertEqual(recipe.price, payload['price'])
tags = recipe.tags.all()
self.assertEqual(len(tags), 0)
| deveshp530/recipe-app-api | app/recipe/tests/test_recipe.py | test_recipe.py | py | 6,459 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.urls.reverse",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.test.TestCase",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "rest_fra... |
25950565947 | import sys
import csv
import operator
import numpy as np
import scipy.spatial.distance as sd
import pickle
#python compare_google.py [google_match-norm] [avg-$count-norm] states.p statescores$count
google = []
matrix = []
infile = sys.argv[2]
google_reader = csv.reader(open(sys.argv[1], 'rb'), delimiter=',')
matrix_reader = csv.reader(open(infile, 'rb'), delimiter=',')
states = pickle.load(open(sys.argv[3], 'rb'))
results = open(sys.argv[4], 'wb')
weeks = 52
def compute_score(m, g):
    total = 0
    for i in range(m.shape[1]):
        total += sd.euclidean(m[:, i], g[:, i])
    score = total / float(m.shape[1])
    return score
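# Worked example (sketch): with
#   m = np.array([[0., 0.], [0., 0.]])
#   g = np.array([[3., 0.], [4., 0.]])
# column 0 contributes sd.euclidean([0, 0], [3, 4]) == 5.0 and column 1
# contributes 0.0, so compute_score(m, g) == 2.5, the mean per-column
# euclidean distance. (The per-state loop below calls sd.euclidean directly,
# so this helper goes unused in this script.)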
#store google data
for row in google_reader:
row = [(float(x) if x else 0) for x in row]
google.append(np.array(row))
google = np.array(google)
for row in matrix_reader:
row = [(float(x) if x else 0) for x in row]
matrix.append(np.array(row))
matrix = np.array(matrix)
scores = {}
for i in range(google.shape[1]):
scores[str(i) + ' ' + states[i]] = sd.euclidean(matrix[:,i],google[:,i])
sorted_scores = sorted(scores.iteritems(), key=operator.itemgetter(1))
for name,score in sorted_scores:
results.write(name+':'+str(score)+'\n')
results.close()
| kris-samala/LBSN | data_analysis/compare_states.py | compare_states.py | py | 1,233 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "sys.argv",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "csv.reader",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "csv.reader",
"line_number"... |
72580436348 | from fastapi import FastAPI
from pydantic import BaseModel
from fastapi.middleware.cors import CORSMiddleware
from reconocer_form import analyze_general_documents
from base_datos import crear_registro
import asyncio
app = FastAPI()
origins = ["*"]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials = True,
allow_methods = ["*"],
allow_headers = ["*"]
)
class Registro(BaseModel):
url: str
@app.get("/")
def verRegistros():
return "registros"
@app.post("/post")
async def crearRegistro(registro: Registro):
    respuesta = await analyze_general_documents(registro.url)
    registro_guardado = await crear_registro(registro.url, respuesta)
    if registro_guardado:
        return respuesta
    return "no se pudo guardar"
@app.put("/post")
def actualizarRegistro():
return "hola"
@app.delete("/delete")
def eliminarRegistro():
    return "hola"
| jefryne/web_placas | ia/detectar documento/api.py | api.py | py | 898 | python | es | code | 0 | github-code | 6 | [
{
"api_name": "fastapi.FastAPI",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "fastapi.middleware.cors.CORSMiddleware",
"line_number": 12,
"usage_type": "argument"
},
{
"api_name": "pydantic.BaseModel",
"line_number": 19,
"usage_type": "name"
},
{
"api_... |
11816060482 | import numpy as np
import torch
with open("pdtSEIKA.csv", "r") as f:
f_reader = np.loadtxt(f, delimiter=',', dtype=np.float32)
predict = f_reader
f.close()
tensor = torch.from_numpy(np.load(r"C:\Users\cchen\PycharmProjects\LearnPyTorch/K05_excluded_xyz.npy")) # 101778,
# 15,10,10
with open("ai__K05_SEIKA2.csv", "w") as f:
for item, result in zip(tensor, predict):
pos = (item[0, 0, 0], item[5, 0, 0], item[10, 0, 0])
row = "{},{},{},{},{},{},{},{}\n".format(*pos, *result)
f.write(row)
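# Assumed layout (sketch): each item is a 15x10x10 block and the source array
# is named *_xyz, so item[0, 0, 0], item[5, 0, 0] and item[10, 0, 0] are read
# here as the x, y, z coordinates of the block; each CSV row is then "x,y,z"
# followed by the predicted values for that position.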
| cchenyixuan/Banira | utils/predict_map.py | predict_map.py | py | 545 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.loadtxt",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "torch.from_numpy",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line... |
16813784584 | import os
import copy
from typing import Dict
import numpy as np
import torch
from collections import defaultdict
from hsp.algorithms.population.policy_pool import PolicyPool
from hsp.runner.shared.base_runner import make_trainer_policy_cls
from hsp.utils.shared_buffer import SharedReplayBuffer
from hsp.algorithms.population.utils import _t2n
class TrainerPool:
"""TrainerPool maintains a pool of trainers, each trainer corresponding to one policy, both have the same name.
For policies that are not trained, use null trainer.
By specifying mapping from (env_id, agent_id) to trainer_name, TrainerPool creates buffer for each policy.
"""
def __init__(self, args, policy_pool: PolicyPool, device = torch.device("cpu")):
self.all_args = args
self.device = device
self.policy_pool = policy_pool
self.trainer_pool = {}
self.trainer_total_num_steps = defaultdict(int)
self.use_policy_in_env = dict(args._get_kwargs()).get('use_policy_in_env', False)
self.__loaded_population = False
self.__initialized = False
def policy_config(self, trainer_name):
return self.policy_pool.policy_config[trainer_name]
def policy_type(self, trainer_name):
if trainer_name.startswith("ppo") and trainer_name[-1] in "123":
            return int(trainer_name[-1])
elif trainer_name.startswith("policy"):
# preference policy
return 4
else:
raise RuntimeError(f"Cannot recognize policy type for {trainer_name}.")
def policy_id(self, trainer_name):
return int(self.policy_pool.policy_info[trainer_name][1]["id"] * self.policy_pool.num_policies - 1)
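    # Example for policy_id above (illustrative arithmetic only): if the stored
    # fractional "id" is 0.5 and num_policies == 4, the call returns
    # int(0.5 * 4 - 1) == 1; the fraction-to-index convention is an assumption
    # about how PolicyPool stores ids, not something stated in this file.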
def init_population(self):
self.on_training = []
self.best_r = defaultdict(float)
for policy_name, policy, policy_config, policy_train in self.policy_pool.all_policies():
# use the same name for trainer and policy
trainer_name = policy_name
trainer_cls, _ = make_trainer_policy_cls(policy_config[0].algorithm_name, use_single_network=policy_config[0].use_single_network)
trainer = trainer_cls(policy_config[0], policy, device = self.device)
self.trainer_pool[trainer_name] = trainer
self.best_r[trainer_name] = -1e9
if policy_train:
self.on_training.append(trainer_name)
# trans policies in policy pool to EvalPolicy
self.policy_pool.trans_to_eval()
# train info would update when a trainer performs training
self.train_infos = {}
self.train_infos.update({f"{trainer_name}-total_num_steps":0 for trainer_name in self.trainer_pool.keys()})
self.__loaded_population = True
def reset(self, map_ea2t, n_rollout_threads, num_agents, load_unused_to_cpu=False, **kwargs):
assert self.__loaded_population
self.map_ea2t = map_ea2t
self.n_rollout_threads = n_rollout_threads
self.num_agents = num_agents
self.control_agent_count = defaultdict(int)
self.control_agents = defaultdict(list)
for (e, a), trainer_name in self.map_ea2t.items():
self.control_agent_count[trainer_name] += 1
self.control_agents[trainer_name].append((e, a))
self.active_trainers = []
self.buffer_pool: Dict[str, SharedReplayBuffer] = {}
for trainer_name in self.trainer_pool.keys():
# set n_rollout_threads as control_agent_count[trainer_name] and num_agents as 1
if self.control_agent_count[trainer_name] > 0:
policy_args, obs_space, share_obs_space, act_space = self.policy_config(trainer_name)
self.buffer_pool[trainer_name] = SharedReplayBuffer(
policy_args, 1, obs_space, share_obs_space, act_space,
n_rollout_threads=self.control_agent_count[trainer_name])
self.trainer_pool[trainer_name].to(self.device)
self.active_trainers.append(trainer_name)
else:
if load_unused_to_cpu:
self.trainer_pool[trainer_name].to(torch.device("cpu"))
else:
self.trainer_pool[trainer_name].to(self.device)
self.buffer_pool[trainer_name] = None
#print("active trainers:", self.active_trainers)
self.__initialized = True
def extract_elements(self, trainer_name, x):
return np.stack([x[e][a] for e, a in self.control_agents[trainer_name]])
def skip(self, trainer_name):
# produce actions in parallel envs, skip this trainer
return (self.use_policy_in_env and trainer_name not in self.on_training) or (trainer_name.startswith("script:"))
def init_first_step(self, share_obs:np.ndarray, obs:np.ndarray):
assert self.__initialized
for trainer_name in self.active_trainers:
# extract corresponding (e, a) and add num_agent=1 dimension
obs_lst = np.expand_dims(self.extract_elements(trainer_name, obs), axis=1)
share_obs_lst = np.expand_dims(self.extract_elements(trainer_name, share_obs), axis=1)
self.buffer_pool[trainer_name].share_obs[0] = share_obs_lst.copy()
self.buffer_pool[trainer_name].obs[0] = obs_lst.copy()
self._step = 0
def reward_shaping_steps(self):
"""This should differ among algorithms and should be overrided by subclasses.
"""
reward_shaping_steps = []
for e in range(self.n_rollout_threads):
train_tot_num_steps = [self.trainer_total_num_steps[self.map_ea2t[(e, a)]] * int(self.map_ea2t[(e, a)] in self.on_training) for a in range(self.num_agents)]
reward_shaping_steps.append(max(train_tot_num_steps))
return reward_shaping_steps
@torch.no_grad()
def step(self, step):
assert self.__initialized
actions = np.full((self.n_rollout_threads, self.num_agents), fill_value=None).tolist()
self.step_data = dict()
for trainer_name in self.active_trainers:
self.trainer_total_num_steps[trainer_name] += self.control_agent_count[trainer_name]
self.train_infos[f"{trainer_name}-total_num_steps"] = self.trainer_total_num_steps[trainer_name]
if self.skip(trainer_name):
continue
trainer = self.trainer_pool[trainer_name]
buffer = self.buffer_pool[trainer_name]
trainer.prep_rollout()
value, action, action_log_prob, rnn_states, rnn_states_critic \
= trainer.policy.get_actions(np.concatenate(buffer.share_obs[step]),
np.concatenate(buffer.obs[step]),
np.concatenate(buffer.rnn_states[step]),
np.concatenate(buffer.rnn_states_critic[step]),
np.concatenate(buffer.masks[step]))
value = np.expand_dims(np.array(_t2n(value)), axis=1)
action = np.expand_dims(np.array(_t2n(action)), axis=1)
action_log_prob = np.expand_dims(np.array(_t2n(action_log_prob)), axis=1)
rnn_states = np.expand_dims(np.array(_t2n(rnn_states)), axis=1)
rnn_states_critic = np.expand_dims(np.array(_t2n(rnn_states_critic)), axis=1)
self.step_data[trainer_name] = value, action, action_log_prob, rnn_states, rnn_states_critic
for i, (e, a) in enumerate(self.control_agents[trainer_name]):
actions[e][a] = action[i][0]
return actions
def insert_data(self, share_obs, obs, rewards, dones, active_masks=None, bad_masks=None, infos=None):
"""
ndarrays of shape (n_rollout_threads, num_agents, *)
"""
assert self.__initialized
self._step += 1
for trainer_name in self.active_trainers:
if self.skip(trainer_name):
continue
trainer = self.trainer_pool[trainer_name]
buffer = self.buffer_pool[trainer_name]
value, action, action_log_prob, rnn_states, rnn_states_critic = self.step_data[trainer_name]
# (control_agent_count[trainer_name], 1, *)
obs_lst = np.expand_dims(self.extract_elements(trainer_name, obs), axis=1)
share_obs_lst = np.expand_dims(self.extract_elements(trainer_name, share_obs), axis=1)
rewards_lst = np.expand_dims(self.extract_elements(trainer_name, rewards), axis=1)
dones_lst = np.expand_dims(self.extract_elements(trainer_name, dones), axis=1)
rnn_states[dones_lst == True] = np.zeros(((dones_lst == True).sum(), buffer.recurrent_N, buffer.hidden_size), dtype=np.float32)
rnn_states_critic[dones_lst == True] = np.zeros(((dones_lst == True).sum(), *buffer.rnn_states_critic.shape[3:]), dtype=np.float32)
masks = np.ones((self.control_agent_count[trainer_name], 1, 1), dtype=np.float32)
masks[dones_lst == True] = np.zeros(((dones_lst == True).sum(), 1), dtype=np.float32)
bad_masks_lst = active_masks_lst = None
if bad_masks is not None:
bad_masks_lst = np.expand_dims(self.extract_elements(trainer_name, bad_masks), axis=1)
if active_masks is not None:
active_masks_lst = np.expand_dims(self.extract_elements(trainer_name, active_masks), axis=1)
if self.all_args.use_task_v_out:
value = value[:, :, self.policy_id(trainer_name)][:, :, np.newaxis]
buffer.insert(share_obs_lst, obs_lst, rnn_states, rnn_states_critic, action, action_log_prob, value, rewards_lst, masks, active_masks=active_masks_lst, bad_masks=bad_masks_lst)
if infos is not None:
if self.all_args.env_name == "Overcooked" and self.all_args.predict_other_shaped_info:
if not hasattr(buffer, "other_shaped_info"):
buffer.other_shaped_info = np.zeros((buffer.episode_length + 1, buffer.n_rollout_threads, 1, 12), dtype=np.int32)
for i, (e, a) in enumerate(self.control_agents[trainer_name]):
buffer.other_shaped_info[self._step, i, 0] = infos[e]["vec_shaped_info_by_agent"][1-a] # insert other agent's shaped info
# partner policy info
if self.all_args.env_name == "Overcooked":
if self.all_args.policy_group_normalization and not hasattr(buffer, "other_policy_type"):
buffer.other_policy_type = np.zeros((buffer.episode_length + 1, buffer.n_rollout_threads, 1, 1), dtype=np.int32)
for i, (e, a) in enumerate(self.control_agents[trainer_name]):
buffer.other_policy_type[:, i, :, :] = self.policy_type(self.map_ea2t[(e, 1-a)])
if not hasattr(buffer, "other_policy_id"):
buffer.other_policy_id = np.zeros((buffer.episode_length + 1, buffer.n_rollout_threads, 1, 1), dtype=np.int32)
for i, (e, a) in enumerate(self.control_agents[trainer_name]):
buffer.other_policy_id[:, i, :, :] = self.policy_id(self.map_ea2t[(e, 1-a)])
self.step_data = None
def compute_advantages(self):
all_adv = defaultdict(list)
for trainer_name in self.active_trainers:
trainer = self.trainer_pool[trainer_name]
buffer = self.buffer_pool[trainer_name]
if trainer_name in self.on_training:
advantages = trainer.compute_advantages(buffer)
for i, (e, a) in enumerate(self.control_agents[trainer_name]):
all_adv[(self.map_ea2t[(e, 0)], self.map_ea2t[(e, 1)], a)].append(advantages[:, i].mean())
return all_adv
def train(self, **kwargs):
assert self.__initialized
for trainer_name in self.active_trainers:
trainer = self.trainer_pool[trainer_name]
buffer = self.buffer_pool[trainer_name]
if trainer_name in self.on_training:
trainer.prep_rollout()
# compute returns
next_values = trainer.policy.get_values(np.concatenate(buffer.share_obs[-1]),
np.concatenate(buffer.rnn_states_critic[-1]),
np.concatenate(buffer.masks[-1]))
next_values = np.expand_dims(np.array(_t2n(next_values)), axis=1)
if self.all_args.use_task_v_out:
next_values = next_values[:, :, self.policy_id(trainer_name)][:, :, np.newaxis]
buffer.compute_returns(next_values, trainer.value_normalizer)
# train
trainer.prep_training()
train_info = trainer.train(buffer, turn_on=(self.trainer_total_num_steps[trainer_name] >= self.all_args.critic_warmup_horizon))
self.train_infos.update({f"{trainer_name}-{k}": v for k, v in train_info.items()})
self.train_infos.update({f"{trainer_name}-average_episode_rewards": np.mean(buffer.rewards) * buffer.episode_length})
# place first step observation of next episode
buffer.after_update()
return copy.deepcopy(self.train_infos)
def lr_decay(self, episode, episodes):
for trainer_name in self.on_training:
self.trainer_pool[trainer_name].policy.lr_decay(episode, episodes)
def update_best_r(self, d, save_dir=None):
for trainer_name, r in d.items():
trainer = self.trainer_pool[trainer_name]
if r > self.best_r[trainer_name]:
self.best_r[trainer_name] = r
if trainer_name in self.on_training and save_dir is not None:
if not os.path.exists(str(save_dir) + "/{}".format(trainer_name)):
os.makedirs(str(save_dir) + "/{}".format(trainer_name))
#print("save", str(save_dir) + "/{}".format(trainer_name), f"best_r")
if self.policy_config(trainer_name)[0].use_single_network:
policy_model = trainer.policy.model
torch.save(policy_model.state_dict(), str(save_dir) + "/{}/model_best_r.pt".format(trainer_name))
else:
policy_actor = trainer.policy.actor
torch.save(policy_actor.state_dict(), str(save_dir) + "/{}/actor_best_r.pt".format(trainer_name))
policy_critic = trainer.policy.critic
torch.save(policy_critic.state_dict(), str(save_dir) + "/{}/critic_best_r.pt".format(trainer_name))
def save(self, step, save_dir):
for trainer_name in self.on_training:
trainer = self.trainer_pool[trainer_name]
if not os.path.exists(str(save_dir) + "/{}".format(trainer_name)):
os.makedirs(str(save_dir) + "/{}".format(trainer_name))
trainer_step = self.trainer_total_num_steps[trainer_name]
#print("save", str(save_dir) + "/{}".format(trainer_name), f"periodic_{trainer_step}")
if self.policy_config(trainer_name)[0].use_single_network:
policy_model = trainer.policy.model
torch.save(policy_model.state_dict(), str(save_dir) + "/{}/model_periodic_{}.pt".format(trainer_name, trainer_step))
else:
policy_actor = trainer.policy.actor
torch.save(policy_actor.state_dict(), str(save_dir) + "/{}/actor_periodic_{}.pt".format(trainer_name, trainer_step))
policy_critic = trainer.policy.critic
torch.save(policy_critic.state_dict(), str(save_dir) + "/{}/critic_periodic_{}.pt".format(trainer_name, trainer_step))
| samjia2000/HSP | hsp/algorithms/population/trainer_pool.py | trainer_pool.py | py | 15,925 | python | en | code | 15 | github-code | 6 | [
{
"api_name": "hsp.algorithms.population.policy_pool.PolicyPool",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "torch.device",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 22,
"usage_type": "call"
},
{
... |
18680639270 | import collections
import torchvision.transforms as transforms
import os
import json
try:
from IPython import embed
except:
pass
_DATASETS = {}
Dataset = collections.namedtuple(
'Dataset', ['trainset', 'testset'])
def _add_dataset(dataset_fn):
_DATASETS[dataset_fn.__name__] = dataset_fn
return dataset_fn
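# Registry note (added): each @_add_dataset-decorated function below is stored in
# _DATASETS under its own __name__, so get_dataset() at the bottom of this file can
# dispatch on config.name, e.g. config.name == "inat2018" resolves to inat2018(root, config).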
def _get_transforms(augment=True, normalize=None):
if normalize is None:
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
basic_transform = [transforms.ToTensor(), normalize]
transform_train = []
if augment:
transform_train += [
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
]
else:
transform_train += [
transforms.Resize(256),
transforms.CenterCrop(224),
]
transform_train += basic_transform
transform_train = transforms.Compose(transform_train)
transform_test = [
transforms.Resize(256),
transforms.CenterCrop(224),
]
transform_test += basic_transform
transform_test = transforms.Compose(transform_test)
return transform_train, transform_test
def _get_mnist_transforms(augment=True, invert=False, transpose=False):
transform = [
transforms.ToTensor(),
]
if invert:
transform += [transforms.Lambda(lambda x: 1. - x)]
if transpose:
transform += [transforms.Lambda(lambda x: x.transpose(2, 1))]
transform += [
transforms.Normalize((.5,), (.5,)),
transforms.Lambda(lambda x: x.expand(3, 32, 32))
]
transform_train = []
transform_train += [transforms.Pad(padding=2)]
if augment:
transform_train += [transforms.RandomCrop(32, padding=4)]
transform_train += transform
transform_train = transforms.Compose(transform_train)
transform_test = []
transform_test += [transforms.Pad(padding=2)]
transform_test += transform
transform_test = transforms.Compose(transform_test)
return transform_train, transform_test
def _get_cifar_transforms(augment=True):
transform = [
transforms.ToTensor(),
transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
]
transform_train = []
if augment:
transform_train += [
transforms.Pad(padding=4, fill=(125, 123, 113)),
transforms.RandomCrop(32, padding=0),
transforms.RandomHorizontalFlip()]
transform_train += transform
transform_train = transforms.Compose(transform_train)
transform_test = []
transform_test += transform
transform_test = transforms.Compose(transform_test)
return transform_train, transform_test
def set_metadata(trainset, testset, config, dataset_name):
trainset.metadata = {
'dataset': dataset_name,
'task_id': config.task_id,
'task_name': trainset.task_name,
}
testset.metadata = {
'dataset': dataset_name,
'task_id': config.task_id,
'task_name': testset.task_name,
}
return trainset, testset
@_add_dataset
def inat2018(root, config):
from dataset.inat import iNat2018Dataset
transform_train, transform_test = _get_transforms()
trainset = iNat2018Dataset(root, split='train', transform=transform_train, task_id=config.task_id)
testset = iNat2018Dataset(root, split='val', transform=transform_test, task_id=config.task_id)
trainset, testset = set_metadata(trainset, testset, config, 'inat2018')
return trainset, testset
def load_tasks_map(tasks_map_file):
assert os.path.exists(tasks_map_file), tasks_map_file
with open(tasks_map_file, 'r') as f:
tasks_map = json.load(f)
tasks_map = {int(k): int(v) for k, v in tasks_map.items()}
return tasks_map
@_add_dataset
def cub_inat2018(root, config):
"""This meta-task is the concatenation of CUB-200 (first 25 tasks) and iNat (last 207 tasks).
- The first 10 tasks are classification of the animal species inside one of 10 orders of birds in CUB-200
(considering all orders except passeriformes).
    - The next 15 tasks are classification of species inside the 15 families of the order of passeriformes
    - The remaining 207 tasks are classification of the species inside each of 207 families in iNat
    As noted above, for CUB-200 10 tasks are classification of species inside an order, rather than inside a family
    as done in iNat (recall order > family > species). This is done because CUB-200 has very few images
    in each family of birds (except for the families of passeriformes). Hence, we go up a step in the taxonomy and
    consider classification inside orders and not families.
    """
NUM_CUB = 25
NUM_CUB_ORDERS = 10
NUM_INAT = 207
assert 0 <= config.task_id < NUM_CUB + NUM_INAT
transform_train, transform_test = _get_transforms()
if 0 <= config.task_id < NUM_CUB:
# CUB
from dataset.cub import CUBTasks, CUBDataset
tasks_map_file = os.path.join(root, 'cub/CUB_200_2011', 'final_tasks_map.json')
tasks_map = load_tasks_map(tasks_map_file)
task_id = tasks_map[config.task_id]
if config.task_id < NUM_CUB_ORDERS:
# CUB orders
train_tasks = CUBTasks(CUBDataset(root, split='train'))
trainset = train_tasks.generate(task_id=task_id,
use_species_names=True,
transform=transform_train)
test_tasks = CUBTasks(CUBDataset(root, split='test'))
testset = test_tasks.generate(task_id=task_id,
use_species_names=True,
transform=transform_test)
else:
# CUB passeriformes families
train_tasks = CUBTasks(CUBDataset(root, split='train'))
trainset = train_tasks.generate(task_id=task_id,
task='family',
taxonomy_file='passeriformes.txt',
use_species_names=True,
transform=transform_train)
test_tasks = CUBTasks(CUBDataset(root, split='test'))
testset = test_tasks.generate(task_id=task_id,
task='family',
taxonomy_file='passeriformes.txt',
use_species_names=True,
transform=transform_test)
else:
# iNat2018
from dataset.inat import iNat2018Dataset
tasks_map_file = os.path.join(root, 'inat2018', 'final_tasks_map.json')
tasks_map = load_tasks_map(tasks_map_file)
task_id = tasks_map[config.task_id - NUM_CUB]
trainset = iNat2018Dataset(root, split='train', transform=transform_train, task_id=task_id)
testset = iNat2018Dataset(root, split='val', transform=transform_test, task_id=task_id)
trainset, testset = set_metadata(trainset, testset, config, 'cub_inat2018')
return trainset, testset
@_add_dataset
def imat2018fashion(root, config):
NUM_IMAT = 228
assert 0 <= config.task_id < NUM_IMAT
from dataset.imat import iMat2018FashionDataset, iMat2018FashionTasks
transform_train, transform_test = _get_transforms()
train_tasks = iMat2018FashionTasks(iMat2018FashionDataset(root, split='train'))
trainset = train_tasks.generate(task_id=config.task_id,
transform=transform_train)
test_tasks = iMat2018FashionTasks(iMat2018FashionDataset(root, split='validation'))
testset = test_tasks.generate(task_id=config.task_id,
transform=transform_test)
trainset, testset = set_metadata(trainset, testset, config, 'imat2018fashion')
return trainset, testset
@_add_dataset
def split_mnist(root, config):
assert isinstance(config.task_id, tuple)
from dataset.mnist import MNISTDataset, SplitMNISTTask
transform_train, transform_test = _get_mnist_transforms()
train_tasks = SplitMNISTTask(MNISTDataset(root, train=True))
trainset = train_tasks.generate(classes=config.task_id, transform=transform_train)
test_tasks = SplitMNISTTask(MNISTDataset(root, train=False))
testset = test_tasks.generate(classes=config.task_id, transform=transform_test)
trainset, testset = set_metadata(trainset, testset, config, 'split_mnist')
return trainset, testset
@_add_dataset
def split_cifar(root, config):
assert 0 <= config.task_id < 11
from dataset.cifar import CIFAR10Dataset, CIFAR100Dataset, SplitCIFARTask
transform_train, transform_test = _get_cifar_transforms()
train_tasks = SplitCIFARTask(CIFAR10Dataset(root, train=True), CIFAR100Dataset(root, train=True))
trainset = train_tasks.generate(task_id=config.task_id, transform=transform_train)
test_tasks = SplitCIFARTask(CIFAR10Dataset(root, train=False), CIFAR100Dataset(root, train=False))
testset = test_tasks.generate(task_id=config.task_id, transform=transform_test)
trainset, testset = set_metadata(trainset, testset, config, 'split_cifar')
return trainset, testset
@_add_dataset
def cifar10_mnist(root, config):
from dataset.cifar import CIFAR10Dataset
from dataset.mnist import MNISTDataset
from dataset.expansion import UnionClassificationTaskExpander
transform_train, transform_test = _get_cifar_transforms()
trainset = UnionClassificationTaskExpander(merge_duplicate_images=False)(
[CIFAR10Dataset(root, train=True), MNISTDataset(root, train=True, expand=True)], transform=transform_train)
testset = UnionClassificationTaskExpander(merge_duplicate_images=False)(
[CIFAR10Dataset(root, train=False), MNISTDataset(root, train=False, expand=True)], transform=transform_test)
return trainset, testset
@_add_dataset
def cifar10(root):
from torchvision.datasets import CIFAR10
transform = transforms.Compose([
transforms.Resize(224),
transforms.ToTensor(),
transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
])
trainset = CIFAR10(root, train=True, transform=transform, download=True)
testset = CIFAR10(root, train=False, transform=transform)
return trainset, testset
@_add_dataset
def cifar100(root):
from torchvision.datasets import CIFAR100
transform = transforms.Compose([
transforms.Resize(224),
transforms.ToTensor(),
transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
])
trainset = CIFAR100(root, train=True, transform=transform, download=True)
testset = CIFAR100(root, train=False, transform=transform)
return trainset, testset
@_add_dataset
def mnist(root):
from torchvision.datasets import MNIST
transform = transforms.Compose([
lambda x: x.convert("RGB"),
transforms.Resize(224),
transforms.ToTensor(),
# transforms.Normalize((0.5, 0.5, 0.5), (1., 1., 1.)),
])
trainset = MNIST(root, train=True, transform=transform, download=True)
testset = MNIST(root, train=False, transform=transform)
return trainset, testset
@_add_dataset
def letters(root):
from torchvision.datasets import EMNIST
transform = transforms.Compose([
lambda x: x.convert("RGB"),
transforms.Resize(224),
transforms.ToTensor(),
# transforms.Normalize((0.5, 0.5, 0.5), (1., 1., 1.)),
])
trainset = EMNIST(root, train=True, split='letters', transform=transform, download=True)
testset = EMNIST(root, train=False, split='letters', transform=transform)
return trainset, testset
@_add_dataset
def kmnist(root):
from torchvision.datasets import KMNIST
transform = transforms.Compose([
lambda x: x.convert("RGB"),
transforms.Resize(224),
transforms.ToTensor(),
])
trainset = KMNIST(root, train=True, transform=transform, download=True)
testset = KMNIST(root, train=False, transform=transform)
return trainset, testset
@_add_dataset
def stl10(root):
from torchvision.datasets import STL10
transform = transforms.Compose([
transforms.Resize(224),
transforms.ToTensor(),
transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
])
trainset = STL10(root, split='train', transform=transform, download=True)
testset = STL10(root, split='test', transform=transform)
trainset.targets = trainset.labels
testset.targets = testset.labels
return trainset, testset
def get_dataset(root, config=None):
return _DATASETS[config.name](os.path.expanduser(root), config)
| awslabs/aws-cv-task2vec | datasets.py | datasets.py | py | 12,841 | python | en | code | 96 | github-code | 6 | [
{
"api_name": "collections.namedtuple",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.Normalize",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 25,
"usage_type": "name"
},
{
"ap... |
14052300522 | # vim set fileencoding=utf-8
from setuptools import setup
with open('README.rst') as f:
long_description = f.read()
setup(
name = 'AnthraxEplasty',
version = '0.0.3',
author = 'Szymon Pyżalski',
author_email = 'zefciu <szymon@pythonista.net>',
description = 'Anthrax - generating forms from Elephantoplasty objects',
url = 'http://github.com/zefciu/Anthrax',
keywords = 'form web orm database',
long_description = long_description,
install_requires = ['anthrax', 'Elephantoplasty'],
tests_require = ['nose>=1.0', 'nose-cov>=1.0'],
test_suite = 'nose.collector',
package_dir = {'': 'src'},
namespace_packages = ['anthrax'],
packages = [
'anthrax', 'anthrax.eplasty'
],
classifiers = [
'Development Status :: 1 - Planning',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
entry_points = """[anthrax.reflector]
eplasty = anthrax.eplasty.reflector:EplastyReflector
[anthrax.field_mixins]
eplasty_unique = anthrax.eplasty.field:UniqueMixin
""",
)
| zefciu/anthrax-eplasty | setup.py | setup.py | py | 1,242 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "setuptools.setup",
"line_number": 6,
"usage_type": "call"
}
] |
71737201468 | from typing import List
from project.appliances.appliance import Appliance
from project.people.child import Child
class Room:
def __init__(self, name: str, budget: float, members_count: int):
self.family_name = name
self.budget = budget
self.members_count = members_count
self.children: List[Child] = []
self.expenses = 0
@property
def expenses(self):
return self.__expenses
@expenses.setter
def expenses(self, value):
if value < 0:
raise ValueError("Expenses cannot be negative")
self.__expenses = value
def calculate_expenses(self, *args):
total_expenses = 0
for list_obj in args:
for obj in list_obj:
if isinstance(obj, Appliance):
total_expenses += obj.get_monthly_expense()
else:
total_expenses += obj.cost * 30
self.expenses = total_expenses
def room_info(self):
result_str = [
f"{self.family_name} with {self.members_count} members."
f" Budget: {self.budget:.2f}$, Expenses: {self.expenses:.2f}$"]
for idx, child in enumerate(self.children):
result_str.append(f"--- Child {idx + 1} monthly cost: {(child.cost * 30):.2f}$")
if hasattr(self, 'appliances'):
appliances_monthly_cost = sum([a.get_monthly_expense() for a in self.appliances])
result_str.append(f"--- Appliances monthly cost: {appliances_monthly_cost:.2f}$")
return '\n'.join(result_str)
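# Illustrative usage (hypothetical values; Appliance and Child are project classes):
# room = Room("Petrovi", budget=1500.0, members_count=3)
# room.calculate_expenses(appliances, room.children)  # appliance monthly cost + child cost * 30
# print(room.room_info())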
| tonytech83/Python-OOP | OOP_Exams/11_OOP_22_Aug_2020/hotel_everland/project/rooms/room.py | room.py | py | 1,573 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "typing.List",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "project.people.child.Child",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "project.appliances.appliance.Appliance",
"line_number": 32,
"usage_type": "argument"
}
] |
38907524035 | """Create category
Revision ID: bc8fb2b5aaaa
Revises: cf3388347129
Create Date: 2023-05-06 09:44:36.431462
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'bc8fb2b5aaaa'
down_revision = 'cf3388347129'
branch_labels = None
depends_on = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('category',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=50), nullable=True),
sa.Column('icon', sa.String(length=50), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('create_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True),
sa.Column('update_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_index(op.f('ix_category_id'), 'category', ['id'], unique=False)
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_category_id'), table_name='category')
op.drop_table('category')
# ### end Alembic commands ###
| rasimatics/excursio-backend | migrations/versions/bc8fb2b5aaaa_create_category.py | bc8fb2b5aaaa_create_category.py | py | 1,266 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "alembic.op.create_table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integ... |
32989650417 | from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField, SelectField
from wtforms.validators import InputRequired, Optional
sizes = ['XS','S', 'M', 'L', 'XL']
ratings = [1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5,
6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0]
class AddCupcakeForm(FlaskForm):
"""Form for adding cupcakes"""
flavor = StringField("Flavor", validators=[InputRequired(message="Flavor cannot be blank")])
size = SelectField("Size", choices=[(size, size) for size in sizes], validators=[InputRequired(message="Size cannot be blank")])
rating = SelectField("Rating", coerce=float, choices=[(rating, rating) for rating in ratings], validators=[InputRequired(message="Rating cannot be blank")])
image = StringField("Image Link", validators=[Optional()])
| BradButler96/flask-cupcakes | forms.py | forms.py | py | 825 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask_wtf.FlaskForm",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "wtforms.StringField",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.InputRequired",
"line_number": 12,
"usage_type": "call"
},
{
"api_name"... |
71375478588 | # Import the utils
import math
import sys
import os
sys.path.append(os.path.abspath('../QUtils'))
from qutils import pprint, graph
# Import the QISKit SDK
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit import execute
# Create a Quantum Register with 3 qubits.
q = QuantumRegister(3)
# Create a Classical Register with 3 bits.
c = ClassicalRegister(3)
# Create a Quantum Circuit
qc = QuantumCircuit(q, c)
# move the qubits into a superpostion such that when they have an H gate and a Measure
# applied they are equally likely to collapse to 0 or 1
qc.h(q)
qc.s(q)
pi = math.pi
# add the gates to get the normal distribution
"""
# makes 111 less likely
qc.crz(-0.3*pi, q[2], q[1])
qc.crz(-0.3*pi, q[1], q[0])
# somehow encourages 100
qc.x(q[2])
qc.crz(-0.4*pi, q[0], q[1])
qc.crz(-0.4*pi, q[1], q[2])
qc.crz(-0.4*pi, q[0], q[2])
qc.x(q[2])
"""
# THE 2 QUBIT CIRCUIT
# I don't get why but this bumps up the ends and drops the middle
# aka the exact opposite of what we want
#qc.crz(-0.5*pi, q[1], q[0])
#qc.crz(-0.5*pi, q[0], q[1])
# this then flips it the right way up
#qc.rz(pi, q[0])
# ATTEMPT AT 3 QUBIT CIRCUIT - kinda works?
# ups 011 and 100, but also 000 and 111
qc.crz(-0.5*pi, q[1], q[0])
qc.crz(-0.5*pi, q[0], q[1])
qc.crz(-0.5*pi, q[2], q[1])
qc.rz(pi, q[2])
qc.h(q)
# Add a Measure gate to see the state.
qc.measure(q, c)
# Compile and run the Quantum circuit on a simulator backend
job_sim = execute(qc, "local_qasm_simulator", shots=7000)
sim_result = job_sim.result()
# Show the results
print("simulation: ", sim_result)
# Returns a dict
pprint(sim_result.get_counts(qc))
graph(sim_result.get_counts(qc))
| maddy-tod/quantum | Code/MonteCarlo/NormalTest.py | NormalTest.py | py | 1,675 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "sys.path.append",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number... |
27054375172 | import numpy as np
import pandas as pd
import config
import sys
import tensorflow as tf
from keras import Sequential
from keras.layers import Dense
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from service.Preprocess import Preprocess
from service.FeatureEngineer import FeatureEngineer
class TaxiFaresPredictionNYC:
def __init__(self):
self.df = None
self.x = None
self.y = None
self.x_train = None
self.y_test = None
self.x_test = None
self.y_train = None
self.df_prescaled = None
self.f_engineer = None
self.model = Sequential()
self.preprocessObj = Preprocess()
def feature_engineer(self):
self.f_engineer = FeatureEngineer(self.df)
self.f_engineer.create_date_columns()
self.f_engineer.create_dist_column()
self.f_engineer.create_airport_dist_features()
def preprocess(self):
self.preprocessObj.remove_missing_values()
self.preprocessObj.remove_fare_amount_outliers()
self.preprocessObj.replace_passenger_count_outliers()
self.preprocessObj.remove_lat_long_outliers()
self.df = self.preprocessObj.get_dataset()
self.feature_engineer()
self.df = self.f_engineer.get_dataset()
self.df_prescaled = self.df.copy()
self.df = self.preprocessObj.scale()
self.x = self.df.loc[:, self.df.columns != 'fare_amount']
self.y = self.df.fare_amount
self.x_train, self.x_test, self.y_train, self.y_test = train_test_split(self.x, self.y, test_size=0.2)
def create(self):
self.model.add(Dense(128, activation='relu', input_dim=self.x_train.shape[1]))
self.model.add(Dense(64, activation='relu'))
self.model.add(Dense(32, activation='relu'))
self.model.add(Dense(8, activation='relu'))
self.model.add(Dense(1))
def compile(self):
self.model.compile(loss='mse', optimizer='adam', metrics=['mse'])
self.model.fit(self.x_train, self.y_train, epochs=1)
def predict(self):
train_predict = self.model.predict(self.x_train)
        train_rmse = np.sqrt(mean_squared_error(self.y_train, train_predict))
        print("Train RMSE: {:0.2f}".format(train_rmse))
        test_predict = self.model.predict(self.x_test)
        test_rmse = np.sqrt(mean_squared_error(self.y_test, test_predict))
        print("Test RMSE: {:0.2f}".format(test_rmse))
def get_dataset(self):
return self.df
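# Illustrative call order inferred from the methods above (not from the original repo):
# predictor = TaxiFaresPredictionNYC()
# predictor.preprocess()  # clean, feature-engineer, scale and split the data
# predictor.create()      # build the five-layer Dense network
# predictor.compile()     # compile with MSE loss and fit for one epoch
# predictor.predict()     # report train/test RMSE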
| kalinVn/new_york_city_taxi_fare_predicton | service/TaxiFaresPredictionNYC.py | TaxiFaresPredictionNYC.py | py | 2,573 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "keras.Sequential",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "service.Preprocess.Preprocess",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "service.FeatureEngineer.FeatureEngineer",
"line_number": 32,
"usage_type": "call"
},
{... |
38048663522 | from constants import API_STACK_BASE_URL
import requests
import sys
class IpStack:
def __init__(self, api_token: str, base_url: str):
if base_url is None or base_url == '':
base_url = API_STACK_BASE_URL
self.api_token = api_token
self.base_url = base_url
    def get_ip_location(self, ip_address: str) -> tuple[float, float]:
endpoint = f"{self.base_url}/{ip_address}?access_key={self.api_token}"
try:
response = requests.get(endpoint, timeout=60)
json_response = response.json()
except requests.exceptions.RequestException as error:
print(f"Error: The request could not be resolved")
print(f"Provided base url: {self.base_url}")
if 'doc' in error.__dict__:
print(error.__dict__['doc'])
sys.exit(1)
if 'error' in json_response:
error_code = json_response['error']['code']
            error_message = json_response['error']['info']
            print(f"Error {error_code}: {error_message}")
sys.exit(1)
latitude = json_response['latitude']
longitude = json_response['longitude']
if latitude == 0 and longitude == 0:
print("Location not found")
sys.exit(1)
return latitude, longitude
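# Illustrative usage (hypothetical token and IP, not part of the original module):
# stack = IpStack(api_token="YOUR_TOKEN", base_url="")  # empty base_url falls back to API_STACK_BASE_URL
# latitude, longitude = stack.get_ip_location("134.201.250.155")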
| AlejandroGC-SS/ip_stack_interface | ip_stack.py | ip_stack.py | py | 1,318 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "constants.API_STACK_BASE_URL",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "requests.exceptions",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "sys... |
72609945147 | import requests
from parsel import Selector
url='http://www.porters.vip/verify/uas/index.html'
# headers = {
# 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.3'}
headers = {'User-Agent': 'PostmanRuntime/7.26.2',
'Host': 'www.porters.vip',
'Accept':'*/*',
'Connection':'keep-alive',
           'Accept-Encoding': 'gzip,deflate,br'}
r=requests.get(url,headers=headers)
sel = Selector(r.text)
print(r.status_code)
print(sel.css('.list-group-item::text').extract())
| 0xdeciverAngel/anti-web-crawler | user agent.py | user agent.py | py | 569 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "parsel.Selector",
"line_number": 14,
"usage_type": "call"
}
] |
8784244942 | import os
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import logout
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import redirect, render
from django.urls import reverse, reverse_lazy
from django.views.generic.edit import CreateView, UpdateView
from django.views.generic.list import ListView
from Venter import upload_to_google_drive
from Venter.models import Category, Profile
from Venter.forms import CSVForm, ProfileForm, UserForm
from .manipulate_csv import EditCsv
def upload_csv_file(request):
"""
View logic for uploading CSV file by a logged in user.
For POST request-------
1) The POST data, uploaded csv file and a request parameter are being sent to CSVForm as arguments
2) If form.is_valid() returns true, the user is assigned to the uploaded_by field
3) csv_form is saved and currently returns a simple httpresponse inplace of prediction results
For GET request-------
The csv_form is rendered in the template
"""
if request.method == 'POST':
csv_form = CSVForm(request.POST, request.FILES, request=request)
if csv_form.is_valid():
file_uploaded = csv_form.save(commit=False)
file_uploaded.uploaded_by = request.user
csv_form.save()
return HttpResponse("<h1>Your csv file was uploaded, redirect user to prediction page (pie charts, tables..)</h1>")
else:
return render(request, './Venter/upload_file.html', {'csv_form': csv_form})
elif request.method == 'GET':
csv_form = CSVForm(request=request)
return render(request, './Venter/upload_file.html', {'csv_form': csv_form})
def handle_user_selected_data(request):
"""This function is used to handle the selected categories by the user"""
if not request.user.is_authenticated:
# Authentication security check
return redirect(settings.LOGIN_REDIRECT_URL)
else:
rows = request.session['Rows']
correct_category = []
company = request.session['company']
if request.method == 'POST':
file_name = request.session['filename']
user_name = request.user.username
for i in range(rows):
# We are getting a list of values because the select tag was multiple select
selected_category = request.POST.getlist(
'select_category' + str(i) + '[]')
if request.POST['other_category' + str(i)]:
# To get a better picture of what we are getting try to print "request.POST.['other_category' + str(i)]", request.POST['other_category' + str(i)
# others_list=request.POST['other_category' + str(i)]
# for element in others_list:
# print(element)
# tuple = (selected_category,element)
                    category_pair = (selected_category,
                                     request.POST['other_category' + str(i)])
                    # print(request.POST['other_category' + str(i)])
                    # print(category_pair)
                    # Here correct_category needs a tuple, so the data will look like:
                    # [(selected_category1, selected_category2), (other_category1, other_category2)] - the output of the multi select
                    correct_category.append(category_pair)
else:
                    # Here correct_category only needs the selected categories, so the data will look like:
                    # [(selected_category1, selected_category2)] - the output of the multi select
correct_category.append(selected_category)
csv = EditCsv(file_name, user_name, company)
csv.write_file(correct_category)
if request.POST['radio'] != "no":
# If the user want to send the file to Google Drive
path_folder = request.user.username + "/CSV/output/"
path_file = 'MEDIA/' + request.user.username + \
"/CSV/output/" + request.session['filename']
path_file_diff = 'MEDIA/' + request.user.username + "/CSV/output/Difference of " + request.session[
'filename']
upload_to_google_drive.upload_to_drive(path_folder,
'results of ' +
request.session['filename'],
"Difference of " +
request.session['filename'],
path_file,
path_file_diff)
return redirect("/download")
def file_download(request):
if not request.user.is_authenticated:
return redirect(settings.LOGIN_REDIRECT_URL)
else:
# Refer to the source: https://stackoverflow.com/questions/36392510/django-download-a-file/36394206
path = os.path.join(settings.MEDIA_ROOT, request.user.username,
"CSV", "output", request.session['filename'])
with open(path, 'rb') as csv:
response = HttpResponse(
                csv.read())  # Consider StreamingHttpResponse instead; reading the whole file will create problems with large row counts (25k+)
response['Content-Type'] = 'application/force-download'
response['Content-Disposition'] = 'attachment;filename=results of ' + \
request.session['filename']
return response
def handle_uploaded_file(f, username, filename):
"""Just a precautionary step if signals.py doesn't work for any reason."""
data_directory_root = settings.MEDIA_ROOT
path = os.path.join(data_directory_root, username,
"CSV", "input", filename)
path_input = os.path.join(data_directory_root, username, "CSV", "input")
path_output = os.path.join(data_directory_root, username, "CSV", "output")
if not os.path.exists(path_input):
os.makedirs(path_input)
if not os.path.exists(path_output):
os.makedirs(path_output)
with open(path, 'wb+') as destination:
for chunk in f.chunks():
destination.write(chunk)
def user_logout(request):
logout(request)
return redirect(settings.LOGIN_REDIRECT_URL)
class CategoryListView(LoginRequiredMixin, ListView):
"""
Arguments------
1) ListView: View to display the category list for the organisation to which the logged in user belongs
    2) LoginRequiredMixin: Requests to view the category list by non-authenticated users will throw an HTTP 404 error
Functions------
1) get_queryset(): Returns a new QuerySet filtering categories based on the organisation name passed in the parameter.
"""
model = Category
def get_queryset(self):
return Category.objects.filter(organisation_name=self.request.user.profile.organisation_name)
class UpdateProfileView(LoginRequiredMixin, UpdateView):
"""
Arguments------
1) UpdateView: View to update the user profile details for the logged in user
    2) LoginRequiredMixin: Requests to update profile details by non-authenticated users will throw an HTTP 404 error
"""
model = Profile
success_url = reverse_lazy('home')
def post(self, request, *args, **kwargs):
user_form = UserForm(request.POST, instance=request.user)
profile_form = ProfileForm(request.POST, request.FILES, instance=request.user.profile)
if user_form.is_valid() and profile_form.is_valid(): # pylint: disable = R1705
user_form.save()
profile_form.save()
messages.success(request, 'Your profile was successfully updated!')
return HttpResponseRedirect(reverse_lazy('home'))
        else:
            messages.error(request, 'Please correct the error below.')
            return render(request, './Venter/update_profile.html', {'user_form': user_form, 'profile_form': profile_form})
def get(self, request, *args, **kwargs):
user_form = UserForm(instance=request.user)
profile_form = ProfileForm(instance=request.user.profile)
return render(request, './Venter/update_profile.html', {'user_form': user_form, 'profile_form': profile_form})
class CreateProfileView(CreateView):
"""
Arguments------
1) CreateView: View to create the user profile for a new user.
Note------
profile_form.save(commit=False) returns an instance of Profile that hasn't yet been saved to the database.
The profile.save() returns an instance of Profile that has been saved to the database.
This occurs only after the profile is created for a new user with the 'profile.user = user'
"""
model = Profile
def post(self, request, *args, **kwargs):
user_form = UserForm(request.POST)
profile_form = ProfileForm(request.POST, request.FILES)
if user_form.is_valid() and profile_form.is_valid():
user = user_form.save()
profile = profile_form.save(commit=False)
profile.user = user
profile.save()
return HttpResponseRedirect(reverse('home', args=[]))
else:
messages.warning(
request, 'Something went wrong in Venter, please try again')
return HttpResponse("<h1>NO Profile created</h1>")
def get(self, request, *args, **kwargs):
user_form = UserForm()
profile_form = ProfileForm()
return render(request, './Venter/registration.html', {'user_form': user_form, 'profile_form': profile_form})
| simranmadhok/Venter_CMS | Venter/views.py | views.py | py | 9,673 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "Venter.forms.CSVForm",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": ... |
30364500871 | import sys
import mock
import six
from okonomiyaki.errors import InvalidMetadataField
from ..python_implementation import PythonABI, PythonImplementation
from hypothesis import given
from hypothesis.strategies import sampled_from
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
class TestPythonImplementation(unittest.TestCase):
@given(sampled_from((
('2', '7', 'cp27'), ('3', '8', 'cp38'),
('4', '15', 'cp415'), ('3', '11', 'cp311'))))
def test_creation(self, version):
# Given
kind = 'cpython'
major, minor, r_tag = version
# When
tag = PythonImplementation(kind, major, minor)
# Then
self.assertEqual(tag.abbreviated_implementation, 'cp')
self.assertEqual(str(tag), r_tag)
self.assertIsInstance(six.text_type(tag), six.text_type)
def test_from_running_python(self):
# When
with mock.patch(
"okonomiyaki.platforms.python_implementation."
"_abbreviated_implementation",
return_value="cp"):
with mock.patch("sys.version_info", (2, 7, 9, 'final', 0)):
py = PythonImplementation.from_running_python()
# Then
self.assertEqual(py.pep425_tag, u"cp27")
# When
with mock.patch("sys.pypy_version_info", "pypy 1.9", create=True):
with mock.patch("sys.version_info", (2, 7, 9, 'final', 0)):
py = PythonImplementation.from_running_python()
# Then
self.assertEqual(py.pep425_tag, u"pp27")
# When
with mock.patch("sys.platform", "java 1.7", create=True):
with mock.patch("sys.version_info", (2, 7, 9, 'final', 0)):
py = PythonImplementation.from_running_python()
# Then
self.assertEqual(py.pep425_tag, u"jy27")
# When
with mock.patch("sys.platform", "cli", create=True):
with mock.patch("sys.version_info", (2, 7, 9, 'final', 0)):
py = PythonImplementation.from_running_python()
# Then
self.assertEqual(py.pep425_tag, u"ip27")
@given(sampled_from((
("cpython", "cp"), ("python", "py"),
("pypy", "pp"), ("dummy", "dummy"))))
def test_abbreviations(self, kinds):
# Given
major = 2
minor = 7
kind, r_abbreviated = kinds
# When
tag = PythonImplementation(kind, major, minor)
# Then
self.assertEqual(tag.abbreviated_implementation, r_abbreviated)
@given(sampled_from((
(2, 7, 'cp27'), (3, 8, 'cp38'),
(3, 4, 'cpython34'), (4, 5, 'cp4_5'),
(24, 7, 'cp24_7'),
(4, 15, 'cp415'), (3, 11, 'cp311'))))
def test_from_string(self, data):
# Given
major, minor, tag_string = data
# When
tag = PythonImplementation.from_string(tag_string)
# Then
self.assertEqual(tag.kind, "cpython")
self.assertEqual(tag.major, major)
if minor is not None:
self.assertEqual(tag.minor, minor)
@given(sampled_from(('cp2', 'py3', 'cp', 'pp4567')))
def test_from_string_errors(self, data):
# When/Then
message = r"^Invalid value for metadata field 'python_tag': '{}'$"
with self.assertRaisesRegexp(
InvalidMetadataField, message.format(data)):
PythonImplementation.from_string(data)
class TestPythonABI(unittest.TestCase):
def test_pep425_tag_string_none(self):
# Given
abi_tag = None
# When
abi_tag_string = PythonABI.pep425_tag_string(abi_tag)
# Then
self.assertEqual(abi_tag_string, u"none")
self.assertIsInstance(abi_tag_string, six.text_type)
def test_pep425_tag_string(self):
# Given
abi_tag = PythonABI(u"cp27mu")
# When
abi_tag_string = PythonABI.pep425_tag_string(abi_tag)
# Then
self.assertEqual(abi_tag_string, u"cp27mu")
self.assertIsInstance(abi_tag_string, six.text_type)
| enthought/okonomiyaki | okonomiyaki/platforms/tests/test_python_implementation.py | test_python_implementation.py | py | 4,083 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "sys.version_info",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "unittest.TestCase",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "python_implementation.PythonImplementation",
"line_number": 30,
"usage_type": "call"
},
... |
33548298905 | import sys
import argparse
import tensorflow as tf
from keras.models import Model, load_model
from keras.layers import TimeDistributed, Conv1D, Dense, Embedding, Input, Dropout, LSTM, Bidirectional, MaxPooling1D, \
Flatten, concatenate
from keras.initializers import RandomUniform
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
from keras.utils import plot_model
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform, randint
from util.util import *
def data():
train_df = get_train_data()
#found = train_df['ner']
#hasValue = [True if 'VALUE' in x else False for i, x in enumerate(found)]
#train_df = train_df[np.array(hasValue)]
print(train_df.shape)
train_data, case2Idx, caseEmbeddings, word2Idx, wordEmbeddings, \
char2Idx, label2Idx, sentences_maxlen, words_maxlen = prepare_data(train_df)
val_df = get_val_data()
print(val_df.shape)
val_data = embed_sentences(add_char_information_in(tag_data(val_df)), INFOBOX_CLASS, PROPERTY_NAME)
X_train, Y_train = split_data(train_data)
X_val, Y_val = split_data(val_data)
return X_train, Y_train, X_val, Y_val, caseEmbeddings, wordEmbeddings, label2Idx, char2Idx, sentences_maxlen, words_maxlen
def model(X_train, Y_train, X_val, Y_val, caseEmbeddings, wordEmbeddings, label2Idx, char2Idx, sentences_maxlen,
words_maxlen):
temp = []
for item in Y_train[0]:
flatten = [i for sublist in item for i in sublist]
for i in flatten:
temp.append(i)
temp = np.asarray(temp)
print('labels', np.unique(np.ravel(temp, order='C')))
# word-level input
words_input = Input(shape=(None,), dtype='int32', name='Words_input')
words = Embedding(input_dim=wordEmbeddings.shape[0], output_dim=wordEmbeddings.shape[1],
weights=[wordEmbeddings], trainable=False)(words_input)
# case-info input
casing_input = Input(shape=(None,), dtype='int32', name='Casing_input')
casing = Embedding(input_dim=caseEmbeddings.shape[0], output_dim=caseEmbeddings.shape[1],
weights=[caseEmbeddings], trainable=False)(casing_input)
# character input
character_input = Input(shape=(None, words_maxlen,), name="Character_input")
embed_char_out = TimeDistributed(
Embedding(input_dim=len(char2Idx), output_dim=50,
embeddings_initializer=RandomUniform(minval=-0.5, maxval=0.5)),
name="Character_embedding")(character_input)
dropout = Dropout(0.5)(embed_char_out)
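    # Note (added): the double-brace expressions such as {{choice([3, 5])}} are
    # hyperas template placeholders, not valid plain Python; hyperas rewrites this
    # function's source and substitutes a concrete sampled value for each trial.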
# CNN
conv1d_out = TimeDistributed(
Conv1D(kernel_size={{choice([3, 5])}}, filters=10,
padding='same', activation='tanh', strides=1),
name="Convolution")(dropout)
maxpool_out = TimeDistributed(MaxPooling1D({{choice([10, 25, 50])}}), name="Maxpool")(conv1d_out)
char = TimeDistributed(Flatten(), name="Flatten")(maxpool_out)
char = Dropout(0.5)(char)
# concat & BLSTM
output = concatenate([words, casing, char])
output = Bidirectional(LSTM({{choice([100, 200, 300])}},
return_sequences=True,
dropout=0.5, # on input to each LSTM block
recurrent_dropout=0.25 # on recurrent input signal
), name="BLSTM")(output)
output = TimeDistributed(Dense(len(label2Idx), activation='softmax'), name="Softmax_layer")(output)
# set up model
model = Model(inputs=[words_input, casing_input, character_input], outputs=[output])
model.compile(loss='sparse_categorical_crossentropy',
optimizer='nadam', metrics=['accuracy'])
model.summary()
train_batch, train_batch_len = createBatches2CNN_BLSTM(X_train, Y_train)
val_batch, val_batch_len = createBatches2CNN_BLSTM(X_val, Y_val)
model.fit_generator(iterate_minibatches_CNN_BLSTM(train_batch, train_batch_len),
steps_per_epoch=len(train_batch),
# class_weight=class_weight_vect,
epochs=10, verbose=2, validation_steps=len(val_batch),
validation_data=iterate_minibatches_CNN_BLSTM(val_batch, val_batch_len))
# score, acc = model.evaluate(X_val, Y_val, verbose=0)
score, acc = model.evaluate_generator(generator=iterate_minibatches_CNN_BLSTM(val_batch, val_batch_len), steps=len(val_batch),
verbose=0)
print('Test accuracy:', acc)
return {'loss': -acc, 'status': STATUS_OK, 'model': model}
if __name__ == "__main__":
best_run, best_model = optim.minimize(model=model,
data=data,
algo=tpe.suggest,
max_evals=5,
trials=Trials(),
functions=[createBatches2CNN_BLSTM, iterate_minibatches_CNN_BLSTM])
plot_model(best_model, to_file='models/' + PROPERTY_NAME + '-CNN_BLSTM_best_model.png', show_shapes=True, show_layer_names=True)
best_model.save('models/'+INFOBOX_CLASS+'/dl/' + PROPERTY_NAME + '-CNN_BLSTM_best_model.h5')
print("Best performing model chosen hyper-parameters:")
print(best_run)
model = load_model('models/'+INFOBOX_CLASS+'/dl/' + PROPERTY_NAME + '-CNN_BLSTM_best_model.h5')
test_df = get_test_data()
print(test_df.shape)
test_data = embed_sentences(add_char_information_in(tag_data(test_df)), INFOBOX_CLASS, PROPERTY_NAME)
X_test, Y_test = split_data(test_data)
temp = []
for item in Y_test[0]:
flatten = [i for sublist in item for i in sublist]
for i in flatten:
temp.append(i)
temp = np.asarray(temp)
print('labels', np.unique(np.ravel(temp, order='C')))
test_batch, test_batch_len = createBatches2CNN_BLSTM(X_test, Y_test)
print("Evalutation of best performing model:")
score, acc = model.evaluate_generator(generator=iterate_minibatches_CNN_BLSTM(test_batch, test_batch_len),
steps=len(test_batch), verbose=0)
print("acc on test: ", acc)
| guardiaum/DeepEx | CNN_BLSTM_fit_hyperparams.py | CNN_BLSTM_fit_hyperparams.py | py | 6,299 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "keras.layers.Input",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "keras.layers.Embedding",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "keras.layers.Input",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "keras.la... |
655237097 | import os
from glob import glob
import numpy as np
import pandas as pd
try:
import imageio.v2 as imageio
except ImportError:
import imageio
from tqdm import tqdm
from xarray import DataArray
from elf.evaluation import dice_score
def run_prediction(input_folder, output_folder):
import bioimageio.core
os.makedirs(output_folder, exist_ok=True)
inputs = glob(os.path.join(input_folder, "*.tif"))
model = bioimageio.core.load_resource_description("10.5281/zenodo.5869899")
with bioimageio.core.create_prediction_pipeline(model) as pp:
for inp in tqdm(inputs):
fname = os.path.basename(inp)
out_path = os.path.join(output_folder, fname)
            image = imageio.imread(inp)
input_ = DataArray(image[None, None], dims=tuple("bcyx"))
pred = bioimageio.core.predict_with_padding(pp, input_)[0].values.squeeze()
imageio.volwrite(out_path, pred)
def evaluate(label_folder, output_folder):
cell_types = ["A172", "BT474", "BV2", "Huh7",
"MCF7", "SHSY5Y", "SkBr3", "SKOV3"]
grid = pd.DataFrame(columns=["Cell_types"] + cell_types)
row = ["all"]
for i in cell_types:
label_files = glob(os.path.join(label_folder, i, "*.tif"))
this_scores = []
for label_file in label_files:
fname = os.path.basename(label_file)
pred_file = os.path.join(output_folder, fname)
label = imageio.imread(label_file)
pred = imageio.volread(pred_file)[0]
score = dice_score(pred, label != 0, threshold_gt=None, threshold_seg=None)
this_scores.append(score)
row.append(np.mean(this_scores))
grid.loc[len(grid)] = row
print("Cell type results:")
print(grid)
def main():
# input_folder = "/home/pape/Work/data/incu_cyte/livecell/images/livecell_test_images"
output_folder = "./predictions"
# run_prediction(input_folder, output_folder)
label_folder = "/home/pape/Work/data/incu_cyte/livecell/annotations/livecell_test_images"
evaluate(label_folder, output_folder)
if __name__ == "__main__":
main()
| constantinpape/torch-em | experiments/unet-segmentation/livecell/check_cell_type_performance.py | check_cell_type_performance.py | py | 2,148 | python | en | code | 42 | github-code | 6 | [
{
"api_name": "os.makedirs",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
... |
16540523617 | import os
import sys
from nuitka.utils.FileOperations import (
areSamePaths,
isFilenameBelowPath,
isFilenameSameAsOrBelowPath,
)
from nuitka.utils.Utils import (
isAndroidBasedLinux,
isFedoraBasedLinux,
isLinux,
isMacOS,
isPosixWindows,
isWin32Windows,
withNoDeprecationWarning,
)
from .PythonVersions import (
getInstalledPythonRegistryPaths,
getRunningPythonDLLPath,
getSystemPrefixPath,
isStaticallyLinkedPython,
python_version,
python_version_str,
)
def isNuitkaPython():
"""Is this our own fork of CPython named Nuitka-Python."""
# spell-checker: ignore nuitkapython
if python_version >= 0x300:
return sys.implementation.name == "nuitkapython"
else:
return sys.subversion[0] == "nuitkapython"
_is_anaconda = None
def isAnacondaPython():
"""Detect if Python variant Anaconda"""
# singleton, pylint: disable=global-statement
global _is_anaconda
if _is_anaconda is None:
_is_anaconda = os.path.exists(os.path.join(sys.prefix, "conda-meta"))
return _is_anaconda
def isApplePython():
if not isMacOS():
return False
# Python2 on 10.15 or higher
if "+internal-os" in sys.version:
return True
# Older macOS had that
if isFilenameSameAsOrBelowPath(path="/usr/bin/", filename=getSystemPrefixPath()):
return True
# Newer macOS has that
if isFilenameSameAsOrBelowPath(
path="/Library/Developer/CommandLineTools/", filename=getSystemPrefixPath()
):
return True
    # Xcode has that on macOS; we consider it an Apple Python for now. It might
    # be more usable than Apple Python, but we delay that decision.
if isFilenameSameAsOrBelowPath(
path="/Applications/Xcode.app/Contents/Developer/",
filename=getSystemPrefixPath(),
):
return True
return False
def isHomebrewPython():
# spell-checker: ignore sitecustomize
if not isMacOS():
return False
candidate = os.path.join(
getSystemPrefixPath(), "lib", "python" + python_version_str, "sitecustomize.py"
)
if os.path.exists(candidate):
with open(candidate, "rb") as site_file:
line = site_file.readline()
if b"Homebrew" in line:
return True
return False
def isPyenvPython():
if isWin32Windows():
return False
return os.environ.get("PYENV_ROOT") and isFilenameSameAsOrBelowPath(
path=os.environ["PYENV_ROOT"], filename=getSystemPrefixPath()
)
def isMSYS2MingwPython():
"""MSYS2 the MinGW64 variant that is more Win32 compatible."""
if not isWin32Windows() or "GCC" not in sys.version:
return False
import sysconfig
if python_version >= 0x3B0:
return "-mingw_" in sysconfig.get_config_var("EXT_SUFFIX")
else:
return "-mingw_" in sysconfig.get_config_var("SO")
def isTermuxPython():
"""Is this Termux Android Python."""
# spell-checker: ignore termux
if not isAndroidBasedLinux():
return False
return "com.termux" in getSystemPrefixPath().split("/")
def isUninstalledPython():
# Debian package.
if isDebianPackagePython():
return False
if isStaticallyLinkedPython():
return False
if os.name == "nt":
import ctypes.wintypes
GetSystemDirectory = ctypes.windll.kernel32.GetSystemDirectoryW
GetSystemDirectory.argtypes = (ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD)
GetSystemDirectory.restype = ctypes.wintypes.DWORD
MAX_PATH = 4096
buf = ctypes.create_unicode_buffer(MAX_PATH)
res = GetSystemDirectory(buf, MAX_PATH)
assert res != 0
system_path = os.path.normcase(buf.value)
return not getRunningPythonDLLPath().startswith(system_path)
return isAnacondaPython() or "WinPython" in sys.version
_is_win_python = None
def isWinPython():
"""Is this Python from WinPython."""
if "WinPython" in sys.version:
return True
# singleton, pylint: disable=global-statement
global _is_win_python
if _is_win_python is None:
for element in sys.path:
if os.path.basename(element) == "site-packages":
if os.path.exists(os.path.join(element, "winpython")):
_is_win_python = True
break
else:
_is_win_python = False
return _is_win_python
def isDebianPackagePython():
"""Is this Python from a debian package."""
# spell-checker: ignore multiarch
if not isLinux():
return False
if python_version < 0x300:
return hasattr(sys, "_multiarch")
else:
with withNoDeprecationWarning():
try:
from distutils.dir_util import _multiarch
except ImportError:
return False
else:
return True
def isFedoraPackagePython():
"""Is the Python from a Fedora package."""
if not isFedoraBasedLinux():
return False
system_prefix_path = getSystemPrefixPath()
return system_prefix_path == "/usr"
def isCPythonOfficialPackage():
"""Official CPython download, kind of hard to detect since self-compiled doesn't change much."""
sys_prefix = getSystemPrefixPath()
# For macOS however, it's very knowable.
if isMacOS() and isFilenameBelowPath(
path="/Library/Frameworks/Python.framework/Versions/", filename=sys_prefix
):
return True
# For Windows, we check registry.
if isWin32Windows():
for registry_python_exe in getInstalledPythonRegistryPaths(python_version_str):
if areSamePaths(sys_prefix, os.path.dirname(registry_python_exe)):
return True
return False
def isGithubActionsPython():
return os.environ.get(
"GITHUB_ACTIONS", ""
) == "true" and getSystemPrefixPath().startswith("/opt/hostedtoolcache/Python")
def getPythonFlavorName():
"""For output to the user only."""
# return driven, pylint: disable=too-many-branches,too-many-return-statements
if isNuitkaPython():
return "Nuitka Python"
elif isAnacondaPython():
return "Anaconda Python"
elif isWinPython():
return "WinPython"
elif isDebianPackagePython():
return "Debian Python"
elif isFedoraPackagePython():
return "Fedora Python"
elif isHomebrewPython():
return "Homebrew Python"
elif isApplePython():
return "Apple Python"
elif isPyenvPython():
return "pyenv"
elif isPosixWindows():
return "MSYS2 Posix"
elif isMSYS2MingwPython():
return "MSYS2 MinGW"
elif isTermuxPython():
return "Android Termux"
elif isCPythonOfficialPackage():
return "CPython Official"
elif isGithubActionsPython():
return "GitHub Actions Python"
else:
return "Unknown"
| Nuitka/Nuitka | nuitka/PythonFlavors.py | PythonFlavors.py | py | 6,933 | python | en | code | 10,019 | github-code | 6 | [
{
"api_name": "PythonVersions.python_version",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "sys.implementation",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "sys.subversion",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_nam... |
11838557066 | import pickle
import os
import sys
import numpy as np
import json_tricks as json
import cv2
import torch
import copy
import random
import torchvision.transforms as transforms
from glob import glob
from tqdm import tqdm
from PIL import Image
from natsort import natsorted
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation as R
from torch.utils.data import Dataset
from utils.dataset_helper import get_affine_transform,affine_transform
from core.utils.helper_functions import load_matfile
class InfantDataSet(Dataset):
def __init__(self, root,pose_root,subjects,path=None, transform=transforms.ToTensor(),is_train=True):
"""
root: the root of data, eg: '/vol/biodata/data/human3.6m/training'
pose_root: the root of 2d pose;
subjects: subjects=['EMT4', 'EMT7', 'EMT38', 'EMT36', 'EMT31', 'EMT43',
'EMT5', 'EMT9', 'EMT47', 'EMT45', 'EMT29', 'EMT42',
'EMT23', 'EMT41', 'EMT37', 'EMT48', 'EMT44', 'EMT46',
'EMT20', 'EMT34', 'EMT11', 'EMT30', 'EMT39', 'EMT35',
'EMT14']
transforms: torchvision.transforms to resize and crop frame to (256,256)
"""
self.root = root
self.pose_root=pose_root
self.indices = np.array([3, 2, 9, 8, 4, 1, 10, 7, 13, 12, 15, 14, 5, 0, 11, 6])
self.subjects=subjects
# load dataset
self.sequences = []
self.transform=transform
self.is_train=is_train
self.dataset=None
        if path is not None:
self.load_seq(path)
else:
for subject in tqdm(self.subjects): # actor is in the form of 'S1','S5'
if not os.path.isdir(os.path.join(root, subject)):
print(subject,"not exists")
continue
frames = natsorted(os.listdir(os.path.join(root, subject)))
frame_nums = [int(x[6:-4]) for x in frames]
frame_nums=np.array(sorted(frame_nums))-1
pred_path=os.path.join(pose_root,subject+'.pickle')
if not os.path.exists(pred_path):
print(pred_path,"not exists")
continue
with open(pred_path, "rb") as file:
pickle_file = pickle.load(file)
for frame in frames:
frame_num=self.find_index(frame,pickle_file)
frame2=random.sample(frames, 1)[0]
seq={'subject': subject,
'bbox':pickle_file[frame_num]['bounding_box'],
'center': pickle_file[frame_num]['center'],
'scale': pickle_file[frame_num]['scale'],
'frame_num':frame_num,
'frame1':pickle_file[frame_num]['frame_id'],
'frame2': frame2,
'pose_2d':pickle_file[frame_num]['predicted_keypoints']
}
self.sequences.append(seq)
def get_single(self, sequence):
bbox=sequence['bbox']
center=sequence['center']
scale=np.asarray([sequence['scale'],sequence['scale']])
image_size=(256,256)
frame1_path=os.path.join(self.root,sequence['subject'],sequence['frame1'])
frame2_path=os.path.join(self.root,sequence['subject'],sequence['frame2'])
pose_2d=sequence['pose_2d'][self.indices]
frame1=Image.open(frame1_path)
frame2=Image.open(frame2_path)
trans = get_affine_transform(center, scale, 0, image_size)
frame1=cv2.warpAffine(np.array(frame1), trans, (int(image_size[0]), int(image_size[1])), flags=cv2.INTER_LINEAR)
frame2=cv2.warpAffine(np.array(frame2), trans, (int(image_size[0]), int(image_size[1])), flags=cv2.INTER_LINEAR)
frame1_tensor=self.transform(frame1)
frame2_tensor=self.transform(frame2)
pose2d_tensor=torch.FloatTensor(pose_2d)
return frame1_tensor,frame2_tensor,pose2d_tensor
def find_index(self,s,file):
j=None
for i in range(len(file)):
if s == file[i]['frame_id']:
j=i
return j
def get_sequences(self):
return self.sequences
def save_seq(self,path):
if not os.path.exists(path):
os.makedirs(path)
seq=self.__dict__['sequences']
torch.save(seq, os.path.join(path,'sequences.tar'))
print("saved successfully!")
def load_seq(self,path):
seq_load=torch.load(path)
self.sequences=seq_load
print("load successfully!")
def __getitem__(self, index):
seq=self.sequences[index]
return self.get_single(seq)
def __len__(self):
return len(self.sequences)
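# Hedged usage sketch (not part of the original module): wrapping the dataset
# in a DataLoader; the folder paths and subject names below are illustrative.
#
#     dataset = InfantDataSet(root="/data/frames", pose_root="/data/poses",
#                             subjects=["EMT4", "EMT7"])
#     loader = torch.utils.data.DataLoader(dataset, batch_size=8, shuffle=True)
#     frame1, frame2, pose2d = next(iter(loader))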
class Infant3DPose(Dataset):
def __init__(self, path,load_path=None):
self.path = path
self.indices = np.array([15, 14, 10, 6, 3, 0, 11, 7, 4, 1, 12, 8, 5, 2, 13, 9])
self.keypoints = ['hips', 'shoulders', 'knees', 'elbows', 'ankles', 'wrists', 'feet', 'hands']
self.sequences=[]
        if load_path is not None:
self.load_seq(load_path)
else:
self.sequences=self.get_sequences()
def save_seq(self,path):
if not os.path.exists(path):
os.makedirs(path)
seq=self.__dict__['sequences']
torch.save(seq, os.path.join(path,'pose3d_sequences.tar'))
print("saved successfully!")
def load_seq(self,path):
seq_load=torch.load(path)
self.sequences=seq_load
print("load successfully!")
def get_sequences(self):
spatial_indices = np.array([1, 2, 0])
sequences = []
files = glob(os.path.join(self.path, '*.mat'))
sequences=[]
for file in tqdm(files):
# subj = os.path.splitext(os.path.split(file)[-1])[0]
data, timestamps = load_matfile(file)
# only xyz
data = data[:, 0:3]
data = data.transpose((0, 2, 1))
# tracker data is 120 Hz, camera is 30 Hz, so factor is 4
tracker_data = data
for i in range(len(tracker_data)):
pose = tracker_data[i]
# change keypoint order and spatial orientation
pose = pose[self.indices]
pose = pose[..., spatial_indices]
pose = self.align(pose)
first = pose
pelvis = first[0] + (first[1] - first[0]) / 2
pose -= pelvis
pose = self.normalize(pose)
pose1 = np.copy(pose)
pose1[..., 1] = pose[..., 2]
pose1[..., 2] = pose[..., 1]
sequences.append(pose1)
return sequences
def normalize(self, points):
first = points
# normalize to unit cube -1 to 1
max_ = np.abs(first.max())
min_ = np.abs(first.min())
if max_ >= min_:
points /= max_
else:
points /= min_
return points
def get_angle(self, vec1, vec2):
inv = vec1 @ vec2 / (np.linalg.norm(vec1) * np.linalg.norm(vec2))
return np.arccos(inv)
def align(self, sequence):
"""remove y component of hip line,
align pelvis-neck line with z axis"""
points = sequence
hip_line = points[0] - points[1]
pelvis = points[0] + (points[1] - points[0]) / 2
neck = points[2] + (points[3] - points[2]) / 2
pelvis_neck_line = neck - pelvis
# pelvis neck
rot_axis1 = np.array([0, 0, 1])
angle = self.get_angle(pelvis_neck_line, rot_axis1)
cross_prod = np.cross(pelvis_neck_line, rot_axis1)
cross_prod /= np.linalg.norm(cross_prod)
R1 = R.from_rotvec(angle * cross_prod)
points = R1.apply(points)
# hip
hip_line = points[0] - points[1]
rot_axis2 = np.array([1, 0])
angle = self.get_angle(hip_line[0:2], rot_axis2)
cross_prod = np.cross(hip_line[0:2], rot_axis2)
cross_prod /= np.linalg.norm(cross_prod)
R2 = R.from_rotvec(angle * np.array([0, 0, cross_prod]))
points=R2.apply(points)
rot = R.from_rotvec(np.array([0., 0., np.pi]))
points=rot.apply(points)
return points
def __getitem__(self, idx):
pose = np.array(self.sequences[idx])
# p1 = np.copy(pose)
# p1[..., 1] = pose[..., 2]
# p1[..., 2] = pose[..., 1]
pose_tensor=torch.FloatTensor(pose)
return pose_tensor
def __len__(self):
return len(self.sequences)
| Qingwei-Li98/PoseEstimation | core/utils/infant_dataset.py | infant_dataset.py | py | 8,713 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.utils.data.Dataset",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 22,
"usage_type": "name"
},
{
"a... |
13879303932 | #!/usr/local/bin/python3.7
# -*- coding: utf-8 -*-
# @Time : 2020-06-20 16:15
# @Author : 小凌
# @Email : 296054210@qq.com
# @File : test_06_audit.py
# @Software: PyCharm
import json
import unittest
import ddt
from common.excel_handler import ExcelHandler
from common.http_handler import visit
from middlerware.handler import Handler
logger = Handler.logger
excel = Handler.excel
yaml = Handler.yaml
sheet_name = yaml['excel']['auditsheet']
test_data = excel.get_data(sheet_name)
@ddt.ddt
class TestAudit(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
logger.info('------------------------------TestAuditBegin------------------------------')
cls.admin_token = Handler().admin_token
cls.token = Handler().token
def setUp(self) -> None:
        # Build a loan project with the test account and extract its loan_id
self.loan_id = Handler().loan_id
self.db = Handler.database_cls()
self.excel = ExcelHandler(Handler.excel_path)
@ddt.data(*test_data)
def test01_audit(self, case_data):
"""审核项目接口测试"""
global case_result
logger.info('**********正在获取第%d条<%s>用例**********' % (case_data['case_id'], case_data['title']))
headers = case_data['headers']
        # Also cover the failure case where a regular user logs in and tries to audit
if ("#admin_token#" in headers) or ("#token#" in headers):
headers = headers.replace("#admin_token#", self.admin_token)
headers = headers.replace("#token#", self.token)
data = case_data['data']
if "#loan_id#" in data:
data = data.replace("#loan_id#", str(self.loan_id))
        # Use a loan id that does not exist
if "#fail_loan_id#" in data:
data = data.replace("#fail_loan_id#", str(self.loan_id + 1000))
        # For the already-approved case, fetch a loan whose status != 2 from the database
if "#approve_loan_id#" in data:
self.loan_id = self.db.query("select * from futureloan.loan where `status` !=2 limit 1;")['id']
data = data.replace("#approve_loan_id#", str(self.loan_id))
response = visit(
url=yaml['host'] + case_data['url'],
method=case_data['method'],
json=json.loads(data),
headers=json.loads(headers)
)
expected = json.loads(case_data["expected"])
try:
self.assertEqual(response.json()["code"], expected['code'])
if response.json()['code'] == 0:
sql = "select `status` from futureloan.loan where id = {};".format(self.loan_id)
after_state = self.db.query(sql)['status']
self.assertEqual(expected['status'], after_state)
logger.info('**********第%d条<%s>用例测试结束**********' % (case_data['case_id'], case_data['title']))
case_result = "pass"
except AssertionError as error:
logger.error("第{}用例出现异常,异常为{}".format(case_data['case_id'], error))
case_result = "fail"
raise error
finally:
            # Finally, write the result and response back to the Excel sheet
row = case_data['case_id'] + 1
self.excel.excel_write(name=sheet_name, row=row, column=len(case_data), value=case_result)
self.excel.excel_write(name=sheet_name, row=row, column=len(case_data) - 1, value=str(response.json()))
logger.info("Write the response and result: %s " % case_result)
def tearDown(self) -> None:
self.db.close()
@classmethod
def tearDownClass(cls) -> None:
logger.info('------------------------------TestAuditOver------------------------------')
if __name__ == '__main__':
unittest.main()
| galaxyling/api-framework | testcases/test_06_audit.py | test_06_audit.py | py | 3,807 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "middlerware.handler.Handler.logger",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "middlerware.handler.Handler",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "middlerware.handler.Handler.excel",
"line_number": 17,
"usage_type": ... |
16543939917 | import functools
def makeTable(grid):
"""Create a REST table."""
def makeSeparator(num_cols, col_width, header_flag):
if header_flag == 1:
return num_cols * ("+" + (col_width) * "=") + "+\n"
else:
return num_cols * ("+" + (col_width) * "-") + "+\n"
def normalizeCell(string, length):
return string + ((length - len(string)) * " ")
cell_width = 2 + max(
functools.reduce(
lambda x, y: x + y, [[len(item) for item in row] for row in grid], []
)
)
num_cols = len(grid[0])
rst = makeSeparator(num_cols, cell_width, 0)
header_flag = 1
for row in grid:
rst = (
rst
+ "| "
+ "| ".join([normalizeCell(x, cell_width - 1) for x in row])
+ "|\n"
)
rst = rst + makeSeparator(num_cols, cell_width, header_flag)
header_flag = 0
return rst
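# Hedged usage sketch (not part of the original module): a quick demo of the
# table layout with an illustrative grid.
if __name__ == "__main__":
    demo_grid = [
        ["Name", "Purpose"],
        ["alpha", "first"],
        ["beta", "second"],
    ]
    # The separator printed directly under the header row uses '=';
    # all other separators use '-'.
    print(makeTable(demo_grid))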
| Nuitka/Nuitka | nuitka/utils/Rest.py | Rest.py | py | 927 | python | en | code | 10,019 | github-code | 6 | [
{
"api_name": "functools.reduce",
"line_number": 17,
"usage_type": "call"
}
] |
39972780174 | #!/usr/bin/env python3
# ------------------------------------------------------------------------
# MIDI Control for SignalFlow
# ------------------------------------------------------------------------
from signalflow import *
import configparser
import logging
import mido
import os
logger = logging.getLogger(__name__)
class MIDIManager:
shared_manager = None
def __init__(self, device_name: str = None):
if device_name is None:
if os.getenv("SIGNALFLOW_MIDI_OUTPUT_DEVICE_NAME") is not None:
device_name = os.getenv("SIGNALFLOW_MIDI_OUTPUT_DEVICE_NAME")
else:
config_path = os.path.expanduser("~/.signalflow/config")
parser = configparser.ConfigParser()
parser.read(config_path)
try:
# --------------------------------------------------------------------------------
# configparser includes quote marks in its values, so strip these out.
# --------------------------------------------------------------------------------
device_name = parser.get(section="midi", option="input_device_name")
device_name = device_name.strip('"')
except configparser.NoOptionError:
pass
except configparser.NoSectionError:
pass
self.input = mido.open_input(device_name)
self.input.callback = self.handle_message
self.voice_class = None
self.voice_class_kwargs = None
self.notes = [None] * 128
self.note_handlers = [[] for _ in range(128)]
self.control_handlers = [[] for _ in range(128)]
self.control_values = [0] * 128
self.channel_handlers = [[] for _ in range(16)]
if MIDIManager.shared_manager is None:
MIDIManager.shared_manager = self
logger.info("Opened MIDI input device: %s" % self.input.name)
def handle_message(self, message):
if message.type == 'control_change':
logger.debug("Received MIDI control change: control %d, value %d" % (message.control, message.value))
self.on_control_change(message.control, message.value)
elif message.type == 'note_on':
logger.debug("Received MIDI note on: note %d, velocity %d" % (message.note, message.velocity))
if self.voice_class:
voice = self.voice_class(frequency=midi_note_to_frequency(message.note),
amplitude=message.velocity / 127,
**self.voice_class_kwargs)
voice.play()
voice.auto_free = True
self.notes[message.note] = voice
if self.note_handlers[message.note]:
self.note_handlers[message.note]()
elif message.type == 'note_off':
logger.debug("Received MIDI note off: note %d" % (message.note))
if self.notes[message.note]:
self.notes[message.note].set_input("gate", 0)
try:
channel = message.channel
for handler in self.channel_handlers[channel]:
handler.handle_message(message)
except AttributeError:
pass
@classmethod
def get_shared_manager(cls):
if MIDIManager.shared_manager is None:
MIDIManager.shared_manager = MIDIManager()
return MIDIManager.shared_manager
def set_voice_patch(self, cls, **kwargs):
self.voice_class = cls
self.voice_class_kwargs = kwargs
def add_note_handler(self, note, handler):
self.note_handlers[note] = handler
def add_control_handler(self, control, handler):
self.control_handlers[control].append(handler)
def on_control_change(self, control, value):
self.control_values[control] = value
for handler in self.control_handlers[control]:
handler.on_change(value)
def get_control_value(self, control):
return self.control_values[control]
def add_channel_handler(self, channel, handler):
self.channel_handlers[channel].append(handler)
def remove_channel_handler(self, channel, handler):
self.channel_handlers[channel].remove(handler)
class MIDIControl(Patch):
def __init__(self, control, range_min, range_max, initial=None, mode="absolute", manager=None, curve="linear"):
super().__init__()
assert mode in ["absolute", "relative"]
if manager is None:
manager = MIDIManager.get_shared_manager()
self.value = self.add_input("value")
self.value_smoothed = Smooth(self.value, 0.999)
self.set_output(self.value_smoothed)
self.control = control
self.range_min = range_min
self.range_max = range_max
self.curve = curve
if initial is not None:
if self.curve == "exponential":
self._value_norm = scale_exp_lin(initial, range_min, range_max, 0, 1)
elif self.curve == "linear":
self._value_norm = scale_lin_lin(initial, range_min, range_max, 0, 1)
else:
self._value_norm = 0.5
self.update()
self.mode = mode
manager.add_control_handler(control, self)
def on_change(self, value):
if self.mode == "absolute":
self._value_norm = value / 127.0
elif self.mode == "relative":
if value > 64:
value = value - 128
change = value / 127.0
self._value_norm += change
if self._value_norm < 0:
self._value_norm = 0
if self._value_norm > 1:
self._value_norm = 1
self.update()
def update(self):
if self.curve == "exponential":
value_scaled = scale_lin_exp(self._value_norm, 0, 1, self.range_min, self.range_max)
elif self.curve == "linear":
value_scaled = scale_lin_lin(self._value_norm, 0, 1, self.range_min, self.range_max)
self.set_input("value", value_scaled)
| ideoforms/signalflow | auxiliary/libs/signalflow_midi/signalflow_midi.py | signalflow_midi.py | py | 6,131 | python | en | code | 138 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path.expanduser",
"line_... |
19621184365 | #import libraries
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
"""
This tutorial from the AI at UCLA's technical blog post:
https://uclaacmai.github.io/Linear-Regression
"""
testlines = []
testans = []
trainlines = []
trainans = []
for line in open("boston2.txt", 'r').readlines()[0:100]:
tl = line.strip('\n').strip(' ').split(' ')
    # list(...) is needed in Python 3, where map() returns a lazy iterator
    testlines.append(list(map(lambda x: float(x.strip(' ')), tl[0:13])))
testans.append([float(tl[13].strip(' '))])
for line in open("boston2.txt", 'r').readlines()[100:]:
tl = line.strip('\n').strip(' ').split(' ')
    trainlines.append(list(map(lambda x: float(x.strip(' ')), tl[0:13])))
trainans.append([float(tl[13].strip(' '))])
X_train = np.array(trainlines, dtype=np.float32)
X_test = np.array(testlines, dtype=np.float32)
Y_train = np.array(trainans, dtype=np.float32)
Y_test = np.array(testans, dtype=np.float32)
print(",".join([str(t.shape) for t in (X_train, X_test, Y_train, Y_test)]))
prices = Y_train.tolist()
student_teacher_ratios = [X_train[i][10] for i in range(X_train.shape[0])]
plt.scatter(student_teacher_ratios,prices)
plt.show()
X = tf.placeholder(tf.float32,shape=[None,13])
Y = tf.placeholder(tf.float32, shape = [None,1])
W = tf.Variable(tf.constant(0.1,shape=[13,1]))
b = tf.Variable(tf.constant(0.1))
y_pred = tf.matmul(X,W) + b
loss = tf.reduce_mean(tf.square(y_pred - Y))
opt = tf.train.GradientDescentOptimizer(learning_rate = 0.5).minimize(loss)
init = tf.global_variables_initializer()
sess = tf.InteractiveSession()
sess.run(init)
initial_loss = loss.eval(feed_dict = {X:X_train, Y:Y_train})
print("initial loss: {}".format(initial_loss))
for i in range(100):
#Run the optimization step with training data
sess.run(opt, feed_dict = {X:X_train, Y:Y_train})
print("epoch "+str(i)+"loss:{}".format(loss.eval(feed_dict = {X:X_train, Y:Y_train})))
final_test_loss = loss.eval(feed_dict = {X:X_test,Y:Y_test})
print("final loss (test): {}".format(final_test_loss))
| canders1/COMSC343 | _site/pdf/regression_class.py | regression_class.py | py | 1,961 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.array",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_... |
10380773430 | import torch.nn as nn
from ..builder import BACKBONES
from .base_backbone import BaseBackbone
@BACKBONES.register_module()
class RMNet(BaseBackbone):
    def __init__(self, depth, frozen_stages=-1, norm_eval=False):
        super(RMNet, self).__init__()
        self.frozen_stages = frozen_stages
        # norm_eval is consulted in train(); the original code referenced
        # self.norm_eval without ever setting it, raising AttributeError.
        self.norm_eval = norm_eval
self.conv1 = nn.Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.inplanes = 64
stages = [2,2,2,2] if depth == 18 else [3,4,6,3]
self.layer1 = self._make_layer(64, stages[0], 1)
self.layer2 = self._make_layer(128, stages[1], 2)
self.layer3 = self._make_layer(256, stages[2], 2)
self.layer4 = self._make_layer(512, stages[3], 2)
self._freeze_stages()
def _make_layer(self, planes, blocks, stride=1):
layers = []
layers.append(
nn.Sequential(
nn.Conv2d(self.inplanes, self.inplanes+planes, kernel_size=3,stride=stride, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(self.inplanes+planes, planes, kernel_size=3,stride=1, padding=1),
nn.ReLU(inplace=True)
)
)
self.inplanes = planes
for _ in range(1, blocks):
layers.append(
nn.Sequential(
nn.Conv2d(self.inplanes, self.inplanes+planes, kernel_size=3,stride=1, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(self.inplanes+planes, planes, kernel_size=3,stride=1, padding=1),
nn.ReLU(inplace=True)
)
)
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
outs = []
x = self.layer1(x)
outs.append(x)
x = self.layer2(x)
outs.append(x)
x = self.layer3(x)
outs.append(x)
x = self.layer4(x)
outs.append(x)
return tuple(outs)
def _freeze_stages(self):
if self.frozen_stages >= 0:
self.bn1.eval()
for m in [self.conv1, self.bn1]:
for param in m.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, f'layer{i}')
m.eval()
for param in m.parameters():
param.requires_grad = False
def train(self, mode=True):
super(RMNet, self).train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
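# Hedged usage sketch (not part of the original module; assumes RMNet is
# imported through the mmcls package, since this file uses relative imports):
#
#     model = RMNet(depth=18)
#     model.eval()
#     with torch.no_grad():
#         outs = model(torch.randn(1, 3, 224, 224))
#     # -> shapes (1, 64, 56, 56), (1, 128, 28, 28),
#     #           (1, 256, 14, 14), (1, 512, 7, 7)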
| fxmeng/mmclassification | mmcls/models/backbones/rmnet.py | rmnet.py | py | 2,851 | python | en | code | null | github-code | 6 | [
{
"api_name": "base_backbone.BaseBackbone",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d... |
3477657730 | import logging
import posixpath
from collections import defaultdict
from typing import TYPE_CHECKING, Callable, Dict, Generator, Optional, Tuple
from ...stash import ExpStashEntry
from ..base import BaseExecutor
from ..ssh import SSHExecutor, _sshfs
from .base import BaseExecutorManager
if TYPE_CHECKING:
from scmrepo.git import Git
from dvc.repo import Repo
logger = logging.getLogger(__name__)
class SSHExecutorManager(BaseExecutorManager):
EXECUTOR_CLS = SSHExecutor
def __init__(
self,
scm: "Git",
wdir: str,
host: Optional[str] = None,
port: Optional[int] = None,
username: Optional[str] = None,
fs_factory: Optional[Callable] = None,
**kwargs,
):
assert host
super().__init__(scm, wdir, **kwargs)
self.host = host
self.port = port
self.username = username
self._fs_factory = fs_factory
def _load_infos(self) -> Generator[Tuple[str, "BaseExecutor"], None, None]:
# TODO: load existing infos using sshfs
yield from []
@classmethod
def from_stash_entries(
cls,
scm: "Git",
wdir: str,
repo: "Repo",
to_run: Dict[str, ExpStashEntry],
**kwargs,
):
machine_name: Optional[str] = kwargs.get("machine_name", None)
manager = cls(
scm, wdir, **repo.machine.get_executor_kwargs(machine_name)
)
manager._enqueue_stash_entries(scm, repo, to_run, **kwargs)
return manager
def sshfs(self):
return _sshfs(self._fs_factory, host=self.host, port=self.port)
def get_infofile_path(self, name: str) -> str:
return f"{name}{BaseExecutor.INFOFILE_EXT}"
def _exec_attached(self, repo: "Repo", jobs: Optional[int] = 1):
from dvc.exceptions import DvcException
from dvc.stage.monitor import CheckpointKilledError
assert len(self._queue) == 1
result: Dict[str, Dict[str, str]] = defaultdict(dict)
rev, executor = self._queue.popleft()
info = executor.info
infofile = posixpath.join(
info.root_dir,
info.dvc_dir,
"tmp",
self.get_infofile_path(rev),
)
try:
exec_result = executor.reproduce(
info=executor.info,
rev=rev,
infofile=infofile,
log_level=logger.getEffectiveLevel(),
fs_factory=self._fs_factory,
)
if not exec_result.exp_hash:
raise DvcException(
f"Failed to reproduce experiment '{rev[:7]}'"
)
if exec_result.ref_info:
result[rev].update(
self._collect_executor(repo, executor, exec_result)
)
except CheckpointKilledError:
# Checkpoint errors have already been logged
return {}
except DvcException:
raise
except Exception as exc:
raise DvcException(
f"Failed to reproduce experiment '{rev[:7]}'"
) from exc
finally:
self.cleanup_executor(rev, executor)
return result
def cleanup_executor(self, rev: str, executor: "BaseExecutor"):
executor.cleanup()
| gshanko125298/Prompt-Engineering-In-context-learning-with-GPT-3-and-LLMs | myenve/Lib/site-packages/dvc/repo/experiments/executor/manager/ssh.py | ssh.py | py | 3,336 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "logging.getLogger",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "base.BaseExecutorManager",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "ssh.S... |
6497432832 | from stock_util import StockUtil
from logger import Logger
from stock_db import StockDb
import time
import threading
import requests
from pandas import DataFrame
import pandas as pd
class StockMon():
def __init__(self):
self.logger = Logger("StockMon")
self.util = StockUtil()
def get_xueqiu_info(self,url):
cookie_url = "https://xueqiu.com"
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'}
r = requests.get(cookie_url,headers=headers)
cookies = r.cookies
r1 = requests.get(url,headers=headers,cookies=cookies)
#self.logger.info(r1.text)
stock_list = eval(r1.text)['stocks']
return DataFrame(stock_list)
def get_market_status_from_xueqiu(self,direction,page_number,page_size):
        # direction = asc: biggest losers; direction = desc: biggest gainers
url = "https://xueqiu.com/stock/cata/stocklist.json?page=%s&size=%s&order=%s&orderby=percent&type=11%%2C12&_=1541985912951"%(page_number,page_size,direction)
#self.logger.info(url)
return self.get_xueqiu_info(url)
def get_market_status(self,direction,page_number,page_size,use_proxy=0):
#direction=0 means top n, direction=1 means bottom n
proxies = {'http': 'http://18.197.117.119:8080', 'https': 'http://18.197.117.119:8080'}
detail_url = "http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeData?\
page=%s&num=%s&sort=changepercent&asc=%s&node=hs_a&symbol=&_s_r_a=init"%(page_number,page_size,direction)
if use_proxy==1:
resp = requests.get(detail_url,proxies=proxies)
else:
resp = requests.get(detail_url)
#self.logger.info(resp.text)
if resp.text=='null':
return ''
elif '?xml' in resp.text:
self.logger.info(resp.text)
else:
return eval(resp.text.replace('symbol','"symbo"').replace('code','"code"').replace('name','"name"').replace('trade','"trade"').\
replace('pricechange','"pricechange"').replace('changepercent','"changepercent"').replace('buy','"buy"').replace('sell','"sell"').\
replace('settlement','"settlement"').replace('open','"open"').replace('high','"high"').replace('low','"low"').\
replace('volume','"volume"').replace('amount','"amount"').replace('ticktime','"ticktime"').replace('per:','"per":').\
replace('pb','"pb"').replace('mktcap','"mktcap"').replace('nmc','"nmc"').replace('turnoverratio','"turnoverratio"'))
def get_zt_number(self):
        # Get the number of limit-up stocks
market_status = self.get_market_status(0,1,100)
for i in range(100):
if float(market_status[i]['changepercent'])<9.7:
self.logger.info("涨停个数:%s"%i)
return i
def get_dt_number(self):
        # Get the number of limit-down stocks
market_status = self.get_market_status(1,1,100)
for i in range(100):
if float(market_status[i]['changepercent'])>-9.7:
self.logger.info("跌停个数:%s"%i)
return i
def monitor_bid(self,stock_list,refresh_interval):
sample = {}
for s in stock_list:
aoi = self.util.get_live_aoi_bid(s)
sample[s] = aoi
while True:
self.logger.info("================Monitor==============")
self.logger.info("股票名称(股票ID)| 涨幅 | 竞买价 | 竞买量(万手)")
for s in stock_list:
status = self.util.get_live_mon_items_bid(s)
aoi = self.util.get_live_aoi_bid(s)
if aoi-sample[s]>1:
plus_icon = "[↑+%s]"%(round(aoi-sample[s],2))
self.logger.info("*%s %s"%(status,plus_icon))
elif aoi-sample[s]<-1:
plus_icon = "[↓%s]"%(round(aoi-sample[s],2))
self.logger.info("*%s %s"%(status,plus_icon))
else:
self.logger.info(status)
'''
if aoi-sample[s]>2:
self.logger.info("Stock %s aoi increased from %s to %s"%(s,sample[s],aoi))
elif aoi-sample[s]<-2:
self.logger.info("Stock %s aoi dropped from %s to %s"%(s,sample[s],aoi))
'''
sample[s] = aoi
time.sleep(refresh_interval)
def check_stock_list(self,stock_list):
self.logger.info("================Monitor==============")
status = '\n'.join(self.util.get_summary_status(stock_list))
self.logger.info(status)
def monitor_after_bid(self,stock_list,refresh_interval):
while True:
self.logger.info("===============Monitor===============")
self.logger.info("股票名称(股票ID)| 开盘涨幅 | 当前涨幅 | 当前价格 | 成交量(万手)| 成交金额(亿)")
for s in stock_list:
self.logger.info(self.util.get_live_mon_items(s))
time.sleep(refresh_interval)
def check_top_and_bottom(self,n):
status = self.get_market_status(0,1,n)
df = DataFrame(status)
df1 = df[['symbo','name','changepercent','trade','open','high','low','volume','turnoverratio']]
print(df1)
status = self.get_market_status(1,1,n)
df = DataFrame(status)
df1 = df[['symbo','name','changepercent','trade','open','high','low','volume','turnoverratio']]
print(df1)
def get_top_n_df(self,direction,n):
#direction=0 means top n, direction=1 means bottom n
status = self.get_market_status(direction,1,n)
df = DataFrame(status)
ret = df[['symbo','name','changepercent','trade','open','turnoverratio']]
print(ret)
return ret
def sum_top_n_list(self,direction,n):
'''
tmp_csv = 'tmp.csv'
df = self.get_top_n_df(direction,n)
df.to_csv(tmp_csv,index=False)
f = open(tmp_csv,'r')
out = open('out.csv','w')
line_number = 0
sample_count = 3
for line in f.readlines():
item = line.replace('\n','')
if line_number==0:
target_line = ",%s,"%(item)
else:
s = item.split(',')[0]
s_name = item.split(',')[1]
#self.logger.info(s)
if s_name.startswith('N'):
target_line = "%s,%s,"%(line_number,item)
else:
db = StockDb()
tmp = []
turn_over_list = db.get_last_n_turnover(s,sample_count)
for t in turn_over_list:
tmp.append(str(t))
turn_over_sample = ','.join(tmp)
pchg_list = db.get_last_n_pchg(s,sample_count)
for t in pchg_list:
tmp.append(str(t))
pchg_sample = ','.join(tmp)
target_line = ("%s,%s,%s,%s"%(line_number,item,turn_over_sample,pchg_sample))
line_number = line_number+1
out.write("%s\n"%(target_line))
f.close()
out.close()
'''
df1 = pd.read_csv('out.csv',index_col=0)
with open('output.html','w',encoding="gb2312") as f:
f.write(df1.to_html())
def get_bid_sample_list(self,top_n=100): #run on 9:20, get stock_list which is in top n
url = 'https://xueqiu.com/stock/cata/stocklist.json?page=1&size=%s&order=desc&orderby=percent&type=11%%2C12&_=1541985912951'%(top_n)
df = self.get_xueqiu_info(url)
df1 = df[['symbol','name','current','percent','volume']]
#print(df1)
s_list = df1['symbol'].values.tolist()
#print(s_list)
return s_list
def mon_bid(self):
sample_list = self.get_bid_sample_list()
f = open('bid.txt','w')
while True:
time.sleep(20) #every 20 seconds, check diff(new_list,sample_list)...
new_list = self.get_bid_sample_list()
check_list = []
for s in new_list:
if s not in sample_list:
check_list.append(s)
for s in check_list:
self.logger.info("================Please check the following==============")
status = self.util.get_live_status(s)
self.logger.info(s)
self.logger.info(status)
f.write(s)
f.close()
if __name__ == '__main__':
t = StockMon()
t.mon_bid()
#df = t.get_bid_sample_list()
#stock_list = t.get_top_n_list(100)
#print(stock_list)
#t.sum_top_n_list(0,100)
#check(50)
#df = DataFrame(t.get_market_status(0,1,50))
#df1 = df.iloc[:,10:20]
#df1 = df.iloc[:,0:10]
#print(df1)
#t.get_bid_sample_list()
#t.mon_bid()
'''
f = open('t1.csv','w')
for i in range(1,40):
status = t.get_market_status(0,i,100)
if status == '':
print('No data in this %s page!'%i)
break
else:
df = DataFrame(status)
csv_file = 't.csv'
#df1 = df.loc[df.turnoverratio>5]
#df1 = df.iloc[:,10:20]
df1 = df[['code','open','high','low','trade','volume','turnoverratio','changepercent']]
print(df1)
df1.to_csv(f,header=False,index=False)
f.close()
'''
| jia-zhang/fp-client | lib/stock_mon.py | stock_mon.py | py | 9,923 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logger.Logger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "stock_util.StockUtil",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "requests.get",
"l... |
5073225846 | import asyncio
import logging
import aiohttp
import aiohttp.server
logger = logging.getLogger(__name__)
class ProxyRequestHandler(aiohttp.server.ServerHttpProtocol):
"""
Inspired by https://github.com/jmehnle/aiohttpproxy
Copyright Julian Mehnle, Apache License 2.0
"""
def __init__(self):
super(ProxyRequestHandler, self).__init__(keep_alive_on=False)
self.logger = logger
@asyncio.coroutine
def handle_request(self, message, payload):
url = message.path
logger.info('{0} {1}'.format(message.method, url))
if message.method in ('POST', 'PUT', 'PATCH'):
data = yield from payload.read()
else:
data = None
message, data = self.intercept_request(message, data)
if not message:
return
response = yield from aiohttp.request(message.method, url, headers=message.headers,
data=data)
response_content = yield from response.content.read()
response, response_content = self.intercept_response(response, response_content)
yield from self.response_to_proxy_response(response, response_content)
def response_to_proxy_response(self, response, response_content):
proxy_response = aiohttp.Response(self.writer, response.status, http_version=response.version)
# Copy response headers, except for Content-Encoding header,
# since unfortunately aiohttp transparently decodes content.
proxy_response_headers = [(name, value)
for name, value
in response.headers.items() if name not in ('CONTENT-ENCODING',)]
proxy_response.add_headers(*proxy_response_headers)
proxy_response.send_headers()
proxy_response.write(response_content)
yield from proxy_response.write_eof()
def intercept_request(self, message, data):
return message, data
def intercept_response(self, response, content):
return response, content
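# Hedged example (not part of the original module): a subclass that rewrites
# textual response bodies through the intercept hook defined above; start it
# with run(8080, cls=UppercasingProxyHandler).
class UppercasingProxyHandler(ProxyRequestHandler):
    def intercept_response(self, response, content):
        # Only rewrite textual payloads; binary content passes through.
        if response.headers.get('CONTENT-TYPE', '').startswith('text/'):
            content = content.upper()
        return response, content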
def run(port, cls=None):
cls = cls or ProxyRequestHandler
loop = asyncio.get_event_loop()
logging.basicConfig(
format='[proxy] {asctime} {levelname} {name}: {message}',
style='{',
level=logging.DEBUG
)
server_future = loop.create_server(lambda: cls(), '', port)
server = loop.run_until_complete(server_future)
logger.info('Accepting HTTP proxy requests on {0}:{1} ...'.format(*server.sockets[0].getsockname()))
loop.run_forever()
if __name__ == '__main__':
run(8080)
| raphaelm/cockatiel | functional_tests/utils_proxy.py | utils_proxy.py | py | 2,602 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "aiohttp.server",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "aiohttp.request",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "asyncio.coroutin... |
36208434520 | import multiDownload as dl
import pandas as pd
import sqlite3
import json
import os
from datetime import datetime, timedelta
import shutil
import argparse
import jieba
from jieba import analyse
from wordcloud import WordCloud
from opencc import OpenCC
import requests
import emoji
with open("scripts/config.json", "r") as file:
data = json.load(file)
# account = data["account"]
# nickName = data["nickName"]
Cookie = data["Cookie"]
picDriverPath = data["picDriverPath"]
dbpath = data["dbpath"]
storyPicLink = data["storyPicLink"]
storyPicType = data["storyPicType"]
# Create the ArgumentParser
parser = argparse.ArgumentParser()
parser.add_argument("account", help="输入account")
parser.add_argument("password", help="输入password")
parser.add_argument("nickName", help="输入nickName")
# parser.add_argument("Cookie", help="输入Cookie")
parser.add_argument("repo", help="输入repo")
options = parser.parse_args()
account = options.account
password = options.password
nickName = options.nickName
# Cookie = options.Cookie
repo = options.repo
font_path = "./scripts/font.otf"
cn_path_svg = "./output/postcrossing_cn.svg"
en_path_svg = "./output/postcrossing_en.svg"
excel_file = "./template/postcardStory.xlsx"
if os.path.exists(dbpath):
shutil.copyfile(dbpath, f"{dbpath}BAK")
def replateTitle(type):
with open(f"./output/title.json", "r",encoding="utf-8") as f:
title = json.load(f)
value = title.get(type)
from_or_to, pageNum, Num, title = value
return title
# Get the total distance for sent/received cards
def getUserHomeInfo(type):
distance_all = []
content = dl.readDB(dbpath,type,"Mapinfo")
#print("content:",content)
for item in content:
distance_all.append(int(item["distance"]))
total = sum(distance_all)
rounds = round((total/40076),2)
return total,len(content),rounds
def getUserSheet(tableName):
data = dl.readDB(dbpath, "", tableName)
countryCount = len(data)
new_data = []
for i, item in enumerate(data):
if item['sentMedian']:
sentMedian = f"{item['sentMedian']}天"
else:
sentMedian = "-"
if item['receivedMedian']:
receivedMedian = f"{item['receivedMedian']}天"
else:
receivedMedian = "-"
formatted_item = {
'国家': f"{item['name']} {emoji.emojize(item['flagEmoji'],language='alias')}",
'已寄出': item['sentNum'],
'已收到': item['receivedNum'],
'寄出-平均': f"{item['sentAvg']}天",
'收到-平均': f"{item['receivedAvg']}天",
'寄出-中间值': sentMedian,
'收到-中间值': receivedMedian,
}
new_data.append(formatted_item)
    # Convert the rows into a DataFrame
    df = pd.DataFrame(new_data)
    # Re-index starting from 1
    df.index = df.index + 1
    # Render the DataFrame as an HTML table, adding Bootstrap CSS classes and centered cells
html_table = df.to_html(classes="table table-striped table-bordered", escape=False)
html_table = html_table.replace('<th>', '<th class="text-center">')
html_table = html_table.replace('<td>', '<td class="text-center">')
    # Build the complete HTML page
html_content = f'''
<!DOCTYPE html>
<html>
<head>
<title>{tableName}</title>
<link rel="stylesheet" href="../src/bootstrap-5.2.2/package/dist/css/bootstrap.min.css">
<style>
.table-responsive {{
width: 100%;
overflow-x: auto;
}}
</style>
</head>
<body>
<div class="container-fluid">
<div class="table-responsive">
{html_table}
</div>
</div>
</body>
</html>
'''
    # Save the HTML table as a web page
with open(f'./output/{tableName}.html', 'w', encoding="utf-8") as file:
file.write(html_content)
return countryCount
def replaceTemplate():
stat,content_raw,types = dl.getAccountStat(Cookie)
title_all=""
desc_all=""
countryNum = getUserSheet("CountryStats")
travelingNum = getTravelingID(account,"traveling",Cookie)
countryCount = f"> 涉及国家[🗺️**{countryNum}**]\n\n"
travelingCount = f"> 待签收[📨**{travelingNum}**]\n\n"
for type in types:
distance,num,rounds = getUserHomeInfo(type)
distance_all = format(distance, ",")
summary = f"**{num}** 📏**{distance_all}** km 🌏**{rounds}** 圈]\n\n"
if type == "sent":
desc = f"> 寄出[📤{summary}"
elif type == "received":
desc = f"> 收到[📥{summary}"
else:
desc =""
desc_all += desc
for type in types:
title = replateTitle(type)
title_all += f"#### [{title}](/{nickName}/postcrossing/{type})\n\n"
title_final = f"{desc_all}\n{countryCount}\n{travelingCount}\n{title_all}"
#print("title_all:\n",title_all)
storylist,storyNum = getCardStoryList("received")
commentlist,commentNum = getCardStoryList("sent")
calendar,series,height = createCalendar()
with open(f"./template/信息汇总_template.md", "r",encoding="utf-8") as f:
data = f.read()
dataNew = data.replace('$account',account)
print(f"已替换account:{account}")
dataNew = dataNew.replace('$title',title_final)
print("已替换明信片墙title")
dataNew = dataNew.replace('$storylist',storylist).replace('$storyNum',storyNum)
print("已替换明信片故事list")
dataNew = dataNew.replace('$commentlist',commentlist).replace('$commentNum',commentNum)
print("已替换明信片评论list")
dataNew = dataNew.replace('$calendar',calendar)
dataNew = dataNew.replace('$series',series)
dataNew = dataNew.replace('$height',str(height))
print("已替换明信片日历list")
dataNew = dataNew.replace('$repo',repo)
print(f"已替换仓库名:{repo}")
with open(f"./output/信息汇总.md", "w",encoding="utf-8") as f:
f.write(dataNew)
blog_path = r"D:\web\Blog2\src\Arthur\Postcrossing\信息汇总.md"
    # Replace with your blog's local path; the page is synced there automatically
if os.path.exists(blog_path):
with open(blog_path, "w", encoding="utf-8") as f:
f.write(dataNew)
def StoryXLS2DB(excel_file):
df = pd.read_excel(excel_file)
content_all = []
for index, row in df.iterrows():
data = {
"id": row[0],
"content_en": row[1],
"content_cn": row[2],
"comment_en": row[3],
"comment_cn": row[4],
}
content_all.append(data)
tablename = "postcardStory"
dl.writeDB(dbpath, content_all,tablename)
def getCardStoryList(type):
list_all = ""
content =dl.readDB(dbpath, type,"postcardStory")
num = str(len(content))
for id in content:
postcardID = id["id"]
content_en = id["content_en"]
content_cn = id["content_cn"]
comment_en = id["comment_en"]
comment_cn = id["comment_cn"]
def remove_blank_lines(text):
if text:
return "\n".join(line for line in text.splitlines() if line.strip())
return text
        # Strip blank lines
content_en = remove_blank_lines(content_en)
content_cn = remove_blank_lines(content_cn)
comment_en = remove_blank_lines(comment_en)
comment_cn = remove_blank_lines(comment_cn)
if comment_en:
comment = f'@tab 回复\n' \
f'* 回复原文\n\n> {comment_en}\n\n* 翻译:\n\n> {comment_cn}\n\n:::'
else:
comment = ":::"
#print("comment:",comment)
userInfo = id["userInfo"]
picFileName = id["picFileName"]
contryNameEmoji = id["contryNameEmoji"] if id["contryNameEmoji"] is not None else ""
travel_time = id["travel_time"]
distanceNum = id["distance"]
distance = format(distanceNum, ",")
if type == "received":
list = f'### [{postcardID}](https://www.postcrossing.com/postcards/{postcardID})\n\n' \
f'> 来自 {userInfo} {contryNameEmoji}\n' \
f'> 📏 {distance} km\n⏱ {travel_time}\n\n' \
f':::tabs\n' \
f'@tab 图片\n' \
f'<div class="image-preview"> <img src="{picDriverPath}/{picFileName}" />' \
f' <img src="{storyPicLink}/{postcardID}.{storyPicType}" /></div>' \
f'\n\n' \
f'@tab 内容\n' \
f'* 卡片文字\n\n> {content_en}\n\n* 翻译:\n\n> {content_cn}\n\n' \
f'{comment}\n\n' \
f'---\n'
else:
list = f'### [{postcardID}](https://www.postcrossing.com/postcards/{postcardID})\n\n' \
f'> 寄往 {userInfo} {contryNameEmoji}\n' \
f'> 📏 {distance} km\n⏱ {travel_time}\n\n' \
f':::tabs\n' \
f'@tab 图片\n' \
f'\n\n' \
f'' \
f'{comment}\n\n' \
f'---\n'
list_all += list
return list_all,num
def createCalendar():
with open("output/UserStats.json", "r") as file:
a_data = json.load(file)
year_list = []
for data in a_data:
        timestamp = data[0]  # get the timestamp
        date = datetime.fromtimestamp(timestamp)  # convert the timestamp to a date
        year = date.strftime("%Y")  # extract the year (YYYY)
if year not in year_list:
year_list.append(year)
calendar_all=""
series_all=""
for i,year in enumerate(year_list):
calendar = f"""
{{
top: {i*150},
cellSize: ["auto", "15"],
range: {year},
itemStyle: {{
color: '#ccc',
borderWidth: 3,
borderColor: '#fff'
}},
splitLine: true,
yearLabel: {{
show: true
}},
dayLabel: {{
firstDay: 1,
}}
}},
"""
calendar_all+=calendar
series = f"""
{{
type: "heatmap",
coordinateSystem: "calendar",
calendarIndex: {i},
data: data
}},
"""
series_all+=series
height = len(year_list)*150
return calendar_all, series_all, height
def createWordCloud(type, contents):
contents = contents.replace("nan","")
    # Output in SVG format
    if type == "cn":
        path = cn_path_svg
        # Extract keywords with jieba's TextRank
        keywords = jieba.analyse.textrank(contents, topK=100, withWeight=False, allowPOS=('ns', 'n', 'vn', 'v'))
        #print(f"keywords={keywords}")
        # Create an OpenCC converter for Simplified-to-Traditional Chinese
        converter = OpenCC('s2t.json')
        # Count occurrences of each keyword
        keyword_counts = {}
        for keyword in keywords:
            count = contents.count(keyword)
            keyword = converter.convert(keyword)  # Simplified to Traditional
            keyword_counts[keyword] = count
        print(keyword_counts)
        # Create a WordCloud object with the font file path
        wordcloud = WordCloud(width=1600, height=800, background_color="white", font_path=font_path)
        # Generate the word cloud
        wordcloud.generate_from_frequencies(keyword_counts)
else:
path = en_path_svg
wordcloud = WordCloud(width=1600, height=800, background_color="white", font_path=font_path, max_words=100).generate(contents)
keywords = wordcloud.words_
print(keywords)
svg_image = wordcloud.to_svg(embed_font=True)
with open(path, "w+", encoding='UTF8') as f:
f.write(svg_image)
print(f"已保存至{path}")
def readStoryDB(dbpath):
result_cn = ""
result_en = ""
content =dl.readDB(dbpath, "sent","postcardStory")
for id in content:
postcardID = id["id"]
content_en = id["content_en"]
content_cn = id["content_cn"]
comment_en = id["comment_en"]
comment_cn = id["comment_cn"]
data_en = f"{content_en}\n{comment_en}\n"
data_cn = f"{content_cn}\n{comment_cn}\n"
result_en += data_en
result_cn += data_cn
return result_cn,result_en
# Fetch the account's live sent/received postcard lists and the detail data for each postcardID
def getTravelingID(account,type,Cookie):
headers = {
'Host': 'www.postcrossing.com',
'X-Requested-With': 'XMLHttpRequest',
'Sec-Fetch-Site': 'same-origin',
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
'Accept-Encoding': 'gzip, deflate',
'Sec-Fetch-Mode': 'cors',
'Accept': 'application/json, text/javascript, */*; q=0.01',
'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 17_0_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.0.1 Mobile/15E148 Safari/604.1',
'Connection': 'keep-alive',
'Referer': f'https://www.postcrossing.com/user/{account}/{type}',
'Cookie': Cookie,
'Sec-Fetch-Dest': 'empty'
}
url=f'https://www.postcrossing.com/user/{account}/data/{type}'
response = requests.get(url,headers=headers).json()
travelingCount = len(response)
data = sorted(response, key=lambda x: x[7])
#print(data)
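    # Each entry is a positional array from the Postcrossing endpoint; the
    # index meanings below (0=id, 1=recipient, 3=country, 4=sent timestamp,
    # 6=distance in km, 7=days traveling) are inferred from how they are used.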
new_data = []
for i,stats in enumerate(data):
baseurl = "https://www.postcrossing.com"
formatted_item = {
'ID号': f"<a href='{baseurl}/travelingpostcard/{stats[0]}'>{stats[0]}</a>",
'收件人': f"<a href='{baseurl}/user/{stats[1]}'>{stats[1]}</a>",
'国家': stats[3],
'寄出时间': datetime.fromtimestamp(stats[4]).strftime('%Y/%m/%d'),
'距离': f'{format(stats[6], ",")} km',
'天数': stats[7]
}
new_data.append(formatted_item)
df = pd.DataFrame(new_data)
    # Re-index starting from 1
    df.index = df.index + 1
    # Drop the sequence-number column
    #df = df.drop(columns=['序号'])
    # Render the DataFrame as an HTML table, adding Bootstrap CSS classes and centered cells
html_table = df.to_html(classes="table table-striped table-bordered", escape=False)
html_table = html_table.replace('<th>', '<th class="text-center">')
html_table = html_table.replace('<td>', '<td class="text-center">')
    # Build the complete HTML page
html_content = f'''
<!DOCTYPE html>
<html>
<head>
<title>还在漂泊的明信片</title>
<link rel="stylesheet" href="../src/bootstrap-5.2.2/package/dist/css/bootstrap.min.css">
<style>
.table-responsive {{
width: 100%;
overflow-x: auto;
}}
</style>
</head>
<body>
<div class="container-fluid">
<div class="table-responsive">
{html_table}
</div>
</div>
</body>
</html>
'''
    # Save the HTML table as a web page
with open(f'./output/{type}.html', 'w', encoding="utf-8") as file:
file.write(html_content)
return travelingCount
dl.replaceTemplateCheck()
excel_file="./template/postcardStory.xlsx"
StoryXLS2DB(excel_file)
replaceTemplate()
if os.path.exists(f"{dbpath}BAK"):
dbStat = dl.compareMD5(dbpath, f"{dbpath}BAK")
if dbStat == "1":
print(f"{dbpath} 有更新")
print(f"正在生成中、英文词库")
result_cn,result_en = readStoryDB(dbpath)
createWordCloud("cn",result_cn)
createWordCloud("en",result_en)
os.remove(f"{dbpath}BAK")
else:
print(f"{dbpath} 暂无更新")
os.remove(f"{dbpath}BAK") | arthurfsy2/Postcrossing_map_generator | scripts/createPersonalPage.py | createPersonalPage.py | py | 15,829 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "json.load",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_... |
30543923938 | import sys
import numpy as np
import torch
from tqdm import tqdm
import matplotlib.pyplot as plt
from Preprocessor import Preprocessor
"""
Dataset Snapshot:
Dataset A:
Normal
Murmur
Extra Heart Sound
Artifact
Dataset B:
Normal
Murmur
Extrasystole
"""
class PASCAL(Preprocessor):
def __init__(self):
super().__init__()
self.dataset_dir = {"normal": ["./data/PASCAL/Atraining_normal/", "./data/PASCAL/Training B Normal/"],
"murmur": ["./data/PASCAL/Atraining_murmur/", "./data/PASCAL/Btraining_murmur/"],
"extra-heart-sounds": ["./data/PASCAL/Atraining_extrahls/", "./data/PASCAL/Btraining_extrastole/"],
"artifact": ["./data/PASCAL/Atraining_artifact/"]}
self.lbls = {"normal": 0, "murmur": 1,
"extra-heart-sounds": 2, "artifact": 3}
self.data = []
self.data_lbls = []
def traverseDataset(self, location):
for label in tqdm(self.dataset_dir):
data_lbl = self.lbls[label]
for dir in self.dataset_dir[label]:
files = self.getFiles(dir)
for file in files:
raw_signal = self.getAudioSignal(f"{dir}{file}", 500)
segmented_signal = self.signalPreprocess(
raw_signal, length=5, sampleRate=500, includeLast=False)
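                # 5 s windows at 500 Hz -> 2500 samples per segment, matching
                # the flatten()[:2500] slice below.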
for segment in segmented_signal:
self.data.append(segment.flatten()[:2500])
self.data_lbls.append(data_lbl)
self.data = torch.tensor(self.data).float()
self.data_lbls = torch.tensor(self.data_lbls).long()
print(self.data.shape)
print(self.data_lbls.shape)
torch.save({'data': self.data, 'labels': self.data_lbls}, location)
def signalPreprocess(self, data, **kargs):
segmented_signal = self.timeSegmentation(
data, length=kargs["length"], sampleRate=kargs["sampleRate"], includeLast=kargs["includeLast"])
return segmented_signal
dataset = PASCAL()
dataset.traverseDataset("./data/preprocessed/PASCAL.pt")
| kendreaditya/heart-auscultation | src/preprocess/PASCAL-dataset.py | PASCAL-dataset.py | py | 2,173 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "Preprocessor.Preprocessor",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "tqdm.tqdm",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"... |
40696691343 | import asyncio
import re
from collections import namedtuple
from magma.magmad.check import subprocess_workflow
DEFAULT_NUM_PACKETS = 4
DEFAULT_TIMEOUT_SECS = 20
PingCommandParams = namedtuple(
'PingCommandParams',
['host_or_ip', 'num_packets', 'timeout_secs'],
)
PingInterfaceCommandParams = namedtuple(
'PingInterfaceCommandParams',
['host_or_ip', 'num_packets', 'interface', 'timeout_secs'],
)
PingCommandResult = namedtuple(
'PingCommandResult',
['error', 'host_or_ip', 'num_packets', 'stats'],
)
ParsedPingStats = namedtuple(
'ParsedPingStats', [
'packets_transmitted',
'packets_received',
'packet_loss_pct',
'rtt_min',
'rtt_avg',
'rtt_max',
'rtt_mdev',
],
)
# regexp's for parsing
dec_re = r'\d+(\.\d+)?'
packet_line_re = re.compile(
r'^(?P<packets_transmitted>\d+) packets transmitted, '
+ r'(?P<packets_received>\d+) received, '
+ r'(?P<packet_loss_pct>{d})% packet loss, '.format(d=dec_re)
+ r'time .+$',
)
rtt_line_re = re.compile(
r'^rtt min/avg/max/mdev = '
+ r'(?P<rtt_min>{d})/(?P<rtt_avg>{d})/'.format(d=dec_re)
+ r'(?P<rtt_max>{d})/(?P<rtt_mdev>{d}) ms$'.format(d=dec_re),
)
def ping(ping_params):
"""
Execute ping commands via subprocess. Blocks while waiting for output.
Args:
ping_params ([PingCommandParams]): params for the pings to execute
Returns:
[PingCommandResult]: stats from the executed ping commands
"""
return subprocess_workflow.exec_and_parse_subprocesses(
ping_params,
_get_ping_command_args_list,
parse_ping_output,
)
@asyncio.coroutine
def ping_async(ping_params, loop=None):
"""
Execute ping commands asynchronously.
Args:
ping_params ([PingCommandParams]): params for the pings to execute
loop: asyncio event loop (optional)
Returns:
[PingCommandResult]: stats from the executed ping commands
"""
return subprocess_workflow.exec_and_parse_subprocesses_async(
ping_params,
_get_ping_command_args_list,
parse_ping_output,
loop,
)
@asyncio.coroutine
def ping_interface_async(ping_params, loop=None):
"""
Execute ping commands asynchronously through specified interface.
Args:
ping_params ([PingCommandParams]): params for the pings to execute
loop: asyncio event loop (optional)
Returns:
[PingCommandResult]: stats from the executed ping commands
"""
return subprocess_workflow.exec_and_parse_subprocesses_async(
ping_params,
_get_ping_command_interface_args_list,
parse_ping_output,
loop,
)
def _get_ping_command_args_list(ping_param):
return [
'ping', ping_param.host_or_ip,
'-c', str(ping_param.num_packets or DEFAULT_NUM_PACKETS),
'-w', str(ping_param.timeout_secs or DEFAULT_TIMEOUT_SECS),
]
def _get_ping_command_interface_args_list(ping_param):
return [
'ping', ping_param.host_or_ip,
'-c', str(ping_param.num_packets or DEFAULT_NUM_PACKETS),
'-I', str(ping_param.interface),
'-w', str(ping_param.timeout_secs or DEFAULT_TIMEOUT_SECS),
]
def parse_ping_output(stdout, stderr, param):
"""
Parse stdout output from a ping command.
Raises:
ValueError: If any errors are encountered while parsing ping output.
"""
def create_error_result(error_msg):
return PingCommandResult(
error=error_msg,
host_or_ip=param.host_or_ip,
num_packets=param.num_packets or DEFAULT_NUM_PACKETS,
stats=None,
)
def find_statistic_line_idx(ping_lines):
line_re = re.compile('^--- .+ statistics ---$')
for i, line in enumerate(ping_lines):
if line_re.match(line):
return i
raise ValueError('Could not find statistics header in ping output')
def match_ping_line(line, line_re, line_name='ping'):
line_match = line_re.match(line)
if not line_match:
raise ValueError(
'Could not parse {name} line:\n{line}'.format(
name=line_name,
line=line,
),
)
return line_match
def str_to_num(s_in):
try:
return int(s_in)
except ValueError:
return float(s_in)
if stderr:
return create_error_result(stderr)
else:
try:
stdout_lines = stdout.decode('ascii').strip().split('\n')
stat_header_line_idx = find_statistic_line_idx(stdout_lines)
if len(stdout_lines) <= stat_header_line_idx + 2:
raise ValueError(
'Not enough output lines in ping output. '
'The ping may have timed out.',
)
packet_match = match_ping_line(
stdout_lines[stat_header_line_idx + 1],
packet_line_re,
line_name='packet',
)
rtt_match = match_ping_line(
stdout_lines[stat_header_line_idx + 2],
rtt_line_re,
line_name='rtt',
)
match_dict = {}
match_dict.update(packet_match.groupdict())
match_dict.update(rtt_match.groupdict())
match_dict = {k: str_to_num(v) for k, v in match_dict.items()}
return PingCommandResult(
error=None,
host_or_ip=param.host_or_ip,
num_packets=param.num_packets or DEFAULT_NUM_PACKETS,
stats=ParsedPingStats(**match_dict),
)
except ValueError as e:
return create_error_result(str(e.args[0]))
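if __name__ == "__main__":
    # Minimal self-check (not part of the original module): run parse_ping_output against
    # canned Linux-style ping output; the stdout bytes below are fabricated for illustration.
    _sample = (
        b"PING 8.8.8.8 (8.8.8.8) 56(84) bytes of data.\n\n"
        b"--- 8.8.8.8 ping statistics ---\n"
        b"4 packets transmitted, 4 received, 0% packet loss, time 3004ms\n"
        b"rtt min/avg/max/mdev = 9.1/10.2/12.3/1.1 ms"
    )
    _params = PingCommandParams(host_or_ip="8.8.8.8", num_packets=4, timeout_secs=20)
    _result = parse_ping_output(_sample, stderr=None, param=_params)
    print(_result.stats.packet_loss_pct, _result.stats.rtt_avg)  # 0 10.2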
| magma/magma | orc8r/gateway/python/magma/magmad/check/network_check/ping.py | ping.py | py | 5,786 | python | en | code | 1,605 | github-code | 6 | [
{
"api_name": "collections.namedtuple",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "... |
41495871576 | from rdkit import Chem
import argparse
import math
import os
from sklearn.metrics import mean_squared_error, r2_score
from statistics import stdev
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Calculate the RMSD value for a molecular property.')
parser.add_argument('--original', '-o', metavar='ORIGINAL', required=True, type=str, help='File with original property values. SDF or SMILES format.')
parser.add_argument('--prediction', '-p', metavar='PREDICTION', required=True, type=str, help='File with predicted property values. SDF or SMILES format.')
parser.add_argument('--optimized', '-q', metavar="OPTIMIZED", type=str, help="File with optimized property predictions. SDF or SMILES format.")
parser.add_argument('-i', '--id', type=str, default="_Name", help="ID tag for files in SD format.")
parser.add_argument('-l', '--logfile', type=str, default="evaluation_result.log")
parser.add_argument('--label_predicted', type=str, required=True, help="Label of the prediction.")
parser.add_argument('--label_optimized', type=str, required=True, help="Label of the optimized prediction.")
    parser.add_argument('property', metavar='Property', type=str, help='Property of interest (should be the same in both files).')
args = parser.parse_args()
original_dict = {}
pred_dict = {}
opti_dict = {}
orig_ext = os.path.splitext(args.original)[-1].lower()
pred_ext = os.path.splitext(args.prediction)[-1].lower()
id_label = args.id
# collect original values
if orig_ext == ".sdf":
original_mols = Chem.SDMolSupplier(args.original)
for mol in original_mols:
original_dict[mol.GetProp(id_label)] = float(mol.GetProp(args.property))
else:
for line in open(args.original, "r"):
line_arr = line.split("\t")
original_dict[line_arr[1]] = float(line_arr[2])
    # collect predicted values
    if pred_ext == ".sdf":
pred_mols = Chem.SDMolSupplier(args.prediction)
for mol in pred_mols:
pred_dict[mol.GetProp(id_label)] = float(mol.GetProp(args.property))
else:
for line in open(args.prediction, "r"):
line_arr = line.split("\t")
pred_dict[line_arr[1]] = float(line_arr[2])
if args.optimized:
if os.path.splitext(args.optimized)[-1].lower() == ".sdf":
pred_mols = Chem.SDMolSupplier(args.optimized)
for mol in pred_mols:
opti_dict[mol.GetProp(id_label)] = float(mol.GetProp(args.label_optimized))
        else:
            # text format: optimized value taken as the prediction column plus the correction column
            for line in open(args.optimized, "r"):
                line_arr = line.split("\t")
                opti_dict[line_arr[1]] = float(line_arr[2]) + float(line_arr[3])
sum_sq = 0
preds = []
orgs = []
optis_all = [] # including unoptimizable prediction values
optis_only = []
orgs_only = []
unoptimizable_ids = []
unopt_values = []
unopt_values_for_ori = []
pred_in_net = []
pred_not_in_net = []
for id in pred_dict.keys():
preds.append(pred_dict[id])
orgs.append(original_dict[id])
diff = (pred_dict[id] - original_dict[id])
if args.optimized:
if id in opti_dict.keys():
optis_only.append(opti_dict[id])
orgs_only.append(original_dict[id])
optis_all.append(opti_dict[id])
pred_in_net.append(pred_dict[id])
else:
unoptimizable_ids.append(id)
optis_all.append(pred_dict[id])
unopt_values.append(pred_dict[id])
unopt_values_for_ori.append(original_dict[id])
pred_not_in_net.append(pred_dict[id])
with open(args.logfile, "w") as f:
stdDev_orig_all = stdev(orgs)
stdDev_orig_only_in_net = stdev(orgs_only)
stdDev_orig_only_not_in_net = stdev(unopt_values_for_ori)
stdDev_opt_all = stdev(optis_all)
stdDev_opt_only_in_net = stdev(optis_only)
f.write("StdDevs:\n")
f.write(f"original all values: {stdDev_orig_all}\n")
f.write(f"original only in net values: {stdDev_orig_only_in_net}\n")
f.write(f"original only NOT in net values: {stdDev_orig_only_not_in_net}\n")
f.write(f"optimized all values: {stdDev_opt_all}\n")
f.write(f"optimized only in net values: {stdDev_opt_only_in_net}\n\n")
f.write(f" Root Mean Square Deviation R² Num_of_Samples\n")
f.write(f"Predictions(all): {mean_squared_error(orgs, preds, squared=False):.6f} {r2_score(orgs, preds):.6f} {len(orgs)}\n")
f.write(f"Predictions(only in net): {mean_squared_error(orgs_only, pred_in_net, squared=False):.6f} {r2_score(orgs_only, pred_in_net):.6f} {len(orgs_only)}\n")
f.write(f"Predictions(only NOT in net): {mean_squared_error(unopt_values_for_ori, pred_not_in_net, squared=False):.6f} {r2_score(unopt_values_for_ori, pred_not_in_net):.6f} {len(unopt_values_for_ori)}\n\n")
if len(optis_all) > 0:
f.write(f"Optimized (all): {mean_squared_error(orgs, optis_all, squared=False):.6f} {r2_score(orgs, optis_all):.6f} {len(orgs)}\n")
f.write(
f"Optimized (only): {mean_squared_error(orgs_only, optis_only, squared=False):.6f} {r2_score(orgs_only, optis_only):.6f} {len(orgs_only)}\n\n")
if len(unopt_values) > 0:
f.write(f"Scores (un_opt): {mean_squared_error(unopt_values_for_ori, unopt_values, squared=False):.6f} {r2_score(unopt_values_for_ori, unopt_values):.6f} {len(unopt_values)}\n")
f.write(f"\nUnoptimizable molecules (IDs) ({len(unoptimizable_ids)} mols):\n")
for id in unoptimizable_ids:
f.write(f"{id}\n")
| sophiahoenig/NetworkBalanceScaling | utils/evaluate_results.py | evaluate_results.py | py | 6,038 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext... |
43193724256 | # -*- coding: utf-8 -*-
import streamlit as st
from topics import TopicModel
import pandas as pd
import numpy as np
from scipy.optimize import linear_sum_assignment
import matplotlib.pyplot as plt
import base64
@st.cache(allow_output_mutation=True)
def load_corpus(url):
return tm.load_corpus(url)
# check the cache for an existing model for this url, stopwords and number_of_topics
@st.cache(allow_output_mutation=True, persist=True, show_spinner=False)
def lda_model(url, stopwords, number_of_topics):
corpus = load_corpus(url)
corpus.update_stopwords(stopwords)
with st.spinner("Training the topic model for {} topics ...".format(number_of_topics)):
print("*** Training the topic model: {}".format(number_of_topics))
return lda_model_no_cache(corpus, number_of_topics)
def lda_model_no_cache(corpus, number_of_topics):
if use_heuristic_alpha_value:
return tm.fit(corpus, number_of_topics, alpha="talley", number_of_chunks=number_of_chunks)
else:
return tm.fit(corpus, number_of_topics, number_of_chunks=number_of_chunks)
# check the cache for existing runs for this url, stopwords and number_of_topics
@st.cache(allow_output_mutation=True, persist=True, show_spinner=False)
def lda_model_runs(url, stopwords, number_of_topics, n=4):
corpus = load_corpus(url)
corpus.update_stopwords(stopwords)
with st.spinner("Creating {} different topic models".format(n)):
lda_models = [lda_model_no_cache(corpus, number_of_topics) for run in range(n)]
return lda_models
def topic_alignment(n):
lda_models = lda_model_runs(url, stopwords, number_of_topics, n=n)
topics = pd.DataFrame([[" ".join([tw[0] for tw in lda.lda.show_topic(t, 10)]) for lda in lda_models]
for t in range(number_of_topics)])
diff = [lda_models[0].difference(lda_models[i]) for i in range(1, n)]
matches = pd.DataFrame()
matches[0] = range(number_of_topics)
for i in range(1, n):
_, cols = linear_sum_assignment(diff[i-1])
matches[i] = cols
return topics, matches, lda_models, diff
def highlight_topic(x, topic, matches, color="lightgreen"):
color = "background-color: %s" % (color)
df = pd.DataFrame('', x.index, x.columns)
for run in range(len(x.columns)):
df[run].loc[matches[run][topic]] = color
return df
def topic_runs(lda_models, topic, matches):
keywords = pd.DataFrame()
weights = pd.DataFrame()
documents = pd.DataFrame()
for run in range(len(lda_models)):
keywords[run] = [tw[0] for tw
in lda_models[run].lda.show_topic(matches[run][topic], 10)]
weights[run] = [tw[1] for tw
in lda_models[run].lda.show_topic(matches[run][topic], 10)]
# todo: while correct, this is inefficient as the DTM is recomputed for each run
documents[run] = document_topics_matrix(lda_models[run])[matches[run][topic]]
return lda_models, keywords, weights, documents
# done: once we pass weights, use the relative weights to assign colors
# relative weight = weight / lowest weight in top 10
# for all keywords that are repeated across topics, color them yellow if all >= 2,
# green if some >= 2, and blue if all < 2
def highlight_repeated_keywords(keywords, weights):
df = pd.DataFrame('', keywords.index, keywords.columns)
num_runs, num_words = len(keywords.columns), len(keywords.index)
# extract array from data frame
# we transpose the array so that each row represents one run
keywords = keywords.values.T
weights = weights.values.T
repeated_keywords = []
for keyword in keywords[0]:
i = 0
for run in range(1, num_runs):
if keyword in keywords[run]:
i = i + 1
# print("keyword {} occurs {} times".format(keyword, i))
if i == num_runs - 1:
repeated_keywords.append(keyword)
color = keyword_color(repeated_keywords, num_runs, num_words, keywords, weights)
for j in range(num_runs):
for i in range(num_words):
if keywords[j,i] in repeated_keywords:
df[j].loc[i] = "background-color: light%s" % (color[keywords[j,i]])
return df
def keyword_color(repeated_keywords, num_runs, num_words, keywords, weights):
color = {}
for keyword in repeated_keywords:
color[keyword] = None
for j in range(num_runs):
for i in range(num_words):
if keywords[j,i] in repeated_keywords:
ratio = weights[j,i]/weights[j,num_words-1]
if ratio >= 2.0:
if color[keywords[j,i]] is None:
color[keywords[j,i]] = 'yellow'
elif color[keywords[j,i]] == 'blue':
color[keywords[j,i]] = 'green'
else:
if color[keywords[j,i]] is None:
color[keywords[j,i]] = 'blue'
elif color[keywords[j,i]] == 'yellow':
color[keywords[j,i]] = 'green'
return color
def document_topics_matrix(lda):
dtm = []
for document_bow in corpus.bow():
dtm.append(topics_sparse_to_full(lda.get_document_topics(document_bow)))
return pd.DataFrame(dtm)
def topics_sparse_to_full(topics):
topics_full = [0] * number_of_topics # pythonic way of creating a list of zeros
for topic, score in topics:
topics_full[topic] = score
return topics_full
def download_link_from_csv(csv, file_name, title="Download"):
b64 = base64.b64encode(csv.encode()).decode() # some strings <-> bytes conversions necessary here
href = "<a href='data:file/csv;base64,{}' download='{}'>{}</a>".format(b64, file_name, title)
st.markdown(href, unsafe_allow_html=True)
def download_link_from_html(html, file_name, title="Download"):
b64 = base64.b64encode(html.encode()).decode() # some strings <-> bytes conversions necessary here
href = "<a href='data:file/html;base64,{}' download='{}'>{}</a>".format(b64, file_name, title)
st.markdown(href, unsafe_allow_html=True)
st.sidebar.title("Topic Model Explorer")
tm = TopicModel()
url = st.sidebar.file_uploader("Corpus", type="csv")
stopwords = st.sidebar.text_area("Stopwords (one per line)")
update_stopwords = st.sidebar.button("Update stopwords")
if update_stopwords:
if url is not None:
corpus = load_corpus(url)
corpus.update_stopwords(stopwords)
show_documents = st.sidebar.checkbox("Show documents", value=True)
if show_documents:
st.header("Corpus")
if url is not None:
corpus = load_corpus(url)
if('name' not in corpus.documents or 'content' not in corpus.documents):
st.markdown('''
The corpus must have a *name* and a *content* column.
''')
st.dataframe(corpus.documents)
download_link_from_csv("\n".join(corpus.stopwords), "stopwords.txt",
"Download stopwords")
else:
st.markdown("Please upload a corpus.")
number_of_topics = st.sidebar.slider("Number of topics", 1, 50, 10)
use_heuristic_alpha_value = st.sidebar.checkbox("Use heuristic value for alpha", value=False)
number_of_chunks = st.sidebar.slider("Number of chunks", 1, 100, 100)
show_runs = st.sidebar.checkbox("Compare topic model runs", value=False)
if show_runs:
st.header("Topic Model Runs")
topic_to_highlight = st.sidebar.selectbox("Highlight topic", range(number_of_topics), 0)
show_runs_all_topics = st.sidebar.checkbox("Show all topics", value=True)
if url is None:
st.markdown("No corpus")
elif show_runs_all_topics:
topics, matches, lda_models, diff = topic_alignment(4)
st.table(topics.style
.apply(highlight_topic, topic=topic_to_highlight, matches=matches, axis=None))
download_link_from_csv(topics.to_csv(index=False),
"tm-{}-runs.csv".format(number_of_topics),
title="Download topics as CSV")
else:
# todo: topic_alignment to return weights as well
# then pass weights as argument to highlight_repeated_keywords
topics, matches, lda_models, diff = topic_alignment(4)
lda_models, keywords, weights, documents = topic_runs(lda_models, topic=topic_to_highlight, matches=matches)
st.table(keywords.style
.apply(highlight_repeated_keywords, weights=weights, axis=None))
download_link_from_csv(keywords.to_csv(index=False),
"tm-{}-{}-keywords.csv".format(number_of_topics, topic_to_highlight),
title="Download keywords as CSV")
download_link_from_html(keywords.style
.apply(highlight_repeated_keywords, weights=weights, axis=None)
.render(),
"tm-{}-{}-keywords.html".format(number_of_topics, topic_to_highlight),
title="Download keywords as HTML (with colors)")
st.table(weights)
download_link_from_csv(keywords.to_csv(index=False),
"tm-{}-{}-weights.csv".format(number_of_topics, topic_to_highlight),
title="Download weights as CSV")
st.dataframe(documents)
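# Alignment sketch (illustrative): linear_sum_assignment maps run-0 topics onto another
# run's topics by minimising the total topic distance, e.g. for a 3x3 difference matrix:
#
#   diff = np.array([[0.1, 0.9, 0.8],
#                    [0.7, 0.2, 0.9],
#                    [0.8, 0.9, 0.3]])
#   _, cols = linear_sum_assignment(diff)  # cols == [0, 1, 2]
#   # topic i in run 0 matches topic cols[i] in the other run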
| michaelweiss/topic-model-explorer | old/topic_model_explorer_stability.py | topic_model_explorer_stability.py | py | 8,264 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "streamlit.cache",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "streamlit.spinner",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "streamlit.cache",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "streamlit.spinner",... |
26857827694 | import os
import numpy as np
import torch
import torch.nn as nn
class Generator(nn.Module):
def __init__(self, latent_dim, img_shape):
super().__init__()
self.img_shape = img_shape
self.label_embed = nn.Embedding(10, 10)
def block(in_feat, out_feat, normalize=True):
layers = [nn.Linear(in_feat, out_feat)]
if normalize:
                layers.append(nn.BatchNorm1d(out_feat, 0.8))  # 0.8 is passed positionally as eps (BatchNorm1d's second argument), not momentum
layers.append(nn.LeakyReLU(0.2, inplace=True))
return layers
self.model = nn.Sequential(
*block(latent_dim+10, 128, normalize=False),
*block(128, 256),
*block(256, 512),
*block(512, 1024),
nn.Linear(1024, int(np.prod(img_shape))),
nn.Tanh(),
)
def forward(self, z, labels):
c = self.label_embed(labels)
z = torch.cat([z, c], dim=1)
img = self.model(z)
img = img.view(img.shape[0], *self.img_shape)
return img
class Discriminator(nn.Module):
def __init__(self, img_shape):
super().__init__()
self.label_embed = nn.Embedding(10, 10)
self.model = nn.Sequential(
nn.Linear(int(np.prod(img_shape))+10, 512),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(512, 256),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(256, 1),
nn.Sigmoid(),
)
def forward(self, img, labels):
img_flat = img.view(img.size(0), -1)
c = self.label_embed(labels)
x = torch.cat([img_flat, c], dim=1)
validity = self.model(x)
return validity | zeroone-universe/GM4MNIST | models/cGAN/model.py | model.py | py | 1,673 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "torch.nn.Embedding",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_n... |
36264192726 | from PyQt5.QtCore import QModelIndex, pyqtSignal, pyqtSlot, QVariant, QFile, QByteArray, QBuffer, QIODevice, QSize, \
QItemSelectionModel, QItemSelection
from PyQt5.QtGui import QPixmap, QFont, QIcon
from PyQt5.QtSql import QSqlQuery
from PyQt5.QtWidgets import QWidget, QApplication, QLabel, QDialog, QMessageBox
from TreeModel import TreeModel
from inputdialog import InputDialog
from connect1 import Connect1
from newForm import *
class Widget2(QWidget):
def __init__(self):
super().__init__()
self.db = None
self.setWindowModality(QtCore.Qt.ApplicationModal)
self.setFixedSize(600, 400)
self.setWindowTitle("Лейман М.А.")
label = QLabel(self)
label.move(25, 25)
label.setFixedSize(550, 350)
label.setStyleSheet('background-color: rgb(180, 190, 200)')
label.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
label.setFont(QFont('Arial', 15))
label.setWordWrap(True)
        label.setText('Test program\n \nBuilds a hierarchical list from a database\n as a tree,'
                      ' with support for editing\n and adding child elements. \n \n \n '
                      ' author: Лейман М.А.\n tel: +79613224543\n email: makc.mon@mail.ru')
        self.setWindowFlags(QtCore.Qt.Dialog)  # makes the window non-minimizable
class Widget1(QtWidgets.QWidget):
valueChangedSignal = pyqtSignal(list)
valueInsertSignal = pyqtSignal(list)
def __init__(self, parent):
QtWidgets.QWidget.__init__(self, parent)
        self.idRowNew = None  # id of the new (inserted) row
self.__iDialog = None
self.w = None
self.y = None
self.ui = Ui_Widget()
        pixmap = QPixmap(":/img2.png")  # set the window logo
self.ui.setupUi(self)
self.ui.label_2.setPixmap(pixmap)
self.ui.label.setPixmap(pixmap)
self.ui.treeView.setAttribute(QtCore.Qt.WA_StyledBackground, True)
        self.ui.treeView.setStyleSheet('background-color: rgb(170, 190,195)')  # set the background for the tree view
        self.ui.button.setToolTip("<h3>Click me</h3>")
self.ui.button.setStyleSheet("background-color : rgb(10, 120, 10)")
self.db = Connect1("task")
cursor = self.db.connect_to_data_base
treemodel = TreeModel()
self.ui.treeView.setModel(treemodel)
        self.ui.button.clicked.connect(self.window_open)  # opens the "About" window +
        self.ui.delRow.clicked.connect(self.removeRowTree)  # deletes a row (node) with its child rows +
        self.ui.modifi.clicked.connect(self.modifiRow)  # modifies the data of a row (node) -
        self.ui.addRow.clicked.connect(self.insertChildTree)  # adds a row (node) -
        self.ui.treeView.selectionModel().selectionChanged[QItemSelection, QItemSelection].connect(self.updateActions)
        self.ui.treeView.selectionModel().currentRowChanged[QModelIndex, QModelIndex].connect(self.slotCurrentPic)
        self.valueChangedSignal[list].connect(self.editDataBase)  # modifies the selected element in the DATABASE
        self.valueInsertSignal[list].connect(self.insertDataBase)  # inserts new data into the DATABASE
        self.ui.treeView.setColumnHidden(2, True)  # hides columns 2, 3 and 4
self.ui.treeView.setColumnHidden(3, True)
self.ui.treeView.setColumnHidden(4, True)
header = self.ui.treeView.header()
header.setSectionResizeMode(0, QtWidgets.QHeaderView.ResizeToContents)
header.setSectionResizeMode(1, QtWidgets.QHeaderView.ResizeToContents)
header.setSectionResizeMode(2, QtWidgets.QHeaderView.Fixed)
header.setSectionResizeMode(3, QtWidgets.QHeaderView.Fixed)
header.setSectionResizeMode(4, QtWidgets.QHeaderView.Fixed)
header.setStretchLastSection(False)
self.updateActions()
def window_open(self):
self.w = Widget2()
self.w.show()
# self.hide()
def closeEvent(self, event):
self.db.close_db()
for window in QApplication.topLevelWidgets():
window.close()
def slotCurrentPic(self, index: QModelIndex):
yurModel = self.ui.treeView.model()
item = yurModel.getItem(index)
pix = item.data(1)
if not isinstance(pix, QByteArray):
# sss = QByteArray(item.data(1).encode())
sss = ":/img2.png"
outPixmap = QPixmap(sss)
pixmap = outPixmap.scaledToWidth(200)
else:
sss = pix
outPixmap = QPixmap()
outPixmap.loadFromData(sss)
dd = outPixmap.width()
pixmap = outPixmap.scaledToWidth(200)
self.ui.label.setPixmap(pixmap)
@pyqtSlot()
def insertChildTree(self):
pInputDialog = InputDialog()
if pInputDialog.flag:
            name = pInputDialog.name()  # read the input data
            image = pInputDialog.image()
            state = pInputDialog.state()
            var = pInputDialog.destroyed
            index = self.ui.treeView.selectionModel().currentIndex()  # get the model index of the element
            model = self.ui.treeView.model()  # get the tree model
            colCount = model.columnCount(index)
            itemParent = model.getItem(index)  # the selected element becomes the parent of the inserted element
            idParentRow = int(itemParent.data(2))  # id of the selected row becomes id_parent for the inserted element
newValue = list()
newValue.append(name)
newValue.append(image)
newValue.append(state)
newValue.append(idParentRow)
            self.valueInsertSignal.emit(newValue)  # emit a signal to write the data to the DB
newValue.clear()
            query2 = QSqlQuery()  # fetch the image
query2.prepare("SELECT * FROM hierarhy WHERE id =?;")
query2.addBindValue(self.idRowNew)
query2.exec()
query2.next()
image2 = query2.value(3)
query2.clear()
newValue.append(name)
newValue.append(image2)
newValue.append(self.idRowNew)
newValue.append(idParentRow)
newValue.append(state)
rowNew = model.rowCount(index)
if not model.insertRow(rowNew, index):
return
dictRole = (0, 1, 0, 0, 0)
for column in range(colCount):
                indexChild = model.index(rowNew, column, index)  # index of the inserted element in the model
                model.setData(indexChild, newValue[column], dictRole[column])  # insert data into the model column at this index
self.ui.treeView.selectionModel().reset()
self.updateActions()
def updateActions(self):
hasSelection = not self.ui.treeView.selectionModel().selection().isEmpty()
self.ui.delRow.setEnabled(hasSelection)
self.ui.modifi.setEnabled(hasSelection)
@pyqtSlot()
def modifiRow(self):
pInputDialog = InputDialog()
if pInputDialog.flag:
            name = pInputDialog.name()  # read the input data
image = pInputDialog.image()
state = pInputDialog.state()
var = pInputDialog.destroyed
            index = self.ui.treeView.selectionModel().currentIndex()  # model index of the element
            model = self.ui.treeView.model()
            item2 = model.getItem(index)  # the selected element
            rowItem = item2.rowNumber()  # row number of the element within its parent node
            idRow = int(item2.data(2))  # id of the selected row
            idRowParent = int(item2.data(3))  # id_parent of the selected row
            parent = model.parent(index)  # index of the parent
newValue = list()
newValue.append(name)
newValue.append(image)
newValue.append(state)
newValue.append(idRow)
            self.valueChangedSignal.emit(newValue)  # emit a signal to write the data to the DB
newValue.clear()
            query2 = QSqlQuery()  # fetch the image
query2.prepare("SELECT * FROM hierarhy WHERE id =?;")
query2.addBindValue(idRow)
query2.exec()
query2.next()
image2 = query2.value(3)
query2.clear()
newValue.append(name)
newValue.append(image2)
newValue.append(idRow)
newValue.append(idRowParent)
newValue.append(state)
            model.beginResetModel1()  # update the data in the row
colCount = model.columnCount(index)
dictRole = (0, 1, 0, 0, 0)
for column in range(colCount):
                indexInsert = model.index(rowItem, column, parent)  # find the row of the (current) element being modified
model.setData(indexInsert, newValue[column], dictRole[column])
model.endResetModel1()
newValue.clear()
self.updateActions()
var = pInputDialog.destroyed
def removeRowTree(self):
""" удаляет строку со всеми зависимыми строками """
model = self.ui.treeView.model()
        index = self.ui.treeView.selectionModel().currentIndex()  # get the model index of the selected element
self.remoweItemRows(index, model)
self.ui.treeView.selectionModel().reset()
self.updateActions()
    def remoweItemRows(self, index: QModelIndex, model: TreeModel):  # removes elements from the list of children
        item = model.getItem(index)
        childCountItem = item.childCount()  # number of children of the element
        numRow = item.rowNumber()  # row number of the element
        indexParent = model.parent(index)  # index of the element's parent
        if childCountItem > 0:
            for numRowChild in range(childCountItem - 1, - 1, -1):
                indexChild = model.index(numRowChild, 0, index)
                self.remoweItemRows(indexChild, model)  # cascade deletion of the descendant's descendants
        idRow = int(item.data(2))  # get the row id
        if not model.hasChildren(index):  # if there are no descendants, delete the node
            query2 = QSqlQuery()
            query2.prepare("DELETE FROM hierarhy WHERE id =?;")
            query2.addBindValue(idRow)
            query2.exec()
            query2.clear()
            model.removeRow(numRow, indexParent)  # delete the current node after all children are removed
@pyqtSlot(list)
def insertDataBase(self, newValue: list):
""" вставляет новые данные в базу"""
strName = str(newValue[0])
if strName == '':
return
strImg1 = str(newValue[1])
        file = QFile(strImg1)  # create a QFile object
        dataImg = QByteArray()  # buffer the data will be read into
        # inBuffer = QBuffer(dataImg)
        if file.open(QIODevice.ReadOnly):  # check whether the file can be opened for reading
            dataImg = file.readAll()  # read the data
query3 = QSqlQuery()
query3.exec("INSERT INTO hierarhy (id_parent,name,image,state) VALUES (?, ?, ?, ?)")
strIdParent = int(newValue[3])
query3.addBindValue(strIdParent)
query3.addBindValue(strName)
query3.addBindValue(dataImg)
strState = str(newValue[2])
query3.addBindValue(strState)
query3.exec()
self.idRowNew = int(query3.lastInsertId())
query3.clear()
@pyqtSlot(list)
def editDataBase(self, newValue: list):
strName = str(newValue[0])
if strName == '':
return
strImg1 = newValue[1]
        file = QFile(strImg1)  # create a QFile object
        dataImg = QByteArray()  # buffer the data will be read into
        if file.open(QIODevice.ReadOnly):  # check whether the file can be opened for reading
dataImg = file.readAll()
query3 = QSqlQuery()
query3.prepare("UPDATE hierarhy SET name=?, image=?, state=? WHERE id =?;")
strName = str(newValue[0])
query3.addBindValue(strName)
query3.addBindValue(dataImg)
strState = str(newValue[2])
query3.addBindValue(strState)
idRow = int(newValue[3])
query3.addBindValue(idRow)
query3.exec()
query3.next()
if not query3.isActive():
QMessageBox.warning(self, "Database Error", query3.lastError().text())
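# Round-trip sketch (illustrative, file name hypothetical): images are stored as BLOBs
# via QFile.readAll and restored with QPixmap.loadFromData, e.g.
#
#   f = QFile("logo.png")
#   if f.open(QIODevice.ReadOnly):
#       blob = f.readAll()          # QByteArray ready for a BLOB column
#       f.close()
#   pixmap = QPixmap()
#   pixmap.loadFromData(blob)       # restore for display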
| drug173/Python | applications/Tree1/widget1.py | widget1.py | py | 13,903 | python | ru | code | 0 | github-code | 6 | [
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLabel",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtGui.QFont",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "PyQt... |
25755944520 | import unittest
from datetime import date, datetime
from constants import (
STATUS_IN_PROGRESS,
STATUS_COMPLETED,
TASK_UPDATED,
PRIORITY_HIGH,
PRIORITY_MEDIUM,
PRIORITY_LOW,
TASK1,
TASK2,
TASK3
)
from main import app, bd
from models.task_model import Task
from repository.task_repository import TaskRepository
from service.task_service import get_all_tasks, create_task, update_task, delete_task
class TaskServiceTestCase(unittest.TestCase):
def setUp(self):
app.testing = True
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
self.app_context = app.app_context()
self.app_context.push()
bd.create_all()
self.client = app.test_client()
self.repository = TaskRepository()
def tearDown(self):
bd.session.remove()
bd.drop_all()
def test_get_all_tasks(self):
task1 = Task(
name=TASK1,
priority=PRIORITY_HIGH,
start_date=date.today(),
planned_end_date=date.today(),
actual_end_date=None,
status=STATUS_IN_PROGRESS,
project_id='1'
)
task2 = Task(
name=TASK2,
priority=PRIORITY_MEDIUM,
start_date=date.today(),
planned_end_date=date.today(),
actual_end_date=None,
status=STATUS_IN_PROGRESS,
project_id='2'
)
task3 = Task(
name=TASK3,
priority=PRIORITY_LOW,
start_date=date.today(),
planned_end_date=date.today(),
actual_end_date=None,
status=STATUS_IN_PROGRESS,
project_id='3'
)
self.repository.create(task1)
self.repository.create(task2)
self.repository.create(task3)
tasks_project1 = get_all_tasks(task1.project_id)
tasks_project2 = get_all_tasks(task2.project_id)
tasks_project3 = get_all_tasks(task3.project_id)
self.assertEqual(len(tasks_project1), 1)
self.assertEqual(tasks_project1[0]['name'], TASK1)
self.assertEqual(tasks_project1[0]['priority'], PRIORITY_HIGH)
self.assertEqual(tasks_project1[0]['start_date'], date.today().strftime('%Y-%m-%d'))
self.assertEqual(tasks_project1[0]['planned_end_date'], date.today().strftime('%Y-%m-%d'))
self.assertIsNone(tasks_project1[0]['actual_end_date'])
self.assertEqual(tasks_project1[0]['status'], STATUS_IN_PROGRESS)
self.assertEqual(len(tasks_project2), 1)
self.assertEqual(tasks_project2[0]['name'], TASK2)
self.assertEqual(tasks_project2[0]['priority'], PRIORITY_MEDIUM)
self.assertEqual(tasks_project2[0]['start_date'], date.today().strftime('%Y-%m-%d'))
self.assertEqual(tasks_project2[0]['planned_end_date'], date.today().strftime('%Y-%m-%d'))
self.assertIsNone(tasks_project2[0]['actual_end_date'])
self.assertEqual(tasks_project2[0]['status'], STATUS_IN_PROGRESS)
self.assertEqual(len(tasks_project3), 1)
self.assertEqual(tasks_project3[0]['name'], TASK3)
self.assertEqual(tasks_project3[0]['priority'], PRIORITY_LOW)
self.assertEqual(tasks_project3[0]['start_date'], date.today().strftime('%Y-%m-%d'))
self.assertEqual(tasks_project3[0]['planned_end_date'], date.today().strftime('%Y-%m-%d'))
self.assertIsNone(tasks_project3[0]['actual_end_date'])
self.assertEqual(tasks_project3[0]['status'], STATUS_IN_PROGRESS)
def test_create_task(self):
project_id = 1
data = {
'name': 'New Task',
'priority': 'High',
'status': 'In Progress',
'planned_end_date': '2023-07-20'
}
create_task(project_id, data)
task = self.repository.get_all()[0]
self.assertIsNotNone(task.id)
self.assertEqual(task.name, 'New Task')
self.assertEqual(task.priority, 'High')
self.assertEqual(task.start_date, date.today())
self.assertEqual(task.planned_end_date, datetime.strptime(data['planned_end_date'], '%Y-%m-%d').date())
self.assertIsNone(task.actual_end_date)
self.assertEqual(task.status, 'In Progress')
self.assertEqual(task.project_id, 1)
def test_update_task(self):
task = Task(
name=TASK1,
priority=PRIORITY_HIGH,
start_date=date.today(),
planned_end_date=date.today(),
actual_end_date=None,
status=STATUS_IN_PROGRESS
)
self.repository.create(task)
data = {
'name': TASK_UPDATED,
'priority': PRIORITY_MEDIUM,
'status': STATUS_COMPLETED
}
update_task(task.id, data)
updated_task = self.repository.get_by_id(task.id)
self.assertEqual(updated_task.name, TASK_UPDATED)
self.assertEqual(updated_task.priority, PRIORITY_MEDIUM)
self.assertEqual(updated_task.status, STATUS_COMPLETED)
def test_delete_task(self):
task_data = {
'name': TASK1,
'priority': PRIORITY_HIGH,
'start_date': date.today(),
'planned_end_date': date.today(),
'actual_end_date': None,
'status': STATUS_IN_PROGRESS
}
task = Task(**task_data)
self.repository.create(task)
task_id = task.id
delete_task(task_id)
deleted_task = self.repository.get_by_id(task_id)
self.assertIsNone(deleted_task)
if __name__ == '__main__':
unittest.main()
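# Serializer sketch (hypothetical helper, not from the repo): the assertions above imply
# that get_all_tasks renders dates as 'YYYY-MM-DD' strings, along the lines of:
#
#   def task_to_dict(task):
#       fmt = lambda d: d.strftime('%Y-%m-%d') if d else None
#       return {'name': task.name, 'priority': task.priority,
#               'start_date': fmt(task.start_date),
#               'planned_end_date': fmt(task.planned_end_date),
#               'actual_end_date': fmt(task.actual_end_date),
#               'status': task.status}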
| dan9Protasenia/task-management | tests/test_task_service.py | test_task_service.py | py | 5,950 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "unittest.TestCase",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "main.app.testing",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "main.app",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "main.app.config... |
39690983841 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="visbeat",
version="0.0.9",
author="Abe Davis",
author_email="everyonehasadance@gmail.com",
description="Code for 'Visual Rhythm and Beat' SIGGRAPH 2018",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/abedavis/visbeat",
project_urls={
'Abe Davis': 'http://www.abedavis.com/',
'Visual Rhythm and Beat': 'http://www.abedavis.com/visualbeat/',
'Source': 'https://github.com/abedavis/visbeat',
'Demo': 'http://www.abedavis.com/visualbeat/demo/',
},
install_requires=[
'numpy',
'scipy',
'bs4',
'librosa',
'imageio',
'requests',
'moviepy',
'termcolor',
'youtube-dl',
'matplotlib',
],
scripts=['bin/dancefer'],
packages=setuptools.find_packages(exclude=['contrib', 'docs', 'tests*']),
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 2.7",
"Operating System :: OS Independent",
],
)
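# Post-install sketch (illustrative): the metadata declared above can be read back at
# runtime, e.g. with importlib.metadata on Python 3.8+ (the package itself targets 2.7):
#
#   from importlib import metadata
#   metadata.version("visbeat")              # '0.0.9'
#   metadata.metadata("visbeat")["Summary"]  # "Code for 'Visual Rhythm and Beat' SIGGRAPH 2018"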
| abedavis/visbeat | setup.py | setup.py | py | 1,170 | python | en | code | 220 | github-code | 6 | [
{
"api_name": "setuptools.setup",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 34,
"usage_type": "call"
}
] |
35228257702 | #!/usr/bin/env python
"""Visualisation.py: Visualise data from simulation"""
__author__ = "Murray Ireland"
__email__ = "murray@craftprospect.com"
__date__ = "22/10/2018"
__copyright__ = "Copyright 2017 Craft Prospect Ltd"
__licence___ = ""
import vtk
import numpy as np
from math import tan, sin, cos, atan, pi
# import msvcrt
import sys, os
import platform
from datetime import datetime
from time import sleep
if platform.system() == "Windows":
from win32api import GetSystemMetrics
screen_size = (GetSystemMetrics(0), GetSystemMetrics(1))
elif platform.system() == "Linux":
try:
import tkinter as tk
except ImportError:
import Tkinter as tk
root = tk.Tk()
screen_size = (root.winfo_screenwidth(), root.winfo_screenheight())
# Get current and base directories
cur_dir = os.path.dirname(os.path.realpath(__file__))
if "\\" in cur_dir:
base_dir = "/".join(cur_dir.split("\\"))
else:
base_dir = cur_dir
class Visualisation(object):
"""Class for visualisation object"""
# Scale time
TIME_SCALE = 1
# Turn text overlay on/off
SHOW_TEXT = True
# Full-screen and window scaling
FULL_SCREEN = True
WIN_H_SCALE = 1.
WIN_V_SCALE = 1.
TEXT_SCALE = 1.
# Use lower resolution textures where appropriate
USE_SMALL_IMAGES = False
# Scale entire animation for troubleshooting
ANI_SCALE = 1.
# Scale satellite up for better visibility
SAT_SCALE = 500. # Breaks if too small
# Colours
COLOUR_BG = (0, 0, 0)
COLOUR_FONT = (0.871, 0.246, 0.246)
# Set solar panel angles [deg]
PANEL_ANGLE = 10
# Tile size for Earth textures [m]
TILE_SIZE = (177390, 183360)
# Anchor settings
ANCHOR = {
"SW": (0, 0),
"NW": (0, 2),
"NE": (2, 2),
"SE": (2, 0),
"N": (1, 2),
"E": (2, 1),
"S": (1, 0),
"W": (0, 1),
"C": (1, 1)
}
def __init__(self, PROPS, data):
"""Class constructor"""
# Screen size and aspect ratio
self.SCREEN_SIZE = (screen_size[0], screen_size[1])
self.SCREEN_AR = float(self.SCREEN_SIZE[0])/float(self.SCREEN_SIZE[1])
# Initialise property dictionaries
self.SAT_PROPS = PROPS["Sat"]
self.CAM_PROPS = PROPS["Camera"]
self.IMG_PROPS = PROPS["Imagery"]
self.LSR_POS = PROPS["Laser"]
self.EARTH_PROPS = PROPS["Earth"]
# Initialise simulation data
self.DATA = data
# Initialise imagery
self.IMG_PROPS["Texture size"] = (
self.IMG_PROPS["Size"]["full"][0]*self.IMG_PROPS["Res"]["full"],
self.IMG_PROPS["Size"]["full"][1]*self.IMG_PROPS["Res"]["full"]
)
# Initialise index
self.index = 0
# Initialise render window and interactor
self.renWin, self.iren = self.init_renderer()
self.ren = {}
# Create scenes
self.actors, self.cameras, self.text, self.lights = self.create_scenes()
for key in self.lights.keys():
for light in self.lights[key]:
self.ren[key].AddLight(light)
# Render scenes
self.iren.Initialize()
self.renWin.Render()
# Initialise time
now = datetime.now()
self.init_time = [now.hour, now.minute, now.second]
# Create timer event
self.iren.AddObserver("TimerEvent", self.execute)
timerId = self.iren.CreateRepeatingTimer(int(1000*self.DATA["Time step"]))
# Start interactor and timer
self.iren.Start()
# Stop timer?
# self.movieWriter.End()
def init_renderer(self):
"""Initialise render window and interactor"""
# Initialise render window
renWin = vtk.vtkRenderWindow()
if self.FULL_SCREEN:
renWin.FullScreenOn()
else:
renWin.SetSize(
int(self.WIN_H_SCALE*self.SCREEN_SIZE[0]),
int(self.WIN_V_SCALE*self.SCREEN_SIZE[1])
)
class MyInteractorStyle(vtk.vtkInteractorStyleTrackballCamera):
def __init__(self, parent=None):
return None
# Initialise interactor
iren = vtk.vtkRenderWindowInteractor()
iren.SetInteractorStyle(MyInteractorStyle())
# iren.AutoAdjustCameraClippingRangeOn()
iren.SetRenderWindow(renWin)
return renWin, iren
def init_video(self):
"""Initialise video recorder"""
# Set up filter
imageFilter = vtk.vtkWindowToImageFilter()
imageFilter.SetInput(self.renWin)
imageFilter.SetInputBufferTypeToRGB()
imageFilter.ReadFrontBufferOff()
imageFilter.Update()
return imageFilter, 0
def add_to_ren(self, name, actors, camera, viewport, text):
"""Add elements of scene to renderer window"""
# Create renderer for scene
self.ren[name] = vtk.vtkRenderer()
# Add renderer to render window
self.renWin.AddRenderer(self.ren[name])
# Add camera and viewport
if camera != []:
self.ren[name].SetActiveCamera(camera)
self.ren[name].SetViewport(viewport)
# Add actors
for key in actors:
if type(actors[key]) is list:
for actor in actors[key]:
self.ren[name].AddActor(actor)
else:
self.ren[name].AddActor(actors[key])
self.ren[name].ResetCameraClippingRange()
# Add text
if type(text) is dict:
for actor in text:
self.ren[name].AddActor(text[actor])
else:
self.ren[name].AddActor(text)
self.ren[name].SetBackground(self.COLOUR_BG)
def create_scenes(self):
"""Create scenes"""
# Initialise dictionaries
cameras = {}
text = {}
lights = {}
# Create scenes
actors, cameras["Main"], text["Main"], lights["Main"] = self.scene_main()
# Return actors and cameras
return actors, cameras, text, lights
def scene_main(self):
"""Create main scene"""
# Create viewport
viewport = [0, 0, 1, 1]
# Camera settings
# Focal point offset from sat centre
foffset = [200, -400, 0]
# Distance from sat
cam_dist = 5e3
# Angles
pitch = -65
yaw = 2
# Focal point
fpoint = np.array([0., 0., -self.SAT_PROPS["Alt"]]) + np.array(foffset)
# Transform camera position
prad = pitch*pi/180
yrad = yaw*pi/180
Rpitch = np.matrix([
[cos(prad), 0, sin(prad)],
[0, 1, 0],
[-sin(prad), 0, cos(prad)]
])
Ryaw = np.matrix([
[cos(yrad), -sin(yrad), 0],
[sin(yrad), cos(yrad), 0],
[0, 0, 1]
])
cam_pos = Ryaw*Rpitch*np.matrix([-cam_dist, 0., 0.]).T
cam_pos = np.array(cam_pos).flatten()
cam_pos = cam_pos + fpoint
# cam_pos = [-10, 0., -self.SAT_PROPS["Alt"] - cam_dist]
# Create camera
camera = vtk.vtkCamera()
camera.SetPosition(cam_pos)
camera.SetViewUp(0, 0, -1)
camera.SetViewAngle(15)
camera.SetFocalPoint(fpoint)
camera.SetClippingRange(0.001, 100)
# Create lights
lights = []
lights.append(vtk.vtkLight())
lights[0].SetPosition(cam_pos)
lights[0].SetFocalPoint([0., 0., -self.SAT_PROPS["Alt"]])
lights[0].SetColor(1., 1., 1.)
lights.append(vtk.vtkLight())
lights[1].SetPosition(0., 0., 0)
lights[1].SetFocalPoint([0., 0., -self.SAT_PROPS["Alt"]])
# Create actors
actors = {
"Sat body": self.create_sat_body(),
"Sat panels": self.create_sat_panels(),
"Earth": self.create_earth("true", "small"),
"Forward cam": self.create_cam_fov("Forward"),
"Redline": self.create_line(self.LSR_POS["Red"], [1., 0., 0.]),
"Greenline": self.create_line(self.LSR_POS["Green"], [0., 1., 0.]),
"Blueline": self.create_line(self.LSR_POS["Blue"], [0., 0., 1.])
}
for T, pos in zip(range(self.DATA["Target info"]["Num"]), self.DATA["Target info"]["Pos"]):
actors["T{}".format(T+1)] = self.create_sphere(pos)
# Text actors
text = {}
if self.SHOW_TEXT:
# Craft text
text["Craft"] = self.create_text(
{
"String": "CRAFT PROSPECT",
"Size": 60,
"Font": "Montserrat-SemiBold",
"Y offset": 0.06
},
viewport
)
# Subtitle text
text["QKD"] = self.create_text(
{
"String": "Demo Simulation",
"Size": 40,
"Style": "Normal"
},
viewport
)
# Time text
text["Time"] = self.create_text(
{
"String": "",
"Size": 50,
"Anchor": "NE",
"Font": "7Segment"
},
viewport
)
# Info text
text["Info"] = self.create_text(
{
"String": """Altitude: {:.0f} km
Velocity: {:.2f} km/s
""".format(
self.SAT_PROPS["Alt"]/1000,
self.DATA["Vel"][0, 0]/1000,
),
"Size": 20,
"Anchor": "NE",
"Y offset": 0.06
},
viewport
)
# Render scene
self.add_to_ren("Main", actors, camera, viewport, text)
# Reset clipping range
# camera.SetClippingRange(1000, 1000e3)
# Return actors to animate
return actors, camera, text, lights
def create_sat_body(self):
"""Generate satellite body geometry"""
# Dimensions of body
SAT_SIZE = self.ANI_SCALE*self.SAT_SCALE*np.asarray(self.SAT_PROPS["Size"])/2
bx = SAT_SIZE[0]
by = SAT_SIZE[1]
bz = SAT_SIZE[2]
# Create vertices in body frame
ind = 0
V = []
for x in [-1, 1]:
for y in [-1, 1]:
for z in [-1, 1]:
V.append((bx*x, by*y, bz*z))
# Create faces
F = [
(0, 1, 3, 2),
(4, 5, 7, 6),
(0, 1, 5, 4),
(2, 3, 7, 6),
(0, 2, 6, 4),
(1, 3, 7, 5)
]
# Create building blocks of polydata
sat = vtk.vtkPolyData()
points = vtk.vtkPoints()
polys = vtk.vtkCellArray()
scalars = vtk.vtkFloatArray()
# Load the point, cell and data attributes
for i in range(len(V)):
points.InsertPoint(i, V[i])
for i in range(len(F)):
polys.InsertNextCell(self.mkVtkIdList(F[i]))
for i in range(len(V)):
scalars.InsertTuple1(i, i)
# Assign the pieces to the vtkPolyData.
sat.SetPoints(points)
del points
sat.SetPolys(polys)
del polys
sat.GetPointData().SetScalars(scalars)
del scalars
# Mapper
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(sat)
mapper.ScalarVisibilityOff()
# Actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(0.5, 0.5, 0.5)
actor.GetProperty().SetAmbient(0.5)
actor.GetProperty().SetSpecular(1.0)
actor.GetProperty().SetSpecularPower(5.0)
actor.GetProperty().SetDiffuse(0.2)
# Move to sat position
actor.SetPosition(0, 0, -self.SAT_PROPS["Alt"])
return actor
def create_sat_panels(self):
"""Create satellite solar panel geometry"""
# Dimensions of body
SAT_SIZE = self.ANI_SCALE*self.SAT_SCALE*np.asarray(self.SAT_PROPS["Size"])/2
bx = SAT_SIZE[0]
by = SAT_SIZE[1]
bz = SAT_SIZE[2]
# Panel length
L = bx
# Panels
theta = self.PANEL_ANGLE*pi/180
px1 = bx - L*sin(theta)
py1 = by + L*cos(theta)
pz1 = bz
px2 = px1 + L*sin(theta)
py2 = py1 + L*cos(theta)
pz2 = pz1
# Vertices
V = [
(-bx, by, -bz),
(-bx, by, bz),
(-px1, py1, pz1),
(-px1, py1, -pz1),
(-px1, py1, -pz1),
(-px1, py1, pz1),
(-px2, py2, pz2),
(-px2, py2, -pz2),
(-bx, -by, -bz),
(-bx, -by, bz),
(-px1, -py1, pz1),
(-px1, -py1, -pz1),
(-px1, -py1, -pz1),
(-px1, -py1, pz1),
(-px2, -py2, pz2),
(-px2, -py2, -pz2)
]
# Create faces
F = [
(0, 1, 2, 3),
(4, 5, 6, 7),
(8, 9, 10, 11),
(12, 13, 14, 15)
]
# Create building blocks of polydata
sat = vtk.vtkPolyData()
points = vtk.vtkPoints()
polys = vtk.vtkCellArray()
scalars = vtk.vtkFloatArray()
# Load the point, cell and data attributes
for i in range(len(V)):
points.InsertPoint(i, V[i])
for i in range(len(F)):
polys.InsertNextCell(self.mkVtkIdList(F[i]))
for i in range(len(V)):
scalars.InsertTuple1(i, i)
# Assign the pieces to the vtkPolyData.
sat.SetPoints(points)
del points
sat.SetPolys(polys)
del polys
sat.GetPointData().SetScalars(scalars)
del scalars
# Mapper
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(sat)
mapper.ScalarVisibilityOff()
# Actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(0., 0., 0.8)
actor.GetProperty().SetAmbient(0.5)
actor.GetProperty().SetSpecular(.5)
actor.GetProperty().SetSpecularPower(10.0)
actor.GetProperty().SetDiffuse(0.2)
# Move to sat position
actor.SetPosition(0, 0, -self.SAT_PROPS["Alt"])
return actor
def create_earth(self, imtype, size):
"""Create tiles for Earth geometry"""
# Update properties for tile
tile_props = {
"Size": self.IMG_PROPS["Texture size"],
"Translate": (
-self.IMG_PROPS["Offset"][0],
-self.IMG_PROPS["Texture size"][1]/2,
0
)
}
# Texture for tile
texture = f"{cur_dir}/images/samp1_{size}.jpg"
# Create actors
actor = self.create_plane(tile_props, texture)
return actor
def create_plane(self, props, texture):
"""Create flat plane"""
# Pull and scale dimensions
SIZE = np.asarray(props["Size"])
POS = np.asarray(props["Translate"])
# Create texture reader
reader = vtk.vtkJPEGReader()
reader.SetFileName(texture)
# Create texture object
texture = vtk.vtkTexture()
texture.SetInputConnection(reader.GetOutputPort())
texture.InterpolateOn()
# Create plane model
plane = vtk.vtkPlaneSource()
plane.SetResolution(1, 1)
plane.SetPoint1(0, SIZE[1], 0)
plane.SetPoint2(SIZE[0], 0, 0)
# Translate to centre
transP = vtk.vtkTransform()
transP.Translate(
POS[0],
POS[1],
POS[2]
)
tpd = vtk.vtkTransformPolyDataFilter()
tpd.SetInputConnection(plane.GetOutputPort())
tpd.SetTransform(transP)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(tpd.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.SetTexture(texture)
actor.GetProperty().SetAmbient(1.0)
actor.GetProperty().SetSpecular(.5)
actor.GetProperty().SetSpecularPower(5.0)
actor.GetProperty().SetDiffuse(0.2)
return actor
def create_cam_fov(self, name):
"""Create FOV actor for camera"""
# Vertices of FOV
V = [
(0, 0, -self.SAT_PROPS["Alt"]),
tuple(self.CAM_PROPS[name]["Intercepts"][:, 0]),
tuple(self.CAM_PROPS[name]["Intercepts"][:, 1]),
tuple(self.CAM_PROPS[name]["Intercepts"][:, 2]),
tuple(self.CAM_PROPS[name]["Intercepts"][:, 3])
]
# Faces of FOV
F = [(0, 1, 2), (0, 2, 3), (0, 3, 4), (0, 4, 1)]
# Create building blocks of polydata
cam = vtk.vtkPolyData()
points = vtk.vtkPoints()
polys = vtk.vtkCellArray()
scalars = vtk.vtkFloatArray()
# Load the point, cell and data attributes
for i in range(5):
points.InsertPoint(i, V[i])
for i in range(4):
            polys.InsertNextCell(self.mkVtkIdList(F[i]))
for i in range(5):
scalars.InsertTuple1(i,i)
# Assign the pieces to the vtkPolyData.
cam.SetPoints(points)
del points
cam.SetPolys(polys)
del polys
cam.GetPointData().SetScalars(scalars)
del scalars
# Mapper
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(cam)
mapper.ScalarVisibilityOff()
# Actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(0.5, 1, 0.5)
actor.GetProperty().SetAmbient(0.5)
actor.GetProperty().SetOpacity(0.1)
return actor
def create_line(self, pos, colour):
"""Create line"""
# Absolute source position
pos_abs = np.array([0., 0., -self.SAT_PROPS["Alt"]]) + np.array(pos)*self.SAT_SCALE
# Create line
line = vtk.vtkLineSource()
line.SetPoint1(pos_abs)
line.SetPoint2(2*self.SAT_PROPS["Alt"], 0., -self.SAT_PROPS["Alt"])
# Mapper
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(line.GetOutputPort())
# Actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(colour)
actor.GetProperty().SetOpacity(0.5)
actor.GetProperty().SetLineWidth(4)
actor.SetOrigin(0., 0., -self.SAT_PROPS["Alt"])
return actor
def create_sphere(self, position):
"""Create sphere of specific size"""
# Create source
source = vtk.vtkSphereSource()
source.SetCenter(0, 0, 0)
source.SetRadius(1.e3)
source.SetPhiResolution(40)
source.SetThetaResolution(40)
# Mapper
mapper = vtk.vtkPolyDataMapper()
if vtk.VTK_MAJOR_VERSION <= 5:
mapper.SetInput(source.GetOutput())
else:
mapper.SetInputConnection(source.GetOutputPort())
# Actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(1, 0.5, 0.5)
actor.GetProperty().SetAmbient(0.5)
actor.GetProperty().SetOpacity(0.8)
actor.SetPosition(position)
# Return actor
return actor
def create_text(self, settings, viewport):
"""Create text actor for view labels"""
viewport = np.array(viewport)
viewport[[0, 2]] = self.WIN_H_SCALE*viewport[[0, 2]]
viewport[[1, 3]] = self.WIN_V_SCALE*viewport[[1, 3]]
viewport = list(viewport)
# Set defaults if not specified
defaults = {
"Size": 20,
"Anchor": "SW",
"X offset": 0.02,
"Y offset": 0.02,
"Font": "Montserrat",
"Colour": self.COLOUR_FONT
}
for key in defaults:
try:
settings[key]
except KeyError:
settings[key] = defaults[key]
# Position
margin = (
self.TEXT_SCALE*settings["X offset"]*(self.ANCHOR[settings["Anchor"]][0] - 1),
self.TEXT_SCALE*settings["Y offset"]*(self.ANCHOR[settings["Anchor"]][1] - 1)
)
posx = int((viewport[0] + 0.5*self.ANCHOR[settings["Anchor"]][0]*(viewport[2] - viewport[0]) - margin[0])*self.SCREEN_SIZE[0])
posy = int((viewport[1] + 0.5*self.ANCHOR[settings["Anchor"]][1]*(viewport[3] - viewport[1]) - margin[1])*self.SCREEN_SIZE[1])
# Properties
props = vtk.vtkTextProperty()
props.SetFontFamily(vtk.VTK_FONT_FILE)
if settings["Font"] == "Montserrat-SemiBold":
props.SetFontFile("./fonts/Montserrat-SemiBold.ttf")
elif settings["Font"] == "Consolas":
props.SetFontFile("./fonts/consola.ttf")
elif settings["Font"] is "7Segment":
props.SetFontFile("./fonts/digital-7 (mono).ttf")
else:
props.SetFontFile("./fonts/Montserrat.ttf")
props.SetFontSize(int(self.TEXT_SCALE*settings["Size"]))
props.SetColor(settings["Colour"])
props.SetJustification(self.ANCHOR[settings["Anchor"]][0])
props.SetVerticalJustification(self.ANCHOR[settings["Anchor"]][1])
# Create actor
actor = vtk.vtkTextActor()
actor.SetInput(settings["String"])
actor.SetDisplayPosition(posx, posy)
actor.SetTextProperty(props)
return actor
def execute(self, obj, event):
"""Execute timed event"""
# Reset clipping range
# self.cameras["Main"].SetClippingRange(1000, 3000e3)
# Simulation time
T = self.DATA["Time"][self.index]
# Visualisation time
Tvis = T*self.TIME_SCALE
# Modes
adcs_mode = self.DATA["ADCS mode names"][int(self.DATA["ADCS mode"][self.index])]
payload_mode = self.DATA["Payload mode names"][int(self.DATA["Payload mode"][self.index])]
# Update Earth position
self.actors["Earth"].SetPosition(
-self.DATA["Pos"][0, self.index],
0,
0
)
# Update target positions
for trgt, pos in zip(range(self.DATA["Target info"]["Num"]), self.DATA["Target info"]["Pos"]):
self.actors["T{}".format(trgt+1)].SetPosition(
-self.DATA["Pos"][0, self.index] + pos[0],
pos[1],
pos[2]
)
# Update sightline
att_des = tuple(np.array(self.DATA["Inputs"][:, self.index])*180/pi)
self.actors["Redline"].SetOrientation(att_des)
self.actors["Greenline"].SetOrientation(att_des)
self.actors["Blueline"].SetOrientation(att_des)
if payload_mode in ["Synchronise"]:
self.actors["Redline"].GetProperty().SetOpacity(0)
self.actors["Greenline"].GetProperty().SetOpacity(0.9)
self.actors["Blueline"].GetProperty().SetOpacity(0)
elif payload_mode in ["Authenticate"]:
self.actors["Redline"].GetProperty().SetOpacity(0)
self.actors["Greenline"].GetProperty().SetOpacity(0)
self.actors["Blueline"].GetProperty().SetOpacity(0.9)
elif payload_mode in ["Key delivery"]:
self.actors["Redline"].GetProperty().SetOpacity(0.9)
self.actors["Greenline"].GetProperty().SetOpacity(0)
self.actors["Blueline"].GetProperty().SetOpacity(0.9)
else:
self.actors["Redline"].GetProperty().SetOpacity(0)
self.actors["Greenline"].GetProperty().SetOpacity(0)
self.actors["Blueline"].GetProperty().SetOpacity(0)
# Update satellite attitude
att = tuple(np.array(self.DATA["Att"][:, self.index])*180/pi)
for key in ["Sat body", "Sat panels"]:
self.actors[key].SetOrientation(att)
# Update text actors
hh = self.init_time[0]
ss = int(self.init_time[2] + Tvis)
mm = self.init_time[1] + (ss // 60)
ss = ss % 60
hh = hh + (mm // 60)
mm = mm % 60
self.text["Main"]["Time"].SetInput(
"{:02d}:{:02d}:{:02d}".format(hh, mm, ss)
)
# Update render window interactor
self.iren = obj
self.iren.GetRenderWindow().Render()
# Increment index, loop if at end of data
if self.index < len(self.DATA["Time"]) - 1:
self.index += 1
else:
self.index = 0
def mkVtkIdList(self, it):
"""Makes a vtkIdList from a Python iterable"""
vil = vtk.vtkIdList()
for i in it:
vil.InsertNextId(int(i))
return vil
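# Camera-placement sketch (illustrative): scene_main rotates a [-cam_dist, 0, 0] offset
# through pitch and yaw before translating it to the focal point, e.g.
#
#   pitch, yaw, cam_dist = -65 * pi / 180, 2 * pi / 180, 5e3
#   Rp = np.array([[cos(pitch), 0, sin(pitch)], [0, 1, 0], [-sin(pitch), 0, cos(pitch)]])
#   Ry = np.array([[cos(yaw), -sin(yaw), 0], [sin(yaw), cos(yaw), 0], [0, 0, 1]])
#   offset = Ry @ Rp @ np.array([-cam_dist, 0.0, 0.0])  # camera offset from the focal point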
| Craft-Prospect/CubeSatVis | python/visualisation.py | visualisation.py | py | 24,867 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "platform.system",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "win32api.GetSystemMetrics",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "platform.system",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "Tkinter.Tk"... |
74291021627 | import math
import numpy as np
from scipy.spatial import ConvexHull
class LOS_guidance():
def __init__(self, params):
self.ship_max_speed = params['ship_max_speed']
self.ship_ax_vel_lim = params['ship_ax_vel_lim']
self.ship_lat_acc_pos_lim = params['ship_lat_acc_pos_lim']
self.ship_lat_acc_neg_lim = params['ship_lat_acc_neg_lim']
self.ship_ax_acc_lim = params['ship_ax_acc_lim']
self.T_max_thrust = params['T_max_thrust']
self.T_min_thrust = params['T_min_thrust']
def ship_los_guidance(self, ship_phys_status, target_pos, dt=1 / 60):
ship_position = ship_phys_status['pose'][0]
heading_in = ship_phys_status['pose'][1]
ship_speed_in = np.linalg.norm(ship_phys_status['velocity'][0])
ship_rot_spd_in = ship_phys_status['velocity'][1]
def angle_between_vector_and_angle(vector, angle):
# Calculate the angle between the vector and the x-axis
vector_angle = math.atan2(vector[1], vector[0])
# Calculate the difference between the vector angle and the given angle
angle_diff = vector_angle - angle
# Ensure that the angle difference is within the range (-pi, pi]
while angle_diff > math.pi:
angle_diff -= 2 * math.pi
while angle_diff <= -math.pi:
angle_diff += 2 * math.pi
# Return the absolute value of the angle difference
return angle_diff
ship_heading = heading_in
ship_rot_spd = ship_rot_spd_in
ship_speed = 0
distance_to_target = math.sqrt(
(target_pos[0] - ship_position[0]) ** 2 + (target_pos[1] - ship_position[1]) ** 2)
        # NaN guard: NaN fails every comparison, so only bad data trips this
        if not (-1 <= distance_to_target <= 10000):
            angle_variance = 0
target_angle = (target_pos - ship_position)
angle_variance = angle_between_vector_and_angle(target_angle, ship_heading)
rotational_mul = angle_variance
        if not (-10 <= angle_variance <= 10):  # NaN guard
            angle_variance = 0
        # Saturate the turn command at +-pi/2
        if angle_variance > math.pi / 2:
            rotational_mul = math.pi / 2
        elif angle_variance < -math.pi / 2:
            rotational_mul = -math.pi / 2
# rotational controller
if abs(angle_variance) > 0.01 and abs(ship_rot_spd) <= abs(self.ship_ax_vel_lim):
ship_rot_spd = -(rotational_mul / (math.pi / 2))*10
else:
ship_rot_spd = 0
# translational controller
if distance_to_target > 2:
ship_speed = self.ship_max_speed
elif distance_to_target >= 0.6:
ship_speed = self.ship_max_speed/3
elif distance_to_target >= 0.3:
ship_speed = self.ship_max_speed/8
else:
ship_speed = 0
ship_rot_spd = 0
ship_vel = ship_speed*np.array([math.cos(heading_in), math.sin(heading_in)])
cmd_vel = [ship_vel, ship_rot_spd]
#print("\rangle_variance, ship_rot_spd", np.degrees(heading_in), ship_rot_spd, end="")
#print("\rTLeft, TRight", rTLeft.TRight, end="")
return cmd_vel
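# Illustrative usage sketch (not part of the original file): LOS_guidance is a
# proportional heading controller plus a distance-staged speed schedule. The
# parameter values below are placeholders, not tuned values from this project.
def _demo_los_guidance():
    demo_params = {
        'ship_max_speed': 2.0,
        'ship_ax_vel_lim': 1.0,
        'ship_lat_acc_pos_lim': 0.5,
        'ship_lat_acc_neg_lim': -0.5,
        'ship_ax_acc_lim': 0.5,
        'T_max_thrust': 1.0,
        'T_min_thrust': -0.5,
    }
    los = LOS_guidance(demo_params)
    phys_status = {
        'pose': [np.array([0.0, 0.0]), 0.0],      # position [m], heading [rad]
        'velocity': [np.array([0.0, 0.0]), 0.0],  # linear velocity, yaw rate
    }
    # Returns [velocity vector, yaw-rate command] toward the target point
    return los.ship_los_guidance(phys_status, np.array([5.0, 5.0]), dt=1 / 60)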
class LOS_VO_guidance():
def __init__(self, params):
self.ship_max_speed = params['ship_max_speed']
self.ship_ax_vel_lim = params['ship_ax_vel_lim']
self.ship_lat_acc_pos_lim = params['ship_lat_acc_pos_lim']
self.ship_lat_acc_neg_lim = params['ship_lat_acc_neg_lim']
self.ship_ax_acc_lim = params['ship_ax_acc_lim']
self.T_max_thrust = params['T_max_thrust']
self.T_min_thrust = params['T_min_thrust']
self.detection_range = 10
self.spd_visual_multiplier = 2.5
self.los = LOS_guidance(params)
self.vo_polygons = []
#parameters
def ship_los_vo_guidance(self, phys_status, output_polygon, target_pos, sensor_data, dt=1 / 60):
# First, this receives a database of ships in the world. the database is a dictionary
# if there are no obstacles in range, it pursues the target using Line-of-sight guidance law
# if there are VO cones in sight, it tries to evade the obstacle by maintaining speed and turning.
# it prefers angles closer to the target.
# if no trajectory is valid, it tries to decelerate to half the maximum speed
# if there is still no trajectory, it tries to stop.
#setting up variables
self.phys_status = phys_status # updates phys status
self.output_polygon = output_polygon
self.sensor_data = sensor_data # get sensor_data
self.vo_lines = []
self.vo_circles = [] # position and radius
self.vo_cones = []
self.vo_polygons = []
ship_position = phys_status['pose'][0]
heading_in = phys_status['pose'][1]
ship_speed_in = np.linalg.norm(phys_status['velocity'][0])
ship_rot_spd_in = phys_status['velocity'][1]
ship_heading = heading_in
ship_rot_spd = ship_rot_spd_in
ship_speed = 0
        # receiving and processing the data
self.filtered_objects_angle = self.sensor_simulator() # process the data
self.vo_cone_generator() #generates VO cone
# target_information
distance_to_target = math.sqrt((target_pos[0] - ship_position[0]) ** 2 + (target_pos[1] - ship_position[1]) ** 2)
target_vector = (target_pos - ship_position)
target_angle = self.regularize_angle(math.atan2(target_vector[1], target_vector[0]))
# search for valid angle
target_speed = 2.0
self.angle_opacity = self.vo_theta_opacity(target_angle, target_speed)
angle_rad = math.radians(3*self.index_with_lowest_number(self.angle_opacity))
angle_variance = self.regularize_angle(angle_rad - ship_heading)
        # Back off the search speed in stages while the best admissible
        # heading still deviates too far from the current heading
        for deviation_limit, slower_speed in ((math.pi * (1 / 8), 1.5),
                                              (math.pi * (1.5 / 8), 1.0),
                                              (math.pi * (1 / 4), 0.5),
                                              (math.pi * (5 / 8), -0.5)):
            if abs(angle_variance) <= deviation_limit:
                break
            target_speed = slower_speed
            self.angle_opacity = self.vo_theta_opacity(target_angle, target_speed)
            angle_rad = math.radians(3 * self.index_with_lowest_number(self.angle_opacity))
            angle_variance = self.regularize_angle(angle_rad - ship_heading)
for theta in range(0,120):
point = np.array(
[self.spd_visual_multiplier*target_speed * math.cos(math.radians(theta*3)), self.spd_visual_multiplier*target_speed * math.sin(math.radians(theta*3))])
if self.angle_opacity[theta] < 1: #np.percentile(self.angle_opacity,10):
self.vo_circles.append([self.phys_status["pose"][0]+point,0.05, (0,255,50), 1])
        ship_xy = self.phys_status["pose"][0]
        self.vo_lines.append([
            ship_xy,
            [ship_xy[0] + self.spd_visual_multiplier * target_speed * math.cos(angle_rad),
             ship_xy[1] + self.spd_visual_multiplier * target_speed * math.sin(angle_rad)],
            (0, 255, 0),
        ])
rotational_mul = angle_variance
if target_speed < 0:
rotational_mul = - rotational_mul
        if not (-10 <= angle_variance <= 10):  # NaN guard
            angle_variance = 0
        if angle_variance > math.pi / 2:
            rotational_mul = math.pi / 2
        elif angle_variance < -math.pi / 2:
            rotational_mul = -math.pi / 2
# rotational controller
if abs(angle_variance) > 0.01 and abs(ship_rot_spd) <= abs(self.ship_ax_vel_lim):
ship_rot_spd = -(rotational_mul / (math.pi / 2))*10
else:
ship_rot_spd = 0
# translational controller
if distance_to_target > 2:
ship_speed = target_speed
elif distance_to_target >= 0.6:
ship_speed = target_speed/3
elif distance_to_target >= 0.3:
ship_speed = target_speed/8
else:
ship_speed = 0
ship_rot_spd = 0
ship_vel = ship_speed*np.array([math.cos(heading_in), math.sin(heading_in)])
cmd_vel = [ship_vel, ship_rot_spd]
return cmd_vel
def vo_cone_generator(self):
ship_pos = self.phys_status["pose"][0]
ship_vel = self.phys_status["velocity"][0]
ship_spd = abs(np.linalg.norm(self.phys_status["velocity"][0]))
#vessel velocity indicator
self.vo_lines.append([ship_pos,ship_pos+ship_vel*self.spd_visual_multiplier, (0, 0, 255)])
circle_color = (200, 200, 200, 255)
line_width = 1
circle = [ship_pos, self.detection_range, circle_color, line_width] # object circle position
self.vo_circles.append(circle) # circle representing collision distance of the object
        if self.filtered_objects_angle is not None:
for key, object in self.filtered_objects_angle.items():
object_pos = object[0].phys_status["pose"][0] # absolute object position
object_vel = object[0].phys_status["velocity"][0] # absolute object velocity
#object_radius = object[1] + object[2] # object VO_circle radius
circle_color = (200, 0, 0, 255)
line_width = 1
#circle = [object_pos, object_radius, circle_color, line_width] # object circle position
#self.vo_circles.append(circle) # circle representing collision distance of the object
pos_diff = object_pos - ship_pos
object_distance = object[3] # distance from the object to the circle
rel_spd = np.linalg.norm(object[0].phys_status["velocity"][0]-ship_vel)
if object_distance < 0.1: # for avoiding divide by zero error
object_distance = 0.1
start_rad = object[1]
end_rad = object[2]
object_velocity = object[0].phys_status["velocity"][0]
#self.vo_cones.append([ship_pos, [start_rad, end_rad], object_distance*math.cos(tangent_angle)])
self.vo_cones.append([ship_pos+object_velocity*self.spd_visual_multiplier, [start_rad, end_rad], object_distance])
#print(math.degrees(self.filtered_objects))
pass
def vo_theta_opacity(self, target_angle, spd_to_search):
        # Score each of the 120 candidate headings (3-degree bins) at the
        # given search speed; lower opacity means a safer heading
        angle_opacity = np.linspace(start=0, stop=120, num=120, endpoint=False)
for theta in range(0,120):
delta_angle = target_angle - math.radians(theta*3)
while delta_angle >= 1 * math.pi:
delta_angle = delta_angle - 2 * math.pi
while delta_angle <= -1 * math.pi:
delta_angle = delta_angle + 2 * math.pi
angle_diff_opacity = (abs(delta_angle/(math.pi)))*(30/80)
angle_opacity[theta] = angle_diff_opacity
for vo_cone in self.vo_cones:
for theta in range(0, 120):
                point = np.array([self.spd_visual_multiplier * spd_to_search * math.cos(math.radians(theta * 3)),
                                  self.spd_visual_multiplier * spd_to_search * math.sin(math.radians(theta * 3))])
opacity = self.vo_cone_collision_detector(point + self.phys_status["pose"][0], vo_cone)
if opacity > 1/5:
opacity = 100
if opacity <1/10:
opacity = 0
angle_opacity[theta] = angle_opacity[theta]+(opacity**2)*10
return angle_opacity
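    # Worked example of the scoring above (illustrative): with no VO cones, a
    # candidate heading pi rad away from the target gets the maximum base
    # score abs(pi / pi) * (30 / 80) = 0.375 and the heading straight at the
    # target scores 0, so index_with_lowest_number picks the target heading;
    # any heading whose velocity point falls inside a cone is pushed far above
    # that range by the (opacity ** 2) * 10 penalty.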
    def sensor_simulator_legacy(self):
        # Receives dict(self.ships_database, **self.obstacle_database), a
        # dictionary of objects (ships or obstacles), keeps only the objects
        # whose polygon lies inside the detection range, and returns
        # {key: [object, object_radius, self_radius]}.
ship_pose = self.phys_status["pose"]
ship_polygon = self.output_polygon
objects_database = self.sensor_data
origin_point = ship_pose[0]
if not(objects_database):
return
filtered_objects = {}
#print(objects_database)
for key, object in objects_database.items():
# calculate distance between origin and closest point of polygon
# print(key)
object_pose = object.phys_status["pose"]
object_vector = (object_pose[0] - ship_pose[0])
object_distance = (np.linalg.norm(object_vector))
#print(key, object_vector)
# check if polygon is within detection range
if object_distance <= self.detection_range:
# gets object polygonal information
poly_points = object.output_polygon
                # if the object has no polygon there is nothing to filter
                if poly_points is None:
                    return
#print(poly_points)
(max_angle, min_angle) = self.get_min_max_angles(poly_points)
FOV = (max_angle - min_angle)
while FOV > 1 * math.pi:
FOV = 2 * math.pi - FOV
while FOV <= 0:
FOV += 2 * math.pi
object_radius = (object_distance * math.tan(FOV / 2))
self_radius = (self.get_largest_inner_product(ship_polygon, object_vector))/2
filtered_objects[key] = [object, object_radius, self_radius ]
#print(" key, FOV object_radius self_radius : ", object_distance, math.degrees(FOV), object_radius, self_radius)
return filtered_objects
    def sensor_simulator(self):
        # Receives dict(self.ships_database, **self.obstacle_database), a
        # dictionary of objects (ships or obstacles), keeps only the objects
        # whose inflated polygon lies inside the detection range, and returns
        # {key: [object, start_angle, end_angle, closest_distance]}.
ship_pose = self.phys_status["pose"]
ship_polygon = self.output_polygon
objects_database = self.sensor_data
origin_point = ship_pose[0]
if not (objects_database):
return
filtered_objects_angle = {}
# print(objects_database)
for key, object in objects_database.items():
# calculate distance between origin and closest point of polygon
# print(key)
object_pose = object.phys_status["pose"]
object_vector = (object_pose[0] - ship_pose[0])
if object_pose[0][1] == ship_pose[0][1] and object_pose[0][0] == ship_pose[0][0]:
continue
object_distance = (np.linalg.norm(object_vector))
# print(key, object_vector)
# check if polygon is within detection range
if object_distance > self.detection_range*1.1:
continue
# print(poly_points)
N = self.inflate_obstacles(object)
if N == None:
continue
[max_angle_point, min_angle_point], [start_angle, end_angle], self.outer_polygon,closest_distance = N
self.vo_lines.append([self.phys_status["pose"][0], max_angle_point,(255, 153, 251)])
self.vo_lines.append([self.phys_status["pose"][0], min_angle_point,(255, 153, 251)])
if closest_distance <= self.detection_range:
filtered_objects_angle[key] = [object, start_angle, end_angle, closest_distance]
# print(" key, FOV object_radius self_radius : ", object_distance, math.degrees(FOV), object_radius, self_radius)
return filtered_objects_angle
def get_min_max_angles(self,polygon):
# Select the first point as the origin
origin = self.phys_status["pose"][0]
# Calculate the angle for each point relative to the origin
angles = []
for point in polygon:
x, y = point[0] - origin[0], point[1] - origin[1]
angle = math.atan2(y, x)
            while angle < 0:
                angle += 2 * math.pi
            while angle > 2 * math.pi:
                angle -= 2 * math.pi
angles.append(angle)
# Return the maximum and minimum angles
max_angle = max(angles)
min_angle = min(angles)
#print("\n",math.degrees(max_angle), math.degrees(min_angle),end="")
#print("\n", polygon, end="")
return (max_angle, min_angle)
def inflate_obstacles(self,object):
# Select the first point as the origin
origin = self.phys_status["pose"][0]
ship_polygon = self.output_polygon
        if object.output_polygon is None:
            return
# Calculate the angle for each point relative to the origin
object_vector = (object.phys_status["pose"][0] - self.phys_status["pose"][0])
avg_angle = math.atan2(object_vector[1],object_vector[0])
        while avg_angle < 0:
            avg_angle += 2 * math.pi
        while avg_angle > 2 * math.pi:
            avg_angle -= 2 * math.pi
#centers the ship polygon
ship_polygon_centered = []
for point in self.output_polygon:
ship_polygon_centered.append((point - self.phys_status["pose"][0])*2)
inflated_points = []
for point in object.output_polygon:
for point2 in ship_polygon_centered:
inflated_point = point + point2
inflated_points.append(inflated_point)
hull = ConvexHull(inflated_points)
outer_polygon = [inflated_points[i] for i in hull.vertices]
self.vo_polygons.append(outer_polygon)
angles = []
for point in outer_polygon:
x, y = point[0] - origin[0], point[1] - origin[1]
angle = math.atan2(y, x)
            while angle < 0:
                angle += 2 * math.pi
            while angle > 2 * math.pi:
                angle -= 2 * math.pi
            delta_angle = angle - avg_angle
            while delta_angle < -math.pi:
                delta_angle += 2 * math.pi
            while delta_angle > math.pi:
                delta_angle -= 2 * math.pi
angles.append(delta_angle)
# Return the maximum and minimum angles
max_angle = max(angles)
min_angle = min(angles)
max_angle_index = angles.index(max_angle)
min_angle_index = angles.index(min_angle)
        # Clamp the cone half-angles to +-3*pi/4 and pad them by pi/20
        max_angle = min(max_angle, math.pi * (3 / 4)) + math.pi / 20
        min_angle = max(min_angle, -math.pi * (3 / 4)) - math.pi / 20
max_angle_point = outer_polygon[max_angle_index]
min_angle_point = outer_polygon[min_angle_index]
closest_point = self.closest_point_to_polygon(origin, outer_polygon)
self.vo_circles.append([closest_point[0], 0.1, (236, 232, 26), 1])
if closest_point[1] == -1:
closest_distance = 1
else:
closest_distance = np.linalg.norm(origin - closest_point[0])
return ([max_angle_point,min_angle_point],[avg_angle+min_angle,avg_angle+max_angle],outer_polygon,closest_distance)
def get_largest_inner_product(self,polygon, B):
# Rotate B by pi/2
B_rotated = np.array([-B[1], B[0]])
# Normalize B_rotated
A = B_rotated / np.linalg.norm(B_rotated)
max_inner_product = -float('inf')
for i, point1 in enumerate(polygon):
for j, point2 in enumerate(polygon[i + 1:], i + 1):
# Calculate the line vector between point1 and point2
line_vector = np.array(point2) - np.array(point1)
# Calculate the inner product of line_vector and A
inner_product = np.dot(line_vector / np.linalg.norm(line_vector), A)
# Update the max inner product if applicable
if inner_product > max_inner_product:
max_inner_product = inner_product
# Return the largest inner product
return max_inner_product
def tangent_lines(self, circle, origin):
# Unpack circle coordinates and radius
x_c, y_c = circle[0]
r = circle[1]
# Unpack origin coordinates
x_o, y_o = origin
# Calculate the distance between the origin and the center of the circle
d = math.sqrt((x_c - x_o) ** 2 + (y_c - y_o) ** 2)
# Check if the origin is inside the circle
if d < r:
print("Error: origin is inside the circle.")
return None
# Calculate the angle between the origin and the center of the circle
theta = math.atan2(y_c - y_o, x_c - x_o)
# Calculate the distance from the origin to the tangent point
a = math.sqrt(d ** 2 - r ** 2)
# Calculate the angles of the tangent lines
alpha = math.asin(r / d)
beta = theta - alpha
gamma = theta + alpha
# Return the two angles of the tangent lines
return beta, gamma
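    # Worked example for tangent_lines (illustrative): for a unit circle
    # centred at (2, 0) viewed from the origin, d = 2, theta = 0 and
    # alpha = asin(1 / 2) = pi / 6, so the two tangent directions are -pi/6
    # and +pi/6, and each tangent point lies at distance a = sqrt(3) from the
    # origin.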
def vo_cone_collision_detector(self, point, cone):
"""
Determines whether a point is inside a circle section
"""
origin, [start_angle, end_angle], radius = cone
if radius < 0.5:
radius = 1
# Calculate angle of point relative to circle origin
point_angle = math.atan2(point[1] - origin[1], point[0] - origin[0])
start_angle = self.regularize_angle(start_angle)
end_angle = self.regularize_angle(end_angle)
point_angle = self.regularize_angle(point_angle)
        if end_angle >= start_angle:
            result = start_angle <= point_angle <= end_angle
            inside_angle = end_angle - start_angle
        else:  # the arc wraps past 0 rad
            result = point_angle >= start_angle or point_angle <= end_angle
            inside_angle = end_angle + 2 * math.pi - start_angle
        if result:
            time_till_collision = radius / (math.dist(point, origin) / 5)
            if radius < 4:
                return 1
            if inside_angle > math.pi:
                return 1
            return 1 / time_till_collision
        return 0
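    # Worked example (illustrative): a cone with start_angle = -pi/4,
    # end_angle = +pi/4 and radius 10, evaluated for a point 5 m from the
    # cone origin on a bearing inside the arc, gives
    # time_till_collision = 10 / (5 / 5) = 10 and hence an opacity of 0.1.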
def angle_between_vector_and_angle(self, vector, angle):
# Calculate the angle between the vector and the x-axis
vector_angle = math.atan2(vector[1], vector[0])
# Calculate the difference between the vector angle and the given angle
angle_diff = vector_angle - angle
# Ensure that the angle difference is within the range (-pi, pi]
angle_diff = self.regularize_angle(angle_diff)
# Return the absolute value of the angle difference
return angle_diff
def index_with_lowest_number(self, numbers):
"""
Returns the index of the lowest number in a list
"""
lowest_index = 0
for i in range(1, len(numbers)):
if numbers[i] < numbers[lowest_index]:
lowest_index = i
return lowest_index
def regularize_angle(self,angle):
while angle > math.pi:
angle -= 2 * math.pi
while angle <= -math.pi:
angle += 2 * math.pi
return angle
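    # e.g. regularize_angle(3 * math.pi) and regularize_angle(-math.pi) both
    # return pi: results always land in the half-open interval (-pi, pi].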
def point_polygon_distance(self, point, polygon):
if isinstance(point, np.ndarray):
point = tuple(point)
polygon_vertices = polygon
# Create a list of tuples containing the x,y coordinates of the polygon vertices
polygon = [(x, y) for (x, y) in polygon_vertices]
# Find the closest point on the polygon boundary to the target point
closest_point = None
min_distance = float('inf')
for i in range(len(polygon)):
x1, y1 = polygon[i]
x2, y2 = polygon[(i + 1) % len(polygon)]
dx = x2 - x1
dy = y2 - y1
            dot = ((point[0] - x1) * dx + (point[1] - y1) * dy) / (dx ** 2 + dy ** 2)
            dot = max(0.0, min(1.0, dot))  # clamp to the segment, not the infinite line
            closest_x = float(x1) + dot * dx
            closest_y = float(y1) + dot * dy
            distance = math.sqrt((point[0] - closest_x) ** 2 + (point[1] - closest_y) ** 2)
            if distance < min_distance:
                min_distance = distance
                closest_point = (closest_x, closest_y)
        # If the point is inside the polygon, treat the distance as zero
        if self.is_inside_polygon(point, polygon_vertices):
            return 0
else:
return min_distance
def closest_point_to_polygon(self, point, polygon_vertices):
# Convert numpy array to tuple if necessary
if isinstance(point, np.ndarray):
point = tuple(point)
# Find the closest point on the polygon boundary to the target point
closest_point = None
min_distance = float('inf')
polygon_vertices2 = self.subdivide_polygon(polygon_vertices)
for i in range(len(polygon_vertices2)):
(closest_x, closest_y) = polygon_vertices2[i]
distance = np.sqrt((point[0] - closest_x) ** 2 + (point[1] - closest_y) ** 2)
if distance < min_distance:
min_distance = distance
closest_point = np.array([closest_x, closest_y])
if self.is_inside_polygon(point, polygon_vertices):
return [closest_point, -1]
else:
return [closest_point, 1]
def is_inside_polygon(self, point, polygon_vertices):
# Create a list of tuples containing the x,y coordinates of the polygon vertices
polygon = [(x, y) for x, y in polygon_vertices]
# Use the winding number algorithm to check if the point is inside the polygon
wn = 0
for i in range(len(polygon)):
x1, y1 = polygon[i]
x2, y2 = polygon[(i + 1) % len(polygon)]
if y1 <= point[1]:
if y2 > point[1]:
if (point[0] - x1) * (y2 - y1) > (x2 - x1) * (point[1] - y1):
wn += 1
else:
if y2 <= point[1]:
if (point[0] - x1) * (y2 - y1) < (x2 - x1) * (point[1] - y1):
wn -= 1
return wn != 0
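    # Quick check (illustrative): for the unit square
    # [(0, 0), (1, 0), (1, 1), (0, 1)], the point (0.5, 0.5) accumulates a
    # nonzero winding number (inside) while (2.0, 0.5) winds back to zero
    # (outside).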
def subdivide_polygon(self, polygon, n=20):
"""Subdivides a polygon and divides each edge into n segments."""
new_polygon = []
# Iterate over the edges of the polygon
for i in range(len(polygon)):
# Add the current vertex to the new polygon
new_polygon.append(polygon[i])
# Calculate the difference vector between the current and next vertex
diff = [polygon[(i + 1) % len(polygon)][j] - polygon[i][j] for j in range(len(polygon[0]))]
# Calculate the size of each segment along the current edge
segment_size = [diff[j] / n for j in range(len(diff))]
# Iterate over the segments of the current edge
for j in range(1, n):
# Calculate the coordinates of the new vertex
x_new = polygon[i][0] + j * segment_size[0]
y_new = polygon[i][1] + j * segment_size[1]
new_vertex = [x_new, y_new]
# Add the new vertex to the new polygon
new_polygon.append(new_vertex)
return new_polygon
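# Illustrative sketch (not part of the original file) of the inflation idea
# used in LOS_VO_guidance.inflate_obstacles: summing every obstacle vertex
# with every centred ship vertex and taking the convex hull approximates the
# Minkowski sum, i.e. the configuration-space obstacle. Shapes are made up.
def _demo_minkowski_inflation():
    obstacle = np.array([[4.0, 0.0], [5.0, 0.0], [5.0, 1.0], [4.0, 1.0]])
    ship = np.array([[-0.5, -0.25], [0.5, -0.25], [0.5, 0.25], [-0.5, 0.25]])
    inflated = np.array([p + q for p in obstacle for q in ship])
    hull = ConvexHull(inflated)
    return inflated[hull.vertices]  # vertices of the inflated obstacle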
| spacedoge2320/Ship-OA-sim | Ship-OA-sim/Guidance_algorithms.py | Guidance_algorithms.py | py | 28,641 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.linalg.norm",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "math.atan2",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_nu... |
29147674060 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import requests
import bs4
# In[2]:
'http://books.toscrape.com/catalogue/page-2.html'
# In[3]:
'http://books.toscrape.com/catalogue/page-3.html'
# In[4]:
base_url = 'http://books.toscrape.com/catalogue/page-{}.html'
# In[5]:
base_url.format('20')
# In[9]:
page_num = 18
# In[7]:
'http://books.toscrape.com/catalogue/page-{page_num}.html'
# In[10]:
base_url.format(page_num)
# In[12]:
res = requests.get(base_url.format(1))
# In[13]:
soup = bs4.BeautifulSoup(res.text,'lxml')
# In[14]:
soup
# In[15]:
soup.select(".product_pod")
# In[17]:
len(soup.select(".product_pod"))
# In[18]:
products = soup.select(".product_pod")
# In[19]:
example = products[0]
# In[20]:
example
# In[21]:
'star-rating Three' in str(example)
# In[22]:
example.select(".star-rating.Two")
# In[23]:
example.select('a')
# In[24]:
example.select('a')[1]
# In[25]:
example.select('a')[1]['title']
# In[28]:
two_star_titles = []
for n in range(1,51):
scrape_url = base_url.format(n)
res = requests.get(scrape_url)
soup = bs4.BeautifulSoup(res.text, 'lxml')
books = soup.select(".product_pod")
for book in books:
if len(book.select('.star-rating.Two')) != 0:
book_title = book.select('a')[1]['title']
two_star_titles.append(book_title)
# In[29]:
two_star_titles
# In[ ]:
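# Illustrative extension (not part of the original notebook): the same loop
# generalised to tally every star rating. The class names mirror the ones
# inspected above; the run depends on books.toscrape.com being reachable.
rating_counts = {}
for n in range(1, 51):
    res = requests.get(base_url.format(n), timeout=10)
    soup = bs4.BeautifulSoup(res.text, 'lxml')
    for book in soup.select(".product_pod"):
        rating = book.select_one("p.star-rating")["class"][1]  # e.g. 'Two'
        rating_counts[rating] = rating_counts.get(rating, 0) + 1
rating_counts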
| maylinaung/python-learning | web_scrabing_book_example.py | web_scrabing_book_example.py | py | 1,427 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
... |
13859758386 | import bs4
import requests
from bs4 import BeautifulSoup
SUPPORTED_LANGUAGES = ("EN", "__test__")
def scrape_oxford_learners_dictionary(word: str) -> list[str]:
def url(i: int) -> str:
return (
f"https://www.oxfordlearnersdictionaries.com"
f"/definition/english/{word}_{i}"
)
# The website filters out requests without a proper User-Agent
headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X x.y; rv:42.0) "
"Gecko/20100101 Firefox/42.0",
}
list_of_definitions = []
for i in range(1, 5, 1):
response = requests.get(url(i), headers=headers)
if response.status_code != 200:
break
page = response.text
soup = BeautifulSoup(page, "html.parser")
find_pos = soup.find("span", class_="pos")
if isinstance(find_pos, bs4.Tag):
pos = find_pos.text
else:
pos = "-"
find_senses = soup.find("ol", class_="senses_multiple")
if isinstance(find_senses, bs4.Tag):
list_of_senses = find_senses.find_all(
"li", class_="sense", recursive=False
)
else:
find_senses = soup.find("ol", class_="sense_single")
if isinstance(find_senses, bs4.Tag):
list_of_senses = find_senses.find_all(
"li", class_="sense", recursive=False
)
else:
break
for sense in list_of_senses:
definition = sense.find("span", class_="def")
list_of_definitions.append(f"({pos}) " + definition.text)
return list_of_definitions
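# Illustrative usage sketch (not part of the original file): a live network
# call against the real site, so the output depends on
# oxfordlearnersdictionaries.com being reachable and keeping its markup.
if __name__ == "__main__":
    for definition in scrape_oxford_learners_dictionary("record"):
        print(definition)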
| pavelkurach/vocab-builder | src/dict_scrapers.py | dict_scrapers.py | py | 1,687 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "bs4.Tag",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "bs4.Tag",
"line_numbe... |