index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
76,297 | iskyd/babs | refs/heads/master | /babs/note_list.py | from abc import ABC
from babs import Note
from babs.exceptions import NoteListException
class NoteList(ABC):
    """
    Abstract base class for a collection of Note objects.

    Subclasses (e.g. Chord, Scale) share validation, comparison and
    note add/remove behavior.
    """

    # Octave strategies accepted by get_notes_from_root().
    OCTAVE_TYPE_ROOT = 'root'            # every note copies the root's octave
    OCTAVE_TYPE_FROM_ROOT = 'from_root'  # octave grows with distance from root

    def __init__(self, *notes, **kwargs):
        """
        :param notes: list of notes
        :param kwargs: options [strict, invalid_exception]
        :raises invalid_exception: if strict is True and the list is invalid
        """
        self._notes = list(notes)
        self.strict = kwargs.pop('strict', True)
        self.invalid_exception = kwargs.pop('invalid_exception', NoteListException)

        if self.strict is True and self.is_valid() is False:
            raise self.invalid_exception('Invalid {}.'.format(type(self).__name__))

    def __eq__(self, other):
        # Return NotImplemented for non-NoteList operands so Python falls
        # back to its default comparison, instead of raising AttributeError
        # on a missing `notes` attribute as the old implementation did.
        if not isinstance(other, NoteList):
            return NotImplemented
        return self._notes == other.notes

    def __ne__(self, other):
        if not isinstance(other, NoteList):
            return NotImplemented
        return self._notes != other.notes

    def __str__(self):
        return ','.join(str(n) for n in self._notes)

    def __repr__(self):
        return '{}({}, strict={})'.format(
            type(self).__name__,
            ','.join(repr(n) for n in self._notes),
            self.strict)

    @property
    def notes(self):
        """The underlying list of Note objects."""
        return self._notes

    def is_valid(self):
        """
        Check if list is valid: non-empty and containing only Note objects.

        :return: bool
        """
        if len(self._notes) < 1:
            return False
        return all(isinstance(n, Note) for n in self._notes)

    def add_note(self, note, strict=True):
        """
        Add note to list

        :param note: note to be added in list
        :param strict: raise the list's invalid_exception if note is not valid
        :return: None
        """
        if strict and not isinstance(note, Note):
            raise self.invalid_exception('Invalid note given.')

        self._notes.append(note)

    def remove_note(self, note=None, freq=None, name=None, octave=None, strict=True):
        """
        Remove note by note, freq, name or octave from list.

        Only the first given criterion (in the order note, freq, name,
        octave) is applied; every matching note is removed.

        :param note: note to remove
        :param freq: frequency to remove
        :param name: name to remove
        :param octave: octave to remove
        :param strict: raise the list's invalid_exception if the list would
            be invalid after the removal (the original list is restored)
        :return: None
        """
        notes = self._notes

        indices = []
        if note is not None:
            indices = [key for key, n in enumerate(self._notes) if n == note]
        elif freq is not None:
            indices = [key for key, n in enumerate(self._notes) if n.freq == freq]
        elif name is not None:
            indices = [key for key, n in enumerate(self._notes) if n.name == name]
        elif octave is not None:
            indices = [key for key, n in enumerate(self._notes) if n.octave == octave]

        if len(indices) > 0:
            self._notes = [n for key, n in enumerate(self._notes) if key not in indices]

        if strict is True and not self.is_valid():
            # Roll back to the pre-removal list before raising.
            self._notes = notes
            raise self.invalid_exception('Invalid {}.'.format(type(self).__name__))

    @classmethod
    def get_notes_from_root(cls, root, note_list_type=None, octave=None, alt=Note.SHARP):
        """
        Build the notes of a chord or scale from a root note.

        :param root: root note
        :param note_list_type: a list of note distances (in semitones) from
            the root note
        :param octave: octave of the generated notes; an int fixes the
            octave, OCTAVE_TYPE_ROOT (the default) copies the root's octave,
            OCTAVE_TYPE_FROM_ROOT raises the octave with the distance from
            the root, and a callable receives (root octave, index, distance)
        :param alt: note alteration 'sharp' or 'flat'
        :type root: Note
        :type note_list_type: list
        :type octave: Union[int, string, callable]
        :type alt: string
        :return: list of Note objects, root first
        :raises NoteListException: if note_list_type is None
        """
        if note_list_type is None:
            raise NoteListException('Invalid note list type')

        if octave is None:
            octave = cls.OCTAVE_TYPE_ROOT

        root_idx = root.get_note_index()

        def get_octave(i, distance):
            # Resolve the octave of the i-th generated note; `distance` here
            # is the absolute note index (root index + semitone distance).
            if callable(octave):
                return octave(root.octave, i, distance)
            elif isinstance(octave, int):
                return octave
            elif octave == cls.OCTAVE_TYPE_ROOT:
                return root.octave
            elif octave == cls.OCTAVE_TYPE_FROM_ROOT:
                return root.octave + int(distance / len(Note.NOTES))
            else:
                return Note.A_DEFAULT_OCTAVE

        notes = [Note(
            name=Note.get_note_name_by_index(root_idx + distance, alt=alt),
            octave=get_octave(i, root_idx + distance),
            alt=alt
        ) for i, distance in enumerate(note_list_type)]

        notes.insert(0, root)

        return notes
| {"/babs/__init__.py": ["/babs/note.py", "/babs/rest.py", "/babs/note_list.py", "/babs/chord.py", "/babs/scale.py"], "/tests/test_rest.py": ["/babs/__init__.py"], "/tests/test_scale.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/tests/test_note.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/tests/test_note_list.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/exceptions/scale_exception.py": ["/babs/exceptions/__init__.py"], "/tests/test_chord.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/note.py": ["/babs/exceptions/__init__.py"], "/babs/chord.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/note_list.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/exceptions/chord_exception.py": ["/babs/exceptions/__init__.py"], "/babs/exceptions/__init__.py": ["/babs/exceptions/note_list_exception.py", "/babs/exceptions/chord_exception.py", "/babs/exceptions/scale_exception.py"], "/babs/scale.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"]} |
76,298 | iskyd/babs | refs/heads/master | /babs/exceptions/chord_exception.py | from babs.exceptions import NoteException
class ChordException(NoteException):
    """Exception type for chord-related errors (subclass of NoteException)."""
    pass
| {"/babs/__init__.py": ["/babs/note.py", "/babs/rest.py", "/babs/note_list.py", "/babs/chord.py", "/babs/scale.py"], "/tests/test_rest.py": ["/babs/__init__.py"], "/tests/test_scale.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/tests/test_note.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/tests/test_note_list.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/exceptions/scale_exception.py": ["/babs/exceptions/__init__.py"], "/tests/test_chord.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/note.py": ["/babs/exceptions/__init__.py"], "/babs/chord.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/note_list.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/exceptions/chord_exception.py": ["/babs/exceptions/__init__.py"], "/babs/exceptions/__init__.py": ["/babs/exceptions/note_list_exception.py", "/babs/exceptions/chord_exception.py", "/babs/exceptions/scale_exception.py"], "/babs/scale.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"]} |
76,299 | iskyd/babs | refs/heads/master | /babs/exceptions/__init__.py | from .note_exception import NoteException
from .note_list_exception import NoteListException
from .chord_exception import ChordException
from .scale_exception import ScaleException | {"/babs/__init__.py": ["/babs/note.py", "/babs/rest.py", "/babs/note_list.py", "/babs/chord.py", "/babs/scale.py"], "/tests/test_rest.py": ["/babs/__init__.py"], "/tests/test_scale.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/tests/test_note.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/tests/test_note_list.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/exceptions/scale_exception.py": ["/babs/exceptions/__init__.py"], "/tests/test_chord.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/note.py": ["/babs/exceptions/__init__.py"], "/babs/chord.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/note_list.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/exceptions/chord_exception.py": ["/babs/exceptions/__init__.py"], "/babs/exceptions/__init__.py": ["/babs/exceptions/note_list_exception.py", "/babs/exceptions/chord_exception.py", "/babs/exceptions/scale_exception.py"], "/babs/scale.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"]} |
76,300 | iskyd/babs | refs/heads/master | /babs/scale.py | from babs import Note, NoteList
from babs.exceptions import ScaleException
class Scale(NoteList):
    """
    Set of musical notes ordered by fundamental frequency or pitch
    """

    # Sort directions for the note list.
    ASCENDING_SCALE_TYPE='asc'
    DESCENDING_SCALE_TYPE='desc'

    # Scale types, each expressed as the list of semitone distances of the
    # scale's notes from the root note (the root itself is implied).
    # NOTE(review): 'PHRIGIAN' and 'LIDYAN' look like misspellings of
    # 'Phrygian' and 'Lydian'; renaming would break callers, so they stay.
    MAJOR_TYPE = [2, 4, 5, 7, 9, 11]
    MINOR_TYPE = [2, 3, 5, 7, 8, 10]
    IONIAN_TYPE = MAJOR_TYPE
    DORIAN_TYPE = [2, 3, 5, 7, 9, 10]
    PHRIGIAN_TYPE = [1, 3, 5, 7, 8, 10]
    LIDYAN_TYPE = [2, 4, 6, 7, 9, 11]
    DOMINANT_TYPE = [2, 4, 5, 7, 9, 10]
    AEOLIAN_TYPE = MINOR_TYPE
    LOCRIAN_TYPE = [1, 3, 5, 6, 8, 10]
    PENTATONIC_TYPE = [2, 4, 7, 9]
    PENTATONIC_MINOR_TYPE = [3, 5, 7, 10]
    BLUES_TYPE = [2, 3, 4, 7, 9]
    BLUES_MINOR_TYPE = [3, 5, 6, 7, 10]
    MELODIC_MINOR_TYPE = [2, 3, 5, 7, 9, 11]
    HARMONIC_MINOR_TYPE = [2, 3, 5, 7, 8, 11]
    HARMONIC_MAJOR_TYPE = [2, 4, 5, 7, 8, 11]
def __init__(self, *notes, **kwargs):
    """
    :param notes: list of notes
    :param kwargs: options [strict, order]
    """
    self._order_type = kwargs.pop('order', self.ASCENDING_SCALE_TYPE)
    strict = kwargs.pop('strict', True)
    super().__init__(*notes, strict=strict, invalid_exception=ScaleException)
    # Drop duplicate notes, then restore the configured ordering.
    self._notes = list(set(self._notes))
    self._order()
def _order(self):
    """Sort the notes in place by frequency, per the configured direction."""
    if not self.is_valid():
        return
    descending = self._order_type != self.ASCENDING_SCALE_TYPE
    self._notes.sort(key=lambda note: note.freq, reverse=descending)
def add_note(self, note, strict=True):
    """
    Add note to list

    :param note: note to be added in list
    :param strict: raise ScaleException if note is not valid or is already
        in the scale (duplicates are never added)
    :return: None
    """
    if note in self._notes:
        if strict is True:
            # Fixed message typo: 'alredy' -> 'already'.
            raise ScaleException('Note {} is already in Scale.'.format(str(note)))
        return

    super().add_note(note=note, strict=strict)
    self._order()
def remove_note(self, note=None, freq=None, name=None, octave=None, strict=True):
    """
    Remove note by note, freq, name or octave from list, then restore
    the scale's frequency ordering.

    :param note: note to remove
    :param freq: frequency to remove
    :param name: name to remove
    :param octave: octave to remove
    :param strict: raise ScaleException if note is not valid, not found or if scale will be invalid after remove
    :return: None
    """
    super().remove_note(note=note, freq=freq, name=name, octave=octave, strict=strict)
    self._order()
@classmethod
def create_from_root(cls, root, scale_type=None, octave=NoteList.OCTAVE_TYPE_ROOT, alt=Note.SHARP, order=None, strict=True):
    """
    Build a Scale from a root note and a list of semitone distances.

    :param root: root note
    :param scale_type: a list of note distances from the root note
        (defaults to MAJOR_TYPE)
    :param octave: octave of notes in the scale
    :param alt: note alteration 'sharp' or 'flat'
    :param order: ASCENDING_SCALE_TYPE (default) or DESCENDING_SCALE_TYPE
    :param strict: raise ScaleException if the resulting scale is invalid
    :type root: Note
    :type scale_type: list
    :type octave: Union[int, string, callable]
    :type alt: string
    :return: Scale
    """
    if scale_type is None:
        scale_type = cls.MAJOR_TYPE

    if order is None:
        order = Scale.ASCENDING_SCALE_TYPE

    notes = NoteList.get_notes_from_root(root=root, note_list_type=scale_type, octave=octave, alt=alt)

    return cls(
        *notes,
        order=order,
        strict=strict
) | {"/babs/__init__.py": ["/babs/note.py", "/babs/rest.py", "/babs/note_list.py", "/babs/chord.py", "/babs/scale.py"], "/tests/test_rest.py": ["/babs/__init__.py"], "/tests/test_scale.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/tests/test_note.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/tests/test_note_list.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/exceptions/scale_exception.py": ["/babs/exceptions/__init__.py"], "/tests/test_chord.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/note.py": ["/babs/exceptions/__init__.py"], "/babs/chord.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/note_list.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"], "/babs/exceptions/chord_exception.py": ["/babs/exceptions/__init__.py"], "/babs/exceptions/__init__.py": ["/babs/exceptions/note_list_exception.py", "/babs/exceptions/chord_exception.py", "/babs/exceptions/scale_exception.py"], "/babs/scale.py": ["/babs/__init__.py", "/babs/exceptions/__init__.py"]} |
76,332 | HaroldMills/Vesper-Old-Bird-Detector-Eval | refs/heads/master | /evaluate_old_bird_detectors.py | """
Plots precision vs. recall curves for the Vesper reimplementations of
the Old Bird Tseep and Thrush detectors run on the BirdVox-full-night
recordings.
This script plots curves for the detectors run both with and without the
post-processing steps performed by the Old Bird detectors, including the
merging of overlapping clips and the suppression of frequent clips.
The inputs required by this script are:
1. The files "Old Bird Clips (no post).csv" and
"Old Bird Clips (with post).csv" produced by the run_old_bird_detectors
script. The directory containing these files is specified by
eval.utils.WORKING_DIR_PATH.
2. The BirdVox-full-night CSV annotation files, as distributed with the
BirdVox-full-night dataset. The directory containing these files is
specified by eval.utils.ANNOTATIONS_DIR_PATH.
The outputs produced by this script are:
1. The files "Old Bird Detector Precision vs. Recall (no post).pdf"
and "Old Bird Detector Precision vs. Recall (with post).pdf", containing
plots of the precision vs. recall curves. The directory to which these
files are written is specified by eval.utils.WORKING_DIR_PATH.
"""
from collections import defaultdict
import csv
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.ticker import MultipleLocator
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import eval.utils as utils
def main():
    """Evaluate the detectors without, then with, Old Bird post-processing."""
    for post_enabled in (False, True):
        evaluate_detectors(post_enabled)
def evaluate_detectors(post_enabled):
    """
    Evaluate the Old Bird detectors for one post-processing setting:
    read detected clips, count matches against ground-truth calls,
    print summary tables, and plot precision vs. recall.

    :param post_enabled: whether the detectors were run with the Old Bird
        post-processing steps enabled
    """
    old_bird_clips = get_old_bird_clips(post_enabled)
    show_old_bird_clip_counts(old_bird_clips)
    ground_truth_call_centers = get_ground_truth_call_centers()
    show_ground_truth_call_counts(ground_truth_call_centers)
    rows = count_old_bird_calls(old_bird_clips, ground_truth_call_centers)
    raw_df = create_raw_df(rows)
    separated_df = create_separated_detectors_df(raw_df)
    merged_df = create_merged_detectors_df(separated_df)
    add_precision_recall_f1(raw_df)
    add_precision_recall_f1(separated_df)
    add_precision_recall_f1(merged_df)
    print(raw_df.to_csv())
    print(separated_df.to_csv())
    print(merged_df.to_csv())
    plot_precision_vs_recall(post_enabled, separated_df, merged_df)
def get_old_bird_clips(post_enabled):
    """
    Read the detector clips CSV and group clips by key.

    :param post_enabled: selects the "with post"/"no post" clips file
    :return: dict mapping (detector name, unit number, threshold) to a
        list of (start index, length) pairs
    """
    clips = defaultdict(list)
    csv_path = utils.get_old_bird_clips_file_path(post_enabled)
    with open(csv_path) as csv_file:
        rows = csv.reader(csv_file)
        next(rows)  # skip header row
        for row in rows:
            clip_key = (row[0], int(row[1]), float(row[2]))
            clips[clip_key].append((int(row[3]), int(row[4])))
    return clips
def show_old_bird_clip_counts(clips):
    """Print the number of clips per (detector, unit, threshold) key."""
    print('Old Bird clip counts:')
    for key in sorted(clips.keys()):
        print(' ', key, len(clips[key]))
def get_ground_truth_call_centers():
    """
    Read the BirdVox-full-night annotation CSV files.

    :return: dict mapping (call type, unit number) to a sorted list of
        call center sample indices
    """
    centers = defaultdict(list)
    for unit_num in utils.UNIT_NUMS:
        file_path = utils.get_annotations_file_path(unit_num)
        with open(file_path) as file_:
            reader = csv.reader(file_)
            # Skip header.
            next(reader)
            for row in reader:
                time = float(row[0])   # call center time, in seconds
                index = utils.seconds_to_samples(time)
                freq = int(row[1])     # call center frequency, in hertz
                call_type = get_call_type(freq)
                key = (call_type, unit_num)
                centers[key].append(index)
    # Make sure center index lists are sorted.
    for indices in centers.values():
        indices.sort()
    return centers
def get_call_type(freq):
    """Classify a call as 'Tseep' or 'Thrush' by its center frequency."""
    if freq >= utils.FREQ_THRESHOLD:
        return 'Tseep'
    return 'Thrush'
def show_ground_truth_call_counts(call_centers):
    """Print the number of ground-truth calls per (call type, unit) key."""
    print('Ground truth call counts:')
    for key in sorted(call_centers.keys()):
        print(' ', key, len(call_centers[key]))
def count_old_bird_calls(old_bird_clips, ground_truth_call_center_indices):
    """
    Build one summary row per (detector, unit, threshold) key.

    Each row lists the detector name, unit number, threshold, ground
    truth call count, matched (detected) call count, and clip count.
    """
    rows = []
    for key, clips in old_bird_clips.items():
        detector_name, unit_num, threshold = key
        centers = ground_truth_call_center_indices[(detector_name, unit_num)]
        window = utils.OLD_BIRD_CLIP_CALL_CENTER_WINDOWS[detector_name]
        matches = match_clips_with_calls(clips, centers, window)
        rows.append([
            detector_name, unit_num, threshold, len(centers),
            len(matches), len(clips)])
    return rows
def match_clips_with_calls(clips, call_center_indices, window):
    """
    Match detector clips to ground-truth calls with one merge-style pass.

    A clip matches a call when the clip's call-center window (see
    get_clip_window) contains the call's center index. Each clip and each
    call participates in at most one match. Assumes both `clips` and
    `call_center_indices` are in increasing positional order — callers
    sort them before this is invoked.

    :param clips: list of (start index, length) pairs
    :param call_center_indices: sorted list of call center sample indices
    :param window: (start offset, length) window spec for get_clip_window
    :return: list of (clip index, call index) pairs
    """
    clip_windows = [get_clip_window(clip, window) for clip in clips]
    clip_count = len(clips)
    call_count = len(call_center_indices)
    i = 0
    j = 0
    matches = []
    while i != clip_count and j != call_count:
        window_start_index, window_end_index = clip_windows[i]
        call_center_index = call_center_indices[j]
        if window_end_index <= call_center_index:
            # clip window i precedes call center j
            i += 1
        elif window_start_index > call_center_index:
            # clip window i follows call center j
            j += 1
        else:
            # clip window i includes call center j
            matches.append((i, j))
            i += 1
            j += 1
    return matches
def get_clip_window(clip, window):
    """
    Compute the call-center window of a clip, clamped to the clip's end.

    :param clip: (start index, length) pair, in samples
    :param window: (start offset, length) pair relative to the clip start
    :return: (window start index, window end index) pair
    """
    clip_start, clip_length = clip
    clip_end = clip_start + clip_length
    offset, window_length = window
    # Both window edges are clamped so the window never extends past the clip.
    window_start = min(clip_start + offset, clip_end)
    window_end = min(window_start + window_length, clip_end)
    return (window_start, window_end)
def create_raw_df(rows):
    """Build the per-(detector, unit, threshold) counts DataFrame."""
    return pd.DataFrame(
        rows,
        columns=[
            'Detector', 'Unit', 'Threshold', 'Ground Truth Calls',
            'Old Bird Calls', 'Old Bird Clips'])
def create_separated_detectors_df(df):
    """
    Sum counts over units, leaving one row per (detector, threshold).

    :param df: raw counts DataFrame from create_raw_df
    :return: aggregated DataFrame without the 'Unit' column
    """
    df = df.drop(columns=['Unit'])
    grouped = df.groupby(['Detector', 'Threshold'], as_index=False)
    # 'sum' instead of np.sum: passing NumPy aggregation functions to
    # GroupBy.aggregate is deprecated in recent pandas versions.
    return grouped.aggregate('sum')
def create_merged_detectors_df(df):
    """
    Sum counts over detectors, leaving one row per threshold.

    :param df: separated-detectors DataFrame from create_separated_detectors_df
    :return: aggregated DataFrame without the 'Detector' column
    """
    df = df.drop(columns=['Detector'])
    grouped = df.groupby(['Threshold'], as_index=False)
    # 'sum' instead of np.sum: passing NumPy aggregation functions to
    # GroupBy.aggregate is deprecated in recent pandas versions.
    return grouped.aggregate('sum')
def sum_counts(df, detector):
    """
    Sum the count columns for one detector, or for all rows.

    :param df: counts DataFrame with a 'Detector' column
    :param detector: detector name, or 'All' to sum over every row
    :return: [detector, ground truth calls, old bird calls, old bird clips]
    """
    subset = df if detector == 'All' else df.loc[df['Detector'] == detector]
    totals = [
        subset[column].sum()
        for column in ('Ground Truth Calls', 'Old Bird Calls', 'Old Bird Clips')]
    return [detector] + totals
def add_precision_recall_f1(df):
    """
    Add 'Precision', 'Recall' and 'F1' percentage columns to df in place.

    df must have 'Old Bird Calls', 'Old Bird Clips' and
    'Ground Truth Calls' columns. Percentages carry one decimal place.
    """
    precision = df['Old Bird Calls'] / df['Old Bird Clips']
    recall = df['Old Bird Calls'] / df['Ground Truth Calls']
    f1 = 2 * precision * recall / (precision + recall)
    # Inlined percent conversion: fraction -> percent, one decimal place.
    df['Precision'] = round(1000 * precision) / 10
    df['Recall'] = round(1000 * recall) / 10
    df['F1'] = round(1000 * f1) / 10
def to_percent(x):
    """Convert a fraction to a percentage with one decimal place."""
    tenths_of_percent = round(1000 * x)
    return tenths_of_percent / 10
def plot_precision_vs_recall(post_enabled, separated_df, merged_df):
    """
    Plot precision vs. recall curves for the separate detectors and for
    the merged detector data, and save the figure as a PDF.

    :param post_enabled: post-processing setting, used for the output
        file name and plot title
    :param separated_df: per-(detector, threshold) counts with
        'Precision' and 'Recall' columns
    :param merged_df: per-threshold counts with 'Precision' and
        'Recall' columns
    """
    file_path = utils.get_precision_vs_recall_plot_file_path(post_enabled)
    with PdfPages(file_path) as pdf:
        _, axes = plt.subplots(figsize=(6, 6))
        # A list rather than a set literal: set iteration order is
        # arbitrary, which made curve draw order and legend order
        # nondeterministic from run to run.
        detector_data = [
            ('Tseep', 2, 'C0'),
            ('Thrush', 1.3, 'C1'),
        ]
        # Plot separate detector curves.
        for detector_name, threshold, color in detector_data:
            # Plot curve.
            df = separated_df.loc[separated_df['Detector'] == detector_name]
            precisions = df['Precision'].values
            recalls = df['Recall'].values
            axes.plot(recalls, precisions, color=color, label=detector_name)
            # Put marker at Old Bird detector point.
            indices = dict(
                (t, i) for i, t in enumerate(df['Threshold'].values))
            i = indices[threshold]
            axes.plot([recalls[i]], [precisions[i]], marker='o', color=color)
        # Plot merged curve.
        precisions = merged_df['Precision'].values
        recalls = merged_df['Recall'].values
        axes.plot(recalls, precisions, color='C2', label='Tseep and Thrush')
        plt.xlabel('Recall (%)')
        plt.ylabel('Precision (%)')
        limits = (0, 100)
        plt.xlim(limits)
        plt.ylim(limits)
        major_locator = MultipleLocator(25)
        minor_locator = MultipleLocator(5)
        axes.xaxis.set_major_locator(major_locator)
        axes.xaxis.set_minor_locator(minor_locator)
        axes.yaxis.set_major_locator(major_locator)
        axes.yaxis.set_minor_locator(minor_locator)
        plt.grid(which='both')
        plt.grid(which='minor', alpha=.4)
        axes.legend()
        post_string = utils.get_post_string(post_enabled)
        plt.title(
            'Old Bird Detector Precision vs. Recall ({})'.format(post_string))
        pdf.savefig()
        # plt.show()
# Script entry point.
if __name__ == '__main__':
    main()
| {"/evaluate_old_bird_detectors.py": ["/eval/utils.py"], "/run_old_bird_detectors.py": ["/eval/wave_file_reader.py", "/eval/utils.py"]} |
76,333 | HaroldMills/Vesper-Old-Bird-Detector-Eval | refs/heads/master | /eval/wave_file_reader.py | """Module containing `WaveFileReader` class."""
import wave
import numpy as np
class WaveFileReader:
    """Simple reader for 16-bit, uncompressed wave files."""

    def __init__(self, file_path):
        """
        :param file_path: path string (or open binary file object) of the
            wave file
        :raises ValueError: if the file's samples are not 16-bit
        """
        self._reader = wave.open(file_path, 'rb')
        params = self._reader.getparams()
        self.num_channels = params.nchannels
        self.sample_rate = params.framerate
        self.length = params.nframes
        # `read` decodes samples as little-endian 16-bit integers, so
        # reject any other sample width rather than silently returning
        # garbage (the previous version never checked).
        if params.sampwidth != 2:
            raise ValueError(
                'WaveFileReader requires 16-bit samples, but file has '
                '{}-byte samples.'.format(params.sampwidth))

    def read(self, start_index, length):
        """
        Read `length` sample frames starting at frame `start_index`.

        :return: int16 NumPy array of shape (num_channels, length)
        """
        self._reader.setpos(start_index)
        buffer = self._reader.readframes(length)
        samples = np.frombuffer(buffer, dtype='<i2')
        samples = samples.reshape((length, self.num_channels)).transpose()
        return samples

    def close(self):
        """Close the underlying wave file."""
        self._reader.close()
| {"/evaluate_old_bird_detectors.py": ["/eval/utils.py"], "/run_old_bird_detectors.py": ["/eval/wave_file_reader.py", "/eval/utils.py"]} |
76,334 | HaroldMills/Vesper-Old-Bird-Detector-Eval | refs/heads/master | /eval/utils.py | """
Utility constants and functions for Old Bird detector evaluation.
Edit the RECORDINGS_DIR_PATH, ANNOTATIONS_DIR_PATH, and WORKING_DIR_PATH
constants below to set the input and output directories for the
run_old_bird_detectors and evaluate_old_bird_detectors scripts.
Edit NUM_DETECTION_THRESHOLDS to adjust the number of detection
thresholds for which the detectors are run. Reducing the number of
thresholds speeds up detector runs considerably during testing.
"""
from pathlib import Path
# Input/output directories; edit these for the local machine.
RECORDINGS_DIR_PATH = Path(
    '/Users/harold/Desktop/NFC/Data/BirdVox/BirdVox-full-night/Other/'
    'Recording Wave Files')
ANNOTATIONS_DIR_PATH = Path(
    '/Users/harold/Desktop/NFC/Data/BirdVox/BirdVox-full-night/Dataset')
WORKING_DIR_PATH = Path('/Users/harold/Desktop')

# File name templates, formatted with a unit number or a post-processing tag.
RECORDING_FILE_NAME_FORMAT = 'BirdVox-full-night_wav-audio_unit{:02}.wav'
ANNOTATIONS_FILE_NAME_FORMAT = \
    'BirdVox-full-night_csv-annotations_unit{:02}.csv'
OLD_BIRD_CLIPS_FILE_NAME_FORMAT = 'Old Bird Clips ({}).csv'
PRECISION_VS_RECALL_PLOT_FILE_NAME_FORMAT = \
    'Old Bird Detector Precision vs. Recall ({}).pdf'

# BirdVox-full-night recording unit numbers.
UNIT_NUMS = (1, 2, 3, 5, 7, 10)

# Constants determining the thresholds for which detectors are run.
# The Old Bird Tseep and Thrush thresholds (2 and 1.3, respectively)
# are added to those generated from these constants.
MIN_DETECTION_THRESHOLD = 1.05
MAX_DETECTION_THRESHOLD = 20
DETECTION_THRESHOLDS_POWER = 3
NUM_DETECTION_THRESHOLDS = 100

# Center frequency threshold separating tseep and thrush calls, in hertz.
FREQ_THRESHOLD = 5000

# Recording sample rate, in hertz.
SAMPLE_RATE = 24000
def seconds_to_samples(x):
    """Convert a time in seconds to a sample count at SAMPLE_RATE."""
    num_samples = x * SAMPLE_RATE
    return int(round(num_samples))
# Windows of Old Bird clips that must contain a BirdVox-full-night
# call center in order for the clip to be counted as a call. The
# windows begin a fixed offset (90 ms for tseep clips and 150 ms
# for thrush clips, reflecting the different amounts of initial
# padding that the detectors add to their clips) from the beginnings
# of clips and have a duration of 200 ms.
OLD_BIRD_CLIP_CALL_CENTER_WINDOWS = {
    'Tseep': (seconds_to_samples(.09), seconds_to_samples(.2)),    # (start offset, duration) in samples
    'Thrush': (seconds_to_samples(.15), seconds_to_samples(.2))    # (start offset, duration) in samples
}
def get_recording_file_path(unit_num):
    """Return the path of the recording wave file for one unit."""
    name = RECORDING_FILE_NAME_FORMAT.format(unit_num)
    return RECORDINGS_DIR_PATH / name
def get_annotations_file_path(unit_num):
    """Return the path of the BirdVox annotations CSV file for one unit."""
    name = ANNOTATIONS_FILE_NAME_FORMAT.format(unit_num)
    return ANNOTATIONS_DIR_PATH / name
def get_old_bird_clips_file_path(post_enabled):
    """Return the path of the detector clips CSV for one post setting."""
    name = OLD_BIRD_CLIPS_FILE_NAME_FORMAT.format(
        get_post_string(post_enabled))
    return WORKING_DIR_PATH / name
def get_post_string(post_enabled):
    """Return the file-name tag for a post-processing setting."""
    if post_enabled:
        return 'with post'
    return 'no post'
def get_precision_vs_recall_plot_file_path(post_enabled):
    """Return the path of the precision vs. recall plot PDF."""
    name = PRECISION_VS_RECALL_PLOT_FILE_NAME_FORMAT.format(
        get_post_string(post_enabled))
    return WORKING_DIR_PATH / name
| {"/evaluate_old_bird_detectors.py": ["/eval/utils.py"], "/run_old_bird_detectors.py": ["/eval/wave_file_reader.py", "/eval/utils.py"]} |
76,335 | HaroldMills/Vesper-Old-Bird-Detector-Eval | refs/heads/master | /run_old_bird_detectors.py | """
Runs the Vesper reimplementations of the Old Bird Tseep and Thrush detectors
on the BirdVox-full-night recordings.
This script runs the Vesper reimplementations of the Old Bird Tseep and
Thrush detectors on all of the BirdVox-full-night recordings with multiple
detection thresholds. It writes metadata for the resulting detections to an
output CSV file for further processing, for example for plotting precision
vs. recall curves.
Both detectors are run both with and without the post-processing steps
performed by the original Old Bird detectors, including the merging of
overlapping clips and the suppression of frequent clips.
The inputs required by this script are:
1. The BirdVox-full-night recordings, as WAV files. The BirdVox-full-night
dataset includes the recordings as FLAC files: you can use Sox
(http://sox.sourceforge.net/) or some other software to convert the FLAC
files to WAV files. The directory containing the WAV files is specified
by eval.utils.RECORDINGS_DIR_PATH.
The outputs produced by this script are:
1. The files "Old Bird Clips (no post).csv" and
"Old Bird Clips (with post).csv", containing the results of detector
runs with and without the post-processing steps of the original Old Bird
detectors, respectively. Each line of these files contains data describing
one clip produced by a detector. The directory to which these files are
written is specified by eval.utils.WORKING_DIR_PATH.
"""
import csv
import math
import time
import numpy as np
from eval.old_bird_detector_redux_1_1_mt import ThrushDetector, TseepDetector
from eval.wave_file_reader import WaveFileReader
import eval.utils as utils
# Set this `True` to run detectors on first recording only.
QUICK_RUN = False

# Maps detector name to the detector class run under that name.
DETECTOR_CLASSES = {
    'Thrush': ThrushDetector,
    'Tseep': TseepDetector
}

# Wave file read chunk size in samples.
CHUNK_SIZE = 100000
def main():
    """Run the detectors without, then with, Old Bird post-processing."""
    for post_enabled in (False, True):
        run_detectors_on_all_recordings(post_enabled)
def run_detectors_on_all_recordings(post_enabled):
    """
    Run both detectors on every BirdVox-full-night recording and write
    the collected clips to a CSV file.

    :param post_enabled: whether to enable Old Bird post-processing
    """
    listeners = create_listeners()
    for unit_num in utils.UNIT_NUMS:
        run_detectors_on_one_recording(unit_num, post_enabled, listeners)
        # Optionally stop after the first recording, for quick testing.
        if QUICK_RUN:
            break
    write_detections_file(post_enabled, listeners)
def create_listeners():
    """Create one Listener per detector, ordered by detector name."""
    return [Listener(name) for name in sorted(DETECTOR_CLASSES)]
def run_detectors_on_one_recording(unit_num, post_enabled, listeners):
    """
    Run both detectors over one recording, reading it in chunks.

    Detected clips accumulate in the listeners; run timing is printed
    when the recording is done.

    :param unit_num: BirdVox-full-night unit number of the recording
    :param post_enabled: whether to enable Old Bird post-processing
    :param listeners: Listener objects, one per detector
    """
    file_path = utils.get_recording_file_path(unit_num)
    post_state = 'enabled' if post_enabled else 'disabled'
    print((
        'Running detectors with post-processing {} on file '
        '"{}"...').format(post_state, file_path))
    # Tag subsequent clips with this recording's unit number.
    for listener in listeners:
        listener.unit_num = unit_num
    start_time = time.time()
    reader = WaveFileReader(str(file_path))
    num_chunks = int(math.ceil(reader.length / CHUNK_SIZE))
    sample_rate = reader.sample_rate
    detectors = \
        [create_detector(sample_rate, post_enabled, l) for l in listeners]
    for i, samples in enumerate(generate_sample_buffers(reader)):
        if i != 0 and i % 1000 == 0:
            print(' Chunk {} of {}...'.format(i, num_chunks))
        # samples[0] is the first (only) channel of the chunk.
        for detector in detectors:
            detector.detect(samples[0])
    for detector in detectors:
        detector.complete_detection()
    reader.close()
    processing_time = time.time() - start_time
    file_duration = reader.length / sample_rate
    show_processing_time(processing_time, file_duration)
def create_detector(sample_rate, post_enabled, listener):
    """Instantiate the detector class matching the listener's name."""
    detector_cls = DETECTOR_CLASSES[listener.name]
    thresholds = get_detection_thresholds(utils.DETECTION_THRESHOLDS_POWER)
    return detector_cls(thresholds, post_enabled, sample_rate, listener)
def get_detection_thresholds(p):
    """
    Build the sorted list of detection thresholds.

    Thresholds are spaced between MIN_DETECTION_THRESHOLD and
    MAX_DETECTION_THRESHOLD along a power-p curve, plus the original
    Old Bird Tseep and Thrush thresholds.

    :param p: exponent controlling threshold spacing
    """
    lo = utils.MIN_DETECTION_THRESHOLD
    hi = utils.MAX_DETECTION_THRESHOLD
    n = utils.NUM_DETECTION_THRESHOLDS
    fractions = (np.arange(n) / (n - 1)) ** p
    thresholds = list(lo + (hi - lo) * fractions)
    # Always include Old Bird Tseep and Thrush thresholds.
    thresholds.append(1.3)  # Thrush
    thresholds.append(2)  # Tseep
    thresholds.sort()
    return thresholds
def generate_sample_buffers(file_reader):
    """Yield successive chunks of at most CHUNK_SIZE sample frames."""
    position = 0
    while position < file_reader.length:
        chunk_length = min(CHUNK_SIZE, file_reader.length - position)
        yield file_reader.read(position, chunk_length)
        position += CHUNK_SIZE
def show_processing_time(processing_time, file_duration):
    """Print the detector run speed relative to real time."""
    speedup = file_duration / processing_time
    message = (
        'Ran detectors on {}-second file in {} seconds, {} times faster '
        'than real time.')
    print(message.format(
        round_(file_duration), round_(processing_time), round_(speedup)))
def round_(t):
    """Round a number to one decimal place."""
    tenths = round(10 * t)
    return tenths / 10
def write_detections_file(post_enabled, listeners):
    """
    Write the clips collected by the listeners to the output CSV file,
    one row per clip, sorted within each detector.

    :param post_enabled: post-processing setting, used for the file name
    :param listeners: Listener objects holding detected clips
    """
    file_path = utils.get_old_bird_clips_file_path(post_enabled)
    # newline='' lets the csv module control row terminators; without it
    # the file gains blank rows on Windows (per the csv module docs).
    with open(file_path, 'w', newline='') as file_:
        writer = csv.writer(file_)
        writer.writerow(
            ['Detector', 'Unit', 'Threshold', 'Start Index', 'Length'])
        for listener in listeners:
            print('{} detector produced {} clips.'.format(
                listener.name, len(listener.clips)))
            listener.clips.sort()
            writer.writerows(listener.clips)
class Listener:
    """Accumulates metadata for the clips one detector produces."""

    def __init__(self, name):
        """
        :param name: detector name ('Tseep' or 'Thrush')
        """
        self.name = name
        self.unit_num = None   # set before each recording is processed
        self.clips = []        # one [name, unit, threshold, start, length] row per clip

    def process_clip(self, start_index, length, threshold):
        """Record one detected clip as a CSV-ready row."""
        row = [self.name, self.unit_num, threshold, start_index, length]
        self.clips.append(row)
# Script entry point.
if __name__ == '__main__':
    main()
| {"/evaluate_old_bird_detectors.py": ["/eval/utils.py"], "/run_old_bird_detectors.py": ["/eval/wave_file_reader.py", "/eval/utils.py"]} |
76,366 | kevjshih/semantic_segmentation | refs/heads/master | /train.py | """
Main train loop
"""
import torch
import torch.nn as nn
from torch.autograd import Variable
import sys
import yaml
from data import Data
from pascal_context_data import PascalContextData
import torch.optim as optim
from model import SimpleFCN
import pdb
# reads the config file, returns appropriate instantiation of Data class
def _dataset_factory(cfg_file) -> Data:
    """
    Instantiate the Data subclass named in a YAML config file.

    :param cfg_file: path of a YAML file with at least a 'name' key
    :return: the configured dataset container; exits on an unknown name
    """
    # 'with' guarantees the file is closed; safe_load replaces the
    # deprecated bare yaml.load (an error in PyYAML >= 6), which could
    # also construct arbitrary Python objects from untrusted input.
    with open(cfg_file, 'r') as f:
        cfg = yaml.safe_load(f)
    if cfg['name'] == 'pascal_context':
        return PascalContextData(cfg)
    print("Dataset name not matched")
    sys.exit(-1)
# reads the config file, returns appropriate instantiation of pytorch Module class
def _model_factory(cfg_file) -> torch.nn.Module:
    """
    Instantiate the model named in a YAML config file.

    :param cfg_file: path of a YAML file with at least a 'name' key
    :return: the configured model; exits on an unknown name
    """
    # 'with' guarantees the file is closed; safe_load replaces the
    # deprecated bare yaml.load (an error in PyYAML >= 6).
    with open(cfg_file, 'r') as f:
        cfg = yaml.safe_load(f)
    if cfg['name'] == 'simple_fcn':
        return SimpleFCN(cfg)
    print("Model name not matched")
    sys.exit(-1)
def train(model, data):
    """
    Train the model on the dataset with SGD and cross-entropy loss.

    NOTE(review): requires a CUDA-capable GPU, since the model and every
    batch are moved to the GPU.

    :param model: torch.nn.Module producing N x 459 x H x W class scores
    :param data: Data instance yielding (inputs, labels) batches
    """
    model.cuda()
    num_batches = data.get_num_batches()
    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
    criterion = nn.CrossEntropyLoss()
    for epoch in range(10):
        data.shuffle()  # shuffle the dataset
        for batch_idx in range(num_batches):
            inputs, labels = data.get_batch(batch_idx)
            # Variable wrappers removed: tensors are autograd-aware since
            # PyTorch 0.4.
            inputs, labels = inputs.cuda(), labels.cuda()
            # Zero gradients every step; calling zero_grad() once before
            # the loop (as before) silently accumulated gradients across
            # all iterations.
            optimizer.zero_grad()
            outputs = model(inputs)
            # The model emits NCHW scores; move the class axis last before
            # flattening so each row of the 2-D view is one pixel's 459
            # class scores (a plain view(-1, 459) on NCHW interleaves
            # pixels and classes incorrectly).
            outputs_reshaped = \
                outputs.permute(0, 2, 3, 1).contiguous().view(-1, 459)
            labels_reshaped = labels.view(-1)
            loss = criterion(outputs_reshaped, labels_reshaped)
            loss.backward()
            optimizer.step()
            # loss.item() replaces the deprecated loss.data[0], which fails
            # on 0-dim tensors in PyTorch >= 0.4; also removed a leftover
            # pdb.set_trace() breakpoint from the loop.
            print(loss.item())
if __name__ == '__main__':
    # Usage: python train.py <dataset_cfg> <model_cfg>
    dataset_cfg = sys.argv[1]
    # does nothing for now
    model_cfg = sys.argv[2]
    # load the appropriate dataset into a container
    data = _dataset_factory(dataset_cfg)
    model = _model_factory(model_cfg)
    train(model, data)
| {"/train.py": ["/data.py", "/pascal_context_data.py", "/model.py"], "/pascal_context_data.py": ["/data.py"]} |
76,367 | kevjshih/semantic_segmentation | refs/heads/master | /pascal_context_data.py | """
dataset encapsulation class for PASCAL Context Dataset
"""
import scipy.io
from scipy import misc
import numpy as np
from data import Data
import os
import torch
class PascalContextData(Data):
    """Data container for the PASCAL Context semantic segmentation dataset."""

    def __init__(self, cfg):
        """
        :param cfg: dict with keys 'segdir' (label .mat directory),
            'VOCdir' (VOC root) and 'idlist' (image id list file)
        """
        super().__init__(cfg)
        self._segdir = cfg['segdir']
        self._VOCdir = cfg['VOCdir']
        # 'with' guarantees the id-list file is closed even if reading fails.
        with open(cfg['idlist'], 'r') as fid:
            self._ids = [s.strip() for s in fid.readlines()]
        self._batch_size = 1  # default

    def set_batch_size(self, bsize):
        """Set the number of examples per batch."""
        self._batch_size = bsize

    def get_batch(self, ind):
        """
        Return the (image, label) pair for the example at index ind.

        NOTE(review): despite the name, this returns a single example
        regardless of the configured batch size.

        :return: (1 x 3 x H x W float tensor, 1 x H x W int64 tensor)
        """
        image_id = self._ids[ind]
        curr_im_path = self._VOCdir + '/JPEGImages/' + image_id + '.jpg'
        # NOTE: scipy.misc.imread is deprecated (removed in SciPy 1.3);
        # switch to imageio.imread when upgrading SciPy.
        curr_im = misc.imread(curr_im_path)
        curr_im = np.expand_dims(curr_im, 0)
        curr_label_path = self._segdir + '/trainval/' + image_id + '.mat'
        curr_label = scipy.io.loadmat(curr_label_path)
        curr_label = curr_label['LabelMap'] - 1  # (set to 0-index)
        curr_label = np.expand_dims(curr_label, 0)
        curr_im = torch.Tensor(curr_im.astype(np.float32))
        curr_im = curr_im.permute(0, 3, 1, 2)  # NHWC -> NCHW
        curr_label = torch.from_numpy(curr_label.astype(np.int64)).contiguous()
        return curr_im, curr_label

    def shuffle(self):
        """Shuffle the example order in place."""
        np.random.shuffle(self._ids)

    def get_num_examples(self):
        """Return the number of examples in the dataset."""
        return len(self._ids)

    def get_num_batches(self):
        """Return the number of batches per epoch, rounding up."""
        # Bug fix: round() is not "round up" -- e.g. round(2.5) == 2, which
        # dropped the final partial batch. Use ceiling division instead.
        return -(-len(self._ids) // self._batch_size)
| {"/train.py": ["/data.py", "/pascal_context_data.py", "/model.py"], "/pascal_context_data.py": ["/data.py"]} |
76,368 | kevjshih/semantic_segmentation | refs/heads/master | /model.py | import torch
import torch.nn as nn
from torch.autograd import Variable
class SimpleFCN(torch.nn.Module):
    """
    Simple fully convolutional network: five BatchNorm -> 3x3 Conv -> ReLU
    stages mapping a 3-channel image to 459 per-pixel class scores at the
    input resolution.
    """

    def __init__(self, cfg):
        """
        :param cfg: model configuration (currently unused)
        """
        super().__init__()
        self._setup_model(cfg)

    def _add_conv(self, in_dim, out_dim):
        """Return one BatchNorm -> 3x3 same-padding Conv -> ReLU stage."""
        return nn.Sequential(
            nn.BatchNorm2d(in_dim),
            nn.Conv2d(in_dim, out_dim, 3, padding=1),
            nn.ReLU(inplace=True),
        )

    def _add_linear(self, in_dim, out_dim):
        """Return one BatchNorm -> Linear -> ReLU stage (currently unused)."""
        return nn.Sequential(
            nn.BatchNorm1d(in_dim),
            nn.Linear(in_dim, out_dim),
            nn.ReLU(inplace=True),
        )

    def _setup_model(self, model_cfg):
        """
        Create the five conv stages. Attribute names c1..c5 are kept so
        state_dict keys stay compatible with saved checkpoints.
        """
        channel_widths = [3, 64, 128, 256, 512, 459]
        for stage_idx in range(5):
            stage = self._add_conv(
                channel_widths[stage_idx], channel_widths[stage_idx + 1])
            setattr(self, 'c{}'.format(stage_idx + 1), stage)
        # self.maxpool = nn.MaxPool2d(2, stride=2)

    def forward(self, x):
        """Apply the five conv stages in order; preserves spatial size."""
        for stage in (self.c1, self.c2, self.c3, self.c4, self.c5):
            x = stage(x)
        return x
| {"/train.py": ["/data.py", "/pascal_context_data.py", "/model.py"], "/pascal_context_data.py": ["/data.py"]} |
76,369 | kevjshih/semantic_segmentation | refs/heads/master | /data.py | """
Class for batching datasets
Extend for each different dataset
"""
class Data:
    """Abstract base for batched dataset access.

    Subclasses override the no-op methods below for each concrete dataset.
    """

    def __init__(self, cfg):
        """Initialize from a configuration object; default batch size is 10."""
        self._batch_size = 10

    def get_batch(self, ind):
        """Return the batch at index ``ind`` (override in subclasses)."""

    def set_batch_size(self, bsize):
        """Partition the dataset into batches of size ``bsize``."""
        self._batch_size = bsize
        # update other stuff as necessary

    def get_batch_size(self):
        """Return the current batch size."""
        return self._batch_size

    def get_num_batches(self):
        """Return the number of batches per epoch (override in subclasses)."""

    def get_num_examples(self):
        """Return the number of examples per epoch (override in subclasses)."""

    def shuffle(self):
        """Shuffle the dataset (override in subclasses)."""
| {"/train.py": ["/data.py", "/pascal_context_data.py", "/model.py"], "/pascal_context_data.py": ["/data.py"]} |
76,372 | ximinng/Learning-To-Learn | refs/heads/master | /learningTolearn/method/metric/__init__.py | # -*- coding: utf-8 -*-
"""
Description :
Author : xxm
"""
from .prototype import get_accuracy, get_prototypes, prototypical_loss
__all__ = ['prototypical_loss', 'get_prototypes', 'get_accuracy']
| {"/learningTolearn/method/metric/__init__.py": ["/learningTolearn/method/metric/prototype.py"], "/learningTolearn/train_protonet.py": ["/learningTolearn/dataset/__init__.py", "/learningTolearn/method/metric/__init__.py", "/learningTolearn/backbone/__init__.py"], "/learningTolearn/util/__init__.py": ["/learningTolearn/util/utils.py"], "/learningTolearn/method/__init__.py": ["/learningTolearn/method/optimization/__init__.py", "/learningTolearn/method/metric/__init__.py"], "/learningTolearn/method/optimization/meta_sgd.py": ["/learningTolearn/method/optimization/__init__.py"], "/learningTolearn/backbone/__init__.py": ["/learningTolearn/backbone/conv.py", "/learningTolearn/backbone/mlp.py"], "/learningTolearn/method/optimization/__init__.py": ["/learningTolearn/method/optimization/meta_sgd.py"]} |
76,373 | ximinng/Learning-To-Learn | refs/heads/master | /learningTolearn/backbone/conv.py | # -*- coding: utf-8 -*-
"""
Description : Conv model
Author : xxm
"""
import torch.nn as nn
from collections import OrderedDict
from torchmeta.modules import (MetaModule, MetaConv2d, MetaBatchNorm2d,
MetaSequential, MetaLinear)
def conv_block(in_channels, out_channels, bias=True,
               activation=nn.ReLU(inplace=True),
               use_dropout=False, p=0.1):
    """Build one backbone stage: Conv3x3 -> BatchNorm -> activation -> MaxPool2.

    Channel counts are cast to int so callers may pass float widths
    (e.g. ``hidden * 1.5``). When ``use_dropout`` is set, a Dropout2d(p)
    module is appended after the pooling layer.

    NOTE(review): the default ``activation`` module is created once at
    definition time and shared across calls; harmless for a parameterless
    ReLU, but worth confirming if a stateful activation is ever used.
    """
    stage = OrderedDict([
        ('conv', MetaConv2d(int(in_channels), int(out_channels), kernel_size=3, padding=1, bias=bias)),
        ('norm', MetaBatchNorm2d(int(out_channels), momentum=1., track_running_stats=False)),
        ('relu', activation),
        ('pool', nn.MaxPool2d(2)),
    ])
    block = MetaSequential(stage)
    if use_dropout:
        block.add_module('dropout', nn.Dropout2d(p))
    return block
class MetaConvModel(MetaModule):
    """Standard 4-conv-block few-shot backbone from [1].

    Parameters
    ----------
    in_channels : int
        Number of channels of the input images.
    out_features : int
        Number of classes (size of the classifier output).
    hidden_size : int (default: 64)
        Channel width of every convolutional block.
    feature_size : int (default: 64)
        Flattened feature dimension fed to the linear classifier.
    embedding : bool (default: False)
        When True, ``forward`` expects an episodic 5-D batch
        [tasks, examples, C, H, W] and returns flattened embeddings
        (for prototypical networks); when False it expects a 4-D batch
        and returns classifier logits (MAML-style, with optional
        fast weights via ``params``).

    References
    ----------
    .. [1] Finn C., Abbeel P., and Levine, S. (2017). Model-Agnostic Meta-Learning
        for Fast Adaptation of Deep Networks. International Conference on
        Machine Learning (ICML) (https://arxiv.org/abs/1703.03400)
    """

    def __init__(self, in_channels, out_features, hidden_size=64, feature_size=64, embedding=False):
        super(MetaConvModel, self).__init__()
        self.in_channels = in_channels
        self.out_features = out_features
        self.hidden_size = hidden_size
        self.feature_size = feature_size
        self.embedding = embedding
        # Four identical-width conv blocks; only the first changes channels.
        widths = [in_channels] + [hidden_size] * 4
        self.features = MetaSequential(OrderedDict([
            ('layer{0}'.format(i + 1), conv_block(widths[i], widths[i + 1]))
            for i in range(4)
        ]))
        self.classifier = MetaLinear(feature_size, out_features)

    def forward(self, inputs, params=None):
        if self.embedding:
            # Episodic input [tasks, examples, C, H, W]: fold tasks into the
            # batch dimension, embed, then restore the leading two axes.
            # NOTE(review): `params` is not routed through in this branch,
            # matching the original behavior.
            merged = inputs.view(-1, *inputs.shape[2:])
            emb = self.features(merged)
            return emb.view(*inputs.shape[:2], -1)
        # MAML-style input [batch, C, H, W] with optional fast weights.
        feats = self.features(inputs, params=self.get_subdict(params, 'features'))
        feats = feats.view(feats.size(0), -1)
        return self.classifier(feats, params=self.get_subdict(params, 'classifier'))
def ModelConvOmniglot(out_features, hidden_size=64, embedding=False):
    """1-channel (Omniglot) backbone; the final feature map collapses to
    a single spatial location, so feature_size equals hidden_size."""
    return MetaConvModel(1, out_features, hidden_size=hidden_size,
                         feature_size=hidden_size, embedding=embedding)
def ModelConv(out_features, hidden_size=64, flatten=False):
    """RGB backbone with a 5x5 final feature map (84x84 inputs)."""
    return MetaConvModel(3, out_features, hidden_size=hidden_size,
                         feature_size=5 * 5 * hidden_size, embedding=flatten)
def ModelConvMiniImagenet(out_features, hidden_size=64):
    """miniImageNet backbone (3x84x84 inputs -> 5x5 feature map)."""
    return MetaConvModel(3, out_features, hidden_size=hidden_size,
                         feature_size=5 * 5 * hidden_size)
class EmbeddingImagenet(nn.Module):
    """Four-stage convolutional embedding backbone (EGNN).

    Maps an image batch to an ``emb_size``-dimensional embedding via four
    progressively wider conv blocks followed by a Linear + BatchNorm1d
    projection. The later blocks use spatial dropout.

    Parameters
    ----------
    emb_size : int
        Dimensionality of the output embedding space.

    References
    ----------
    .. [1] [Kim et al. 2019] Kim, J.; Kim, T.; Kim, S.; and Yoo, C. D. (2019).
        Edge-labeling graph neural network for few-shot learning. In CVPR.
    """

    def __init__(self, emb_size):
        super(EmbeddingImagenet, self).__init__()
        self.in_channels = 3
        self.hidden = 64
        # Flattened spatial size contribution (5x5 map for 84x84 inputs).
        self.last_hidden = self.hidden * 25
        self.emb_size = emb_size
        stages = OrderedDict([
            ('layer1', conv_block(self.in_channels, self.hidden, bias=False,
                                  activation=nn.LeakyReLU(0.2))),
            ('layer2', conv_block(self.hidden, self.hidden * 1.5, bias=False,
                                  activation=nn.LeakyReLU(0.2))),
            ('layer3', conv_block(self.hidden * 1.5, self.hidden * 2, bias=False,
                                  activation=nn.LeakyReLU(0.2),
                                  use_dropout=True, p=0.4)),
            ('layer4', conv_block(self.hidden * 2, self.hidden * 4, bias=False,
                                  activation=nn.LeakyReLU(0.2),
                                  use_dropout=True, p=0.5)),
        ])
        self.layers = nn.Sequential(stages)
        self.last_layer = nn.Sequential(
            nn.Linear(in_features=self.last_hidden * 4,
                      out_features=self.emb_size, bias=True),
            nn.BatchNorm1d(self.emb_size),
        )

    def forward(self, x):
        feat = self.layers(x)
        return self.last_layer(feat.view(feat.size(0), -1))
def _meta_model_embedding_test():
    """Smoke test: episodic (5-D) input through the embedding branch.

    1. input: episodic training batch [batch, num_examples, C, H, W]
    2. expects flattened per-example embeddings [batch, num_examples, -1]
    """
    import torch
    input = torch.rand(16, 25, 3, 84, 84)
    # BUG FIX: embedding must be True for 5-D episodic input; with
    # embedding=False the forward pass would feed a 5-D tensor to Conv2d.
    model = MetaConvModel(3, 5, hidden_size=64, feature_size=5 * 5 * 64, embedding=True)
    out = model(input)
    print(out.shape)
def _meta_model_test():
    """Smoke test: MAML-style (4-D) input through the classifier head.

    1. input: [batch_size, channel, width, height]
    2. get weight and bias like `maml`
    3. return : [batch_size, num_classes]
    """
    import torch
    input = torch.rand(32, 3, 84, 84)
    # BUG FIX: embedding must be False for 4-D input; with embedding=True
    # the forward pass reshapes away the channel dimension before the convs.
    model = MetaConvModel(3, 5, hidden_size=64, feature_size=5 * 5 * 64, embedding=False)
    out = model(input)
    print(out.shape)
def _model_egnn_test():
    """Smoke test for the EGNN embedding backbone."""
    import torch
    batch = torch.rand(32, 3, 84, 84)
    net = EmbeddingImagenet(555)
    print(net)
    out = net(batch)
    print(out.shape)
if __name__ == '__main__':
    # Smoke-test the meta-conv model in both input-routing modes.
    _meta_model_test()
    _meta_model_embedding_test()
    # _model_egnn_test()
| {"/learningTolearn/method/metric/__init__.py": ["/learningTolearn/method/metric/prototype.py"], "/learningTolearn/train_protonet.py": ["/learningTolearn/dataset/__init__.py", "/learningTolearn/method/metric/__init__.py", "/learningTolearn/backbone/__init__.py"], "/learningTolearn/util/__init__.py": ["/learningTolearn/util/utils.py"], "/learningTolearn/method/__init__.py": ["/learningTolearn/method/optimization/__init__.py", "/learningTolearn/method/metric/__init__.py"], "/learningTolearn/method/optimization/meta_sgd.py": ["/learningTolearn/method/optimization/__init__.py"], "/learningTolearn/backbone/__init__.py": ["/learningTolearn/backbone/conv.py", "/learningTolearn/backbone/mlp.py"], "/learningTolearn/method/optimization/__init__.py": ["/learningTolearn/method/optimization/meta_sgd.py"]} |
76,374 | ximinng/Learning-To-Learn | refs/heads/master | /learningTolearn/backbone/mlp.py | # -*- coding: utf-8 -*-
"""
Description :
Author : xxm
"""
import torch.nn as nn
from collections import OrderedDict
from torchmeta.modules import MetaModule, MetaSequential, MetaLinear
class MetaMLPModel(MetaModule):
    """Multi-layer perceptron with meta-learnable (fast-weight) layers, from [1].

    Parameters
    ----------
    in_features : int
        Number of input features.
    out_features : int
        Number of classes (num of ways in a few-shot setting).
    hidden_sizes : list of int
        Widths of the intermediate representations; the list length is the
        number of hidden layers.

    References
    ----------
    .. [1] Finn C., Abbeel P., and Levine, S. (2017). Model-Agnostic Meta-Learning
        for Fast Adaptation of Deep Networks. International Conference on
        Machine Learning (ICML) (https://arxiv.org/abs/1703.03400)
    """

    def __init__(self, in_features, out_features, hidden_sizes):
        super(MetaMLPModel, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.hidden_sizes = hidden_sizes
        # Linear+ReLU stage per consecutive pair of widths.
        dims = [in_features] + hidden_sizes
        stages = OrderedDict()
        for idx, in_dim in enumerate(dims[:-1]):
            stages['layer{0}'.format(idx + 1)] = MetaSequential(OrderedDict([
                ('linear', MetaLinear(in_dim, dims[idx + 1], bias=True)),
                ('relu', nn.ReLU()),
            ]))
        self.features = MetaSequential(stages)
        self.classifier = MetaLinear(hidden_sizes[-1], out_features, bias=True)

    def forward(self, inputs, params=None):
        feats = self.features(inputs, params=self.get_subdict(params, 'features'))
        return self.classifier(feats, params=self.get_subdict(params, 'classifier'))
def ModelMLPSinusoid(hidden_sizes=None):
    """Build the 1-in/1-out MLP used for sinusoid regression.

    :param hidden_sizes: list of hidden-layer widths; defaults to [40, 40].
    """
    # Avoid a mutable default argument; None stands in for [40, 40].
    if hidden_sizes is None:
        hidden_sizes = [40, 40]
    return MetaMLPModel(1, 1, hidden_sizes)
if __name__ == '__main__':
    # Construct the default sinusoid MLP as a quick smoke test.
    model = ModelMLPSinusoid()
| {"/learningTolearn/method/metric/__init__.py": ["/learningTolearn/method/metric/prototype.py"], "/learningTolearn/train_protonet.py": ["/learningTolearn/dataset/__init__.py", "/learningTolearn/method/metric/__init__.py", "/learningTolearn/backbone/__init__.py"], "/learningTolearn/util/__init__.py": ["/learningTolearn/util/utils.py"], "/learningTolearn/method/__init__.py": ["/learningTolearn/method/optimization/__init__.py", "/learningTolearn/method/metric/__init__.py"], "/learningTolearn/method/optimization/meta_sgd.py": ["/learningTolearn/method/optimization/__init__.py"], "/learningTolearn/backbone/__init__.py": ["/learningTolearn/backbone/conv.py", "/learningTolearn/backbone/mlp.py"], "/learningTolearn/method/optimization/__init__.py": ["/learningTolearn/method/optimization/meta_sgd.py"]} |
76,375 | ximinng/Learning-To-Learn | refs/heads/master | /learningTolearn/train_protonet.py | # -*- coding: utf-8 -*-
"""
Description :
Author : xxm
"""
import os
import sys
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)
import torch
from tqdm import tqdm
from torchmeta.utils.data import BatchMetaDataLoader
from learningTolearn.dataset import get_benchmark_by_name
from learningTolearn.method.metric import get_prototypes, get_accuracy, prototypical_loss
from learningTolearn.backbone import ModelConvOmniglot
def train(args):
    """Meta-train a Prototypical Network with the options in ``args``.

    Builds the few-shot benchmark and task dataloaders, trains a
    convolutional embedding model with the prototypical loss for
    ``args.num_batches`` mini-batches of tasks, and optionally saves the
    trained weights to ``args.output_folder``.

    :param args: argparse.Namespace produced by the parser in ``__main__``.
    """
    device = torch.device('cuda:0' if args.use_cuda and torch.cuda.is_available() else 'cpu')
    # Load the few-shot benchmark (datasets and splits).
    benchmark = get_benchmark_by_name(args.dataset,
                                      args.folder,
                                      args.num_ways,
                                      args.num_shots,
                                      args.num_shots_test,
                                      hidden_size=args.hidden_size)
    # Meta-training loader: each batch is a set of N-way/K-shot tasks.
    meta_train_dataloader = BatchMetaDataLoader(benchmark.meta_train_dataset,
                                                batch_size=args.batch_size,
                                                shuffle=True,
                                                num_workers=args.num_workers,
                                                pin_memory=True)
    # Meta-validation loader (created here but not used in the loop below).
    meta_val_dataloader = BatchMetaDataLoader(benchmark.meta_val_dataset,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              num_workers=args.num_workers,
                                              pin_memory=True)
    # embedding=True: the model returns flattened embeddings, not logits.
    model = ModelConvOmniglot(args.embedding_size, hidden_size=args.hidden_size, embedding=True)
    model.to(device=device)
    model.train()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    # Training loop
    with tqdm(meta_train_dataloader, total=args.num_batches) as pbar:
        for batch_idx, batch in enumerate(pbar):
            model.zero_grad()
            train_inputs, train_targets = batch['train']
            train_inputs = train_inputs.to(device=device)  # [16, 25, 1, 28, 28]
            train_targets = train_targets.to(device=device)  # [16, 25]
            train_embeddings = model(train_inputs)
            test_inputs, test_targets = batch['test']
            test_inputs = test_inputs.to(device=device)
            test_targets = test_targets.to(device=device)
            test_embeddings = model(test_inputs)
            # Prototypes come from the support set; the loss is evaluated
            # on the query-set embeddings.
            prototypes = get_prototypes(train_embeddings, train_targets,
                                        benchmark.meta_train_dataset.num_classes_per_task)
            loss = prototypical_loss(prototypes, test_embeddings, test_targets)
            loss.backward()
            optimizer.step()
            with torch.no_grad():
                accuracy = get_accuracy(prototypes, test_embeddings, test_targets)
                pbar.set_postfix(accuracy='{0:.4f}'.format(accuracy.item()))
            # The task loader can yield batches indefinitely; stop after the
            # requested number of training batches.
            if batch_idx >= args.num_batches:
                break
    # Save model
    if args.output_folder is not None:
        # NOTE(review): assumes args.output_folder already exists.
        filename = os.path.join(args.output_folder,
                                'protonet_omniglot_{0}shot_{1}way.pt'.format(args.num_shots, args.num_ways))
        with open(filename, 'wb') as f:
            state_dict = model.state_dict()
            torch.save(state_dict, f)
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser('Prototypical Networks')
    # --- Data options ---
    parser.add_argument('--folder', type=str, default='/Documents/Github/few-shot-datasets/',
                        help='Path to the folder the data is downloaded to.')
    parser.add_argument('--dataset', type=str,
                        choices=['sinusoid', 'omniglot', 'miniimagenet', 'tieredimagenet'], default='omniglot',
                        help='Name of the dataset (default: omniglot).')
    # --- Few-shot task options ---
    parser.add_argument('--num-shots', type=int, default=5,
                        help='Number of examples per class (k in "k-shot", default: 5).')
    parser.add_argument('--num-ways', type=int, default=5,
                        help='Number of classes per task (N in "N-way", default: 5).')
    parser.add_argument('--num-shots-test', type=int, default=15,
                        help='Number of test example per class. '
                             'If negative, same as the number of training examples `--num-shots` (default: 15).')
    # --- Model options ---
    parser.add_argument('--embedding-size', type=int, default=64,
                        help='Dimension of the embedding/latent space (default: 64).')
    parser.add_argument('--hidden-size', type=int, default=64,
                        help='Number of channels for each convolutional layer (default: 64).')
    parser.add_argument('--output-folder', type=str, default=None,
                        help='Path to the output folder for saving the model (optional).')
    # --- Training options ---
    parser.add_argument('--batch-size', type=int, default=16,
                        help='Number of tasks in a mini-batch of tasks (default: 16).')
    parser.add_argument('--num-batches', type=int, default=100,
                        help='Number of batches the prototypical network is trained over (default: 100).')
    parser.add_argument('--num-workers', type=int, default=1,
                        help='Number of workers for data loading (default: 1).')
    parser.add_argument('--download', action='store_true',
                        help='Download the Omniglot dataset in the data folder.')
    parser.add_argument('--use_cuda', action='store_true',
                        help='Use CUDA if available.')
    args = parser.parse_args()
    train(args)
| {"/learningTolearn/method/metric/__init__.py": ["/learningTolearn/method/metric/prototype.py"], "/learningTolearn/train_protonet.py": ["/learningTolearn/dataset/__init__.py", "/learningTolearn/method/metric/__init__.py", "/learningTolearn/backbone/__init__.py"], "/learningTolearn/util/__init__.py": ["/learningTolearn/util/utils.py"], "/learningTolearn/method/__init__.py": ["/learningTolearn/method/optimization/__init__.py", "/learningTolearn/method/metric/__init__.py"], "/learningTolearn/method/optimization/meta_sgd.py": ["/learningTolearn/method/optimization/__init__.py"], "/learningTolearn/backbone/__init__.py": ["/learningTolearn/backbone/conv.py", "/learningTolearn/backbone/mlp.py"], "/learningTolearn/method/optimization/__init__.py": ["/learningTolearn/method/optimization/meta_sgd.py"]} |
76,376 | ximinng/Learning-To-Learn | refs/heads/master | /learningTolearn/dataset/demo.py | # -*- coding: utf-8 -*-
"""
Description :
Author : xxm
"""
from torchmeta.datasets import MiniImagenet
from torchmeta.transforms import Categorical, ClassSplitter, Rotation
from torchvision.transforms import Compose, Resize, ToTensor
from torchmeta.utils.data import BatchMetaDataLoader
# NOTE(review): download=True fetches the dataset into "/few-shot-datasets"
# on first run.
dataset = MiniImagenet("/few-shot-datasets",
                       # Number of ways
                       num_classes_per_task=5,
                       # Resize the images to 84x84 and convert them to PyTorch tensors (from Torchvision)
                       transform=Compose([Resize(84), ToTensor()]),
                       # Transform the labels to integers (e.g. ("Glagolitic/character01", "Sanskrit/character14", ...) to (0, 1, ...))
                       target_transform=Categorical(num_classes=5),
                       # Creates new virtual classes with rotated versions of the images (from Santoro et al., 2016)
                       class_augmentations=[Rotation([90, 180, 270])],
                       meta_train=True,
                       download=True)
# Split each class into 5 support ("train") and 15 query ("test") examples per task.
dataset = ClassSplitter(dataset, shuffle=True, num_train_per_class=5, num_test_per_class=15)
dataloader = BatchMetaDataLoader(dataset, batch_size=16, num_workers=4)
# NOTE(review): no break below -- this prints shapes for every task batch
# until the loader is exhausted.
for batch in dataloader:
    train_inputs, train_targets = batch["train"]
    print('Train inputs shape: {0}'.format(train_inputs.shape))  # torch.Size([16, 25, 3, 84, 84])
    print('Train targets shape: {0}'.format(train_targets.shape))  # torch.Size([16, 25])
    test_inputs, test_targets = batch["test"]
    print('Test inputs shape: {0}'.format(test_inputs.shape))  # torch.Size([16, 75, 3, 84, 84])
    print('Test targets shape: {0}'.format(test_targets.shape))  # torch.Size([16, 75])
| {"/learningTolearn/method/metric/__init__.py": ["/learningTolearn/method/metric/prototype.py"], "/learningTolearn/train_protonet.py": ["/learningTolearn/dataset/__init__.py", "/learningTolearn/method/metric/__init__.py", "/learningTolearn/backbone/__init__.py"], "/learningTolearn/util/__init__.py": ["/learningTolearn/util/utils.py"], "/learningTolearn/method/__init__.py": ["/learningTolearn/method/optimization/__init__.py", "/learningTolearn/method/metric/__init__.py"], "/learningTolearn/method/optimization/meta_sgd.py": ["/learningTolearn/method/optimization/__init__.py"], "/learningTolearn/backbone/__init__.py": ["/learningTolearn/backbone/conv.py", "/learningTolearn/backbone/mlp.py"], "/learningTolearn/method/optimization/__init__.py": ["/learningTolearn/method/optimization/meta_sgd.py"]} |
76,377 | ximinng/Learning-To-Learn | refs/heads/master | /learningTolearn/__init__.py | # -*- coding: utf-8 -*-
"""
Description :
Author : xxm
"""
import sys
import os

# Make the repository root importable regardless of where the interpreter
# was started from (fixes "module not found" when running scripts directly).
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)

# Also expose both the repo root and this package directory on sys.path.
path = {
    'BASE_PATH': os.path.abspath(os.path.dirname(os.path.dirname(__file__))),
    'LEARNING_TO_LEARN': os.path.dirname(os.path.abspath(__file__)),
}
for k, v in path.items():
    sys.path.append(v)

# Recipe to paste at the top of a script that hits "module not found":
#   import sys
#   import os
#   curPath = os.path.abspath(os.path.dirname(__file__))
#   rootPath = os.path.split(curPath)[0]
#   sys.path.append(rootPath)
| {"/learningTolearn/method/metric/__init__.py": ["/learningTolearn/method/metric/prototype.py"], "/learningTolearn/train_protonet.py": ["/learningTolearn/dataset/__init__.py", "/learningTolearn/method/metric/__init__.py", "/learningTolearn/backbone/__init__.py"], "/learningTolearn/util/__init__.py": ["/learningTolearn/util/utils.py"], "/learningTolearn/method/__init__.py": ["/learningTolearn/method/optimization/__init__.py", "/learningTolearn/method/metric/__init__.py"], "/learningTolearn/method/optimization/meta_sgd.py": ["/learningTolearn/method/optimization/__init__.py"], "/learningTolearn/backbone/__init__.py": ["/learningTolearn/backbone/conv.py", "/learningTolearn/backbone/mlp.py"], "/learningTolearn/method/optimization/__init__.py": ["/learningTolearn/method/optimization/meta_sgd.py"]} |
76,378 | ximinng/Learning-To-Learn | refs/heads/master | /learningTolearn/method/metric/prototype.py | # -*- coding: utf-8 -*-
"""
Description :
Author : xxm
"""
import torch
import torch.nn.functional as F
class PrototypicalNetwork(object):
    """Placeholder for a class-based prototypical-network API.

    Currently unused; the module exposes the functional API below instead.
    """

    def __init__(self):
        """No state to initialize yet."""
def get_num_samples(targets, num_classes, dtype=None):
    """Count, per task, how many examples fall into each class.

    :param targets: `(batch_size, num_examples)` LongTensor of class indices.
    :param num_classes: number of classes per task.
    :param dtype: optional dtype of the returned counts.
    :return: `(batch_size, num_classes)` tensor of per-class counts.
    """
    with torch.no_grad():
        ones = torch.ones_like(targets, dtype=dtype)
        counts = ones.new_zeros((targets.size(0), num_classes))
        counts.scatter_add_(1, targets, ones)
    return counts
def get_prototypes(embeddings, targets, num_classes):
    """Compute per-class prototypes (mean support embedding per class).

    Parameters
    ----------
    embeddings : `torch.FloatTensor` instance
        Support-point embeddings, shape `(batch_size, num_examples, embedding_size)`.
    targets : `torch.LongTensor` instance
        Support-point labels, shape `(batch_size, num_examples)`.
    num_classes : int
        Number of classes in the task (`num_ways`).

    Returns
    -------
    prototypes : `torch.FloatTensor` instance
        Per-class prototypes, shape `(batch_size, num_classes, embedding_size)`.
    """
    batch_size = embeddings.size(0)
    embedding_size = embeddings.size(-1)

    # Per-class support counts (inlined helper), clamped to >= 1 so empty
    # classes do not divide by zero.
    with torch.no_grad():
        ones = torch.ones_like(targets, dtype=embeddings.dtype)
        counts = ones.new_zeros((batch_size, num_classes))
        counts.scatter_add_(1, targets, ones)
    counts = torch.clamp(counts.unsqueeze(-1), min=1)

    # Sum embeddings into their class slot, then divide by the counts.
    prototypes = embeddings.new_zeros((batch_size, num_classes, embedding_size))
    idx = targets.unsqueeze(-1).expand_as(embeddings)
    prototypes.scatter_add_(1, idx, embeddings)
    return prototypes / counts
def prototypical_loss(prototypes, embeddings, targets, **kwargs):
    """Negative log-likelihood loss of the prototypical network on query points.

    Classes are scored by negative squared Euclidean distance from each
    query embedding to each prototype, then fed to cross-entropy.

    Parameters
    ----------
    prototypes : `torch.FloatTensor` instance
        Per-class prototypes, shape `(batch_size, num_classes, embedding_size)`.
    embeddings : `torch.FloatTensor` instance
        Query-point embeddings, shape `(batch_size, num_examples, embedding_size)`.
    targets : `torch.LongTensor` instance
        Query-point labels, shape `(batch_size, num_examples)`.

    Returns
    -------
    loss : `torch.FloatTensor` instance
        The negative log-likelihood on the query points.
    """
    diff = prototypes.unsqueeze(2) - embeddings.unsqueeze(1)
    # [batch_size, num_classes, num_examples]
    sq_dist = (diff ** 2).sum(dim=-1)
    return F.cross_entropy(-sq_dist, targets, **kwargs)
def get_accuracy(prototypes, embeddings, targets):
    """Mean nearest-prototype classification accuracy on query points.

    Parameters
    ----------
    prototypes : `torch.FloatTensor` instance
        Per-class prototypes, shape `(meta_batch_size, num_classes, embedding_size)`.
    embeddings : `torch.FloatTensor` instance
        Query-point embeddings, shape `(meta_batch_size, num_examples, embedding_size)`.
    targets : `torch.LongTensor` instance
        Query-point labels, shape `(meta_batch_size, num_examples)`.

    Returns
    -------
    accuracy : `torch.FloatTensor` instance
        Mean accuracy on the query points.
    """
    # Squared distances: [batch, num_examples, num_classes]
    sq_dist = torch.sum(
        (prototypes.unsqueeze(1) - embeddings.unsqueeze(2)) ** 2, dim=-1)
    # Predict the class of the nearest prototype.
    _, predictions = torch.min(sq_dist, dim=-1)
    return predictions.eq(targets).float().mean()
| {"/learningTolearn/method/metric/__init__.py": ["/learningTolearn/method/metric/prototype.py"], "/learningTolearn/train_protonet.py": ["/learningTolearn/dataset/__init__.py", "/learningTolearn/method/metric/__init__.py", "/learningTolearn/backbone/__init__.py"], "/learningTolearn/util/__init__.py": ["/learningTolearn/util/utils.py"], "/learningTolearn/method/__init__.py": ["/learningTolearn/method/optimization/__init__.py", "/learningTolearn/method/metric/__init__.py"], "/learningTolearn/method/optimization/meta_sgd.py": ["/learningTolearn/method/optimization/__init__.py"], "/learningTolearn/backbone/__init__.py": ["/learningTolearn/backbone/conv.py", "/learningTolearn/backbone/mlp.py"], "/learningTolearn/method/optimization/__init__.py": ["/learningTolearn/method/optimization/meta_sgd.py"]} |
76,379 | ximinng/Learning-To-Learn | refs/heads/master | /learningTolearn/dataset/__init__.py | # -*- coding: utf-8 -*-
"""
Description :
Author : xxm
"""
from .dataloader import get_benchmark_by_name

# __all__ entries must be *strings*, not the objects themselves: a star
# import (`from ... import *`) raises TypeError on non-string items, and
# tooling ignores object entries. (Matches the string style used by
# method/metric/__init__.py.)
__all__ = ['get_benchmark_by_name']
| {"/learningTolearn/method/metric/__init__.py": ["/learningTolearn/method/metric/prototype.py"], "/learningTolearn/train_protonet.py": ["/learningTolearn/dataset/__init__.py", "/learningTolearn/method/metric/__init__.py", "/learningTolearn/backbone/__init__.py"], "/learningTolearn/util/__init__.py": ["/learningTolearn/util/utils.py"], "/learningTolearn/method/__init__.py": ["/learningTolearn/method/optimization/__init__.py", "/learningTolearn/method/metric/__init__.py"], "/learningTolearn/method/optimization/meta_sgd.py": ["/learningTolearn/method/optimization/__init__.py"], "/learningTolearn/backbone/__init__.py": ["/learningTolearn/backbone/conv.py", "/learningTolearn/backbone/mlp.py"], "/learningTolearn/method/optimization/__init__.py": ["/learningTolearn/method/optimization/meta_sgd.py"]} |
76,380 | ximinng/Learning-To-Learn | refs/heads/master | /learningTolearn/util/__init__.py | # -*- coding: utf-8 -*-
"""
Description :
Author : xxm
"""
from .utils import ToTensor1D, tensors_to_device, compute_accuracy

# __all__ entries must be *strings*, not the objects themselves: a star
# import (`from ... import *`) raises TypeError on non-string items.
# (Matches the string style used by method/metric/__init__.py.)
__all__ = [
    'compute_accuracy', 'ToTensor1D', 'tensors_to_device'
]
| {"/learningTolearn/method/metric/__init__.py": ["/learningTolearn/method/metric/prototype.py"], "/learningTolearn/train_protonet.py": ["/learningTolearn/dataset/__init__.py", "/learningTolearn/method/metric/__init__.py", "/learningTolearn/backbone/__init__.py"], "/learningTolearn/util/__init__.py": ["/learningTolearn/util/utils.py"], "/learningTolearn/method/__init__.py": ["/learningTolearn/method/optimization/__init__.py", "/learningTolearn/method/metric/__init__.py"], "/learningTolearn/method/optimization/meta_sgd.py": ["/learningTolearn/method/optimization/__init__.py"], "/learningTolearn/backbone/__init__.py": ["/learningTolearn/backbone/conv.py", "/learningTolearn/backbone/mlp.py"], "/learningTolearn/method/optimization/__init__.py": ["/learningTolearn/method/optimization/meta_sgd.py"]} |
76,381 | ximinng/Learning-To-Learn | refs/heads/master | /train_leo.py | # -*- coding: utf-8 -*-
"""
Description : Meta-Learning with Latent Embedding Optimization(LEO)
Author : xxm
"""
| {"/learningTolearn/method/metric/__init__.py": ["/learningTolearn/method/metric/prototype.py"], "/learningTolearn/train_protonet.py": ["/learningTolearn/dataset/__init__.py", "/learningTolearn/method/metric/__init__.py", "/learningTolearn/backbone/__init__.py"], "/learningTolearn/util/__init__.py": ["/learningTolearn/util/utils.py"], "/learningTolearn/method/__init__.py": ["/learningTolearn/method/optimization/__init__.py", "/learningTolearn/method/metric/__init__.py"], "/learningTolearn/method/optimization/meta_sgd.py": ["/learningTolearn/method/optimization/__init__.py"], "/learningTolearn/backbone/__init__.py": ["/learningTolearn/backbone/conv.py", "/learningTolearn/backbone/mlp.py"], "/learningTolearn/method/optimization/__init__.py": ["/learningTolearn/method/optimization/meta_sgd.py"]} |
76,382 | ximinng/Learning-To-Learn | refs/heads/master | /learningTolearn/method/__init__.py | # -*- coding: utf-8 -*-
"""
Description :
Author : xxm
"""
from .optimization import *
from .metric import *
__all__ = ['optimization', 'metric']
| {"/learningTolearn/method/metric/__init__.py": ["/learningTolearn/method/metric/prototype.py"], "/learningTolearn/train_protonet.py": ["/learningTolearn/dataset/__init__.py", "/learningTolearn/method/metric/__init__.py", "/learningTolearn/backbone/__init__.py"], "/learningTolearn/util/__init__.py": ["/learningTolearn/util/utils.py"], "/learningTolearn/method/__init__.py": ["/learningTolearn/method/optimization/__init__.py", "/learningTolearn/method/metric/__init__.py"], "/learningTolearn/method/optimization/meta_sgd.py": ["/learningTolearn/method/optimization/__init__.py"], "/learningTolearn/backbone/__init__.py": ["/learningTolearn/backbone/conv.py", "/learningTolearn/backbone/mlp.py"], "/learningTolearn/method/optimization/__init__.py": ["/learningTolearn/method/optimization/meta_sgd.py"]} |
76,383 | ximinng/Learning-To-Learn | refs/heads/master | /learningTolearn/method/graph/egnn.py | # -*- coding: utf-8 -*-
"""
Description : Edge-Labeling Graph Neural Network for Few-shot Learning
Author : xxm
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
class NodeUpdateNetwork(nn.Module):
    """EGNN node-update block.

    Aggregates neighbor node features along the two edge-feature channels
    (similarity / dissimilarity), concatenates the aggregates with the
    original node features, and transforms the result with a stack of
    1x1 conv -> BatchNorm -> LeakyReLU layers.
    """

    def __init__(self,
                 in_features,
                 num_features,  # 96
                 ratio=[2, 1],
                 dropout=0.0,
                 device=None):
        super(NodeUpdateNetwork, self).__init__()
        # set size
        self.in_features = in_features
        self.num_features_list = [num_features * r for r in ratio]  # [192, 96]
        self.dropout = dropout
        self.device = device

        # 1x1-conv stack; the first layer consumes the node feature plus the
        # two aggregated edge views, hence in_features * 3 input channels.
        stack = OrderedDict()
        prev_channels = self.in_features * 3
        last = len(self.num_features_list) - 1
        for idx, width in enumerate(self.num_features_list):
            stack['conv{}'.format(idx)] = nn.Conv2d(
                in_channels=prev_channels,
                out_channels=width,
                kernel_size=1,
                bias=False)
            stack['norm{}'.format(idx)] = nn.BatchNorm2d(num_features=width)
            stack['relu{}'.format(idx)] = nn.LeakyReLU()
            if self.dropout > 0 and idx == last:
                stack['drop{}'.format(idx)] = nn.Dropout2d(p=self.dropout)
            prev_channels = width
        self.network = nn.Sequential(stack)

    def forward(self, node_feat, edge_feat):
        # node_feat: [num_tasks, num_data, in_features]
        # edge_feat: [num_tasks, 2, num_data, num_data]
        num_tasks = node_feat.size(0)
        num_data = node_feat.size(1)

        # Zero out self-edges, then L1-normalize each row of both channels.
        diag_mask = 1.0 - torch.eye(num_data).unsqueeze(0).unsqueeze(0).repeat(num_tasks, 2, 1, 1).to(self.device)
        edge_feat = F.normalize(edge_feat * diag_mask, p=1, dim=-1)

        # Stack the two edge channels side by side and aggregate node features.
        aggr_feat = torch.bmm(torch.cat(torch.split(edge_feat, 1, 1), 2).squeeze(1), node_feat)

        # Concatenate node features with both aggregated views -> channels-first.
        node_feat = torch.cat([node_feat, torch.cat(aggr_feat.split(num_data, 1), -1)], -1).transpose(1, 2)

        # The 1x1-conv stack expects [tasks, channels, nodes, 1].
        node_feat = self.network(node_feat.unsqueeze(-1)).transpose(1, 2).squeeze(-1)
        return node_feat
class EdgeUpdateNetwork(nn.Module):
    """Node-to-edge update of EGNN.

    Pairwise |x_i - x_j| features are mapped to a similarity score by a
    1x1-conv metric network, combined with the previous edge map, and
    re-normalized so that sim + dsim sums to 1 per edge.
    """

    def __init__(self,
                 in_features,
                 num_features,
                 ratio=(2, 2, 1, 1),
                 separate_dissimilarity=False,
                 dropout=0.0,
                 device=None):
        """
        :param in_features: node feature size (channels of |x_i - x_j|)
        :param num_features: base width of the metric network
        :param ratio: per-layer width multipliers (tuple default avoids the
            shared-mutable-default pitfall)
        :param separate_dissimilarity: learn a second metric network for
            dissimilarity instead of using 1 - sim
        :param dropout: dropout prob applied after every layer
        :param device: device for the masks built in forward(). The original
            read an undefined global `tt.arg.device`, which raised NameError
            on every forward pass.
        """
        super(EdgeUpdateNetwork, self).__init__()
        # set size
        self.in_features = in_features
        self.num_features_list = [num_features * r for r in ratio]
        self.separate_dissimilarity = separate_dissimilarity
        self.dropout = dropout
        self.device = device
        # similarity metric network (and optional separate dissimilarity one);
        # both share the builder so the two branches cannot drift apart
        self.sim_network = self._build_metric_network()
        if self.separate_dissimilarity:
            self.dsim_network = self._build_metric_network()

    def _build_metric_network(self):
        """Build one 1x1-conv metric network ending in a 1-channel score map."""
        layer_list = OrderedDict()
        for l in range(len(self.num_features_list)):
            layer_list['conv{}'.format(l)] = nn.Conv2d(
                in_channels=self.num_features_list[l - 1] if l > 0 else self.in_features,
                out_channels=self.num_features_list[l],
                kernel_size=1,
                bias=False)
            layer_list['norm{}'.format(l)] = nn.BatchNorm2d(num_features=self.num_features_list[l])
            layer_list['relu{}'.format(l)] = nn.LeakyReLU()
            if self.dropout > 0:
                # original mixed Dropout2d (sim) and Dropout (dsim); unified to 2d
                layer_list['drop{}'.format(l)] = nn.Dropout2d(p=self.dropout)
        layer_list['conv_out'] = nn.Conv2d(in_channels=self.num_features_list[-1],
                                           out_channels=1,
                                           kernel_size=1)
        return nn.Sequential(layer_list)

    def forward(self, node_feat, edge_feat):
        """
        :param node_feat: (num_tasks, num_data, feat)
        :param edge_feat: (num_tasks, 2, num_data, num_data)
        :return: updated edge maps (num_tasks, 2, num_data, num_data)
        """
        # pairwise absolute difference, laid out channels-first for the convs
        x_i = node_feat.unsqueeze(2)
        x_j = torch.transpose(x_i, 1, 2)
        x_ij = torch.abs(x_i - x_j)
        x_ij = torch.transpose(x_ij, 1, 3)
        # similarity / dissimilarity scores (torch.sigmoid; F.sigmoid is deprecated)
        sim_val = torch.sigmoid(self.sim_network(x_ij))
        if self.separate_dissimilarity:
            dsim_val = torch.sigmoid(self.dsim_network(x_ij))
        else:
            dsim_val = 1.0 - sim_val
        diag_mask = 1.0 - torch.eye(node_feat.size(1)).unsqueeze(0).unsqueeze(0).repeat(node_feat.size(0), 2, 1, 1).to(
            self.device)
        edge_feat = edge_feat * diag_mask
        merge_sum = torch.sum(edge_feat, -1, True)
        # set diagonal as zero and normalize, preserving each row's total mass
        edge_feat = F.normalize(torch.cat([sim_val, dsim_val], 1) * edge_feat, p=1, dim=-1) * merge_sum
        # force self-edges: similarity 1, dissimilarity 0
        force_edge_feat = torch.cat(
            (torch.eye(node_feat.size(1)).unsqueeze(0), torch.zeros(node_feat.size(1), node_feat.size(1)).unsqueeze(0)),
            0).unsqueeze(0).repeat(node_feat.size(0), 1, 1, 1).to(self.device)
        edge_feat = edge_feat + force_edge_feat
        edge_feat = edge_feat + 1e-6
        # renormalize so the sim/dsim pair sums to 1 for every edge
        edge_feat = edge_feat / torch.sum(edge_feat, dim=1).unsqueeze(1).repeat(1, 2, 1, 1)
        return edge_feat
class GraphNetwork(nn.Module):
    """Full EGNN: alternating node-update / edge-update layers.

    Each layer refines node features from the current edge maps, then
    re-estimates the edge maps from the refined nodes. The edge maps of
    every layer are returned so a loss can be attached at each depth.
    Submodules are registered with add_module under stable names
    ('edge2node_net{l}' / 'node2edge_net{l}') and retrieved from
    self._modules in forward().
    """
    def __init__(self,
                 in_features,  # embedding size of the backbone output
                 node_features,  # width of the node-update networks
                 edge_features,  # width of the edge-update (metric) networks
                 num_layers,  # number of update rounds; default used upstream is 3
                 dropout=0.0):
        super(GraphNetwork, self).__init__()
        # set size
        self.in_features = in_features
        self.node_features = node_features
        self.edge_features = edge_features
        self.num_layers = num_layers
        self.dropout = dropout
        # for each layer: build one node-update and one edge-update network.
        # NOTE(review): no device is passed to the sub-networks here, so their
        # internal masks stay on the default device — confirm intended.
        for l in range(self.num_layers):
            # set edge to node (first layer consumes the raw embedding size)
            edge2node_net = NodeUpdateNetwork(in_features=self.in_features if l == 0 else self.node_features,
                                              num_features=self.node_features,
                                              dropout=self.dropout if l < self.num_layers - 1 else 0.0)
            # set node to edge (no dropout on the final layer)
            node2edge_net = EdgeUpdateNetwork(in_features=self.node_features,
                                              num_features=self.edge_features,
                                              separate_dissimilarity=False,
                                              dropout=self.dropout if l < self.num_layers - 1 else 0.0)
            self.add_module('edge2node_net{}'.format(l), edge2node_net)
            self.add_module('node2edge_net{}'.format(l), node2edge_net)

    # forward
    def forward(self, node_feat, edge_feat):
        """Run all layers; return the list of per-layer edge maps."""
        # for each layer
        edge_feat_list = []
        for l in range(self.num_layers):
            # (1) edge to node
            node_feat = self._modules['edge2node_net{}'.format(l)](node_feat, edge_feat)
            # (2) node to edge
            edge_feat = self._modules['node2edge_net{}'.format(l)](node_feat, edge_feat)
            # save edge feature
            edge_feat_list.append(edge_feat)
        return edge_feat_list
| {"/learningTolearn/method/metric/__init__.py": ["/learningTolearn/method/metric/prototype.py"], "/learningTolearn/train_protonet.py": ["/learningTolearn/dataset/__init__.py", "/learningTolearn/method/metric/__init__.py", "/learningTolearn/backbone/__init__.py"], "/learningTolearn/util/__init__.py": ["/learningTolearn/util/utils.py"], "/learningTolearn/method/__init__.py": ["/learningTolearn/method/optimization/__init__.py", "/learningTolearn/method/metric/__init__.py"], "/learningTolearn/method/optimization/meta_sgd.py": ["/learningTolearn/method/optimization/__init__.py"], "/learningTolearn/backbone/__init__.py": ["/learningTolearn/backbone/conv.py", "/learningTolearn/backbone/mlp.py"], "/learningTolearn/method/optimization/__init__.py": ["/learningTolearn/method/optimization/meta_sgd.py"]} |
76,384 | ximinng/Learning-To-Learn | refs/heads/master | /learningTolearn/method/generation/__init__.py | # -*- coding: utf-8 -*-
"""
Description :
Author : xxm
""" | {"/learningTolearn/method/metric/__init__.py": ["/learningTolearn/method/metric/prototype.py"], "/learningTolearn/train_protonet.py": ["/learningTolearn/dataset/__init__.py", "/learningTolearn/method/metric/__init__.py", "/learningTolearn/backbone/__init__.py"], "/learningTolearn/util/__init__.py": ["/learningTolearn/util/utils.py"], "/learningTolearn/method/__init__.py": ["/learningTolearn/method/optimization/__init__.py", "/learningTolearn/method/metric/__init__.py"], "/learningTolearn/method/optimization/meta_sgd.py": ["/learningTolearn/method/optimization/__init__.py"], "/learningTolearn/backbone/__init__.py": ["/learningTolearn/backbone/conv.py", "/learningTolearn/backbone/mlp.py"], "/learningTolearn/method/optimization/__init__.py": ["/learningTolearn/method/optimization/meta_sgd.py"]} |
76,385 | ximinng/Learning-To-Learn | refs/heads/master | /learningTolearn/method/optimization/meta_sgd.py | # -*- coding: utf-8 -*-
"""
Description : Meta-SGD
Author : xxm
"""
import torch.nn.functional as F
from learningTolearn.method.optimization import ModelAgnosticMetaLearning
__all__ = ['MetaSGD']
class MetaSGD(ModelAgnosticMetaLearning):
    """Meta-SGD: MAML with a learnable per-parameter inner-loop step size.

    Thin configuration wrapper around ModelAgnosticMetaLearning: fixes
    `learn_step_size=True` and `per_param_step_size=True` so the inner-loop
    learning rates are meta-learned alongside the initialization.
    """

    def __init__(self, model, optimizer=None, init_step_size=0.1,
                 num_adaptation_steps=1, scheduler=None,
                 loss_function=F.cross_entropy, device=None):
        super().__init__(model,
                         optimizer=optimizer,
                         step_size=init_step_size,
                         learn_step_size=True,
                         per_param_step_size=True,
                         num_adaptation_steps=num_adaptation_steps,
                         scheduler=scheduler,
                         loss_function=loss_function,
                         device=device)
| {"/learningTolearn/method/metric/__init__.py": ["/learningTolearn/method/metric/prototype.py"], "/learningTolearn/train_protonet.py": ["/learningTolearn/dataset/__init__.py", "/learningTolearn/method/metric/__init__.py", "/learningTolearn/backbone/__init__.py"], "/learningTolearn/util/__init__.py": ["/learningTolearn/util/utils.py"], "/learningTolearn/method/__init__.py": ["/learningTolearn/method/optimization/__init__.py", "/learningTolearn/method/metric/__init__.py"], "/learningTolearn/method/optimization/meta_sgd.py": ["/learningTolearn/method/optimization/__init__.py"], "/learningTolearn/backbone/__init__.py": ["/learningTolearn/backbone/conv.py", "/learningTolearn/backbone/mlp.py"], "/learningTolearn/method/optimization/__init__.py": ["/learningTolearn/method/optimization/meta_sgd.py"]} |
76,386 | ximinng/Learning-To-Learn | refs/heads/master | /learningTolearn/backbone/__init__.py | # -*- coding: utf-8 -*-
"""
Description :
Author : xxm
"""
from .conv import ModelConv, ModelConvOmniglot, ModelConvMiniImagenet
from .mlp import ModelMLPSinusoid
from .resnet import *
# Public API of the backbone package. __all__ must contain *strings*: the
# original listed the conv/MLP class objects themselves, which breaks
# `from learningTolearn.backbone import *` (the import machinery expects
# names, not objects) and tooling that inspects __all__.
__all__ = [
    'ModelConv', 'ModelConvOmniglot', 'ModelConvMiniImagenet',
    'ModelMLPSinusoid',
    'resnet10', 'resnet12', 'resnet14', 'resnetbc14b', 'resnet16', 'resnet18_wd4', 'resnet18_wd2',
    'resnet18_w3d4', 'resnet18', 'resnet26', 'resnetbc26b', 'resnet34', 'resnetbc38b', 'resnet50', 'resnet50b',
    'resnet101', 'resnet101b', 'resnet152', 'resnet152b', 'resnet200', 'resnet200b'
]
| {"/learningTolearn/method/metric/__init__.py": ["/learningTolearn/method/metric/prototype.py"], "/learningTolearn/train_protonet.py": ["/learningTolearn/dataset/__init__.py", "/learningTolearn/method/metric/__init__.py", "/learningTolearn/backbone/__init__.py"], "/learningTolearn/util/__init__.py": ["/learningTolearn/util/utils.py"], "/learningTolearn/method/__init__.py": ["/learningTolearn/method/optimization/__init__.py", "/learningTolearn/method/metric/__init__.py"], "/learningTolearn/method/optimization/meta_sgd.py": ["/learningTolearn/method/optimization/__init__.py"], "/learningTolearn/backbone/__init__.py": ["/learningTolearn/backbone/conv.py", "/learningTolearn/backbone/mlp.py"], "/learningTolearn/method/optimization/__init__.py": ["/learningTolearn/method/optimization/meta_sgd.py"]} |
76,387 | ximinng/Learning-To-Learn | refs/heads/master | /learningTolearn/method/optimization/__init__.py | # -*- coding: utf-8 -*-
"""
Description :
Author : xxm
"""
from .maml import ModelAgnosticMetaLearning, FOMAML
from .meta_sgd import MetaSGD
__all__ = ['ModelAgnosticMetaLearning', 'FOMAML', 'MetaSGD']
| {"/learningTolearn/method/metric/__init__.py": ["/learningTolearn/method/metric/prototype.py"], "/learningTolearn/train_protonet.py": ["/learningTolearn/dataset/__init__.py", "/learningTolearn/method/metric/__init__.py", "/learningTolearn/backbone/__init__.py"], "/learningTolearn/util/__init__.py": ["/learningTolearn/util/utils.py"], "/learningTolearn/method/__init__.py": ["/learningTolearn/method/optimization/__init__.py", "/learningTolearn/method/metric/__init__.py"], "/learningTolearn/method/optimization/meta_sgd.py": ["/learningTolearn/method/optimization/__init__.py"], "/learningTolearn/backbone/__init__.py": ["/learningTolearn/backbone/conv.py", "/learningTolearn/backbone/mlp.py"], "/learningTolearn/method/optimization/__init__.py": ["/learningTolearn/method/optimization/meta_sgd.py"]} |
76,388 | ximinng/Learning-To-Learn | refs/heads/master | /learningTolearn/util/utils.py | # -*- coding: utf-8 -*-
"""
Description :
Author : xxm
"""
import torch
from collections import OrderedDict
def compute_accuracy(logits, targets):
    """Return the fraction of rows whose argmax class matches `targets` (used by MAML)."""
    with torch.no_grad():
        predicted_classes = logits.argmax(dim=1)
        correct = (predicted_classes == targets).float()
        return correct.mean().item()
def tensors_to_device(tensors, device=torch.device('cpu')):
    """Recursively move a tensor, or a list/tuple/dict of tensors, to `device`.

    Container types are preserved (a list stays a list, an OrderedDict stays
    an OrderedDict). Raises NotImplementedError for unsupported types.
    """
    if isinstance(tensors, torch.Tensor):
        return tensors.to(device=device)
    if isinstance(tensors, (list, tuple)):
        moved = [tensors_to_device(item, device=device) for item in tensors]
        return type(tensors)(moved)
    if isinstance(tensors, (dict, OrderedDict)):
        moved_items = [(key, tensors_to_device(value, device=device))
                       for (key, value) in tensors.items()]
        return type(tensors)(moved_items)
    raise NotImplementedError()
class ToTensor1D(object):
    """Convert a `numpy.ndarray` of any rank to a `float32` torch tensor.

    Unlike torchvision's `ToTensor`, no dimension reordering is performed:
    the array is cast to `float32` and wrapped as-is.
    """

    def __call__(self, array):
        as_float32 = array.astype('float32')
        return torch.from_numpy(as_float32)

    def __repr__(self):
        return '{}()'.format(self.__class__.__name__)
| {"/learningTolearn/method/metric/__init__.py": ["/learningTolearn/method/metric/prototype.py"], "/learningTolearn/train_protonet.py": ["/learningTolearn/dataset/__init__.py", "/learningTolearn/method/metric/__init__.py", "/learningTolearn/backbone/__init__.py"], "/learningTolearn/util/__init__.py": ["/learningTolearn/util/utils.py"], "/learningTolearn/method/__init__.py": ["/learningTolearn/method/optimization/__init__.py", "/learningTolearn/method/metric/__init__.py"], "/learningTolearn/method/optimization/meta_sgd.py": ["/learningTolearn/method/optimization/__init__.py"], "/learningTolearn/backbone/__init__.py": ["/learningTolearn/backbone/conv.py", "/learningTolearn/backbone/mlp.py"], "/learningTolearn/method/optimization/__init__.py": ["/learningTolearn/method/optimization/meta_sgd.py"]} |
76,400 | s0eger/working_title | refs/heads/master | /flask_app/app.py | from flask import Flask, request, render_template
import pandas as pd
import numpy as np
from words_to_vals import NMF_Time
from work_with_counts import Count_Worker
import json
import matplotlib.pyplot as plt
import pickle
from io import BytesIO
import mpld3
from mpld3 import plugins
app = Flask(__name__)
#TODO: grab images to replace template images
#TODO: center forms on index page
# Want to say that topic 0 is cw.trending_order[0]
@app.route('/')
def index():
    # Landing page: renders the topic drop-downs. TOPIC_LIST (includes the
    # leading 'All Topics' entry) feeds the counts view; TOPIC_ARTICLE
    # (topics only) feeds the article-list view. Both are module-level
    # globals built in the __main__ block.
    return render_template('index.html', total_topics=TOPIC_LIST, tot_topics=TOPIC_ARTICLE)
@app.route('/topic_counts/')
def topic_counts():
    """Render the count plot page for the topic chosen in the drop-down.

    Side effect: stores the selected topic's index on the shared Count_Worker
    (cw.web_index) so the /word_plot.png and /word_cloud_list endpoints know
    which topic to render; -1 means 'All Topics'.
    """
    c_topic = request.args.get('topic_select')
    if c_topic == None:
        # no selection: fall back to 'All Topics'
        c_topic = TOPIC_LIST[0]
        cw.web_index = -1
        topic_word = 'All'
    list_index = -1
    # NOTE(review): request.args.get returns str or None, so this int branch
    # appears unreachable — confirm whether an integer entry point was intended.
    if type(c_topic) == int:
        cw.web_index = cw.trending_order[c_topic]
        c_topic = TOPIC_LIST[c_topic+1]
        list_index = cw.web_index
    else:
        # map the 'Topic N' label back to its position (minus the 'All' entry)
        cw.web_index = TOPIC_LIST.index(c_topic) - 1
        list_index = cw.web_index
    if cw.web_index >= 0:
        # translate display order into the model's topic index
        cw.web_index = cw.trending_order[cw.web_index]
        topic_word = cw.topics[cw.web_index][0]
    else:
        topic_word = 'All'
    return render_template('topic_counts.html', plot_title="Counts for Topic {1} ('{0}')".format(topic_word, cw.web_index), current_topic = list_index, total_topics = TOPIC_LIST)
@app.route('/word_cloud_list')
def word_cloud_words():
    """Return a JSON word list for jQCloud: the tokens of the currently
    selected topic (cw.web_index) with compressed weights and, per token,
    a link to the token-topics page.

    Weights are flattened with a **0.1 power so jQCloud's font scaling is
    not dominated by the single heaviest token. Returns a single empty dict
    when no topic is selected (cw.web_index < 0).
    """
    word_list = [{}]
    if cw.web_index >= 0:
        # TODO: better scaling of the weights so the cloud looks balanced
        # TODO: derive host/port from the Flask request instead of hard-coding
        word_list = [{'text': k,
                      'weight': (v ** 0.1),
                      'link': 'http://0.0.0.0:8080/token_topics/?c_token={}'.format(k)}
                     for k, v in cw.dc[cw.web_index].items()]
    # (removed leftover debug print of len(word_list))
    return json.dumps(word_list)
@app.route('/word_plot.png')
def word_plot():
    """Render a PNG of recent topic activity.

    With a topic selected (cw.web_index >= 0): the smoothed counts over the
    last week, the one-day forecast, and the fitted trend line for that topic.
    With no topic selected: the recent + forecast curves of the ten
    top-trending topics on a single plot.

    Returns the raw PNG bytes with an image/png content type.
    """
    # TODO: limit data to be in range of 'recent' news
    # TODO: generate subplots for acceleration on one and counts on the other
    pb = 42  # samples looking back: one week at a 4-hour sampling period
    if cw.web_index >= 0:
        plt.figure(figsize=(12, 5))
        counts_back = cw.smooth_data[cw.web_index][-pb:]
        back_times = cw.times[-pb:]
        counts_fwd = cw.predicted_values[cw.web_index]
        plt.plot(back_times, counts_back, 'r', label='Current')
        plt.plot(cw.predicted_times, counts_fwd, 'r--', label='Predicted')
        plt.plot(cw.trend_times, cw.trend_points[cw.web_index], 'g-.', alpha=0.3, linewidth=3.0, label='Trend')
        plt.axvline(x=cw.predicted_times[0], c='k', ls=':', label='Today')
        plt.legend()
        plt.xlabel('Date')
        plt.ylabel('Topic Article Counts')
        plt.grid(True, alpha=0.6)
        image = BytesIO()
        plt.savefig(image)
        return image.getvalue(), 200, {'Content-Type': 'image/png'}
    else:
        # Fixes from review: `pb` was undefined on this branch (NameError),
        # and the loop ignored its index so the same topic was drawn 10 times.
        plt.figure(figsize=(12, 5))
        back_times = cw.times[-pb:]
        for topic_idx in cw.trending_order[:10]:
            counts_back = cw.smooth_data[topic_idx][-pb:]
            counts_fwd = cw.predicted_values[topic_idx]
            plt.plot(back_times, counts_back, label='Topic {} Current'.format(topic_idx))
            plt.plot(cw.predicted_times, counts_fwd, '--', label='Topic {} Predicted'.format(topic_idx))
        plt.legend()
        plt.xlabel('Date')
        plt.ylabel('Topic Article Counts')
        plt.grid(True, alpha=0.6)
        image = BytesIO()
        plt.savefig(image)
        return image.getvalue(), 200, {'Content-Type': 'image/png'}
@app.route('/token_topics/')
def related_topics():
    """List every topic containing the given token.

    For each matching topic shows its label, the token's 1-based rank within
    that topic's top-n words, and the topic's top 5 words. Topic labels link
    back to the word-cloud/activity page.
    """
    c_token = request.args.get('c_token')
    # request.args.get returns str or None, so the type check is a belt-and-
    # suspenders guard; None (missing param) is the realistic failure case
    if c_token == None or type(c_token) != str:
        return ''' Invalid token provided '''
    c_token = c_token.lower()
    # NOTE(review): iterates cw.topics.items() — assumes topics is a mapping
    # of key -> word array on Count_Worker; confirm against work_with_counts
    t_list = [["Topic {}".format(key), 1 + np.argwhere(topic == c_token)[0][0], ', '.join(topic[:5])] for key, topic in cw.topics.items() if c_token in topic]
    return render_template('topics_from_token.html', token=c_token, topic_list=t_list)
    # return '''page under development'''
@app.route('/topic_articles/')
def topic_articles():
    '''
    Given a provided topic, shows a list of the articles that make up that topic
    (website, cleaned headline, publication date).
    '''
    c_topic = request.args.get('topic_select')
    # map the display label to its trending-order position, then to a model index
    topic_index = cw.trending_order[TOPIC_ARTICLE.index(c_topic)]
    # NOTE(review): this compares dict objects elementwise to locate the topic in
    # the unfiltered list — assumes cw.all_dc holds the same dict instances as
    # cw.dc; confirm in work_with_counts
    topic_index = np.where(cw.all_dc==cw.dc[topic_index])[0][0]
    columns = ['web_url','headline','pub_date']
    if topic_index >= 0:
        # select the rows of the global article DataFrame tied to this topic
        df_topic = df.iloc[cw.article_relates[topic_index],:]
        df_topic = df_topic[columns]
        headlines = df_topic.headline.values
        # strip the stringified-dict wrapper around the headline text
        # (headlines were stored as str(dict)); presumably "{'main': '...'}"
        # — TODO confirm against the scraper output
        hf = [h[h.find(':')+3:] for h in headlines]
        hf = [h[:h.find("'")] for h in hf]
        df_topic['headline']= hf
        article_list = df_topic.to_dict(orient='records')
        return render_template('topic_articles.html', columns = ['Website', 'Headline', 'Publication Date'], article_list = article_list, topic=c_topic)
    else:
        return ''' No Topic Selected '''
if __name__ == '__main__':
    #TODO: grab 'current' articles (define current)
    # Load the fitted topic model and the precomputed Count_Worker; both are
    # module-level globals read by the route handlers above.
    model = NMF_Time(load_model=True)
    with open('app_model/output_data.pkl', 'rb') as od:
        # pickle of a locally generated Count_Worker (trusted local file)
        cw = pickle.load(od)
    df = pd.read_csv('../temp_data1.csv', index_col=0)
    df = df[df['news_source'] == 'NYT']
    # drop-down labels, ordered by trending rank
    TOPIC_LIST = ['Topic {}'.format(i) for i in cw.trending_order]
    TOPIC_ARTICLE = TOPIC_LIST.copy()
    #cw.topics.shape[0])]
    TOPIC_LIST.insert(0, 'All Topics')
    # -1 == 'All Topics' selected
    cw.web_index = -1
    app.run(host='0.0.0.0', port=8080, debug=True)
| {"/flask_app/words_to_vals.py": ["/flask_app/work_with_counts.py"]} |
76,401 | s0eger/working_title | refs/heads/master | /transfer_work.py | import pickle
import pandas as pd
import numpy as np
from flask_app.words_to_vals import NMF_Time, _tokenize
from flask_app.work_with_counts import Count_Worker
import datetime as dt
from sklearn.metrics.pairwise import pairwise_distances
import matplotlib.pyplot as plt
def generate_model_data_from_articles ():
    """ Generates a model and saves it (currently used for testing on model, not for finalized model)

    Fits the TF-IDF + NMF pipeline on NYT article content, bins topic counts
    into 4-hour periods, and pickles the fitted model via save_model().

    Parameters
    ----------
    None

    Returns
    -------
    None, but saves the model as a pickle file
    """
    df = pd.read_csv('temp_data1.csv',index_col=0)
    # restrict to a single outlet so counts are comparable over time
    df = df[df['news_source'] == 'NYT']
    testy = NMF_Time()
    testy.generate_topics(df['content'].values, tok=_tokenize, min_df = 0.01, max_features = 10000, n_components=500)
    # 4-hour bins; an article counts toward a topic when its weight >= 0.05
    testy.perform_time_counting_self(df, delta=dt.timedelta(hours=4), threshold=0.05)
    testy.save_model()
def feature_variance_cutoff(plot_results=False):
    """ Trains the model with a varying number of NMF components and records
    reconstruction error, within-topic similarity, and across-topic similarity.

    Parameters
    ----------
    plot_results: whether or not to display a plot of the results

    Returns
    -------
    None, but saves these values to 'nmf_trends.csv' for further access
    """
    df = pd.read_csv('temp_data1.csv',index_col=0)
    df = df[df['news_source'] == 'NYT']
    testy = NMF_Time(verbose=False)
    # despite the 'feats' naming, n is passed as n_components (topic count)
    n_feats = np.linspace(10,1000,10).astype(int)
    errors = []
    in_err = []
    out_err = []
    print('Vectorizing')
    # vectorize once; only the factorization is refit per n
    testy._fit_vectorizer(df['content'].values, tok=_tokenize, min_df = 0.01, max_features = 10000)
    print('Range of Features:')
    print(n_feats)
    for n in n_feats:
        print('{} features'.format(n))
        testy._fit_factorizer(n_components=n)
        errors.append(testy.nmf.reconstruction_err_)
        in_similiarity = 0
        # sum cosine distances between articles assigned (by argmax) to the same topic
        for i in range(testy.W.shape[1]):
            topic_articles = testy.W[np.argmax(testy.W,axis=1)==i]
            if len(topic_articles) > 1:
                sim = pairwise_distances(topic_articles,metric='cosine')
                # lower triangle only: each unordered pair counted once
                ind = np.tril_indices(topic_articles.shape[0],k=-1)
                in_similiarity += np.sum(sim[ind])
        in_err.append(in_similiarity / testy.W.shape[0])
        # mean pairwise cosine distance between topic word-vectors
        H = testy.nmf.components_
        across_sim = pairwise_distances(H,metric='cosine')
        ind = np.tril_indices(H.shape[0],k=-1)
        out_err.append(np.mean(across_sim[ind]))
    if plot_results:
        plt.close('all')
        plt.subplot(3,1,1)
        plt.title('Reconstruction Errors')
        plt.plot(n_feats,errors)
        plt.subplot(3,1,2)
        plt.title('Average In Topic Similarity')
        plt.plot(n_feats,in_err)
        plt.subplot(3,1,3)
        plt.title('Average Across Topic Similarity')
        plt.plot(n_feats,out_err)
        plt.xlabel('Number of Features')
        plt.show()
    # persist the sweep so plots can be regenerated without refitting
    df = pd.DataFrame(columns=['n_feats','reconstruction_error','in_similarity','across_similiarity'])
    df['n_feats']= n_feats
    df['reconstruction_error']= errors
    df['in_similarity'] = in_err
    df['across_similiarity'] = out_err
    df.to_csv('nmf_trends.csv')
if __name__ == '__main__':
    # Entry point intentionally inactive; uncomment one of the lines below to
    # run. `pass` keeps the module importable — an `if` body consisting only
    # of comments is a SyntaxError.
    # errors, in_err, out_err, n_feats = feature_variance_things()
    # print(errors)
    # testy = generate_model_data_from_articles()
    # obj = pull_up_data()
    pass
| {"/flask_app/words_to_vals.py": ["/flask_app/work_with_counts.py"]} |
76,402 | s0eger/working_title | refs/heads/master | /comparing_errors.py | import pickle
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
''' Utilized code provided by https://grisha.org/blog/2016/01/29/triple-exponential-smoothing-forecasting/ to compare with my implementation of triple exp smoothing'''
def triple_exp_me(data, alpha=0.5, beta=0.5, gamma=0.5):
    """Additive Holt-Winters (triple exponential) smoothing over the time axis.

    Parameters
    ----------
    data : np.ndarray of shape (n_topics, n_samples)
        Per-topic counts sampled every 4 hours; n_samples must cover at least
        two full weekly seasons (2 * 42 samples).
    alpha, beta, gamma : float
        Level, trend and seasonal smoothing factors.

    Returns
    -------
    np.ndarray of shape (n_topics, n_samples)
        The fitted trend (level + trend + seasonal) per topic. (The original
        docstring claimed an RMSE was returned; it never was.)
    """
    # work in float so integer input isn't truncated on assignment into s
    s = data.astype(float)
    b = np.zeros_like(s)
    c = np.zeros_like(s)
    L = 42  # weekly season: 7 days/week * 24 h/day / 4 h/sample = 42 samples
    # Time runs along axis 1. The original used shape[0] (the topic axis) for
    # both the cycle count and the main loop, which divided by zero or
    # truncated whenever n_topics != n_samples.
    n_cycles = s.shape[1] // L
    c_0 = np.zeros((s.shape[0], L))
    # per-cycle mean level, used to seed the seasonal components
    avgs = [np.sum(s[:, i * L:(i + 1) * L], axis=1) / L for i in range(n_cycles)]
    for i in range(L):
        # initial trend: average first-season-to-second-season step
        b[:, 0] += (s[:, i + L] - s[:, i]) / (L * L)
        # initial seasonal: mean deviation of this in-season position
        c_0[:, i] = sum([s[:, L * j + i] - avgs[j] for j in range(n_cycles)]) / n_cycles
    c[:, 0] = c_0[:, 0]
    for i in range(1, s.shape[1]):
        if i < L:
            # first season: fall back to the seeded seasonal components
            s[:, i] = alpha * (data[:, i] - c_0[:, i]) + (1 - alpha) * (s[:, i - 1] + b[:, i - 1])
            b[:, i] = beta * (s[:, i] - s[:, i - 1]) + (1 - beta) * b[:, i - 1]
            c[:, i] = gamma * (data[:, i] - s[:, i]) + (1 - gamma) * c_0[:, i]
        else:
            s[:, i] = alpha * (data[:, i] - c[:, i - L]) + (1 - alpha) * (s[:, i - 1] + b[:, i - 1])
            b[:, i] = beta * (s[:, i] - s[:, i - 1]) + (1 - beta) * b[:, i - 1]
            c[:, i] = gamma * (data[:, i] - s[:, i]) + (1 - gamma) * c[:, i - L]
    return c + s + b
''' The following three functions are provided by source above '''
def initial_seasonal_components(series, slen):
    """Average seasonal deviations over all complete seasons of `series`.

    Returns a dict mapping position-in-season (0..slen-1) to the mean
    deviation of that position from its season's average.
    """
    n_seasons = int(len(series) / slen)
    # average level of each complete season
    season_averages = [sum(series[slen * j:slen * (j + 1)]) / float(slen)
                       for j in range(n_seasons)]
    seasonals = {}
    for pos in range(slen):
        deviation_total = sum(series[slen * j + pos] - season_averages[j]
                              for j in range(n_seasons))
        seasonals[pos] = deviation_total / n_seasons
    return seasonals
def initial_trend(series, slen):
    """Average one-step trend, estimated by comparing the first two seasons."""
    step_total = sum(float(series[i + slen] - series[i]) / slen for i in range(slen))
    return step_total / slen
def triple_exponential_smoothing(series, slen, alpha, beta, gamma, n_preds):
    """Additive Holt-Winters smoothing of `series` with `n_preds` forecast
    steps appended to the result.

    Reference implementation after
    https://grisha.org/blog/2016/01/29/triple-exponential-smoothing-forecasting/
    """
    result = []
    seasonals = initial_seasonal_components(series, slen)
    for step in range(len(series) + n_preds):
        if step == 0:
            # initial values: seed level and trend, echo the first observation
            smooth = series[0]
            trend = initial_trend(series, slen)
            result.append(series[0])
            continue
        if step >= len(series):
            # forecasting region: extrapolate level + m*trend plus seasonality
            m = step - len(series) + 1
            result.append((smooth + m * trend) + seasonals[step % slen])
        else:
            # smoothing region: update level, trend and seasonal in place
            observed = series[step]
            previous_smooth = smooth
            smooth = alpha * (observed - seasonals[step % slen]) + (1 - alpha) * (smooth + trend)
            trend = beta * (smooth - previous_smooth) + (1 - beta) * trend
            seasonals[step % slen] = gamma * (observed - smooth) + (1 - gamma) * seasonals[step % slen]
            result.append(smooth + trend + seasonals[step % slen])
    return result
if __name__ == '__main__':
    # Visual sanity check: overlay the blog-reference smoother and the local
    # vectorized implementation on topic 5 of the saved Count_Worker data.
    with open('flask_app/app_model/output_data.pkl','rb') as f:
        # pickle of a locally generated Count_Worker (trusted local file)
        cw = pickle.load(f)
    # reference implementation works on one 1-D series; slen=42 == weekly season
    other_vals = triple_exponential_smoothing(cw.smooth_data[5],42,0.5,0.5,0.5,0)
    # local implementation smooths all topics at once; compare row 5
    my_vals = triple_exp_me(cw.smooth_data)
    plt.plot(cw.smooth_data[5],'b',label='counts')
    plt.plot(other_vals,'y--',label='other')
    plt.plot(my_vals[5],'g-.',label='mine')
    plt.legend()
    plt.show()
| {"/flask_app/words_to_vals.py": ["/flask_app/work_with_counts.py"]} |
76,403 | s0eger/working_title | refs/heads/master | /flask_app/words_to_vals.py | '''Used to take in a csv or other dataframe like file with the content of the articles under 'content' and then outputs another dataframe now only containing the tf_idf matrix
What I can customize:
- TfidfVectorizer
- tokenize function
- max_df and min_df
- max_features
- NMF
- init
‘random’: non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
‘nndsvd’: Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
‘nndsvda’: NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
‘nndsvdar’: NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa for when sparsity is not desired)
- n_components
- solver 'cd' or 'mu'
- alpha (regularization)
- l1_ratio
In Counting:
dt: time interval for grouping counts of articles by topic
threshold: minimum similarity between an article and a topic to count as being about that topic
'''
import pickle
import spacy
from spacy.tokenizer import Tokenizer
from spacy.lang.en import English
import boto3
# from post_to_s3 import get_client_bucket
import numpy as np
import pandas as pd
import datetime as dt
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import NMF
from string import punctuation
import matplotlib.pyplot as plt
import pyflux as pf
import os
if os.path.dirname(os.path.abspath('words_to_vals.py')) == '/home/peter/Documents/working_title/flask_app':
from work_with_counts import Count_Worker
else:
from flask_app.work_with_counts import Count_Worker
nlp = spacy.load('en')
spacy_tokenizer = English().Defaults.create_tokenizer(nlp)
punct = punctuation + '’' + '--' + '’s'
def _tokenize(doc):
    """Tokenizer for the TfidfVectorizer: spaCy lemmas of alphabetic tokens.

    Pronoun lemmas ('-PRON-') fall back to the raw token text; anything found
    in the punctuation set or containing '@' (e-mail-like tokens) is dropped.
    Currently spaCy-based; could be swapped for an nltk variant for comparison.
    """
    # ITER_COUNT += 1
    # print('Tokenizing ({0}/{1})'.format(ITER_COUNT, ITER_LENGTH), end="\r")
    lemmas = []
    for token in spacy_tokenizer(doc):
        if not token.is_alpha:
            continue
        lemmas.append(token.text if token.lemma_ == '-PRON-' else token.lemma_)
    return [lemma for lemma in lemmas if lemma not in punct and '@' not in lemma]
class NMF_Time():
"""docstring for NMF_Time."""
    def __init__(self, top_n_words=10, load_model=False, verbose=False):
        """ Initialization of class

        Parameters
        ----------
        top_n_words: the number of words you want to show per topic
        load_model: whether or not to load the previously saved model under 'model/'
        verbose: passed through to the NMF solver for progress output
        """
        print('Intializing Class')
        self.top_n_words = top_n_words
        self.t_vect = None  # fitted TfidfVectorizer (set by _fit_vectorizer)
        self.nmf = None  # fitted NMF model (set by _fit_factorizer)
        self.counts = None  # per-period article counts per topic
        self.total_counts = None  # per-period total article counts
        self.times = None  # period start times for the counts
        self.topics = None  # top_n_words tokens per topic (np.array of lists)
        self.topic_dc = None  # per-topic {token: weight} dicts
        self.verbose = verbose
        if load_model:
            self.load_model()
# Need to cap my dictionary or modify min_df
def generate_topics (self, content, tok, **kwargs):
""" Converts a list of str containing the article text into a feature matrix
Allows for ability to permutate the functional components to form comparisons
Parameters
----------
content: article contents as a list of str
tok: the tokenizer function to use within the vectorizer
kwargs
---------
max_df: In the vectorizer, the maximum allowable frequency of the words (default = 1.0)
min_df: In the vectorizer, the minimum allowable frequency of the words (default = 0.0)
max_features: The maximum number of words to use in the vectorized vocabulary (default = None)
n_components: Number of topics you want to work with (default = 10)
init: Method of performing the NMF (default = 'nndsvd')
solver: solver for the NMF algorithm (default = 'cd')
alpha: Reqularization of the algorithm (default = 0)
l1_ratio: ratio between l1 and l2 Reqularization (default = 0)
Returns
-------
W: np.array docs v topics
H: np.array topics v words
"""
if kwargs == None:
kwargs = dict()
self._fit_vectorizer(content=content,tok=tok,**kwargs)
self._fit_factorizer(**kwargs)
self._generate_topics()
def _fit_vectorizer (self, content, tok, **kwargs):
self.t_vect = TfidfVectorizer(stop_words = 'english', tokenizer = tok, max_df = kwargs.get('max_df', 1.0), min_df = kwargs.get('min_df', 0.0), max_features = kwargs.get('max_features', None))
self.t_mat = self.t_vect.fit_transform(content)
def _fit_factorizer (self, **kwargs):
self.nmf = NMF(n_components = kwargs.get('n_components', 10), init = kwargs.get('init', 'nndsvd'), solver = kwargs.get('solver', 'cd'), random_state = 2, alpha = kwargs.get('alpha', 0), l1_ratio = kwargs.get('l1_ratio', 0), shuffle = True, verbose = self.verbose)
self.W = self.nmf.fit_transform(self.t_mat)
def _generate_topics (self):
H = self.nmf.components_
vocab = { v: k for k, v in self.t_vect.vocabulary_.items()}
top_words = []
temp_dict = []
ordering = H.argsort(axis=1)[:,:-self.top_n_words-1:-1]
for i in range(H.shape[0]):
tdict = {vocab[ordering[i,j]] : H[i, ordering[i,j]] for j in range(self.top_n_words)}
temp_dict.append(tdict)
tp = [vocab[ordering[i,j]] for j in range(self.top_n_words)]
top_words.append(tp)
self.topics = np.array(top_words)
self.topic_dc = np.array(temp_dict)
    def perform_time_counting_self (self, df, delta=dt.timedelta(days=1), threshold=0.1, start_time=None):
        """ Bins the fitted articles into time periods and counts, per period,
        how many articles belong to each topic.

        This assumes that df content is what the model was fitted on (rows of
        df align with rows of self.W).

        Parameters
        ----------
        df (pandas.DataFrame) : articles with a 'pub_date' column, row-aligned with self.W
        delta (datetime.timedelta) : timespan for which to bin articles into (default = 1 day)
        threshold : minimum topic weight for an article to count toward that topic (default = 0.1)
        start_time : first bin start; defaults to the earliest pub_date

        Returns
        -------
        None; stores self.counts, self.total_counts, self.times and
        self.topic_threshold as side effects.
        """
        # NOTE(review): mutates the caller's DataFrame in place (pub_date
        # converted to datetime) — confirm callers expect this
        df['pub_date'] = pd.to_datetime(df['pub_date'])
        if start_time == None:
            start_time = df['pub_date'].min()
        end_time = start_time + delta
        ending_point = df['pub_date'].max()
        topic_counts = []
        period_counts = []
        time_periods = []
        print("Starting time analysis")
        # walk [start_time, start_time+delta) windows until all articles are covered
        while start_time <= ending_point:
            sub_W = self.W[(df['pub_date'] < end_time) & (df['pub_date'] >= start_time),:]
            # an article counts toward every topic whose weight >= threshold
            topic_pick = np.sum(1*(sub_W >= threshold),axis=0)
            topic_counts.append(topic_pick)
            period_counts.append(sub_W.shape[0])
            time_periods.append(start_time)
            start_time = end_time
            end_time = start_time + delta
        self.counts = np.array(topic_counts)
        self.total_counts = np.array(period_counts)
        self.times = np.array(time_periods)
        self.topic_threshold = threshold
        print('Time Counts is Complete')
def comprehensive_time_count_self (self):
    """ Generates plots to look at the counts of articles to topics based on thresholds.
    Parameters
    ----------
    None
    Returns
    -------
    None
    A figure containing plots of article counts against thresholds
    - Plot of % of articles in corpus that meet the threshold of a topic
    - Plot of average number of topics per article
    - Plot of topics with atleast a critical number of related articles
    """
    # Sweep 100 candidate thresholds, log-spaced between 10**-2 and 10**-0.7.
    threshold = np.logspace(-2,-.7,100)
    percent_counts = np.zeros(100)
    average_topics = np.zeros(100)
    # Candidate minimum article counts for a topic to be considered "valid".
    n = [3,5,7,10]
    valid_topics = np.zeros((100, len(n)))
    for i, t in enumerate(threshold):
        # Fraction of articles whose strongest topic weight reaches the threshold.
        percent_counts[i] = np.sum(1*(np.max(self.W, axis=1) >= t))/self.W.shape[0]
        # Mean number of topics per article at this threshold.
        average_topics[i] = np.sum(1*(self.W>=t))/self.W.shape[0]
        for j, nn in enumerate(n):
            # Fraction of topics having at least nn articles above the threshold.
            valid_topics[i,j] = np.sum(1*(np.sum(1*(self.W >=t), axis=0)>=nn))/self.W.shape[1]
    plt.subplot(2,2,1)
    plt.title('Percentage of Articles with a Topic above Threshold')
    plt.plot(threshold,percent_counts)
    plt.xlabel('Threshold')
    plt.ylabel('% of Articles')
    plt.subplot(2,2,2)
    plt.title('Average # of Topics per Article above Threshold')
    plt.plot(threshold,average_topics)
    plt.xlabel('Threshold')
    plt.ylabel('Average # of Topics')
    # Bottom half of the figure: one curve per candidate n.
    plt.subplot(2,1,2)
    plt.title('Percentage of Topics with # of related Articles >= n')
    for i in range(len(n)):
        plt.plot(threshold, valid_topics[:,i],label='n={}'.format(n[i]))
    plt.legend()
    plt.xlabel('Threshold')
    plt.ylabel('% of Topics with Articles')
    plt.show()
def perform_time_counting_new (self, df, delta=dt.timedelta(days=1), threshold=0.1, prior_times=False):
    """ Takes in a dataframe of (possibly new) data and counts per-period topic activity.
    Parameters
    ----------
    df (pandas.DataFrame) : DataFrame of articles containing article content and article publication date. Can be completely new data
    delta (datetime.timedelta) : timespan for which to bin articles into (default = 1 day)
    threshold : the value at which equal to or above an article is considered counted as of that topic (default = 0.1)
    prior_times : if True, continue from the last counted period, reusing the previous bin width
    Returns
    -------
    None, but assigns (via perform_time_counting_self):
    counts : counts of articles that are pertaining to a topic, across time
    total_counts : total number of articles in that time period
    times : the periods of time relating to the counts
    """
    if 'content' not in df.columns or 'pub_date' not in df.columns:
        print('Provided dataframe of Invalid type')
        return
    elif self.t_vect is None or self.nmf is None:
        # No fitted model yet: fit the vectorizer/NMF on this data first.
        # (Was a bare `generate_topics(content)`, which raised NameError --
        # generate_topics is a method of this class.)
        content = df['content'].values
        self.generate_topics(content)
    # Project the articles into the existing topic space.
    t_mat = self.t_vect.transform(df['content'].values)
    self.W = self.nmf.transform(t_mat)
    start_time = None
    if prior_times:
        # Continue the existing timeline: reuse its bin width and start where
        # the previous counting left off.
        delta = self.times[1] - self.times[0]
        start_time = self.times[-1]
    self.perform_time_counting_self(df, delta, threshold, start_time)
def save_model(self):
    """ Pickle dumps the object into relevant files under directory 'app_model/'. Requires fitting of model and time counting to be done.
    Parameters
    ----------
    None
    Returns
    -------
    None
    """
    # Precompute the derived count/trend data so the flask app only has to unpickle it.
    cw = Count_Worker(self)
    cw.setup_work()
    with open('app_model/output_data.pkl', 'wb') as od:
        pickle.dump(cw, od)
    with open('app_model/vectorizer.pkl', 'wb') as vc:
        pickle.dump(self.t_vect, vc)
    with open('app_model/factorization.pkl', 'wb') as fc:
        pickle.dump(self.nmf, fc)
def load_model(self):
    """ Pickle loads this class from relevant files under directory 'app_model/'.
    Parameters
    ----------
    None
    Returns
    -------
    None
    """
    with open('app_model/output_data.pkl', 'rb') as od:
        cw = pickle.load(od)
    with open('app_model/vectorizer.pkl', 'rb') as vc:
        self.t_vect = pickle.load(vc)
    with open('app_model/factorization.pkl', 'rb') as fc:
        self.nmf = pickle.load(fc)
    # Rehydrate this object's state from the precomputed Count_Worker.
    self.topics = cw.all_topics
    self.counts = cw.all_counts.T  # Count_Worker stores counts transposed (topics x periods)
    self.total_counts = cw.total_counts
    self.topic_dc = cw.all_dc
    self.times = cw.times
    self.W = cw.W
    # Each per-topic dict holds the topic's top words, so its size recovers top_n_words.
    self.top_n_words = len(cw.dc[0].keys())
    self.topic_threshold = cw.topic_threshold
| {"/flask_app/words_to_vals.py": ["/flask_app/work_with_counts.py"]} |
76,404 | s0eger/working_title | refs/heads/master | /scrape_web.py | import os
import requests
import bs4
import json
import urllib
import datetime as dt
import pandas as pd
import time
# Query the NYT API once
def single_query(link, payload):
    """Perform one GET request against `link` with query params `payload`.

    Returns the parsed JSON body on HTTP 200; otherwise prints a warning
    and returns None.
    """
    response = requests.get(link, params=payload)
    if response.status_code == 200:
        return response.json()
    print('WARNING', response.status_code)
# Scrape the meta data (link to article and put it into Mongo)
def nyt_scrape_meta(scrape_date=None):
    """ Grabs a query containing NYT articles from that date and appends them to temp_data1.csv
    Parameters
    ----------
    scrape_date: specified date to request data on (defaults to today's date at call time)
    Returns
    -------
    None; rewrites temp_data1.csv with the new rows appended
    """
    # Resolve the default at call time: a `scrape_date=dt.date.today()` default
    # would be evaluated once at import time and silently go stale.
    if scrape_date is None:
        scrape_date = dt.date.today()
    nyt_key = os.environ['NYT_API_KEY']
    # The basic parameters for the NYT API
    link = 'http://api.nytimes.com/svc/search/v2/articlesearch.json'
    payload = {'api-key': nyt_key }
    today = scrape_date
    # NYT API expects YYYYMMDD date strings.
    payload['end_date'] = str(today).replace('-','')
    yesterday = today - dt.timedelta(days=1)
    payload['begin_date'] = str(yesterday).replace('-','')
    print ('Scraping period: %s - %s ' % (str(yesterday), str(today)))
    content = single_query(link, payload)
    df = nyt_table_setup(content)
    df_tot = pd.read_csv('temp_data1.csv',index_col=0)
    temp_cols = ['_id', 'content', 'headline', 'news_source', 'pub_date', 'section_name', 'web_url', 'word_count']
    df_tot = df_tot[temp_cols]
    df_tot = df_tot.append(df[temp_cols])
    # Appending can introduce stray columns; keep only the expected schema.
    for col in df_tot.columns:
        if col not in temp_cols:
            print('dropping', col)
            df_tot.drop(col, inplace=True,axis=1)
    # Drop rows with no article body (e.g. visualization-only pieces).
    df_tot.dropna(subset=['content'],axis=0, inplace=True)
    df_tot.to_csv('temp_data1.csv')
def nyt_scrape_meta_continuous(days=1, end_date=None):
    """ Updates a dataframe containing information on all articles.
    Will save the updated csv file with the scraped data
    Is scraping NYT
    Parameters
    ----------
    days: how many days backwards from that date your want to go
    end_date: date to start scraping backwards from (defaults to today's date at call time)
    Returns
    -------
    None
    """
    # Resolve the default at call time: a `dt.date.today()` default is evaluated
    # once at import time and silently goes stale.
    if end_date is None:
        end_date = dt.date.today()
    nyt_key = os.environ['NYT_API_KEY']
    link = 'http://api.nytimes.com/svc/search/v2/articlesearch.json'
    payload = {'api-key': nyt_key }
    # TODO: replace with actual source
    df_tot = pd.read_csv('temp_data1.csv',index_col=0)
    temp_cols = ['_id', 'content', 'headline', 'news_source', 'pub_date', 'section_name', 'web_url', 'word_count']
    df_tot = df_tot[temp_cols]
    today = end_date
    for day in range(days):
        # One-day window ending at `today`, stepping backwards each iteration.
        payload['end_date'] = str(today).replace('-','')
        yesterday = today - dt.timedelta(days=1)
        payload['begin_date'] = str(yesterday).replace('-','')
        print ('Scraping period: %s - %s ' % (str(yesterday), str(today)))
        today -= dt.timedelta(days=1)
        content = single_query(link, payload)
        df = nyt_table_setup(content)
        # NOTE(review): df is None when the query fails; df[temp_cols] would then
        # raise -- confirm whether a failed day should abort the whole run.
        df_tot = df_tot.append(df[temp_cols])
        # Appending can introduce an extra column; restrict to the expected ones.
        for col in df_tot.columns:
            if col not in temp_cols:
                print('dropping', col)
                df_tot.drop(col, inplace=True,axis=1)
        # Sleep so that NYT doesn't consider the requests spam (429 error).
        time.sleep(5)
    # Clean out rows where there is no content (e.g. visualization-only articles).
    df_tot.dropna(subset=['content'],axis=0, inplace=True)
    df_tot.to_csv('temp_data1.csv')
# For NYT
def nyt_table_setup (content):
    """ For NYT, converts from a NYT API requests object lacking the article text, gathers that text and puts all info into a new dataframe. Lastly will return that dataframe
    Parameters
    ----------
    content: NYT API article requests JSON object, a standard response to a request under article search
    Returns
    -------
    df: a dataframe containing article headlines, urls, content, source (NYT), section name, and publication date.
        Returns None when `content` is None (i.e. the query failed).
    """
    # `is None` (identity) instead of `== None`.
    if content is None:
        return None
    df = pd.DataFrame(content['response']['docs'])
    content_list = []
    for d in content['response']['docs']:
        link = d['web_url']
        r = requests.get(link)
        html = r.content
        soup = bs4.BeautifulSoup(html, 'html.parser')
        print(r.status_code, link)
        # NYT body paragraphs carry the 'story-body-text' class.
        article_content = ' '.join([i.text for i in soup.select('p.story-body-text')])
        content_list.append(article_content)
    df['content'] = content_list
    df['news_source'] = 'NYT'
    return df
# Do batch time saves
def big_batch_nyt(month, year):
    """ Generates the NYT articles from a given month and year and saves them to disk
    Parameters
    ----------
    month: the numerical version of the month to grab articles from
    year: the year to grab articles from
    Returns
    -------
    df_tot: the dataframe now containing all articles from requested time period
    """
    nyt_key = os.environ['NYT_API_KEY']
    # NYT archive endpoint: one request returns every article for the month.
    link = 'http://api.nytimes.com/svc/archive/v1/'
    link = link + str(year) + '/' + str(month) + '.json'
    payload = {'api-key': nyt_key }
    content = single_query(link, payload)
    df_tot = pd.read_csv('temp_data2.csv',index_col=0)
    temp_cols = ['_id', 'content', 'headline', 'news_source', 'pub_date', 'section_name', 'web_url', 'word_count']
    df_tot = df_tot[temp_cols]
    # Persist in chunks of 25 articles so a crash loses at most one batch.
    save_rate = 25
    for i in range(len(content['response']['docs']) // save_rate):
        df_tot = nyt_batch_save(content['response']['docs'][i * save_rate : (i+1) * save_rate], df_tot)
    # NOTE(review): when len(docs) is an exact multiple of save_rate the remainder is 0
    # and [-0:] re-submits the WHOLE list; nyt_batch_save's _id de-duplication masks
    # this, but confirm it is intended.
    return nyt_batch_save(content['response']['docs'][-(len(content['response']['docs']) % save_rate):], df_tot)
def nyt_batch_save (jsony, df_tot):
    """ Given a batch from a json response from a NYT API request done in big_batch_nyt, generates the content and writes it to disk
    Parameters
    ----------
    jsony: the batch of json objects of NYT articles provided by big_batch_nyt
    df_tot: the dataframe that stores the article info
    Returns
    -------
    df_tot: the dataframe now updated with the information from the NYT articles in jsony
    """
    temp_cols = ['_id', 'content', 'headline', 'news_source', 'pub_date', 'section_name', 'web_url', 'word_count']
    # Drop articles whose '_id' is already stored, so re-running a batch is idempotent.
    jsony = [d for d in jsony if d['_id'] not in df_tot['_id'].values]
    if len(jsony)==0:
        return df_tot
    df = pd.DataFrame(jsony)
    # Guarantee every expected column exists even if the API omitted some fields.
    for t in temp_cols:
        if t not in df.columns:
            df[t] = None
    df['news_source'] = 'NYT'
    content_list = []
    for d in jsony:
        # Fetch each article page and join its body paragraphs into one string.
        r = requests.get(d['web_url'])
        print(r.status_code, d['web_url'])
        soup = bs4.BeautifulSoup(r.content, 'html.parser')
        article_content = ' '.join([i.text for i in soup.select('p.story-body-text')])
        content_list.append(article_content)
    df['content'] = content_list
    df_tot = df_tot.append(df[temp_cols])
    print('saving')
    df_tot.to_csv('temp_data2.csv')
    return df_tot
def news_thingy_scrape_meta(source, sortBy):
    """Request one page of articles from the News API for `source`, ordered by `sortBy`.

    source: news source identifier (see https://newsapi.org/sources)
    sortBy: sorting method -- 'top', 'latest', or 'popular'
    Returns the parsed JSON response (see https://newsapi.org/#documentation),
    or None on a non-200 status.
    """
    api_key = os.environ['NEWS_API_KEY']
    endpoint = 'https://newsapi.org/v1/articles'
    params = { 'apiKey' : api_key, 'source' : source, 'sortBy' : sortBy }
    return single_query(endpoint, params)
# works for washington post
def news_thingy_table_setup(content, source):
    """ Similar to nyt_table_setup, converts from a News API requests object lacking the article text, gathers that text and puts all info into a new dataframe. Lastly will return that dataframe
    Parameters
    ----------
    content: News API article requests JSON object, a standard response to a request under article search
    source: news source to request. Visit https://newsapi.org/sources for names
    Returns
    -------
    df: a dataframe containing article headlines, urls, content, source, section name, and publication date
        (None when the source is not recognized)
    """
    ''' Given a web source, picks the method that will pull from a BeautifulSoup object the article content '''
    if source == 'the-washington-post':
        soup_to_text = wa_post_text
    elif source == 'bbc-news':
        soup_to_text = bbc_text
    elif source == 'cnn':
        soup_to_text = cnn_text
    elif source == 'breitbart-news':
        soup_to_text = brtbr_text
    else:
        print('Could not handle source')
        return
    df = pd.DataFrame(content['articles'])
    df.drop(['author', 'description', 'urlToImage'], axis = 1, inplace = True)
    content_list = []
    topic_list = []
    for d in content['articles']:
        link = d['url']
        r = requests.get(link)
        print(r.status_code, link)
        html = r.content
        soup = bs4.BeautifulSoup(html, 'html.parser')
        # NOTE(review): decompose() removes the page's first <a> tag; this raises
        # AttributeError if the page contains no <a> at all -- confirm acceptable.
        soup.a.decompose()
        text, section = soup_to_text(soup, link)
        content_list.append(text)
        topic_list.append(section)
    # Remaining columns after the drop are publishedAt/title/url, renamed here.
    df.columns = ['pub_date','headline','web_url']
    df['content'] = content_list
    df['section_name'] = topic_list
    df['news_source'] = source
    # Discard articles whose body could not be extracted.
    df = df[df['content'] != '']
    return df
''' The following methods take in a BeautifulSoup object and the link it is from and returns the text of that article and it's section name. The difference between the methods are the differences between web sites (news source) '''
def wa_post_text (soup, link):
    """Extract the article body and section name from a Washington Post page.

    soup: BeautifulSoup object for the article page
    link: the url the page was fetched from
    Returns (text, section): the joined paragraph text and the section slug
    taken from the first path component after '.com/'.
    """
    paragraphs = [p.text for p in soup.findAll('p') if '<p ' not in p]
    # Everything before the 'email address' sign-up line is boilerplate;
    # drop it, then drop that line itself.
    while len(paragraphs) > 0 and 'email address' not in paragraphs[0]:
        paragraphs = paragraphs[1:]
    paragraphs = paragraphs[1:]
    section = link[link.find('.com/')+5:]
    return ' '.join(paragraphs), section[:section.find('/')]
def bbc_text (soup, link):
    """Extract the article body and section name from a BBC News page.

    soup: BeautifulSoup object for the article page
    link: the url the page was fetched from
    Returns (text, section): the joined paragraph text and the section slug
    taken from the first path component after '.co.uk/'.
    """
    paragraphs = [p.text for p in soup.findAll('p') if '<p ' not in p]
    # Everything up to and including the 'external links' disclaimer is boilerplate.
    while len(paragraphs) > 0 and 'external links' not in paragraphs[0]:
        paragraphs = paragraphs[1:]
    paragraphs = paragraphs[1:]
    # A trailing newsletter plug, when present, is not part of the article.
    if len(paragraphs) > 0 and 'newsletter' in paragraphs[-1]:
        paragraphs = paragraphs[:-1]
    section = link[link.find('.co.uk/')+7:]
    return ' '.join(paragraphs), section[:section.find('/')]
def cnn_text (soup, link):
    """Extract the article body and section name from a CNN page.

    soup: BeautifulSoup object for the article page
    link: the url the page was fetched from
    Returns (text, section): the joined paragraph text and the section slug;
    the +16 offset skips '.com/' plus the 'YYYY/MM/DD/' date path.
    """
    selectors = {"class" : ["zn-body__paragraph speakable", "zn-body__paragraph"]}
    body_parts = [d.text for d in soup.findAll("div", selectors)]
    section = link[link.find('.com/')+16:]
    return ' '.join(body_parts), section[:section.find('/')]
def brtbr_text (soup, link):
    """Extract the article body and section name from a Breitbart page.

    soup: BeautifulSoup object for the article page
    link: the url the page was fetched from
    Returns (text, section): the joined paragraph text (the leading paragraph
    and the five trailing boilerplate paragraphs removed) and the section slug
    after '.com/'.
    """
    paragraphs = [p.text for p in soup.select('p')]
    body = paragraphs[1:-5]
    section = link[link.find('.com/')+5:]
    return ' '.join(body), section[:section.find('/')]
def tot_newsy (sources):
    """ Updates the dataframe with articles from each sorting order from all in sources
    All sources have top, but may not have latest or popular
    Will print out following for when that sorting order is not available from that news service:
        WARNING 400
        {Sorting Order} not available for {News Source}
    Parameters
    ----------
    sources: List of news sources. Visit https://newsapi.org/sources for names.
    Returns
    -------
    None
    """
    df_tot = pd.read_csv('temp_data1.csv', index_col=0)
    temp_cols = ['_id', 'content', 'headline', 'news_source', 'pub_date', 'section_name', 'web_url', 'word_count']
    df_tot = df_tot[temp_cols]
    orders = ['top', 'latest', 'popular']
    for o in orders:
        for s in sources:
            # Throttle requests so the API does not rate-limit us.
            time.sleep(5)
            content = news_thingy_scrape_meta(s, o)
            # `is None` (identity) instead of `== None`: single_query returns
            # None when the request fails or the sort order is unsupported.
            if content is None:
                print('{0} not available for {1}'.format(o,s))
            else:
                df = news_thingy_table_setup(content, s)
                df_tot = df_tot.append(df)
    # Appending can introduce stray columns; keep only the expected schema.
    for col in df_tot.columns:
        if col not in temp_cols:
            print('dropping', col)
            df_tot.drop(col, inplace=True, axis=1)
    # Drop rows with no article body.
    df_tot.dropna(subset=['content'],axis=0, inplace=True)
    df_tot.to_csv('temp_data1.csv')
def fox_news_web_scrape(start_date = dt.date(2017,8,1), end_date=None):
    """ Utilizies Fox News Advance Article Search to grab all articles across a period
    Parameters
    ----------
    start_date: beginning date of collection period
    end_date: end date of collection period (defaults to today's date at call time)
    Returns
    -------
    df: pandas dataframe for the collected articles (no content yet)
    """
    # Resolve the default at call time; a `dt.date.today()` default would be
    # frozen at import time.
    if end_date is None:
        end_date = dt.date.today()
    link = 'http://www.foxnews.com/search-results/search?q=a&ss=fn&min_date={0}&max_date={1}&start=0'.format(start_date, end_date)
    r = requests.get(link)
    html = r.content
    soup = bs4.BeautifulSoup(html, 'html.parser')
    links = []
    titles = []
    sections = []
    dates = []
    # Result links look like:
    # http://www.foxnews.com/us/2017/11/15/texas-sheriff-concerned-about-truck-with-anti-trump-message.html
    # 'a.ng-binding' is a CSS selector, so it must go through select();
    # findAll('a.ng-binding') looks for a literal <a.ng-binding> tag and matches nothing.
    for obj in soup.select('a.ng-binding'):
        t = obj.get('href')
        links.append(t)
        # l: index just past the last '/'; the slug before '.html' is the headline.
        l = len(t) - t[::-1].find('/')
        f = t[23:l-1]   # 'section/YYYY/MM/DD' (23 skips 'http://www.foxnews.com/')
        e = t[l:-5]     # headline slug without '.html'
        titles.append(e.replace('-',' '))
        sections.append(f[:f.find('/')])
        # NOTE(review): f[f.find('/')+1:-1] drops the final character of the date
        # ('2017/11/15' -> '2017/11/1'); looks like an off-by-one -- confirm.
        dates.append(f[f.find('/')+1:-1].replace('/','-'))
    temp_cols = ['_id', 'content', 'headline', 'news_source', 'pub_date', 'section_name', 'web_url', 'word_count']
    df = pd.DataFrame(columns=temp_cols)
    df['headline']=titles
    df['web_url']=links
    df['news_source']='Fox News'
    df['pub_date'] = dates
    df['section_name']=sections
    return df
def fox_news_to_table(links,titles,sections):
    """ Given work done by fox_news_web_scrape, grabs the article content for those articles and adds that information to the current storage location
    Parameters
    ----------
    links: website urls for articles
    titles: the headlines of those articles
    sections: the news section of those articles
    Returns
    -------
    None
    """
    save_rate = 100
    df_tot = pd.read_csv('temp_data1.csv',index_col=0)
    temp_cols = ['_id', 'content', 'headline', 'news_source', 'pub_date', 'section_name', 'web_url', 'word_count']
    df_tot = df_tot[temp_cols]
    i = 0
    while (i+1)*save_rate < len(links):
        df = pd.DataFrame(columns=temp_cols)
        # NOTE(review): `fxc` is not defined anywhere in this module, and unpacking a
        # list of per-link results into two names only works for exactly two links;
        # this line looks broken -- probably meant zip(*[fxc(link) for link in ...]).
        content, dates = [fxc(link) for link in links[i*save_rate:(i+1)*save_rate]]
        df['content'] = content
        df['pub_date'] = dates
        df['headline'] = titles[i*save_rate:(i+1)*save_rate]
        df['section_name'] = sections[i*save_rate:(i+1)*save_rate]
        # NOTE(review): DataFrame.append has no `inplace` kwarg (this raises TypeError)
        # and returns a new frame; as written this call cannot work -- confirm intent.
        df_tot.append(df,inplace=True)
        df_tot.to_csv('temp_data1.csv')
        i += 1
    '''Have issue with appending where it creates an extra column. This removes that column '''
    df_tot = df_tot.append(df[temp_cols])
def clean_up_df(df):
    """Normalize an article dataframe in place, then persist it.

    Drops rows lacking article content, removes duplicate headlines,
    parses 'pub_date' into datetimes, and rewrites temp_data1.csv.
    """
    # Rows with no body text are useless downstream.
    df.dropna(subset=['content'], axis=0, inplace=True)
    # The same story can be scraped twice; the headline is the de-dup key.
    df.drop_duplicates(subset='headline', inplace=True)
    # String dates -> pandas datetimes for time-based binning later.
    df['pub_date'] = pd.to_datetime(df['pub_date'])
    df.to_csv('temp_data1.csv')
if __name__ == '__main__':
    # Script entry point: pick exactly one of the scraping routines below.
    sources = ['the-washington-post','bbc-news','cnn','breitbart-news']
    # The various general things you can run (Pick 1)
    # Currently: pull the full NYT archive for December 2017 into temp_data2.csv.
    big_batch_nyt(12, 2017)
    # nyt_scrape_meta_continuous(days=16, end_date=dt.datetime(2017, 7, 25)) r = requests.get(link)
    # html = r.content
    # soup = bs4.BeautifulSoup(html, 'html.parser')
    # soup.a.decompose()
    # tot_newsy(sources)
    # nyt_scrape_meta() # Good for getting today's nyt news
| {"/flask_app/words_to_vals.py": ["/flask_app/work_with_counts.py"]} |
76,405 | s0eger/working_title | refs/heads/master | /post_to_s3.py | # Takes in scraped information and sends it to an s3 bucket
import boto3
import pandas as pd
import os
def get_client_bucket():
    ''' Returns a boto3 resource object and my bucket for where my data is stored '''
    # Credentials come from the standard AWS environment variables.
    ACCESS_KEY = os.environ['AWS_ACCESS_KEY_ID']
    SECRET_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
    client = boto3.resource('s3',
                            aws_access_key_id=ACCESS_KEY,
                            aws_secret_access_key=SECRET_KEY,
                            )
    bucket = client.Bucket('peterrussodsiproj')
    return client, bucket
def create_new(start_df):
    ''' Given a fresh dataframe object, creates a csv locally and then saves that csv to bucket '''
    boto3_connection, bucket = get_client_bucket()
    start_df.to_csv('temp_data.csv')
    # upload_file(Filename, Key) uploads the local file's contents. The previous
    # Object('data.csv').put('temp_data.csv') passed a positional argument to put(),
    # which accepts keyword arguments only (and Body='temp_data.csv' would have
    # stored the literal filename string rather than the file contents).
    bucket.upload_file('temp_data.csv', 'data.csv')
def grab_df():
    ''' Grabs the csv from the bucket and returns it as a pandas dataframe '''
    boto3_connection, bucket = get_client_bucket()
    # NOTE(review): the key here is 'temp_data1.csv' while create_new/Add_New write
    # 'data.csv' -- confirm which object is intended.
    obj = bucket.Object('temp_data1.csv')
    # pandas cannot read a boto3 Object directly; stream the object's body instead.
    return pd.read_csv(obj.get()['Body'])
def Add_New(adding_df):
    ''' Takes additional data and appends it to the csv file on the bucket '''
    boto3_connection, bucket = get_client_bucket()
    obj = bucket.Object('data.csv')
    # Stream the stored csv body; read_csv cannot consume a boto3 Object directly.
    df = pd.read_csv(obj.get()['Body'])
    temp_cols = df.columns.copy()
    df = df.append(adding_df)
    # Appending can introduce unexpected columns; keep the original schema.
    for col in df.columns:
        if col not in temp_cols:
            df.drop(col, inplace=True, axis=1)
    df.to_csv('temp_data.csv')
    # Was `new_bucket.Object('data.csv').put('temp_data.csv')`: NameError
    # (new_bucket is undefined) plus the same put() misuse as create_new had;
    # upload the local file back to the same key instead.
    bucket.upload_file('temp_data.csv', 'data.csv')
if __name__ == '__main__':
    # Pull the stored article csv down from S3 for interactive use.
    df = grab_df()
| {"/flask_app/words_to_vals.py": ["/flask_app/work_with_counts.py"]} |
76,406 | s0eger/working_title | refs/heads/master | /flask_app/find_best_hyperparams.py | from words_to_vals import NMF_Time, _tokenize
from work_with_counts import Count_Worker
import pickle
from scipy.optimize import minimize, differential_evolution
import numpy as np
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
def minimize_error(weights, *args):
    """ Minimization function, the y values are stored in the main block.
    Fits an additive Holt-Winters (triple exponential smoothing) model to the
    count series in args[0] and scores m-step-ahead forecasts.
    Parameters
    ----------
    weights: a tuple of current alpha, beta, gamma values
    args: args[0] is Y, a (topics x time-periods) array of counts
    Returns
    -------
    error: Sum of squared errors value looking to be minimized
    """
    Y = args[0]
    periods_ahead = 6
    m = 7  # forecast horizon (periods ahead) used in the error term
    alpha = weights[0]  # level smoothing factor
    beta = weights[1]   # trend smoothing factor
    gamma = weights[2]  # seasonal smoothing factor
    s = Y.copy()             # level component
    b = np.zeros_like(s)     # trend component
    c = np.zeros_like(s)     # seasonal component
    L = 42 # weekly, sampling rate is 4 hours -> 7 days/week * 24 hours/day / 4 hours/sample = 42 samples/week
    n_cycles = s.shape[1] // L
    c_0 = np.zeros((s.shape[0],L))
    # Mean of each full season, used to de-trend the initial seasonal estimates.
    avgs = [np.sum(s[:,i*L:(i+1)*L],axis=1)/L for i in range(n_cycles)]
    for i in range(L):
        # Initial trend: average one-season-apart difference.
        b[:,0] += (s[:,i+L]-s[:,i])/(L*L)
        # Initial seasonal indices averaged over all observed cycles.
        c_0[:,i] = sum([s[:,L*j + i]-avgs[j] for j in range(n_cycles)])/n_cycles
    c[:,0]=c_0[:,0]
    # NOTE(review): this loop runs over s.shape[0] (number of topic rows) but indexes
    # TIME columns s[:,i]; for a time iteration it should presumably be s.shape[1] -- confirm.
    for i in range(1, s.shape[0]):
        if i < L:
            # Within the first season, fall back on the initial seasonal estimates c_0.
            s[:,i]=alpha*(Y[:,i]-c_0[:,i])+(1-alpha)*(s[:,i-1] + b[:,i-1])
            b[:,i]=beta*(s[:,i]-s[:,i-1])+(1-beta)*b[:,i-1]
            c[:,i]=gamma*(Y[:,i]-s[:,i])+(1-gamma)*c_0[:,i]
        else:
            s[:,i]=alpha*(Y[:,i]-c[:,i-L])+(1-alpha)*(s[:,i-1] + b[:,i-1])
            b[:,i]=beta*(s[:,i]-s[:,i-1])+(1-beta)*b[:,i-1]
            c[:,i]=gamma*(Y[:,i]-s[:,i])+(1-gamma)*c[:,i-L]
    error = 0
    # NOTE(review): np.sum(...)**2 squares the SUM of residuals across topics rather
    # than summing squared residuals; a true SSE would be np.sum((...)**2) -- confirm.
    # for i in range(s.shape[0]): # For each topic
    for j in range(s.shape[1]-periods_ahead): #for all times that can be predicted ahead
        error += np.sum(Y[:,j+m-1]-(s[:,j]+m*b[:,j]+c[:,(j+m)%L]))**2
    return error
def minimize_start():
    """ Utilizes scipy optimization to minimize the Holt-Winter hyper-parameters
    Parameters
    ----------
    None
    Returns
    -------
    The response object of the scipy optimization
    """
    with open('app_model/output_data.pkl','rb') as f:
        cw = pickle.load(f)
    # Initial (alpha, beta, gamma) guess; only used by the commented `minimize` path.
    w0 = np.array([0.2,0.2,0.2])
    tsum = np.sum(cw.topic_counts, axis=1)
    sum_sort = np.argsort(tsum)
    # NOTE(review): sum_sort[:50] selects the 50 LOWEST-count topics; if the intent
    # was the most active topics it should be sum_sort[:-51:-1] -- confirm.
    Y = cw.smooth_data[sum_sort[:50],:]
    # Each smoothing weight lives in [0, 1].
    r = (0.0,1.0)
    # return minimize(minimize_error, w0, args = (Y), bounds=[r,r,r])
    return differential_evolution(minimize_error, args = (Y,), bounds=[r,r,r])
def generate_model(data_location, save_model=True):
    """ Generates a model for the flask app to utilize as data source
    Parameters
    ----------
    data_location: the relative location of the data to generate the model
    save_model: if you want to save this generated model for the flask app to use
    Returns
    -------
    nmf_model: the generated model
    """
    nmf_model = NMF_Time(top_n_words=25, verbose=True)
    df = pd.read_csv(data_location, index_col=0)
    df = df[df['news_source'] == 'NYT'] # Currently due to not enough from other sources
    # Fit the vectorizer + NMF factorization (500 topics) on the article bodies.
    nmf_model.generate_topics(df['content'].values, tok=_tokenize, min_df = 0.005, max_features = 10000, n_components=500)
    # Bin topic activity into 4-hour periods.
    nmf_model.perform_time_counting_self(df, delta=dt.timedelta(hours=4), threshold=0.05)
    if save_model:
        nmf_model.save_model()
    return nmf_model
def load_prior_model():
    ''' Loads and returns the currently saved pickled model found under '/app_model' '''
    # NMF_Time's constructor handles the unpickling when load_model=True.
    return NMF_Time(load_model=True)
def valid_error(new_df):
    """Score the saved model's forecasts against newly collected articles.

    Counts topic activity over the new articles (continuing the saved timeline),
    forecasts the same horizon from the previously saved counts, and returns
    (error, base_error, times): RMSE of the model forecasts, RMSE of a
    persistence baseline (repeat the last observed count), and the forecast
    timestamps.
    """
    model = NMF_Time(load_model=True)
    new_df['pub_date'] = pd.to_datetime(new_df['pub_date'])
    # Only articles newer than the last counted period are usable for validation.
    new_df = new_df[new_df['pub_date'] > model.times[-1]]
    model.perform_time_counting_new(new_df,threshold=0.05, prior_times=True)
    cw = Count_Worker(model)
    with open('app_model/output_data.pkl','rb') as f:
        pcw = pickle.load(f)
    # Apply the SAME topic keep-mask the saved worker used, so topic rows line up.
    mask = np.sum(pcw.all_counts,axis=1) >= 3
    cw.topic_counts = cw.all_counts[mask,:]
    cw.W_a = cw.W[:,mask]
    # cw.topics = cw.all_topics[mask,:]
    # cw.topics = {i : cw.topics[i,:] for i in range(pcw.topics.shape[0])}
    cw.dc = cw.all_dc[mask]
    cw.data_smoothing()
    # Evaluate on the 25 topics the saved model ranked as most trending.
    top_topics = pcw.trending_order[:25]
    # top_topics = np.argsort(np.sum(pcw.topic_counts,axis=1))[:-26:-1]
    test_counts = cw.topic_counts[top_topics,:]
    pcw.predict_all(periods_ahead=cw.times.shape[0])
    test_predicted = pcw.predicted_values[top_topics,:]
    # Per-period RMSE of the model forecasts across the selected topics.
    error = (np.sum((test_counts - test_predicted[:,1:])**2,axis=0)/cw.topic_counts.shape[0])**0.5
    # Persistence baseline: every future period predicted as the last observed count.
    base_counts = np.zeros_like(cw.topic_counts)
    for i in range(base_counts.shape[1]):
        base_counts[:,i]= pcw.topic_counts[:,-1]
    base_counts = base_counts[top_topics,:]
    base_error = (np.sum((test_counts - base_counts)**2,axis=0)/cw.topic_counts.shape[0])**0.5
    return error, base_error, pcw.predicted_times[1:]
def show_example_trend(topic_index = 1):
    """Plot one topic's smoothed counts against rolling one-step-ahead predictions.

    topic_index: row of the smoothed count matrix to visualize.
    Also prints the topic's top-word dictionary for context.
    """
    with open('app_model/output_data.pkl','rb') as f:
        cw = pickle.load(f)
    p_vals = np.zeros(cw.times.shape[0])
    test_vals = cw.smooth_data[topic_index,:]
    L = 6
    # starting points are i*6
    # for i in range (1, p_vals.shape[0]):
    # p_vals[i] = cw.triple_exp_predict(topic_index,periods_ahead= 1 + ((i-1) % L), at_time=6*((i-1) // L))[0]
    # One-step-ahead prediction for every period, anchored at the previous period.
    for i in range (1, p_vals.shape[0]):
        p_vals[i] = cw.triple_exp_predict(topic_index,periods_ahead = 1, at_time= i - 1)[0]
    # Counts cannot be negative; clamp the forecasts at zero.
    p_vals = np.clip(p_vals,a_min=0,a_max=None)
    plt.plot(cw.times,test_vals,'b',linewidth=5,alpha=0.5, label='Actual')
    plt.plot(cw.times, p_vals,c='k',linewidth=2,ls='--',label='Predicted')
    # plt.ylabel('Article Counts', fontsize=18)
    # plt.xlabel('Date (Year-Month)',fontsize=18)
    plt.xticks(fontsize=14)
    plt.yticks(fontsize=14)
    plt.legend()
    plt.show()
    print(cw.all_dc[topic_index])
if __name__ == '__main__':
    # obj = generate_model('../article_data.csv',save_model=True)
    # result = minimize_start()
    # Validate the saved model against the newer scrape stored in temp_data2.csv.
    df = pd.read_csv('../temp_data2.csv',index_col=0)
    err, b_err, times = valid_error(df)
    # show_example_trend()
| {"/flask_app/words_to_vals.py": ["/flask_app/work_with_counts.py"]} |
76,407 | s0eger/working_title | refs/heads/master | /flask_app/work_with_counts.py | import numpy as np
import pyflux as pf
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import pandas as pd
from itertools import product
class Count_Worker(object):
"""docstring for Count_Worker."""
def __init__(self, obj):
    """Copy the count/topic state off a fitted NMF_Time-like object.

    obj must expose: counts, total_counts, topics, topic_dc, times, W,
    nmf.components_, and topic_threshold.
    """
    # counts arrive as (periods x topics); store transposed as (topics x periods).
    self.all_counts = obj.counts.T
    self.topic_counts = self.all_counts.copy()  # working copy, later filtered
    self.total_counts = obj.total_counts
    self.all_topics = obj.topics
    self.topics = self.all_topics.copy()
    self.all_dc = obj.topic_dc  # per-topic {word: weight} dicts
    self.dc = self.all_dc.copy()
    self.times = obj.times
    self.W = obj.W  # document-topic matrix
    self.H = obj.nmf.components_  # topic-term matrix
    self.web_index = None
    self.topic_threshold = obj.topic_threshold
def setup_work(self):
    """Run the full post-processing pipeline: filter low-activity topics, map
    articles to topics, compute count derivatives, smooth, forecast, and rank
    trending topics. Order matters: each step consumes the previous one's output."""
    self.drop_useless_topics()
    self.create_article_topic_relation()
    self.calc_accel()
    self.double_exp_smoothing()
    self.data_smoothing()
    self.predict_all()
    self.find_trending_topics()
def drop_useless_topics (self, useful_count = 3):
    """Filter out topics whose total article count is below `useful_count`.

    Assigns: topic_counts / W_a / topics / dc as the filtered views of the
    corresponding all_* attributes; `topics` becomes a dict keyed by the new
    (compacted) topic index.
    """
    # Compute the keep-mask once instead of re-deriving it for every assignment.
    mask = np.sum(self.all_counts, axis=1) >= useful_count
    self.topic_counts = self.all_counts[mask, :]
    self.W_a = self.W[:, mask]
    kept_topics = self.all_topics[mask, :]
    self.topics = {i: kept_topics[i, :] for i in range(kept_topics.shape[0])}
    self.dc = self.all_dc[mask]
def create_article_topic_relation(self, useful_count = 3):
    """ Creates a dictionary where the keys are each topic and the values is a list of articles by their indece that relate to that topic
    Parameters
    ----------
    useful_count: minimum total count for a topic to be included (should match drop_useless_topics)
    Returns
    -------
    None
    Assigns self.article_relates as the dictionary between topics and articles
    """
    article_relates = dict()
    # Same keep-mask as drop_useless_topics, so topic indices stay aligned.
    mask = np.sum(self.all_counts,axis=1)>=useful_count
    for i in range(self.W.shape[1]):
        if mask[i]:
            # Indices of articles whose weight on topic i reaches the counting threshold.
            article_relates[i] = np.argwhere(self.W[:,i] >= self.topic_threshold)[:,0]
    self.article_relates = article_relates
def calc_accel(self):
    """Derive velocity and acceleration of per-topic article counts over time.

    Smooths each topic's count series with a 5-period moving average, then
    convolves with central-difference kernels. Assigns self._vel, self._accel,
    and self.pos_accel (acceleration masked to periods where both velocity and
    acceleration are positive, i.e. genuinely rising topics).
    """
    window = 5
    kernel = np.ones(window) / window
    smoothed = np.zeros_like(self.topic_counts)
    for row in range(self.topic_counts.shape[0]):
        smoothed[row] = np.convolve(self.topic_counts[row], kernel, mode='same')
    velocity = np.zeros_like(smoothed)
    acceleration = np.zeros_like(smoothed)
    for row in range(self.topic_counts.shape[0]):
        # First and second discrete derivatives via convolution.
        velocity[row] = np.convolve(smoothed[row], np.array([1,0,-1]),mode='same')
        acceleration[row] = np.convolve(smoothed[row], np.array([1,-2,1]),mode='same')
    self._vel = velocity
    self._accel = acceleration
    self.pos_accel = acceleration*(velocity > 0)*(acceleration > 0)
def plot_topics_across_time(self, top_n_topics=None):
    """ Plots the counts of the desired topics across time
    Parameters
    ----------
    top_n_topics: None shows all topics, an int plots the first that many topics,
                  a list/numpy array plots exactly those topic indices (if valid)
    Returns
    -------
    None; displays a plot of the selected topics
    """
    if type(self.topic_counts) != np.ndarray or type(self.times) != np.ndarray:
        print("Requires 'perform_time_counting' to be done first")
        return
    plt.close('all')
    if isinstance(top_n_topics, int):
        if top_n_topics > self.topic_counts.shape[0]:
            top_n_topics = self.topic_counts.shape[0]
        for i in range(top_n_topics):
            plt.plot(self.times, self.topic_counts[i], label=i)
    elif isinstance(top_n_topics, (np.ndarray, list)):
        # Was `type(top_n_topics) == np.array`: np.array is a factory function,
        # not a type, so that comparison was always False and array input fell
        # through to the plot-everything branch.
        for t in top_n_topics:
            # Skip out-of-range indices rather than raising.
            if t in range(self.topic_counts.shape[0]):
                plt.plot(self.times, self.topic_counts[t], label=t)
    else:
        for i in range(self.topic_counts.shape[0]):
            plt.plot(self.times, self.topic_counts[i], label=i)
    plt.legend()
    plt.show()
def plot_count_accel(self, topic_index=5):
    """Plot raw, smoothed, Holt-state and acceleration series for one topic.

    Parameters
    ----------
    topic_index : int
        Index of the topic to display.

    Returns
    -------
    None (shows a matplotlib figure).
    """
    window = 5
    smoothed = np.convolve(self.topic_counts[topic_index],
                           np.ones(window) / window, mode='same')
    plt.close('all')
    plt.plot(self.times, self.topic_counts[topic_index], '--', alpha=0.6, label='Counts')
    plt.plot(self.times, smoothed, label='Counts Smoothed')
    plt.plot(self.times, self._s[topic_index], ':', label='S')
    plt.plot(self.times, self._b[topic_index], '-.', label='B')
    plt.plot(self.times, self.pos_accel[topic_index], label='Acceleration')
    plt.legend()
    plt.show()
# TODO: look at statsmodels to see what they can offer http://www.statsmodels.org/stable/vector_ar.html#module-statsmodels.tsa.vector_ar
# http://www.statsmodels.org/stable/vector_ar.html#module-statsmodels.tsa.vector_ar.var_model
# TODO: look at the capabilities of http://www.pyflux.com/
#TODO: cut counts data off early a couple time periods and use this to go back in and predict those values
# Vary alpha and beta to see if there are optimal values for predictions
def double_exp_smoothing(self, alpha=0.5, beta=0.5):
    """Apply double (Holt) exponential smoothing to the topic counts.

    Parameters
    ----------
    alpha : float
        Level smoothing factor in [0, 1].
    beta : float
        Trend smoothing factor in [0, 1].

    Returns
    -------
    None; stores the level in self._s and the trend in self._b
    (both shaped topics x time), which predict_ahead() consumes.
    """
    # s holds the smoothed level, b the trend, one row per topic.
    s = self.topic_counts.copy()
    b = np.zeros_like(s)
    # BUG FIX: the original initialised the trend via `b[0,:]` / `b[1,:]`,
    # which indexes the TOPIC axis, not time (it crashed with fewer than
    # 6 topics and left raw counts as the initial trend for topics >= 2).
    # The 1-D version in exp_smooth_range shows the intent: initialise
    # the trend along the TIME axis for every topic.
    b[:, 0] = (s[:, 5] - s[:, 0]) / 5
    b[:, 1] = s[:, 1] - s[:, 0]
    for i in range(2, s.shape[1]):
        s[:, i] = alpha * s[:, i] + (1 - alpha) * (s[:, i - 1] + b[:, i - 1])
        b[:, i] = beta * (s[:, i] - s[:, i - 1]) + (1 - beta) * b[:, i - 1]
    self._s = s
    self._b = b
def exp_smooth_range(self, topic_index=0):
# Compare double-exponential smoothing across a 3x3 grid of alpha/beta
# values on a single topic, then plot each smoothed series against the
# raw counts to eyeball good hyper-parameters.
alphas = [0.1,0.5,0.9]
betas = [0.1,0.5,0.9]
s_vals = dict()
b_vals = dict()
for al in alphas:
for bt in betas:
s = self.topic_counts[topic_index].copy()
b = s.copy()
# Initial trend: average slope over the first five samples.
b[0] = (s[5]-s[0])/5
# b started as a copy of s, so this makes b[1] = s[1] - s[0].
b[1] -= s[0]
for i in range(2,s.shape[0]):
s[i] = al*s[i]+(1-al)*(s[i-1]+b[i-1])
b[i] = bt*(s[i]-s[i-1])+(1-bt)*b[i-1]
s_vals[(al,bt)] = s
b_vals[(al,bt)] = b
plt.close('all')
plt.plot(self.times,self.topic_counts[topic_index], alpha=0.5, label='Counts')
# One dashed curve per (alpha, beta) combination.
for k, v in s_vals.items():
plt.plot(self.times, v, '--', label='a:{} , b:{}'.format(k[0],k[1]))
plt.legend()
plt.show()
def predict_ahead(self, topic_index=0, periods_ahead=1):
    """Forecast a topic's count from the double-exponential state.

    Parameters
    ----------
    topic_index : int
        Topic whose count to forecast.
    periods_ahead : int
        Number of periods past the final observation.

    Returns
    -------
    float: level + periods_ahead * trend, taken at the last time step.
    """
    level = self._s[topic_index, -1]
    trend = self._b[topic_index, -1]
    return level + periods_ahead * trend
def bte (self, topic_index=0):
# Fit an EGARCH(1, 1) model (pyflux) to one topic's count series and
# display the fit summary and plot.
model = pf.EGARCH(self.topic_counts[topic_index],p=1,q=1)
x = model.fit()
x.summary()
model.plot_fit()
def gasy (self, topic_index=0):
# Fit a Poisson GAS(2, 2) model (pyflux) to one topic's count series
# and display the fit summary and plot.
model = pf.GAS(ar=2, sc=2, data=self.topic_counts[topic_index], family=pf.Poisson())
x = model.fit()
x.summary()
model.plot_fit()
def data_smoothing(self):
    """Smooth the topic counts with a normalised Gaussian kernel (sigma=4).

    Stores the smoothed series, scaled by 6.0, on self.smooth_data
    (same shape as self.topic_counts).
    """
    import scipy.stats as scs
    width = 13
    # Gaussian weights centred on the window midpoint, normalised to sum to 1.
    offsets = np.arange(width) - width // 2
    kernel = scs.norm(scale=4).pdf(offsets)
    kernel = kernel / np.sum(kernel)
    self.smooth_data = 1.0 * self.topic_counts.copy()
    for row in range(self.topic_counts.shape[0]):
        self.smooth_data[row] = 6.0 * np.convolve(self.topic_counts[row], kernel, mode='same')
def plot_smoothing_techniques(self):
# Stack three subplots comparing the raw counts, the Gaussian-averaged
# counts (data_smoothing) and the exponentially smoothed level (_s).
plt.close('all')
plt.subplot(3,1,1)
plt.title('Simple Counts')
for i in range(self.topic_counts.shape[0]):
plt.plot(self.times, self.topic_counts[i], label=i)
plt.subplot(3,1,2)
plt.title('Smoothed Counts (Avg)')
for i in range(self.topic_counts.shape[0]):
plt.plot(self.times, self.smooth_data[i], label=i)
plt.subplot(3,1,3)
plt.title('Smoothed Counts (Exp)')
for i in range(self.topic_counts.shape[0]):
plt.plot(self.times, self._s[i], label=i)
plt.show()
def triple_exp_smoothing(self, alpha=0.5, beta=0.5, gamma=0.5):
    """Holt-Winters (triple exponential) smoothing of self.smooth_data.

    Fits level (s), trend (b) and an additive seasonal component (c)
    with a weekly season of L = 42 samples (4-hour sampling).  Stores
    them on self.s_e3 / self.b_e3 / self.c_e3 for triple_exp_predict().

    Parameters
    ----------
    alpha, beta, gamma : float
        Level / trend / seasonal smoothing factors in [0, 1].

    Returns
    -------
    None
    """
    s = self.smooth_data.copy()
    b = np.zeros_like(s)
    c = b.copy()
    L = 42  # weekly: 7 days/week * 24 hours/day / 4 hours/sample = 42 samples/week
    n_cycles = s.shape[1] // L  # needs >= 2 full seasons (s[:, i+L] below)
    c_0 = np.zeros((s.shape[0], L))
    avgs = [np.sum(s[:, i*L:(i+1)*L], axis=1)/L for i in range(n_cycles)]
    for i in range(L):
        # Initial trend: mean per-sample change between consecutive seasons.
        b[:, 0] += (s[:, i+L] - s[:, i]) / (L*L)
        # Initial seasonal indices: mean deviation from each season's average.
        c_0[:, i] = sum([s[:, L*j + i] - avgs[j] for j in range(n_cycles)]) / n_cycles
    c[:, 0] = c_0[:, 0]
    # BUG FIX: the original looped `range(1, s.shape[0])`, i.e. over the
    # number of TOPICS, so only the first few time steps were ever fitted.
    # `i` indexes the time axis throughout, so it must run to s.shape[1].
    for i in range(1, s.shape[1]):
        if i < L:
            # First season: no fitted seasonal value yet, use the initial c_0.
            s[:, i] = alpha*(self.smooth_data[:, i] - c_0[:, i]) + (1-alpha)*(s[:, i-1] + b[:, i-1])
            b[:, i] = beta*(s[:, i] - s[:, i-1]) + (1-beta)*b[:, i-1]
            c[:, i] = gamma*(self.smooth_data[:, i] - s[:, i]) + (1-gamma)*c_0[:, i]
        else:
            s[:, i] = alpha*(self.smooth_data[:, i] - c[:, i-L]) + (1-alpha)*(s[:, i-1] + b[:, i-1])
            b[:, i] = beta*(s[:, i] - s[:, i-1]) + (1-beta)*b[:, i-1]
            c[:, i] = gamma*(self.smooth_data[:, i] - s[:, i]) + (1-gamma)*c[:, i-L]
    self.s_e3 = s
    self.b_e3 = b
    self.c_e3 = c
def triple_exp_predict(self, topic_index=0, periods_ahead=1, at_time=-1):
    """Forecast one topic with the fitted Holt-Winters components.

    Parameters
    ----------
    topic_index : int
        Topic to forecast.
    periods_ahead : int
        Number of future periods to forecast.
    at_time : int
        Time index to forecast from; negatives count from the end.

    Returns
    -------
    numpy.ndarray of length periods_ahead with the forecast counts.
    """
    L = 42
    # Normalise a negative index to its positive equivalent.
    while at_time < 0:
        at_time += self.s_e3.shape[1]
    level = self.s_e3[topic_index, at_time]
    trend = self.b_e3[topic_index, at_time]
    season = self.c_e3[topic_index]
    return np.array([level + (step + 1) * trend + season[(at_time + step + 1) % L]
                     for step in range(periods_ahead)])
def triple_exp_error(self, alpha=0.5, beta=0.5, gamma=0.5, periods_ahead=1):
    """RMSE of Holt-Winters forecasts against the raw topic counts.

    Fits level/trend/season on self.smooth_data with the given
    hyper-parameters, forecasts up to `periods_ahead` steps from every
    time point, and returns the root-mean-square error versus
    self.topic_counts.

    Parameters
    ----------
    alpha, beta, gamma : float
        Smoothing factors in [0, 1].
    periods_ahead : int
        Forecast horizon evaluated at every time step.

    Returns
    -------
    float: RMSE over all topics, forecast origins and horizons.
    """
    s = self.smooth_data.copy()
    b = np.zeros_like(s)
    c = np.zeros_like(s)
    L = 42  # weekly: 7 days/week * 24 hours/day / 4 hours/sample = 42 samples/week
    n_cycles = s.shape[1] // L
    c_0 = np.zeros((s.shape[0], L))
    avgs = [np.sum(s[:, i*L:(i+1)*L], axis=1)/L for i in range(n_cycles)]
    for i in range(L):
        b[:, 0] += (s[:, i+L] - s[:, i]) / (L*L)
        c_0[:, i] = sum([s[:, L*j + i] - avgs[j] for j in range(n_cycles)]) / n_cycles
    c[:, 0] = c_0[:, 0]
    # BUG FIX: the original looped `range(1, s.shape[0])` (topics) instead
    # of `range(1, s.shape[1])` (time), so the fit only covered the first
    # few samples.  Same fix as triple_exp_smoothing.
    for i in range(1, s.shape[1]):
        if i < L:
            s[:, i] = alpha*(self.smooth_data[:, i] - c_0[:, i]) + (1-alpha)*(s[:, i-1] + b[:, i-1])
            b[:, i] = beta*(s[:, i] - s[:, i-1]) + (1-beta)*b[:, i-1]
            c[:, i] = gamma*(self.smooth_data[:, i] - s[:, i]) + (1-gamma)*c_0[:, i]
        else:
            s[:, i] = alpha*(self.smooth_data[:, i] - c[:, i-L]) + (1-alpha)*(s[:, i-1] + b[:, i-1])
            b[:, i] = beta*(s[:, i] - s[:, i-1]) + (1-beta)*b[:, i-1]
            c[:, i] = gamma*(self.smooth_data[:, i] - s[:, i]) + (1-gamma)*c[:, i-L]
    error = 0
    for i in range(s.shape[0]):  # for each topic
        for j in range(s.shape[1] - periods_ahead):  # each forecast origin
            for m in range(1, periods_ahead + 1):  # each horizon
                # NOTE(review): triple_exp_predict uses m*b for an m-step
                # forecast while (m+1)*b is used here -- looks off by one,
                # but is kept as-is to preserve the recorded tuning results.
                error += (self.topic_counts[i, j+m] - (s[i, j] + (m+1)*b[i, j] + c[i, (j+m) % L]))**2 / periods_ahead
    return (error / (periods_ahead * (s.shape[1] - periods_ahead) * s.shape[0]))**0.5
# (0.0, 0.80000000000000004, 1.0) 3.066492
def triple_exp_select (self, N=3, start_new=False):
""" Triple Exponential hyper-parameter selection.
Evaluates triple_exp_error on an N x N x N grid of (alpha, beta, gamma)
values in (0, 1), appending each result to 'error_data1.csv' so a long
search can be resumed after an interruption.
Parameters
----------
N: number of grid points per hyper-parameter
start_new: True starts a fresh results table; False resumes from the CSV
Returns
-------
None; results are written to error_data1.csv
"""
r = np.linspace(0.01,0.99,N)
if start_new:
df = pd.DataFrame(columns=['hyperparams','error'])
else:
df = pd.read_csv('error_data1.csv',index_col=0)
for c in product(r,repeat=3):
# NOTE(review): after a CSV round-trip, hyperparams come back as strings,
# so this tuple membership test likely never matches and resumed runs
# recompute everything -- confirm before relying on resumption.
if c not in df.hyperparams.values:
print("Working on", c)
error = self.triple_exp_error(alpha=c[0],beta=c[1],gamma=c[2],periods_ahead=6)
df.loc[len(df.index)]=[c,error]
# Persist after every evaluation so progress survives a crash.
df.to_csv('error_data1.csv')
def plot_triple_exp_from_df(self):
# Render the saved hyper-parameter search (error_data1.csv) as 3D error
# surfaces over (alpha, beta), one surface per gamma value, and mark the
# global minimum with a black dot.
fig = plt.figure()
ax = fig.gca(projection='3d')
df = pd.read_csv('error_data1.csv',index_col=0)
# hyperparams were stored as "(a, b, g)" strings; parse back to floats.
hp = np.array([t.strip('(').strip(')').split(',') for t in df.hyperparams.values]).astype(float)
df['alpha']= hp[:,0]
df['beta'] = hp[:,1]
df['gamma'] = hp[:,2]
for g in df['gamma'].unique():
df_min = df[df.gamma == g]
# df_min = df[df['gamma']==df[df['error']==df.error.min()].gamma.values[0]]
X, Y = np.meshgrid(df_min.alpha.unique(), df_min.beta.unique())
Z = np.zeros_like(X)
for i in range(Z.shape[0]):
# NOTE(review): rows of Z are filled from a fixed-alpha slice while
# meshgrid rows vary beta -- the axes may be transposed here; verify
# against a known minimum before trusting the surface orientation.
Z[i,:]=df_min[df_min.alpha == X[0,i]].error.values
surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm, linewidth=0, antialiased=False)
e_min = np.argmin(df.error.values)
ax.scatter(hp[e_min,0], hp[e_min,1], df.error.min(), c='k',s=50)
ax.set_xlabel('alpha')
ax.set_ylabel('beta')
ax.set_zlim(df.error.min()-.1,df.error.max()+.1)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
# Finds alpha/beta with lowest error
def exp_dc(self, periods_ahead=1):
    """Grid-search alpha/beta for double exponential smoothing.

    Evaluates double_exp_comparions on an 11x11 grid over [0, 1].

    Returns
    -------
    (best_alpha, best_beta, errors): the grid point with the lowest
    error and the full error matrix (alphas x betas).
    """
    grid_size = 11
    alphas = np.linspace(0, 1, grid_size)
    betas = alphas.copy()
    errors = np.zeros((grid_size, grid_size))
    for ai in range(grid_size):
        for bi in range(grid_size):
            errors[ai, bi] = self.double_exp_comparions(
                alpha=alphas[ai], beta=betas[bi], periods_ahead=periods_ahead)
    # Flat argmin: row index is the alpha, column index the beta.
    best = np.argmin(errors)
    return alphas[best // grid_size], betas[best % grid_size], errors
def double_exp_comparions(self, alpha=0.5, beta=0.5, periods_ahead=1):
    """Mean absolute error of double-exponential forecasts.

    Fits Holt smoothing on self.smooth_data and scores
    `periods_ahead`-step forecasts against self.topic_counts.

    Returns
    -------
    float: accumulated absolute error, normalised by the series length.
    """
    error = 0
    s = self.smooth_data.copy()
    b = np.zeros_like(s)
    # BUG FIX: as in double_exp_smoothing, the trend was initialised with
    # b[0,:] / b[1,:], which indexes topics rather than time; initialise
    # along the time axis for every topic instead.
    b[:, 0] = (s[:, 5] - s[:, 0]) / 5
    b[:, 1] = s[:, 1] - s[:, 0]
    for i in range(2, s.shape[1]):
        s[:, i] = alpha * s[:, i] + (1 - alpha) * (s[:, i-1] + b[:, i-1])
        b[:, i] = beta * (s[:, i] - s[:, i-1]) + (1 - beta) * b[:, i-1]
    for i in range(s.shape[0]):
        for j in range(periods_ahead, s.shape[1]):
            error += np.abs(self.topic_counts[i, j] - (s[i, j-periods_ahead] + periods_ahead * b[i, j-periods_ahead])) / s.shape[1]
    return error
# 0.37 1.0 24.2570408562
def plot_double_exp_vars(self, periods_ahead=1, N=11):
# Surface-plot the double-exponential forecast error over an N x N grid
# of (alpha, beta), printing and marking the minimum-error combination.
alphas = np.linspace(0.1,1,N)
betas = np.linspace(0.1,1,N)
errors = np.zeros((N,N))
for i in range(N):
for j in range(N):
errors[i,j] = self.double_exp_comparions(alpha = alphas[i], beta = betas[j],periods_ahead = periods_ahead)
X, Y = np.meshgrid(alphas,betas)
fig = plt.figure()
ax = fig.gca(projection='3d')
surf = ax.plot_surface(X, Y, errors, cmap=cm.coolwarm, linewidth=0, antialiased=False)
# Flat argmin: row index (//N) is the alpha, column index (%N) the beta.
e_min = np.argmin(errors)
print(alphas[e_min // N], betas[e_min % N], np.min(errors))
ax.scatter([alphas[e_min // N]], [betas[e_min % N]], np.min(errors), c='k',s=50)
ax.set_xlabel('alpha')
ax.set_ylabel('beta')
ax.set_zlim(np.min(errors)-.1,np.max(errors)+.1)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
def predict_all(self, periods_ahead=6):
    """Forecast every topic `periods_ahead` steps with tuned Holt-Winters.

    Refits triple_exp_smoothing with the grid-searched hyper-parameters,
    clips forecasts at zero and stores self.predicted_values (topics x
    periods_ahead+1, column 0 is the last observation) and the matching
    self.predicted_times.
    """
    # Hyper-parameters found by triple_exp_select.
    params = np.array([0.79134282, 0.80664629, 0.94580101])
    self.triple_exp_smoothing(alpha=params[0], beta=params[1], gamma=params[2])
    n_topics = self.smooth_data.shape[0]
    predicted = np.zeros((n_topics, periods_ahead + 1))
    predicted[:, 0] = self.smooth_data[:, -1]  # anchor at the last observation
    for topic in range(n_topics):
        predicted[topic, 1:] = self.triple_exp_predict(topic_index=topic,
                                                       periods_ahead=periods_ahead)
    self.predicted_values = np.clip(predicted, a_min=0.0, a_max=None)
    step = self.times[1] - self.times[0]
    self.predicted_times = np.array([self.times[-1] + step * i
                                     for i in range(periods_ahead + 1)])
def plot_predicted_thginy(self,topic_index=0):
# Plot the last week of smoothed counts for one topic together with the
# forecast horizon produced by predict_all(), marking "today".
# (Method name appears to be a typo for "thingy"; kept for callers.)
pb = 42 # One week period back
pa = 6 # one day ahead  (NOTE: currently unused)
counts_back = self.smooth_data[topic_index,-pb:]
back_times = self.times[-pb:]
counts_fwd = self.predicted_values[topic_index]
plt.plot(back_times, counts_back,'r',label='Current')
plt.plot(self.predicted_times, counts_fwd,'r--',label='Predicted')
# Vertical marker at the boundary between observed and predicted.
plt.axvline(x=self.predicted_times[0], c='k', ls=':',label='Today')
plt.legend()
plt.show()
def find_trending_topics(self):
'''
Creates a sorted order of topics in descending order of trendingness
'''
# 48 = 42 observed samples (one week) + 6 forecast steps; the two spans
# overlap at column 41 because predicted_values[:,0] repeats the last
# observation.  NOTE(review): these widths assume predict_all ran with
# its default periods_ahead=6 -- confirm if that default changes.
recent_counts = np.zeros((self.smooth_data.shape[0], 48))
recent_counts[:,:42] = self.smooth_data[:,-42:]
recent_counts[:,41:] = self.predicted_values
trend = np.zeros((self.smooth_data.shape[0],3))
points = np.zeros_like(recent_counts)
for i in range(recent_counts.shape[0]):
# Skip near-silent topics; a quadratic fit on noise is meaningless.
if np.sum(recent_counts[i]) >= 3:
trend[i], points[i] = self.recent_trend_strength(recent_counts[i])
else:
trend[i,:] = np.array([0.0,0.0,0.0])
points[i] = np.zeros_like(recent_counts[i])
self.trend_points = points
self.trend_times = np.array([self.times[-42] + i*(self.times[1] - self.times[0]) for i in range(recent_counts.shape[1])])
# Rank by the quadratic coefficient, zeroing topics with a negative
# linear term (declining topics).
trend = trend[:,0]*(1.0*trend[:,1] >= 0.0)
self.trending_order = np.argsort(trend)[::-1]
def recent_trend_strength(self, counts):
    """Fit a quadratic to a count series and return (coefficients, fitted curve).

    Parameters
    ----------
    counts : 1-D numpy array of counts over time.

    Returns
    -------
    (vals, points): polyfit coefficients [a, b, c] of a*t**2 + b*t + c,
    and the fitted values evaluated at every time index (same dtype as
    `counts`).
    """
    t = np.arange(counts.shape[0])
    coeffs = np.polyfit(t, counts, deg=2)
    fitted = np.zeros_like(counts)
    for idx in range(fitted.shape[0]):
        fitted[idx] = coeffs[0] * idx ** 2 + coeffs[1] * idx + coeffs[2]
    return np.array(coeffs), fitted
# import pickle
# with open('app_model/output_data.pkl','rb') as f:
# cw = pickle.load(f)
# cw.plot_double_exp_vars(N=3)
# END
| {"/flask_app/words_to_vals.py": ["/flask_app/work_with_counts.py"]} |
76,422 | hirusha-adi/Contact-Manager-CLI | refs/heads/main | /backend.py | import sqlite3
def connect():
    """Create the contacts database and table if they do not exist yet.

    Table `conlist`: id INTEGER PRIMARY KEY, name text, pnumber text.
    """
    conn = sqlite3.connect("contacts.db")
    try:
        with conn:  # commits on success, rolls back on error
            conn.execute("CREATE TABLE IF NOT EXISTS conlist (id INTEGER PRIMARY KEY, name text, pnumber text)")
    finally:
        # FIX: the original leaked the connection if execute() raised.
        conn.close()
def insert(name, number):
    """Insert a new contact; the id is auto-assigned by SQLite.

    :param name: contact name
    :param number: contact phone number
    """
    conn = sqlite3.connect("contacts.db")
    try:
        with conn:  # commits on success, rolls back on error
            conn.execute("INSERT INTO conlist VALUES (NULL,?,?)", (name, number))
    finally:
        # FIX: the original leaked the connection if execute() raised.
        conn.close()
def view():
    """Return every contact as a list of (id, name, pnumber) tuples."""
    conn = sqlite3.connect("contacts.db")
    try:
        # Read-only query: no commit needed (the original committed needlessly).
        return conn.execute("SELECT * FROM conlist").fetchall()
    finally:
        # FIX: the original leaked the connection if execute() raised.
        conn.close()
def delete(id):
    """Delete the contact with the given primary key.

    NOTE: the parameter name `id` shadows the builtin; it is kept for
    backward compatibility with keyword callers.
    """
    conn = sqlite3.connect("contacts.db")
    try:
        with conn:  # commits on success, rolls back on error
            conn.execute("DELETE FROM conlist WHERE id=?", (id,))
    finally:
        # FIX: the original leaked the connection if execute() raised.
        conn.close()
def search(name="", number=""):
    """Return contacts whose name OR number matches exactly.

    The empty-string defaults only match contacts stored with an empty
    field, so calling with one argument effectively searches that field.
    """
    conn = sqlite3.connect("contacts.db")
    try:
        # Read-only query: no commit needed (the original committed needlessly).
        return conn.execute("SELECT * FROM conlist WHERE name=? OR pnumber=?", (name, number)).fetchall()
    finally:
        # FIX: the original leaked the connection if execute() raised.
        conn.close()
# def update(id, name, number):
| {"/frontend.py": ["/backend.py"]} |
76,423 | hirusha-adi/Contact-Manager-CLI | refs/heads/main | /frontend.py | import os
import time
import backend
import platform
import datetime
class bc:
# ANSI escape codes used to colour terminal output throughout the CLI.
G = '\033[92m' #GREEN
Y = '\033[93m' #YELLOW
R = '\033[91m' #RED
A = '\033[0m' #RESET COLOR
def CONTACT_PROGRAM_ENTIRE_PROGRAM():
# One full iteration of the contact-manager REPL: (re)reads the logging
# flag, ensures log files exist, prompts for a command, dispatches it,
# and finally calls itself to loop.  Logging of each action is appended
# to log.txt whenever log_status.txt contains "on".
#
# Ensure the logging-flag file exists (default: logging enabled).
if os.path.isfile("log_status.txt") == True:
pass
else:
with open("log_status.txt", 'w') as file:
file.write("on")
# Read the current logging flag into `logs` ("on"/"off").
try:
filelogs1 = open("log_status.txt", "r")
logs = filelogs1.read()
filelogs1.close()
except Exception as e:
print(f'{bc.R}\nError has occurred: {e}{bc.A}')
# Create the log file on first run.
if os.path.isfile("log.txt") == True:
pass
else:
file = open("log.txt", "w")
file.write(f'\n{datetime.datetime.now()} - Log File Created')
if logs == "on":
file.write(f'\nLogging is enabled')
else:
file.write(f'\nLogging is disabled')
file.close()
# Clear the console and log the action (platform-aware clear command).
def CLEAR_L():
clear = "clear"
if platform.system() == 'Windows':
clear = "cls"
os.system(clear)
print(f'{datetime.datetime.now()} - Cleared Screen')
if logs == "on":
file = open("log.txt", "a+")
file.write(f'\n{datetime.datetime.now()} - Cleared Screen')
file.close()
else:
pass
# Clear the console without logging.
def CLEAR_NL():
clear = "clear"
if platform.system() == 'Windows':
clear = "cls"
os.system(clear)
# WORD LIST AREA --------------------
# Each list holds the accepted aliases for one command.
# new
new_wl = ["new", "newcontact", "new contact", "create", "add", "addnew", "add new", "create new"]
# view
view_wl = ["view", "list", "show", "showall", "show all", "list all", "listall", "viewall", "view all"]
# delete
del_wl = ["delete", "del", "remove", "erase"]
# search
search_wl = ["search", "find", "lookup"]
# cls
cls_wl = ["clear", "cls", "clearconsole", "clear console"]
# switch log on
logson_wl = ["logon", "log on", "enable log", "logenable", "log enable", "onlog", "logon"]
# switch log off
logsoff_wl = ["logoff", "log off", "disable log", "logdisable", "log disable", "offlog", "logoff"]
# save to file - txt
save_file_wl = ["save to file", "make file", "save", "file", "text", "make text", "create", "output", "save all", "saveall", "txt"]
# save to file - csv
csv_file_wl = ["csv", "comma separated values", "comma separated value", "spreadsheet"]
# help
help_wl = ["help", "how", "how to", "support"]
# PROGRAM STARTS HERE ---------------
# Ensure the database/table exists before any command runs.
backend.connect()
main_choice = input(f'\n+ What to do?: ')
if logs == "on":
file = open("log.txt", "a")
file.write(f'\n{datetime.datetime.now()} - Entered command: {main_choice}')
file.close()
else:
pass
# --- new contact ---
if main_choice.lower() in new_wl:
CLEAR_NL()
# namei = "hirusha"
# numberi = "0710758322"
try:
namei = input(f'{bc.Y}+ Name: {bc.A}')
numberi = input(f'{bc.Y}+ Number: {bc.A}')
backend.insert(namei.lower(), numberi.lower())
print(f'\n{bc.G}+ Added: {namei.lower()} - {numberi.lower()} to the database{bc.A}')
if logs == "on":
file = open("log.txt", "a")
file.write(f'\n{datetime.datetime.now()} - Added: {namei.lower()} - {numberi.lower()} to the database')
file.close()
else:
pass
except Exception as e:
print(f'{bc.R}\nError has occurred: {e}{bc.A}')
# --- list all contacts ---
elif main_choice.lower() in view_wl:
CLEAR_NL()
fc = backend.view()
for i in fc:
print(f'{i[0]} -- {i[1]} -- {i[2]}')
if logs == "on":
file = open("log.txt", "a")
file.write(f'\n{datetime.datetime.now()} - Listed all contacts')
file.close()
else:
pass
# --- export contacts to a plain-text file ---
elif main_choice.lower() in save_file_wl:
CLEAR_NL()
fc = backend.view()
fileout = open("saved_text.txt", "w+")
for i in fc:
fileout.write(f'\n{i[0]} -- {i[1]} -- {i[2]}')
fileout.close()
if logs == "on":
file = open("log.txt", "a")
file.write(f'\n{datetime.datetime.now()} - Saved all contacts to a text file')
file.close()
else:
pass
# --- export contacts to CSV ---
elif main_choice.lower() in csv_file_wl:
CLEAR_NL()
fc = backend.view()
fileout = open("saved_csv.csv", "w+")
fileout.write(f'ID,Name,Number')
for i in fc:
fileout.write(f'\n{i[0]},{i[1]},{i[2]}')
fileout.close()
if logs == "on":
file = open("log.txt", "a")
file.write(f'\n{datetime.datetime.now()} - Saved all contacts to a csv file')
file.close()
else:
pass
# --- delete a contact by its ID ---
elif main_choice.lower() in del_wl:
CLEAR_NL()
try:
print(r"""
 _____       _      _
|  __ \     | |    | |
| |  | | ___| | ___| |_ ___
| |  | |/ _ \ |/ _ \ __/ _ \
| |__| |  __/ |  __/ |_  __/
|_____/ \___|_|\___|\__\___|
Use command 'view' to show all the Names, Numbers and ID""")
idfi = input(f"\n{bc.Y}+ Enter the ID: {bc.A}")
backend.delete(idfi)
print(f'{bc.Y}+ Successfully deleted the contact with ID: {idfi}{bc.A}')
if logs == "on":
file = open("log.txt", "a")
file.write(f'\n{datetime.datetime.now()} - Deleted the contact with ID: {idfi}')
file.close()
else:
pass
except Exception as e:
print(f'{bc.R}\nError has occurred: {e}{bc.A}')
# --- search by name and/or number ---
elif main_choice.lower() in search_wl:
CLEAR_NL()
namefi = input(f"{bc.Y}+ Name: {bc.A}")
numberfi = input(f"{bc.Y}+ Number: {bc.A}")
result = backend.search(name=namefi.lower(), number=numberfi)
if result == []:
print(f'{bc.R}\nNo results found!{bc.A}')
log_finds = "No results found"
else:
for i in result:
print(f'\nID: {i[0]}\nName: {i[1]}\nNumber: {i[2]}')
log_finds = "Found results"
if logs == "on":
file = open("log.txt", "a")
file.write(f'\n{datetime.datetime.now()} - Searched for {namefi.lower()} - {numberfi} --- {log_finds}')
file.close()
else:
pass
# --- clear the console ---
elif main_choice.lower() in cls_wl:
CLEAR_L()
# --- disable logging (writes "off" to the flag file) ---
elif main_choice.lower() in logsoff_wl:
try:
filelogs2 = open("log_status.txt", "w")
filelogs2.write("off")
filelogs2.close()
print(f'+ Disabled Logging!')
if logs == "on":
file = open("log.txt", "a")
file.write(f'\n{datetime.datetime.now()} - Disabled Logging')
file.close()
else:
pass
except Exception as e:
print(f'{bc.R}\nError has occurred: {e}{bc.A}')
# --- enable logging (writes "on" to the flag file) ---
elif main_choice.lower() in logson_wl:
try:
filelogs2 = open("log_status.txt", "w")
filelogs2.write("on")
filelogs2.close()
print(f'+ Enabled Logging!')
if logs == "on":
file = open("log.txt", "a")
file.write(f'\n{datetime.datetime.now()} - Enabled Logging')
file.close()
else:
pass
except Exception as e:
print(f'{bc.R}\nError has occurred: {e}{bc.A}')
# --- show the built-in help text ---
elif main_choice.lower() in help_wl:
try:
print(f'''{bc.Y}
    new     --> Create a new contact
    view    --> Show all the saved contacts
    delete  --> Delete contact with ID
    find    --> Search for a contact
                ( with name and number, hit enter if you dont know )
    logon   --> Enable Logging (you can do this manually too)
    logogg  --> Disable Logging (you can do this manually too)
    save    --> Save the contacts to a text file in a readable format
    help    --> show this!
{bc.A}''')
if logs == "on":
file = open("log.txt", "a")
file.write(f'\n{datetime.datetime.now()} - Showing Help')
file.close()
else:
pass
except Exception as e:
print(f'{bc.R}\nError has occurred: {e}{bc.A}')
# --- unknown command ---
else:
print(f"{bc.R}- Command not found!{bc.A}")
time.sleep(2)
CLEAR_NL()
# NOTE(review): the REPL loops via recursion; a very long session could
# eventually hit the recursion limit -- confirm whether this matters here.
CONTACT_PROGRAM_ENTIRE_PROGRAM()
if __name__ == '__main__':
# Entry point: log program start, run the REPL forever, and log
# shutdown in the `finally` block regardless of how it exits.
try:
# Read the logging flag before the first loop iteration.
try:
filelogs1 = open("log_status.txt", "r")
logs = filelogs1.read()
filelogs1.close()
except Exception as e:
print(f'{bc.R}\nError has occurred: {e}{bc.A}')
if logs == "on":
file = open("log.txt", "a")
file.write(f'\n{datetime.datetime.now()} - Opened the program!')
file.close()
else:
pass
while True:
CONTACT_PROGRAM_ENTIRE_PROGRAM()
except Exception as e:
print(f'An Error has occured: {e}')
finally:
# Re-read the flag: the user may have toggled logging mid-session.
try:
filelogs1 = open("log_status.txt", "r")
logs = filelogs1.read()
filelogs1.close()
except Exception as e:
print(f'{bc.R}\nError has occurred: {e}{bc.A}')
if logs == "on":
file = open("log.txt", "a")
file.write(f'\n{datetime.datetime.now()} - Program Closed')
file.close()
else:
pass
| {"/frontend.py": ["/backend.py"]} |
76,437 | M-110/learning-python-complexity | refs/heads/master | /Graphs/watts_stogatz.py | import networkx as nx
import random
import numpy as np
import matplotlib.pyplot as plt
class WattsStogatz:
    """Watts-Strogatz small-world graph.

    Currently builds only the ring lattice; rewiring with probability p
    is handled by the module-level helpers below.
    """

    def __init__(self, n: int, k: int, p: float):
        """
        :param n: number of nodes
        :param k: neighbours per node in the ring lattice
        :param p: rewiring probability (not applied here yet)
        """
        # BUG FIX: the original bound the lattice to a local variable and
        # discarded it; store it on the instance instead.
        self.graph = self.create_ring_lattice(n, k)

    # BUG FIX: this was declared as an instance method without `self`, so
    # `self.create_ring_lattice(n, k)` passed the instance as `n`.  Made
    # it a staticmethod so the original call site works unchanged.
    @staticmethod
    def create_ring_lattice(n: int, k: int):
        """Create a ring lattice where each node links to its k nearest neighbours.

        NOTE(review): range(k // 2 + 1) starts at 0 and therefore adds a
        self-loop on every node -- confirm whether range(1, k // 2 + 1)
        was intended (kept as-is to preserve behaviour).
        """
        graph = nx.Graph()
        nodes = range(n)
        graph.add_nodes_from(nodes)
        graph.add_edges_from((node, (node + i) % n)
                             for node in range(n)
                             for i in range(k // 2 + 1))
        return graph
def create_ring_lattice(n, k):
    """Build a ring lattice of n nodes, each linked to its k nearest neighbours."""
    lattice = nx.Graph()
    lattice.add_nodes_from(range(n))
    ring_edges = ((node, (node + offset) % n)
                  for node in range(n)
                  for offset in range(k // 2 + 1))
    lattice.add_edges_from(ring_edges)
    return lattice
def rewire_nodes(graph, p):
    """Rewire each edge (a, b) with probability p to a random edge (a, new_b).

    Candidate endpoints exclude a itself and a's current neighbours, so
    no self-loops or duplicate edges are created.
    """
    nodes = set(graph)
    for a, b in graph.edges():
        if p >= random.random():
            # BUG FIX: the original excluded b's neighbours (set(graph[b])),
            # which still allowed duplicating one of a's existing edges.
            # The Watts-Strogatz construction excludes the kept endpoint's
            # (a's) neighbours instead.
            possible_nodes = nodes - {a} - set(graph[a])
            new_b = random.choice(list(possible_nodes))
            graph.remove_edge(a, b)
            graph.add_edge(a, new_b)
def watts_stogatz_graph(n, k, p):
    """Construct a Watts-Strogatz graph: a ring lattice plus random rewiring."""
    ws_graph = create_ring_lattice(n, k)
    rewire_nodes(ws_graph, p)
    return ws_graph
def all_pairs(nodes):
    """Yield every unordered pair of distinct nodes exactly once.

    Pairs are produced as (later, earlier) relative to iteration order.
    """
    indexed = list(enumerate(nodes))
    for i, x in indexed:
        for j, y in indexed:
            if i > j:
                yield x, y
def node_clustering(graph, node):
    """Fraction of a node's neighbour pairs that are themselves connected.

    Returns NaN for nodes with fewer than two neighbours, where the
    clustering coefficient is undefined.
    """
    neighbors = graph[node]
    degree = len(neighbors)
    if degree < 2:
        return np.nan
    possible = degree * (degree - 1) / 2
    actual = sum(1 for v, w in all_pairs(neighbors) if graph.has_edge(v, w))
    return actual / possible
def clustering_coefficient(graph):
    """Average node_clustering over all nodes, ignoring NaN entries."""
    per_node = [node_clustering(graph, node) for node in graph]
    return np.nanmean(per_node)
def path_lengths(graph):
# Shortest-path length for every node pair, as a flat list.
# NOTE(review): length_map[a][1][b] assumes nx.shortest_path_length
# yields nodes in the order 0..n-1 so that index a is node a -- this
# holds for graphs built here with integer labels, but verify before
# reusing with other node labels.
length_map = list(nx.shortest_path_length(graph))
lengths = [length_map[a][1][b] for a, b in all_pairs(graph)]
return lengths
def characteristic_path_length(graph):
    """Mean shortest-path length over all node pairs of the graph."""
    lengths = path_lengths(graph)
    return np.mean(lengths)
def analyze_graph(n, k, p):
    """Build one Watts-Strogatz graph and return its summary statistics.

    Returns
    -------
    (characteristic path length, clustering coefficient)
    """
    ws = watts_stogatz_graph(n, k, p)
    return characteristic_path_length(ws), clustering_coefficient(ws)
def watts_stogatz_experiment(ps, n=1000, k=10, iters=20):
    """Average (path length, clustering) over `iters` graphs per probability.

    Returns
    -------
    numpy.ndarray of shape (len(ps), 2): one (L, C) mean per p in ps.
    """
    means = []
    for p in ps:
        samples = np.array([analyze_graph(n, k, p) for _ in range(iters)])
        means.append(samples.mean(axis=0))
    return np.array(means)
# Sweep rewiring probabilities on a log scale and plot the characteristic
# path length (L) and clustering coefficient (C), each normalised by its
# value at the smallest p (the near-lattice case).
ps = np.logspace(-4, 0, 9)
results = watts_stogatz_experiment(ps)
L, C = np.transpose(results)
L /= L[0]
C /= C[0]
plt.plot(L)
plt.plot(C)
| {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,438 | M-110/learning-python-complexity | refs/heads/master | /complexity/graphs/random_graph.py | """Create a graph of 10 nodes with a 34% any node pairs are connected and print a graph"""
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
from typing import Tuple, Iterator, List
from complexity.graphs.complete_graph import CompleteGraph
# Type aliases: a node is its integer label; an edge is a (node, node) pair.
Node = int
Edge = Tuple[Node, Node]
class RandomGraph(CompleteGraph):
    """Erdos-Renyi style graph: each node pair is joined with probability p."""

    def __init__(self, n: int, p: float):
        """
        :param n: number of nodes
        :param p: probability that any given pair of nodes is connected
        """
        self.graph: nx.Graph = self.create_graph(n, p)

    def random_pairs(self, nodes: List[Node], p: float) -> Iterator[Edge]:
        """Yield each possible node pair independently with probability p."""
        for edge in self.pair_all(nodes):
            if np.random.random() < p:
                yield edge

    def create_graph(self, n: int, p: float) -> nx.Graph:
        """Create a random graph with n nodes, using p as the probability of each node being connected."""
        graph = nx.Graph()
        nodes = tuple(range(n))
        graph.add_nodes_from(nodes)
        graph.add_edges_from(self.random_pairs(nodes, p))
        return graph

    @staticmethod
    def log_plot(n: int = 10):
        """Plot the probability it is connected over different probabilities in log space.

        NOTE(review): this plots against the sample index rather than ps;
        plt.plot(ps, ys) with a log x-axis may have been intended -- confirm.
        """
        ps: List[float] = list(np.logspace(-2.5, 0, 11))
        ys: List[float] = [RandomGraph.prob_connected(n, p) for p in ps]
        plt.plot(ys)

    @staticmethod
    def prob_connected(n: int, p: float, iters: int = 10000) -> float:
        """Iterate through a series of random graphs and track what percent of them are fully connected."""
        # FIX: annotation was `List[bool, ...]`, which is invalid typing
        # (List takes exactly one parameter).
        connected_graphs: List[bool] = [RandomGraph(n, p).is_connected() for i in range(iters)]
        return float(np.mean(connected_graphs))
if __name__ == "__main__":
# Demo: a 10-node graph with a 34% connection probability, drawn in a
# circular layout and saved to random_graph.png.
rg = RandomGraph(10, .34)
rg.draw_circular_graph(filename="random_graph.png", save=True)
| {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,439 | M-110/learning-python-complexity | refs/heads/master | /Self-Organizing Criticality/sandpile_cdf_plot.py | from sandpile import SandPile
from timeit import timeit
import matplotlib.pyplot as plt
import numpy as np
from empiricaldist import Pmf, Cdf
def generate_time_and_duration(n=10000):
# Build a 50x50 sandpile at level 30, let it settle, then drop n grains
# and record each avalanche's (duration, size).  Trivial avalanches
# (duration or size <= 1) are filtered out before returning.
pile = SandPile(rows=50, level=30)
pile.run()
# **WARNING** This takes a few minutes
res = [pile.drop_and_run() for i in range(n)]
T, S = np.transpose(res)
T = T[T>1]
S = S[S>1]
return T, S
def plot_pmf(T, S):
    """Plot PMFs of avalanche duration (T) and size (S) side by side.

    Saves the figure to pmf_plot.png and then displays it.
    """
    pmfT = Pmf.from_seq(T)
    pmfS = Pmf.from_seq(S)
    fig = plt.figure(figsize=(10, 5))
    plt.subplot(1, 2, 1)
    pmfT.plot(xlim=(0, 50), xlabel="Avalanche duration", ylabel="PMF")
    plt.subplot(1, 2, 2)
    pmfS.plot(xlim=(0, 50), xlabel="Avalanche size", ylabel="PMF")
    # BUG FIX: plt.show() takes no title string (the original passed
    # 'PMF size and duration' to it); save before showing so the image
    # is written even if show() blocks or clears the figure.
    fig.savefig("pmf_plot.png")
    plt.show()
def plot_cdf(T, S):
    """Plot CDFs of avalanche duration (T) and size (S) side by side.

    Saves the figure to cdf_plot.png and then displays it.
    """
    cdfT = Cdf.from_seq(T)
    cdfS = Cdf.from_seq(S)
    fig = plt.figure(figsize=(10, 5))
    plt.subplot(1, 2, 1)
    cdfT.plot(xlim=(0, 50), xlabel="Avalanche duration", ylabel="CDF")
    plt.subplot(1, 2, 2)
    cdfS.plot(xlim=(0, 50), xlabel="Avalanche size", ylabel="CDF")
    # BUG FIX: plt.show() takes no title string (the original passed
    # 'PMF size and duration' to it); save before showing so the image
    # is written even if show() blocks or clears the figure.
    fig.savefig("cdf_plot.png")
    plt.show()
if __name__ == "__main__":
# 100k grain drops (slow), then plot the avalanche distributions.
T, S = generate_time_and_duration(100000)
plot_cdf(T, S)
plot_pmf(T, S) | {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,440 | M-110/learning-python-complexity | refs/heads/master | /--Wolfram/continous_ca_2.py | import matplotlib.pyplot as plt
import numpy as np
from math import floor, ceil
# Rule table for a continuous-valued elementary CA.  Each name binds a
# (update_function, neighbourhood_pattern) TUPLE: the pattern is the
# binary left/center/right configuration the entry applies to, and the
# function maps the three continuous neighbour values to the new value.
a = lambda x: sum(x)/3, [1,1,1]
b = lambda x: sum(x[:2])/3, [1,1,0]
c = lambda x: (x[0] + x[2])/2, [1,0,1]
d = lambda x: x[0], [1,0,0]
e = lambda x: sum(x[1:])/2, [0,1,1]
f = lambda x: x[1], [0,1,0]
g = lambda x: x[2], [0,0,1]
h = lambda x: 1, [0,0,0]
# Indexed by neighbourhood code: table[0] handles (1,1,1) ... table[7] (0,0,0).
rule_table = a,b,c,d,e,f,g,h
def apply_rule(n, row):
# Apply rule number n (an 8-bit mask selecting which rule_table entries
# are active) to a 3-cell window of continuous values in [0, 1].
rules = bin(n)[2:].rjust(8, '0')
output = 0
# Keep only the table entries whose bit is set in n.
applied_rules = [rule_table[i] for i, rule in enumerate(rules) if rule == '1']
for rule in applied_rules:
# Match the ceil-rounded window against the rule's binary pattern;
# on a match, apply the rule's function to the masked window values.
if all(int(ceil(row[i])) == rule[1][i] for i in range(3)):
output = rule[0]([row[i]*rule[1][i] for i in range(3)])
return output
# NOTE(review): two consecutive `return output` statements -- the second
# is unreachable if the first sits at this level; confirm whether
# return-on-first-match or return-after-the-loop was intended.
return output
# Demo: run the continuous CA for `rule_number` on a rows x cols grid,
# seeded with a single full-intensity cell in the middle of the top row.
output = apply_rule(122, [.5, .3, .6])
rows = 100
cols = rows
rule_number = 137
grid = np.zeros((rows, cols), dtype=float)
grid[0,rows//2] = 1
for i in range(1, rows):
for j in range(2, cols-2):
new_value = apply_rule(rule_number, [grid[i-1, j-1], grid[i-1, j], grid[i-1, j+1]])
# Wrap values above 1 back into [0, 1) to keep the state bounded.
if new_value > 1:
new_value = new_value - floor(new_value)
grid[i, j] = new_value
plt.imshow(grid, cmap='twilight')
#plt.imshow(grid, cmap='Greys')
76,441 | M-110/learning-python-complexity | refs/heads/master | /complexity/cellular_automata/cell_1d.py | """An object-oriented implementation of cellular automata.
Uses rule 110 and a string as the initial conditions and saves
an image.
The class allows for random initial conditions, string-based initial conditions,
and a single square initial conditions.
"""
import matplotlib.pyplot as plt
import numpy as np
from binascii import a2b_base64
class Cell1D:
    """1D cellular automaton.

    Args:
        rules: Wolfram rule number (0-255), converted to a lookup table
        rows: number of rows (time steps) in the grid
        columns: number of columns; defaults to 2 * rows + 1
    """

    # Weights that turn a 3-cell neighbourhood into its table index
    # (left*4 + center*2 + right*1).
    WINDOW = [4, 2, 1]

    def __init__(self, rules: int, rows: int, columns: int = None):
        if columns is None:
            columns = 2 * rows + 1
        self.rows = rows
        self.columns = columns
        self.array = np.zeros((rows, columns), dtype=np.int8)
        self.table = self.make_table(rules)
        self._current_row = 0  # index of the next row to be computed

    def start_single(self):
        """Start with one live cell in the top centre."""
        self.array[0, self.columns // 2] = 1
        self._current_row += 1

    def start_random(self):
        """Start with each top-row cell live with 50% probability."""
        self.array[0] = np.random.random(self.columns).round()
        self._current_row += 1

    def start_string(self, string_: str):
        """Seed the top row from the binary form of a string.

        Only as many bits as fit on the row are used.
        """
        binary_string = ''.join(format(ord(char), 'b') for char in string_)
        string_to_int = [int(x) for x in binary_string]
        for i in range(min(len(self.array[0]), len(string_to_int))):
            self.array[0, i] = string_to_int[i]
        self._current_row += 1

    def make_table(self, rule: int):
        """Return the 8-entry lookup table for a Wolfram rule number.

        table[k] is the next state of the neighbourhood whose
        left/center/right bits encode integer k.
        """
        rule = np.array([rule], dtype=np.uint8)
        table = np.unpackbits(rule)[::-1]
        return table

    def loop(self, steps: int = 1):
        """Compute the given number of rows."""
        for i in range(steps):
            self.step()

    def step(self):
        """Compute the next row of the array from the previous one."""
        # Correlating with WINDOW encodes each 3-cell neighbourhood as 0-7.
        c = np.correlate(self.array[self._current_row - 1], self.WINDOW, mode='same')
        self.array[self._current_row] = self.table[c]
        self._current_row += 1

    def plot_show(self, filename: str = 'Cell1D_ca_110.png'):
        """Render the grid and save it to an image file.

        Args:
            filename: output image path.  Generalised from the previously
                hard-coded 'Cell1D_ca_110.png'; the default keeps the old
                behaviour for existing callers.
        """
        plt.imshow(self.array, cmap='Blues', alpha=0.7)
        plt.xticks([])
        plt.yticks([])
        plt.savefig(filename)

    def draw_rows(self, n: int = None):
        """Compute up to n rows (all remaining rows by default) and plot."""
        if n is None:
            n = self.rows - 1
        else:
            n = min(self.rows - 1, n)
        self.loop(n)
        self.plot_show()
if __name__ == "__main__":
    # Seed the top row with the binary expansion of this text, then run
    # rule 110 over all remaining generations and save the plot.
    initial_string = """
    Von Neumann cellular automata are the original expression of cellular automata,
    the development of which was prompted by suggestions made to John von Neumann by
    his close friend and fellow mathematician Stanislaw Ulam. Their original purpose
    was to provide insight into the logical requirements for machine self-replication,
    and they were used in von Neumann's universal constructor."""
    automaton = Cell1D(110, 200)
    automaton.start_string(initial_string)
    automaton.draw_rows()
| {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,442 | M-110/learning-python-complexity | refs/heads/master | /--Wolfram/2D_cell_grid.py | import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
class Cell2D:
    """2D cellular automaton seeded with one live cell in the center.

    Args:
        width: side length of the square grid
    """
    def __init__(self, width: int):
        self.width = width
        self.previous_layer = np.zeros((width, width), dtype=np.uint8)
        self.current_layer = np.zeros((width, width), dtype=np.uint8)
        # Single seed cell in the middle of the grid.
        self.current_layer[width // 2][width // 2] = 1
    def step(self):
        """Advance one generation; border cells are left at 0."""
        self.previous_layer = self.current_layer
        self.current_layer = np.zeros((self.width, self.width), dtype=np.uint8)
        for row in range(1, self.width - 1):
            for col in range(1, self.width - 1):
                around = self.get_neighbors(row, col)
                self.apply_rules(row, col, *around, switch=True)
    def get_neighbors(self, i, j) -> tuple:
        """Return the (up, down, left, right) values from the previous layer."""
        up = self.previous_layer[i - 1][j]
        down = self.previous_layer[i + 1][j]
        left = self.previous_layer[i][j - 1]
        right = self.previous_layer[i][j + 1]
        return up, down, left, right
    def apply_rules(self, i, j, a, b, c, d, switch=False):
        """Activate (i, j) when exactly 1 or all 4 neighbors were active."""
        active = a + b + c + d
        if active in (1, 4):
            if switch:
                # Toggle; the layer was just zeroed so this sets the cell.
                self.current_layer[i][j] = 1 - self.current_layer[i][j]
            else:
                self.current_layer[i][j] = 1
        else:
            self.current_layer[i][j] = 0
    def run_n_steps(self, n):
        """Advance n generations, then draw the result."""
        for _ in range(n):
            self.step()
        self.draw()
    def draw(self):
        """Render the current layer into self.image."""
        self.image = plt.imshow(self.current_layer)
    def save_gif(self, filename='my_gif', frames=200, interval=.1, fps=30):
        """Animate the automaton and write it to <filename>.gif."""
        gif_writer = animation.PillowWriter(fps=fps)
        self.animate_gif(frames, interval).save(filename + '.gif',
                                                writer=gif_writer)
    def animate_gif(self, frames, interval):
        """Build the FuncAnimation that drives the gif export."""
        fig = plt.gcf()
        self.draw()
        return animation.FuncAnimation(fig, self.animate_function,
                                       interval=interval,
                                       frames=frames)
    def animate_function(self, i):
        """Animation callback: step on every frame after the first."""
        if i > 0:
            self.step()
        self.image.set_array(self.current_layer)
        return (self.image,)
# Render a 1000-frame animation of a 50x50 automaton to my_gif.gif.
automaton = Cell2D(50)
automaton.save_gif(frames=1000, fps=15)
| {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,443 | M-110/learning-python-complexity | refs/heads/master | /Self-Organizing Criticality/cell.py | import numpy as np
import matplotlib.pyplot as plt
from time import sleep
from matplotlib import animation
class Cell2D:
    """Base class for 2D cellular automata.

    Provides grid construction, looping, drawing helpers and gif export.
    Subclasses are expected to override step() and draw().
    """
    def __init__(self, rows, cols=None):
        # Default to a square grid when cols is omitted.
        if cols is None:
            cols = rows
        self.grid = np.zeros((rows, cols), np.uint8)
    def add_cells(self, row, col, *strings):
        """Write rows of '0'/'1' strings into the grid starting at (row, col)."""
        for i, string in enumerate(strings):
            self.grid[row+i, col:col+len(string)] = np.array([int(c) for c in string])
    def loop(self, count=1):
        """Run step() `count` times."""
        for i in range(count):
            self.step()
    def step(self):
        # BUG FIX: was `def step():` (missing self), so self.loop() raised
        # TypeError on the first call.  Subclasses override with real logic.
        pass
    def draw(self, **options):
        """Render the grid into self.image."""
        self.image = self.draw_grid(self.grid, **options)
    def draw_grid(self, grid, **options_):
        """Draw `grid`; caller-supplied options override the defaults."""
        x, y = grid.shape
        options = dict(cmap='Greens', alpha=.7, vmin=0, vmax=1,
                       interpolation='none', origin='upper',
                       extent = [0, x, 0, y])
        options.update(options_)
        plt.axis([0, x, 0, y])
        plt.xticks([])
        plt.yticks([])
        # NOTE(review): the assembled `options` dict is currently unused by
        # this imshow call -- confirm which styling was actually intended.
        return plt.imshow(grid, cmap='Blues', alpha=.3, vmax=4)
    # TODO: REMOVE THIS, UNUSED?
    def animate(self, frames, interval=None, step=None):
        """Interactively show `frames` generations (blocking plt.show calls)."""
        if step is None:
            step = self.step
        plt.figure()
        try:
            for i in range(frames-1):
                self.draw()
                plt.show()
                if interval:
                    sleep(interval)
                step()
            self.draw()
            plt.show()
        except KeyboardInterrupt:
            pass
    def save_gif(self, filename='my_gif', frames=200, interval=0.1, fps=30):
        """Export the animation as <filename>.gif."""
        writergif = animation.PillowWriter(fps=fps)
        self.animate_gif(frames, interval).save(filename+'.gif',
                                                writer=writergif)
    def animate_gif(self, frames, interval):
        """Build the FuncAnimation used for gif export."""
        fig = plt.gcf()
        self.draw()
        a = animation.FuncAnimation(fig, self.animate_function,
                                    init_func=self.init_func,
                                    frames=frames, interval=interval)
        return a
    def init_func(self):
        print('starting animation')
    def animate_function(self, i):
        """Animation callback: step on every frame after the first."""
        if i > 0:
            self.step()
        self.image.set_array(self.grid)
        return self.image,
| {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,444 | M-110/learning-python-complexity | refs/heads/master | /GoL/game_of_life.py | import numpy as np
import matplotlib.pyplot as plt
from random import randint
from matplotlib import animation
from scipy.signal import convolve2d, correlate2d
class Life:
    """Conway's Game of Life implementation.

    Args:
        rows: number of rows in the grid
        cols: number of columns in the grid (defaults to rows)
    """
    def __init__(self, rows, cols=None):
        if cols is None:
            cols = rows
        self.grid = np.zeros((rows, cols), np.uint8)
        # Weighing the center 10x lets one correlation value encode both the
        # cell's own state and its live-neighbor count.
        self.kernel = np.array([[1, 1, 1],
                                [1, 10, 1],
                                [1, 1, 1]])
        # Survival table: alive next step for correlation 3 (dead cell with
        # 3 neighbors) and 12/13 (live cell with 2 or 3 neighbors).
        self.table = np.zeros(20, dtype=np.uint8)
        self.table[[3, 12, 13]] = 1
        self.image = None
    def step(self):
        """Execute 1 step using the game's rules"""
        neighborhood = correlate2d(self.grid, self.kernel, mode='same')
        self.grid = self.table[neighborhood]
    def make_life(self, row, col, *strings):
        """Create grid squares beginning at position (row, col) using strings"""
        for offset, pattern in enumerate(strings):
            cells = np.array([int(char) for char in pattern])
            self.grid[row + offset, col:(col + len(pattern))] = cells
    def draw(self):
        """Draw the grid"""
        snapshot = self.grid.copy()
        cmap = plt.get_cmap('Greens')
        options = dict(interpolation='nearest', alpha=0.8, vmin=0, vmax=1, origin='upper')
        height, width = snapshot.shape
        plt.axis([0, height, 0, width])
        plt.xticks([])
        plt.yticks([])
        self.image = plt.imshow(snapshot, cmap, **options)
    def animate(self):
        """Build a 1000-frame FuncAnimation over the current grid."""
        fig = plt.gcf()
        self.draw()
        return animation.FuncAnimation(fig, self.animate_function,
                                       init_func=self.init_func,
                                       frames=1000,
                                       interval=200)
    def init_func(self):
        print('starting animation')
    def animate_function(self, i):
        """Animation callback: step on every frame after the first."""
        if i > 0:
            self.step()
        self.image.set_array(self.grid)
        return (self.image,)
if __name__ == "__main__":
    grid_size = 150
    life = Life(grid_size)
    # Seed every cell independently with a random 0/1.
    for row in range(grid_size):
        for col in range(grid_size):
            life.make_life(row, col, str(randint(0, 1)))
    gif_writer = animation.PillowWriter(fps=30)
    life.animate().save('test.gif', writer=gif_writer)
| {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,445 | M-110/learning-python-complexity | refs/heads/master | /complexity/physical_modeling/reaction_diffusion.py | import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import correlate2d
from complexity.game_of_life.cell_2d import Cell2D
class ReactionDiffusion(Cell2D):
    """Gray-Scott style reaction-diffusion over two chemical fields."""
    # Weighted-Laplacian diffusion kernel over the 3x3 neighborhood.
    kernel = np.array([[.05, .2, .05],
                       [.2, -1, .2],
                       [.05, .2, .05]])
    def __init__(self, rows, cols, ra, rb, f, k, noise=0.1):
        super().__init__(rows)
        self.rows = rows
        self.cols = cols
        self.ra = ra  # diffusion rate of a
        self.rb = rb  # diffusion rate of b
        self.f = f  # feed rate
        self.k = k  # kill rate
        # A starts saturated everywhere; B starts as low-amplitude noise.
        self.array_a = np.ones((rows, cols), dtype=float)
        self.array_b = noise * np.random.random((rows, cols))
        self.add_island()
    def step(self):
        """Advance both chemical fields by one time step."""
        conc_a = self.array_a
        conc_b = self.array_b
        diffuse_a = correlate2d(conc_a, self.kernel, mode='same', boundary='wrap')
        diffuse_b = correlate2d(conc_b, self.kernel, mode='same', boundary='wrap')
        reaction = conc_a * conc_b ** 2
        self.array_a += self.ra * diffuse_a - reaction + self.f * (1 - conc_a)
        self.array_b += self.rb * diffuse_b + reaction - (self.f + self.k) * conc_b
    def draw(self):
        """Draw the reaction."""
        plt.axis([0, self.rows, 0, self.cols])
        plt.xticks([])
        plt.yticks([])
        shared_options = dict(interpolation='bicubic', vmin=None, vmax=None, alpha=0.3,
                              origin='upper', extent=[0, self.rows, 0, self.cols])
        plt.imshow(self.array_a, cmap='twilight', **shared_options)
        plt.imshow(self.array_b, cmap='cool', **shared_options)
        self.image = plt.gci()
    def animate_function(self, i):
        """Animation callback: 100 simulation steps per rendered frame."""
        if i > 0:
            for _ in range(100):
                self.step()
        self.draw()
        return self.image,
    def add_island(self, height=0.1):
        """Raise a central square patch of B to kick off the reaction."""
        n_rows, n_cols = self.array_b.shape
        radius = min(n_rows, n_cols) // 20
        mid_r = n_rows // 2
        mid_c = n_cols // 2
        self.array_b[(mid_r - radius):(mid_r + radius),
                     (mid_c - radius):(mid_c + radius)] += height
if __name__ == '__main__':
    # NOTE(review): this instance is never rendered, but constructing it draws
    # from the global NumPy RNG and so shifts the noise seeds used below.
    diffusion = ReactionDiffusion(100, 100, 0.5, 0.25, 0.035, 0.062)
    # for _ in range(500):
    #     diffusion.step()
    # diffusion.draw()
    # plt.show()
    # diffusion.add_cells(7, 7, ['111', '111', '111'])
    # pairs = [(0.0354, 0.057), # X
    #          (0.055, 0.062), # X
    #          (0.039, 0.065), # OO
    #          (0.1, 0.002),
    #          (0.04, 0.035),
    #          (0.02, 0.085),
    #          (0.065, 0.043)]
    # pairs = [(0.043, 0.055),
    #          (0.039, 0.064),
    #          (0.042, 0.069)]
    # (feed, kill) parameter pairs to render; earlier experiments kept above.
    pairs = [(0.012, 0.032),
             (0.034, 0.066)]
    # One gif per pair: 150 frames at 10 fps, filename encodes f and k.
    for f, k in pairs:
        diffusion = ReactionDiffusion(100, 100, 0.5, 0.26, f, k)
        diffusion.save_gif(filename=f'X_reaction_diffusion_f_{f}_k_{k}_long', frames=150, fps=10)
| {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,446 | M-110/learning-python-complexity | refs/heads/master | /complexity/graphs/random_m_graph.py | """Plot the probability of a 7 node graph being connected given different numbers of random edges."""
import networkx as nx
import numpy as np
import random
import matplotlib.pyplot as plt
from typing import Tuple, List
from complexity.graphs.complete_graph import CompleteGraph
Node = int
Edge = Tuple[Node, Node]
class RandomMGraph(CompleteGraph):
    """Graph with randomly connected nodes."""
    def __init__(self, n: int, m: int):
        self.graph: nx.Graph = self.create_graph(n, m)
    def create_m_edges(self, nodes: List[Node], m: int) -> List[Edge]:
        """Randomly generates m edges from nodes."""
        candidates = list(self.pair_all(nodes))
        return random.sample(candidates, m)
    def create_graph(self, n: int, m: int) -> nx.Graph:
        """Create a random graph with n nodes, and m randomly assigned edges."""
        node_ids = tuple(range(n))
        graph = nx.Graph()
        graph.add_nodes_from(node_ids)
        graph.add_edges_from(self.create_m_edges(node_ids, m))
        return graph
    @staticmethod
    def prob_connected(n: int, m: int, iters: int = 10000) -> float:
        """Fraction of `iters` random (n, m) graphs that are fully connected."""
        outcomes: List[bool] = [RandomMGraph(n, m).is_connected() for _ in range(iters)]
        return float(np.mean(outcomes))
if __name__ == "__main__":
    # Estimate connectivity probability for m = 0..19 edges on 7 nodes.
    probabilities: List[float] = [RandomMGraph.prob_connected(7, edge_count)
                                  for edge_count in range(20)]
    plt.plot(probabilities)
    plt.ylabel("Probability a 7 node graph is complete")
    plt.xlabel("Number of edges")
    plt.title("Probability a 7 node graph is complete vs number of edges")
    plt.savefig("random_m_graph_complete_probability_graph.png")
| {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,447 | M-110/learning-python-complexity | refs/heads/master | /--Wolfram/test.py | o=['buffalo']
# Build one buffalo-word per binary digit of 69 (least-significant first):
# a 1 bit capitalizes the word; words are prepended so the most-significant
# bit ends up first, followed by the original lowercase 'buffalo'.
# BUG/IDIOM FIX: was `list(map(lambda ...))` used purely for its side
# effects, materializing a throwaway list of None; use an explicit loop.
for ch in ['B' if int(i) else 'b' for i in bin(69)[:1:-1]]:
    o.insert(0, ch + o[-1][1:])
print(' '.join(o))
76,448 | M-110/learning-python-complexity | refs/heads/master | /--Wolfram/binary_graphing/addition_sequence.py | from binary_graphing import BinaryGrapher
class AdditionGrapher(BinaryGrapher):
    # Stub subclass: intended to graph an addition-based binary sequence.
    def step_function(self, n, i):
        # TODO: implement the addition-sequence step (currently a placeholder).
        ...
76,449 | M-110/learning-python-complexity | refs/heads/master | /complexity/physical_modeling/percolation.py | """This simulates percolation which is the process of fluid flowing through
semi-porous materials such as water through paper.
Multiple gifs are saved with various parameters.
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import correlate2d
from complexity.game_of_life.cell_2d import Cell2D
class Percolation(Cell2D):
    """CA Simulation of percolation."""
    # von Neumann neighborhood: only the four orthogonal neighbors count.
    kernel = np.array([[0, 1, 0],
                       [1, 0, 1],
                       [0, 1, 0]])
    def __init__(self, n: int, q: float = 0.5):
        self.q = q
        # Each cell is porous (1) with probability q, solid (0) otherwise.
        self.array = np.random.choice([1, 0], (n, n), p=[q, 1 - q])
        # Whole top row starts wet; wet cells are coded as 5.
        self.array[0] = 5
    def step(self):
        """Wet every porous cell that touches at least one wet neighbor."""
        cells = self.array
        wet_contact = correlate2d(cells, self.kernel, mode='same')
        cells[(cells == 1) & (wet_contact >= 5)] = 5
    @property
    def num_wet(self) -> int:
        """Returns total number of wet cells."""
        return int(np.sum(self.array == 5))
    @property
    def is_bottom_row_wet(self) -> int:
        """Count of wet cells in the bottom row (truthy when fluid reached it)."""
        return int(np.sum(self.array[-1] == 5))
    def draw(self):
        """Draw cells."""
        n_rows, n_cols = self.array.shape
        plt.axis([0, n_cols, 0, n_rows])
        plt.xticks([])
        plt.yticks([])
        plt.imshow(self.array, cmap='cool', vmax=8)
        self.image = plt.gci()
if __name__ == '__main__':
    # Sweep porosity values around the site-percolation threshold (~0.59).
    for i in [.55, .57, .6, .63, .65, .7, .75, .8]:
        simulation = Percolation(200, i)
        simulation.save_gif(f'percolation_q_{i}', frames=500, fps=30)
| {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,450 | M-110/learning-python-complexity | refs/heads/master | /Physical/diffusion.py | import numpy as np
from scipy.signal import correlate2d
from cell import Cell2D
class Diffusion(Cell2D):
    """Simple diffusion CA: each cell exchanges value with its 4 neighbors.

    Args:
        rows: the grid is rows x rows
        rate: fraction of the discrete Laplacian applied per step
    """
    # Discrete Laplacian kernel over the von Neumann neighborhood.
    kernel = np.array([[0, 1, 0],
                       [1,-4, 1],
                       [0, 1, 0]])
    def __init__(self, rows, rate=0.1):
        self.rate = rate
        # BUG FIX: np.float was removed in NumPy 1.20+/1.24; use builtin float.
        self.grid = np.zeros((rows, rows), float)
        self.image = None
    def add_cells(self, row, col, *strings):
        """Write rows of '0'/'1' strings into the grid starting at (row, col)."""
        for i, string in enumerate(strings):
            self.grid[row+i, col:col+len(string)] = np.array([int(char) for char in string])
    def step(self):
        """Apply one diffusion step (explicit Euler on the Laplacian)."""
        correlation = correlate2d(self.grid, self.kernel, mode='same')
        self.grid += self.rate * correlation
    def draw(self):
        """Draws the cells."""
        self.image = self.draw_grid(self.grid, cmap='Reds')
if __name__ == '__main__':
    # Seed a 3x3 block of concentration in a 10x10 grid, then animate it.
    model = Diffusion(10)
    model.add_cells(3, 3, '111', '111', '111')
    model.draw()
    model.animate(frames=20, interval=0.1)
    model.save_gif()
| {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,451 | M-110/learning-python-complexity | refs/heads/master | /--Wolfram/binary_grapher_sqrt.py | """This module saves visual representations of binary sequences.
The sequences are done through constant addition.
"""
import matplotlib.pyplot as plt
from binary_grapher import BinaryGrapher
from decimal import Decimal, getcontext
from random import randint
class RootGrapher(BinaryGrapher):
    """Graphs the fractional decimal digits of sqrt(value), one per step."""
    def __init__(self, *args, value, **kwargs):
        super().__init__(*args, **kwargs)
        # High precision so the digit stream is long enough.
        getcontext().prec = 2_000
        root = str(Decimal(value).sqrt())
        if '.' not in root:
            # Perfect squares have no fractional digits; feed zeros instead.
            root = '0.' + '0' * 50000
        self.sqrt_iter = iter(root.split('.')[1])
    def step_function(self, n: int, i: int):
        """Return the next fractional digit of the root (n is unused)."""
        return int(next(self.sqrt_iter))
    def fill_grid(self):
        """Right-pad every row with zeros so all rows share the same width."""
        width = max(len(row) for row in self.grid)
        for idx in range(len(self.grid)):
            self.grid[idx] += [0] * (width - len(self.grid[idx]))
if __name__ == "__main__":
    # Preview sqrt(7), then save images for sqrt(2)..sqrt(99).
    demo = RootGrapher(100, value=7)
    demo.draw_graph()
    for i in range(2, 100):
        RootGrapher(100, value=i).save_image(f'sqrt({i})', 'root', cmap='Greens')
| {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,452 | M-110/learning-python-complexity | refs/heads/master | /--Wolfram/complexity_binary_multiplication_sequence_representation.py | """This module saves visual representations of binary sequences.
The sequences are done through constant multiplication
"""
import matplotlib.pyplot as plt
from math import sqrt, log
def to_binary(num: int) -> str:
    """Return the binary digits of `num` without the '0b' prefix."""
    binary_repr = bin(num)
    return binary_repr[2:]
def binary_mult_image(multiplier: int = 2, length: int = 50):
    """Plot and save the binary expansions of the sequence n -> n*multiplier + 1.

    Each image row is the binary representation of one term, left-aligned
    and zero-padded to a fixed width.
    """
    # Width is the binary length of multiplier**length (loop-invariant).
    width = len(to_binary(multiplier ** length))
    grid = []
    term = 1
    for _ in range(1, length):
        row = [0] * width
        for col, digit in enumerate(to_binary(term)):
            row[col] = float(digit)
        grid.append(row)
        term = term * multiplier + 1
    figure = plt.figure(figsize=(100, 100))
    plt.pcolormesh(grid[::-1], cmap='Greens')
    plt.axis('off')
    plt.show()
    plt.draw()
    figure.savefig(f'binary_image_mult_multiplier({multiplier})_length({length}).png', dpi=100)
    print(f'Saved as binary_image_mult_multiplier({multiplier})_length({length}).png')
# The first pattern using the multiplier 2 shows a solid simple triangle.
# But once you change the multiplier to 3, you can see the pattern becomes
# complex.
# As you increase the multiple, you see about 4 pattern variations emerge.
# Render one image per multiplier, shrinking length so file sizes stay bounded.
base_size = 5000
for factor in range(2, 20):
    binary_mult_image(multiplier=factor, length=base_size // factor)
| {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,453 | M-110/learning-python-complexity | refs/heads/master | /Physical/reaction_diffusion.py | from diffusion import Diffusion
import numpy as np
from scipy.signal import correlate2d
import matplotlib.pyplot as plt
from matplotlib import animation
class ReactionDiffusion(Diffusion):
    """Simulation of reaction diffusion using 2D cellular automata"""
    # Weighted-Laplacian diffusion kernel.
    kernel = np.array([[.05, .2, .05],
                       [ .2, -1, .2],
                       [.05, .2, .05]])
    def __init__(self, rows, parameters, noise=0.1):
        self.parameters = parameters
        # Material A starts as a saturated field; material B as random noise.
        self.grid1 = np.ones((rows, rows), dtype=float)
        self.grid2 = noise * np.random.random((rows, rows))
        self.add_island(self.grid2)
    def add_island(self, grid, height=0.1):
        """Raise a small square in the center of `grid` to seed the reaction."""
        n_rows, n_cols = grid.shape
        radius = min(n_rows, n_cols) // 20
        mid_r = n_rows // 2
        mid_c = n_cols // 2
        grid[mid_r - radius:mid_r + radius, mid_c - radius:mid_c + radius] += height
    def step(self):
        """Execute one step of diffusion"""
        mat_a = self.grid1
        mat_b = self.grid2
        ra, rb, f, k = self.parameters
        corr_a = correlate2d(mat_a, self.kernel, mode='same', boundary='wrap')
        corr_b = correlate2d(mat_b, self.kernel, mode='same', boundary='wrap')
        reaction = mat_a * mat_b**2
        self.grid1 += ra * corr_a - reaction + f * (1 - mat_a)
        self.grid2 += rb * corr_b + reaction - (f + k) * mat_b
    def draw(self):
        """Overlay both material grids in a single figure."""
        options = dict(interpolation='bicubic', vmin=None, vmax=None)
        self.image = self.draw_grid(self.grid1, cmap='Reds', **options)
        # NOTE(review): '+=' on a matplotlib image handle looks suspect
        # (AxesImage does not support +) -- confirm intent before relying on draw().
        self.image += self.draw_grid(self.grid2, cmap='Blues', **options)
# Parameter tuples are (ra, rb, f, k): diffusion rates, feed and kill rates.
params1 = 0.5, 0.25, 0.035, 0.057 # pink spots and stripes
params2 = 0.5, 0.25, 0.055, 0.062 # coral
params3 = 0.5, 0.25, 0.039, 0.065 # blue spots
# Run the first parameter set and export the animation (default my_gif.gif).
rd = ReactionDiffusion(rows=100, parameters=params1)
rd.draw()
rd.save_gif()
76,454 | M-110/learning-python-complexity | refs/heads/master | /complexity/self_organizing_criticality/sandpile.py | import numpy as np
import itertools
from scipy.signal import correlate2d
import matplotlib.pyplot as plt
from complexity.game_of_life.cell_2d import Cell2D
class SandPile(Cell2D):
    """Simulation of a Bak-Tang-Wiesenfeld sandpile using 2D cellular automata."""
    # The pattern of how to distribute grains of sand when one collapses.
    # 4 are removed from the center and distributed to the 4 neighbor piles.
    kernel = np.array([[0, 1, 0],
                       [1, -4, 1],
                       [0, 1, 0]])
    def __init__(self, rows, cols=None, level=9):
        if cols is None:
            cols = rows
        # BUG FIX: every other method reads self.grid, but the array was
        # originally bound only to self.array, so step()/drop_random_grain()
        # raised AttributeError.  Bind both names to the same array so any
        # existing readers of .array keep working.
        self.array = np.ones((rows, cols), dtype=np.int32) * level
        self.grid = self.array
        self.toppled_sequence = []
    def step(self, k=3) -> int:
        """Apply one step of the sandpile rules.
        Returns the number of cells which collapsed."""
        # Find which cells are collapsing.
        is_toppling = self.grid > k
        toppled_count = np.sum(is_toppling)
        self.toppled_sequence.append(toppled_count)
        # Correlation of where collapsing cells distribute their grains.
        correlation = correlate2d(is_toppling, self.kernel, mode='same',
                                  boundary='fill', fillvalue=0)
        # In-place so self.array stays aliased to self.grid.
        self.grid += correlation
        return toppled_count
    def run(self):
        """Run simulation steps until the sand pile is stable.
        Returns (number of steps ran, total topples) as a tuple pair."""
        total_toppled = 0
        for i in itertools.count(1):
            toppled_count = self.step()
            total_toppled += toppled_count
            if toppled_count == 0:
                return i, total_toppled
    def drop_random_grain(self):
        """Add one grain of sand to a uniformly random cell."""
        rows, cols = self.grid.shape
        random_cell = np.random.randint(rows), np.random.randint(cols)
        self.grid[random_cell] += 1
    def drop_and_run(self):
        """Drops a random grain and then runs until pile is stable"""
        self.drop_random_grain()
        return self.run()
    def draw_after_n_iterations(self, n):
        """Drop-and-run n times, then draw the resulting pile."""
        for i in range(n):
            print(i)
            self.drop_and_run()
        self.draw_grid(self.grid, cmap='Blues', alpha=.9, vmin=0, vmax=4, origin='upper')
    def animate_function(self, i):
        """Animation callback: one drop-and-run per frame."""
        if i > 0:
            self.drop_and_run()
            print(i)
            self.draw()
        self.image.set_array(self.grid)
        return (self.image,)
    def print_topple_sequence(self):
        """Plot the number of toppled cells recorded at each step."""
        plt.plot(self.toppled_sequence)
    def draw_layers(self):
        """Draw four subplots, one mask per grain level 0-3."""
        plt.figure(figsize=(8, 8))
        for i in range(4):
            plt.subplot(2, 2, i + 1)
            self.draw_grid(self.grid == i, cmap='Blues', alpha=.9, vmin=0, vmax=1, origin='upper')
if __name__ == "__main__":
    # Build an over-full 200x200 pile and record its relaxation as a gif.
    pile = SandPile(rows=200, level=12)
    pile.save_gif(frames=500, fps=10)
76,455 | M-110/learning-python-complexity | refs/heads/master | /complexity/small_world_graphs/watts_strogatz.py | """Run the Watts Strogatz experiment to demonstrate the small world
phenomenon.py
The plot shows that there is a large range of probabilities for which the
graph demonstrates high clustering and low path lengths which are
characteristics of the small world phenomenon.
"""
import networkx as nx
import random
import numpy as np
import matplotlib.pyplot as plt
class WattsStrogatz:
    """Watts-Strogatz small-world graph: a ring lattice with random rewiring."""
    def __init__(self, n: int, k: int, p: float):
        self.graph = self.create_ring_lattice(n, k)
        self.rewire_nodes(p)
    def create_ring_lattice(self, n: int, k: int):
        """Create a ring lattice: each node linked to its k nearest neighbors.

        BUG FIX: the offset range previously started at 0, which added a
        self-loop edge (node, node) on every node and skewed the clustering
        coefficient; the offsets must start at 1.
        """
        graph = nx.Graph()
        nodes = range(n)
        graph.add_nodes_from(nodes)
        graph.add_edges_from((node, (node + i) % n)
                             for node in range(n)
                             for i in range(1, k // 2 + 1))
        return graph
    def rewire_nodes(self, p: float):
        """Randomly rewire each edge with probability p.

        BUG FIX: candidate targets must exclude the current neighbors of the
        kept endpoint `a` (previously the neighbors of `b` were excluded,
        which allowed duplicate edges and silently shrank the edge count).
        """
        nodes = set(self.graph)
        for a, b in self.graph.edges():
            if p >= random.random():
                possible_nodes = nodes - {a} - set(self.graph[a])
                new_b = random.choice(list(possible_nodes))
                self.graph.remove_edge(a, b)
                self.graph.add_edge(a, new_b)
    def all_pairs(self, nodes):
        """Get all node pairs in the graph."""
        return ((x, y) for i, x in enumerate(nodes) for j, y in enumerate(nodes)
                if i > j)
    def node_clustering(self, node):
        """Calculate the clustering around a specific node."""
        neighbors = self.graph[node]
        k = len(neighbors)
        if k < 2:
            # Clustering is undefined with fewer than two neighbors.
            return np.nan
        possible = k * (k - 1) / 2
        exist = sum(1 for v, w in self.all_pairs(neighbors) if self.graph.has_edge(v, w))
        return exist / possible
    def clustering_coefficient(self):
        """Calculate the clustering coefficient of the entire graph."""
        return np.nanmean([self.node_clustering(node) for node in self.graph])
    def path_lengths(self):
        """Calculate all path lengths on the graph."""
        # NOTE(review): indexing length_map[a] assumes node ids are 0..n-1 in
        # insertion order -- holds for graphs built by create_ring_lattice.
        length_map = list(nx.shortest_path_length(self.graph))
        lengths = [length_map[a][1][b]
                   for a, b in self.all_pairs(self.graph)]
        return lengths
    def characteristic_path_length(self):
        """Calculate the average path length on the graph."""
        return np.mean(self.path_lengths())
def analyze_graph(n: int, k: int, p: float):
    """Create a graph of n nodes with k edges and p probability of
    rewiring and return the characteristic path length and
    clustering coefficient."""
    ws_graph = WattsStrogatz(n, k, p)
    path_length = ws_graph.characteristic_path_length()
    clustering = ws_graph.clustering_coefficient()
    return path_length, clustering
def watts_strogatz_experiment(ps, n=1000, k=10, iters=20):
    """Run the Watts Strogatz experiment using probabilities from
    ps, with a graph of n nodes and k edges. Iterate iters times.
    Graph and save the results as 'watts_strogatz.png'.

    BUG FIXES: `plt.xscale = 'log'` previously *replaced* the pyplot
    function with a string instead of calling it; the labelled curves were
    never given a legend; and the curves were plotted against the sample
    index rather than the probabilities, making the log x-axis meaningless.
    """
    results = []
    for p in ps:
        graph_results = [analyze_graph(n, k, p) for _ in range(iters)]
        means = np.array(graph_results).mean(axis=0)
        results.append(means)
    results_array = np.array(results)
    L, C = np.transpose(results_array)
    # Normalize both curves to their value at the smallest probability.
    L /= L[0]
    C /= C[0]
    plt.title("Normalized clustering coefficient and path length vs rewiring probability")
    plt.xlabel("Rewiring probability")
    plt.ylabel("Clustering coefficient")
    plt.plot(ps, C, '*-', label="Normalized Clustering Coefficient")
    plt.plot(ps, L, 'o-', label="Normalized Path Length")
    plt.xscale('log')
    plt.legend()
    plt.savefig("watts_strogatz.png")
    print("Saved figure as 'watts_strogatz.png'")
if __name__ == '__main__':
    # Rewiring probabilities sampled log-uniformly from 1e-4 to 1.
    probabilities = np.logspace(-4, 0, 9)
    watts_strogatz_experiment(probabilities)
| {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,456 | M-110/learning-python-complexity | refs/heads/master | /complexity/cellular_automata/1d_ca_plot_using_table.py | """Runs a functional cellular automaton using a given rule table.
The make_table function allows you to call any of the 256 possible
sets of rules for the cells.
The example uses rule 110.
"""
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import seaborn as sns
from random import randint
def generate_array(rows, cols):
    """Allocate a rows x cols grid of zero-valued uint8 cells."""
    return np.zeros((rows, cols), dtype=np.uint8)
def step(array, i, window=(4, 2, 1)):
    """Fill row i of `array` from row i-1 using the module-level `table`.

    The window weights pack each (left, center, right) neighborhood into an
    index 0..7.  IDIOM FIX: the default was a mutable list ([4, 2, 1]);
    mutable defaults are shared across calls, so a tuple is used instead
    (np.correlate accepts any array-like).
    """
    previous_row = array[i - 1]
    c = np.correlate(previous_row, window, mode='same')
    # `table` is a module-level rule table built by make_table().
    array[i] = table[c]
def make_table(rule):
    """Expand a Wolfram rule number into its 8-entry lookup table."""
    bits = np.unpackbits(np.array([rule], dtype=np.uint8))
    return bits[::-1]
def step_over_all_rows(array, step_function):
    """Apply step_function to every row after the first, then save the plot."""
    row_count = array.shape[0]
    for row in range(1, row_count):
        step_function(array, row)
    plt.imshow(array)
    plt.savefig("1d_plot_using_table.png")
if __name__ == "__main__":
    rows, cols = 500, 1000
    # `table` must stay at module scope: step() reads it as a global.
    table = make_table(110)
    array = generate_array(rows, cols)
    # Single active cell in the middle of the top row.
    array[0, 500] = 1
    step_over_all_rows(array, step)
| {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,457 | M-110/learning-python-complexity | refs/heads/master | /--Wolfram/binary_grapher_fractions.py | """This module saves visual representations of binary sequences.
The sequences are done through constant addition.
"""
import matplotlib.pyplot as plt
from binary_grapher import BinaryGrapher
from decimal import Decimal, getcontext
class FractionGrapher(BinaryGrapher):
    """Graphs the fractional decimal digits of numerator/divisor, one per step."""
    def __init__(self, *args, numerator, divisor, **kwargs):
        super().__init__(*args, **kwargs)
        # High precision so the digit stream is long enough.
        getcontext().prec = 2_000
        quotient = Decimal(numerator) / Decimal(divisor)
        # Fractional digits only, right-padded with zeros to 500 places.
        self.fraction_iter = iter(str(quotient)[2:].ljust(500, '0'))
    def step_function(self, n: int, i: int):
        """Return the next fractional digit (n is unused)."""
        # NOTE(review): debug print kept to preserve existing behavior.
        print(i)
        return int(next(self.fraction_iter))
    def fill_grid(self):
        """Right-pad every row with zeros so all rows share the same width."""
        width = max(len(row) for row in self.grid)
        for idx in range(len(self.grid)):
            self.grid[idx] += [0] * (width - len(self.grid[idx]))
if __name__ == "__main__":
    # BUG FIX: randint is used below but was never imported in this module,
    # so the random-generation loop raised NameError.
    from random import randint
    #grapher = FractionGrapher(100, numerator=9, divisor=13)
    #grapher.draw_graph()
    # Generate random stuff
    for i in range(50):
        divisor = randint(2, 100)
        numerator = randint(1, divisor)
        grapher = FractionGrapher(100, numerator=numerator, divisor=divisor)
        grapher.save_image(f'random_{numerator}_over_{divisor}', 'division', cmap='Greens')
    # One image per unit fraction 1/1 .. 1/99.
    for i in range(1, 100):
        grapher = FractionGrapher(100, numerator=1, divisor=i)
        grapher.save_image(f'1_over_{i}', 'division', cmap='Greens')
76,458 | M-110/learning-python-complexity | refs/heads/master | /Graphs/Old/network.py | import networkx as nx
"""
G = nx.DiGraph()
G.add_node('Alicia')
G.add_node('Sally')
G.add_node('Ella')
print(G.nodes())
G.add_edge('Alicia', 'Sally')
G.add_edge('Alicia', 'Ella')
G.add_edge('Ella', 'Sally')
G.add_edge('Sally', 'Ella')
print(G.edges())
nx.draw_circular(G, node_color='cyan', node_size=2000, with_labels=True)
G.clear()
"""
# Map each city to an (approximate longitude, latitude) plotting position.
positions = dict(Albany=(-74, 43),
                 Boston=(-71, 42),
                 NYC=(-74, 41),
                 Philly=(-75, 40))
G2 = nx.Graph()
G2.add_nodes_from(positions)
# Edge keys are city pairs; values are driving times in hours.
drive_times = {('Albany', 'Boston'): 3,
               ('Albany', 'NYC'): 1,
               ('Boston', 'NYC'): 4,
               ('NYC', 'Philly'): 2}
# Only the keys (the city pairs) become edges; the times are not attached.
G2.add_edges_from(drive_times)
nx.draw(G2, positions, node_color='pink', node_shape='s', node_size=2500, with_labels=True)
76,459 | M-110/learning-python-complexity | refs/heads/master | /Graphs/Old/random_graph.py | import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
from typing import Tuple, Iterator
Node = int
class Graph:
    """Node-based complete graph.

    NOTE(review): this class was originally unfinished -- methods were
    missing `self`, `create_graph` referenced an undefined `length`, and
    `reachable_nodes` was a truncated definition that made the module
    unparseable.  It is completed here to mirror the module-level helper
    functions defined below in this file.
    """
    def __init__(self, n: int):
        self.graph: "nx.Graph" = self.create_graph(n)
    @staticmethod
    def pair_all(nodes):
        """Generates all consecutive pairs of nodes."""
        return ((x, y) for i, x in enumerate(nodes)
                for j, y in enumerate(nodes)
                if i > j)
    def create_graph(self, n: int) -> "nx.Graph":
        """Create a complete graph with n nodes."""
        graph = nx.Graph()
        nodes = range(n)
        graph.add_nodes_from(nodes)
        graph.add_edges_from(self.pair_all(nodes))
        return graph
    def reachable_nodes(self, start: int) -> set:
        """Return the set of nodes reachable from `start` (depth-first)."""
        seen = set()
        stack = [start]
        while stack:
            node = stack.pop()
            if node not in seen:
                seen.add(node)
                stack.extend(self.graph.neighbors(node))
        return seen
def pair_all(nodes):
    """Yield every unordered pair of nodes exactly once.

    FIX: the original yielded the undefined names (u, v) instead of
    (x, y), raising NameError on first use.
    """
    for i, x in enumerate(nodes):
        for j, y in enumerate(nodes):
            if i > j:
                yield x, y
def pair_all(nodes):
    """Return a generator over all unordered node pairs (each once)."""
    return ((x, y)
            for i, x in enumerate(nodes)
            for j, y in enumerate(nodes)
            if i > j)
def create_complete_graph(n):
    """Build a networkx graph on n nodes with every pair connected."""
    graph = nx.Graph()
    node_range = range(n)
    graph.add_nodes_from(node_range)
    graph.add_edges_from(pair_all(node_range))
    return graph
complete_graph = create_complete_graph(10)
def reachable_nodes(g, start):
    """Collect every node reachable from `start` via depth-first search."""
    seen = set()
    frontier = [start]
    while frontier:
        current = frontier.pop()
        if current in seen:
            continue
        seen.add(current)
        frontier.extend(g.neighbors(current))
    return seen
def is_connected(g):
    """True when every node in g is reachable from an arbitrary start node."""
    first = next(iter(g))
    return len(reachable_nodes(g, first)) == len(g)
print(reachable_nodes(complete_graph, 0))
print(f'Is connected? {is_connected(complete_graph)}')
def random_pairs(nodes, probability):
    """Yield each possible node pair independently with the given probability."""
    for candidate in pair_all(nodes):
        if np.random.random() < probability:
            yield candidate
def make_random_graph(n, p):
    """Erdos-Renyi-style graph: n nodes, each edge present with probability p."""
    graph = nx.Graph()
    node_range = range(n)
    graph.add_nodes_from(node_range)
    graph.add_edges_from(random_pairs(node_range, p))
    return graph
random_graph = make_random_graph(10, 0.3)
# nx.draw_circular(random_graph, node_color="pink", node_size=1000, with_labels=True)
def prob_connected(n, p, iters=100):
    """Estimate the probability that a random G(n, p) graph is connected."""
    outcomes = [is_connected(make_random_graph(n, p)) for _ in range(iters)]
    return np.mean(outcomes)
print(prob_connected(10, .23, iters=10000))
n=10
ps = np.logspace(-2.5, 0, 11)
ys = [prob_connected(n, p) for p in ps]
plt.plot(ys)
| {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,460 | M-110/learning-python-complexity | refs/heads/master | /Agents/schelling.py | import numpy as np
from scipy.signal import correlate2d
from typing import Tuple, List
from nptyping import NDArray
from cell import Cell2D
import seaborn as sns
from matplotlib.colors import LinearSegmentedColormap
Grid = NDArray
Location = Tuple[int, int]
palette = sns.color_palette('muted')
colors = 'white', palette[1], palette[0]
cmap = LinearSegmentedColormap.from_list('cmap', colors)
class Schelling(Cell2D):
    """Schelling segregation model grid.

    Cell values: 0 = empty, 1 = red, 2 = blue.

    Args:
        n: number of rows in grid (the grid is n x n).
        p: threshold fraction of similar neighbors below which an
           agent is unhappy and moves.
    """
    # Moore neighborhood; center is 0 so a cell never counts itself.
    kernel = np.array([[1, 1, 1],
                       [1, 0, 1],
                       [1, 1, 1]], dtype=np.int8)

    def __init__(self, n: int, p: float):
        self.p = p
        choices = [0, 1, 2]
        # 10% of homes are empty, 45% are 1 (red), 45% are 2 (blue)
        probs = [0.1, 0.45, 0.45]
        self.grid: Grid[int] = np.random.choice(choices, (n, n), p=probs)
        self.image = None

    def count_neighbors(self) -> Tuple[Grid[bool], Grid[float], Grid[float], Grid[float]]:
        """Count the neighbors and the fraction of their neighbors.

        Returns:
            empty: Grid with True in cells that are empty
            frac_red: Grid with fraction of neighbors that are red
            frac_blue: Grid with fraction of neighbors that are blue
            frac_same: Grid with fraction of neighbors matching the cell
                (NaN for empty cells)
        """
        empty: Grid[bool] = self.grid == 0
        red: Grid[bool] = self.grid == 1
        blue: Grid[bool] = self.grid == 2
        num_red: Grid[int] = correlate2d(red, self.kernel,
                                         mode='same', boundary='wrap')
        num_blue: Grid[int] = correlate2d(blue, self.kernel,
                                          mode='same', boundary='wrap')
        num_neighbors: Grid[int] = num_red + num_blue
        frac_red: Grid[float] = num_red / num_neighbors
        # FIX: frac_blue was computed from num_red, so blue agents judged
        # their happiness by the red fraction instead of their own.
        frac_blue: Grid[float] = num_blue / num_neighbors
        frac_same: Grid[float] = np.where(red, frac_red, frac_blue)
        frac_same[empty] = np.nan
        return empty, frac_red, frac_blue, frac_same

    def get_segregation_fraction(self) -> float:
        """Returns average fraction of similar neighbors (occupied cells)."""
        *_, frac_same = self.count_neighbors()
        return np.nanmean(frac_same)

    @staticmethod
    def locations_where(condition: Grid[bool]) -> List[Location]:
        """(row, col) tuples of every True cell in `condition`."""
        return list(zip(*np.nonzero(condition)))

    def step(self) -> float:
        """Executes one step: every unhappy agent moves to a random empty cell.

        Return:
            Average fraction of similar neighbors (measured before moving).
        """
        empty, *_, frac_same = self.count_neighbors()
        unhappy: Grid[bool] = frac_same < self.p
        unhappy_locations: List[Location] = self.locations_where(unhappy)
        empty_locations: List[Location] = self.locations_where(empty)
        np.random.shuffle(unhappy_locations)
        num_empty = np.sum(empty)
        for source in unhappy_locations:
            # Random empty destination.
            i = np.random.randint(num_empty)
            destination: Location = empty_locations[i]
            # Move source to destination.
            self.grid[destination] = self.grid[source]
            self.grid[source] = 0
            # The vacated source cell becomes the new empty slot.
            empty_locations[i] = source
        return np.nanmean(frac_same)

    def draw(self):
        """Draw cells using the module-level colormap."""
        self.image = self.draw_grid(self.grid, cmap=cmap, vmax=2)
s = Schelling(10, .3)
for i in range(5000):
s.step()
print(s.get_segregation_fraction())
s.draw()
| {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,461 | M-110/learning-python-complexity | refs/heads/master | /Self-Organizing Criticality/gol_cdf_plot.py | from sandpile import SandPile
from timeit import timeit
import matplotlib.pyplot as plt
import numpy as np
from empiricaldist import Pmf, Cdf
from game_of_life_helper import Life
from random import randint
def generate_time_and_duration(n=10000):
    """Run a randomly seeded 50x50 Life board for n iterations.

    Returns a pair of arrays: avalanche durations (> 3) and sizes (> 10).
    """
    size = 50
    life = Life(size)
    # Seed every cell with a random 0/1 state.
    for row in range(size):
        for col in range(size):
            life.make_life(row, col, str(randint(0, 1)))
    # **WARNING** This takes a few minutes
    durations, sizes = life.run_n_iterations(n)
    T = np.array(durations)
    S = np.array(sizes)
    # Drop the small events before returning.
    T = T[T > 3]
    S = S[S > 10]
    print(S, len(S))
    return T, S
def plot_pmf(T, S):
    """Plot side-by-side PMFs of avalanche duration and size; save as PNG.

    Args:
        T: sequence of avalanche durations.
        S: sequence of avalanche sizes.
    """
    pmfT = Pmf.from_seq(T)
    pmfS = Pmf.from_seq(S)
    fig = plt.figure(figsize=(10, 5))
    # FIX: a stray 'a' character after this call made the module a
    # syntax error.
    plt.subplot(1, 2, 1)
    pmfT.plot(xlim=(0, 50), xlabel="Avalanche duration", ylabel="PMF")
    plt.subplot(1, 2, 2)
    pmfS.plot(title="Game of Life PMF", xlim=(0, 100), xlabel="Avalanche size", ylabel="PMF")
    # NOTE(review): plt.show() normally takes no positional string — confirm
    # this argument is intentional before relying on it.
    plt.show('PMF size and duration')
    fig.savefig("pmf_gol_plot.png")
def plot_cdf(T, S):
    """Plot side-by-side CDFs of avalanche duration and size; save as PNG."""
    duration_cdf = Cdf.from_seq(T)
    size_cdf = Cdf.from_seq(S)
    fig = plt.figure(figsize=(10, 5))
    plt.subplot(1, 2, 1)
    duration_cdf.plot(xlim=(0, 50), xlabel="Avalanche duration", ylabel="CDF")
    plt.subplot(1, 2, 2)
    size_cdf.plot(title="Game of Life CDF", xlim=(0, 5000), xlabel="Avalanche size", ylabel="CDF")
    plt.show('PMF size and duration')
    fig.savefig("cdf_gol_plot.png")
if __name__ == "__main__":
T, S = generate_time_and_duration(5900)
#plot_cdf(T, S)
plot_pmf(T, S) | {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,462 | M-110/learning-python-complexity | refs/heads/master | /complexity/game_of_life/cell_2d.py | """Cell2D is meant as a base class which can be subclassed and allow
subclasses the functionality of things like saving the plot as an animated gif."""
from abc import abstractmethod
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
class Cell2D:
    """Base class for 2D cellular automata with drawing and GIF export.

    Args:
        n: number of rows
        m: number of columns (defaults to n for a square grid)
    """

    def __init__(self, n, m=None):
        if m is None:
            m = n
        self.array = np.zeros((n, m), np.uint8)
        self.image = None
        self.plot_options = None
        self.set_plot_options()

    def set_plot_options(self, cmap='Blues', alpha=1, vmin=0, vmax=4,
                         interpolation='nearest', origin='upper'):
        """Store the imshow keyword options used by draw()."""
        self.plot_options = dict(cmap=cmap, alpha=alpha, vmin=vmin, vmax=vmax,
                                 interpolation=interpolation, origin=origin)

    def add_cells(self, row, col, values):
        """Add cells using the given values beginning at the row/column location.

        Args:
            row: index of the row
            col: index of the col
            values: strings containing 1s and 0s
        """
        for i, value in enumerate(values):
            self.array[row + i, col:col + len(value)] = np.array([int(x) for x in value])

    @abstractmethod
    def step(self):
        """Advance the automaton one step (subclass responsibility)."""
        ...

    def draw(self):
        """Draw the grid."""
        g = self.array.copy()
        # Get width/height of grid for plotting
        x, y = g.shape
        plt.axis([0, x, 0, y])
        plt.xticks([])
        plt.yticks([])
        self.image = plt.imshow(g, **self.plot_options)

    def animation_init_function(self):
        """Called once when the animate_gif method begins."""
        print('Beginning animation.')

    def animate_function(self, i):
        """Executes one step (except frame 0) and refreshes the image."""
        if i > 0:
            self.step()
        self.image.set_array(self.array)
        return self.image,

    def animate_gif(self, frames, interval):
        """Build a FuncAnimation over the current figure."""
        fig = plt.gcf()
        self.draw()
        return animation.FuncAnimation(fig, self.animate_function,
                                       init_func=self.animation_init_function,
                                       frames=frames, interval=interval)

    def save_gif(self, filename='my_gif', frames=300, interval=1, fps=30):
        """Save the plot as an animated gif named `<filename>.gif`."""
        writer_gif = animation.PillowWriter(fps=fps)
        # FIX: the filename parameter was ignored (a hard-coded literal was
        # used); honor it so callers control the output path.
        self.animate_gif(frames, interval).save(f'{filename}.gif',
                                                writer=writer_gif)
        print(f'Saved gif as "{filename}.gif".')
| {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,463 | M-110/learning-python-complexity | refs/heads/master | /--Wolfram/continuous_ca.py | import numpy as np
import matplotlib.pyplot as plt
from math import floor
rows = 67
cols = rows
grid = np.zeros((rows, cols), dtype=float)
# Single active cell in the middle of the top row.
grid[0, rows // 2] = 1
for i in range(1, rows):
    for j in range(1, cols - 1):
        # Average the three cells above, then apply the affine map.
        neighborhood_mean = (grid[i - 1, j - 1] + grid[i - 1, j] + grid[i - 1, j + 1]) / 3
        new_value = neighborhood_mean * -4.68 + .9
        if new_value > 1:
            new_value -= floor(new_value)
        grid[i, j] = new_value
plt.imshow(grid, cmap='twilight')
76,464 | M-110/learning-python-complexity | refs/heads/master | /--Wolfram/recursive_sequence.py | """This module saves visual representations of binary sequences.
The sequences are done by taking the previous value and adding the reverse
binary representation to get a new value.
"""
import matplotlib.pyplot as plt
from math import sqrt, log
def to_binary(num: int) -> str:
    """Binary digits of *num* as a string (the '0b' prefix stripped)."""
    binary_repr = bin(num)
    return binary_repr[2:]
def reverse_digit(num: int) -> int:
    """Reverse the binary digits of *num* and return the resulting int."""
    bits = bin(num)[2:]
    return int(bits[::-1], 2)
def binary_image(num: int, initial_digit=2, width=100, step=1):
    """Render num//step rows of a reverse-and-add binary sequence as a PNG.

    Each row shows the binary digits of the current value; the next value
    is the current one plus its bit-reversed form. `step` controls how many
    sequence indices each plotted row advances.
    """
    grid = []
    digit = initial_digit
    for _ in range(num // step):
        row = [0] * width
        for col, char in enumerate(to_binary(digit)):
            row[col] = float(char)
        grid.append(row)
        digit = digit + reverse_digit(digit)
    fig1 = plt.figure(figsize=(100, 100))
    plt.pcolormesh(grid[::-1], cmap='Greens')
    plt.axis('off')
    plt.show()
    plt.draw()
    fig1.savefig(f'initial_digit={initial_digit}.png', dpi=100)
def recursive(n: int, func, f: list = None) -> list:
    """Extend a copy of seed list f (default [1, 1]) to length n via func(i, f)."""
    seq = [1, 1] if f is None else list(f)
    while len(seq) < n:
        seq.append(func(len(seq), seq))
    return seq
# The Python syntax for defining these recursive functions is actually
# identical to the traditional way you'd write them out so they self-document.
# The first if block is the initial conditions for that function.
# The return statement is the recursive formula being used.
# Each function below is one self-referential recurrence rule to be passed
# to `recursive`; n is the index being computed and f the sequence so far.
def func_a(n: int, f: list) -> int:
    return 1 + f[n - f[n - 1]]
def func_b(n: int, f: list) -> int:
    return 2 + f[n - f[n - 1]]
def func_c(n: int, f: list) -> int:
    return f[f[n - 1]] + f[n - f[n-1]]
def func_d(n: int, f: list) -> int:
    return f[n - f[n - 1]] + f[n - f[n - 2] - 1]
def func_e(n: int, f: list) -> int:
    return f[n - f[n - 1]] + f[n - f[n - 2]]
def func_f(n: int, f: list) -> int:
    return f[n - f[n - 1] - 1] + f[n - f[n - 2] - 1]
def func_g(n: int, f: list) -> int:
    return f[f[n - 1]] + f[n - f[n - 2] - 1]
def func_h(n: int, f: list) -> int:
    # NOTE(review): this variant is commented out at the call sites below;
    # presumably it raises IndexError for some n — confirm before use.
    return f[f[n - 1]] + f[n - 2 * f[n - 1] + 1]
def func_i(n: int, f: list) -> int:
    return f[n-f[n-1]-1] + f[n-f[n-2]-1]
def func_j(n: int, f: list) -> int:
    return f[f[n-1]] + f[n - f[n-2]-1]
def func_fib(n: int, f: list) -> int:
    # Standard Fibonacci recurrence.
    return f[n-1] + f[n-2]
a = recursive(100, func_a, f=[1])
b = recursive(100, func_b)
c = recursive(100, func_c)
d = recursive(10000, func_d)
e = recursive(10000, func_e)
f = recursive(100, func_f)
g = recursive(100, func_g)
#h = recursive(100, func_h)
i = recursive(2000, func_i)
j = recursive(1000, func_j)
fib = recursive(100, func_fib)
plt.plot([y- x/2 for x, y in enumerate(e, start=1)])
| {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,465 | M-110/learning-python-complexity | refs/heads/master | /Self-Organizing Criticality/single_source_sandpile.py | from sandpile import SandPile
import numpy as np
import matplotlib.pyplot as plt
class SingleSource(SandPile):
    """A sand pile with a uniform background and one huge central pile.

    Args:
        rows: number of rows.
        cols: number of columns (defaults to rows).
        level: background sand level for every cell.
    """
    def __init__(self, rows, cols=None, level=9):
        if cols is None:
            cols = rows
        # FIX: `np.zeros(...) * level` is always zero, silently ignoring
        # `level`; np.full makes the background actually start at `level`.
        self.grid = np.full((rows, cols), level, dtype=np.int32)
        self.grid[rows // 2][cols // 2] = 50000
        self.toppled_sequence = []
if __name__ == "__main__":
single_source = SingleSource(150, level=5)
single_source.draw_after_n_iterations(1) | {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,466 | M-110/learning-python-complexity | refs/heads/master | /--Wolfram/complexity_binary_reverse_digit_addition_sequence_representation.py | """This module saves visual representations of binary sequences.
The sequences are done by taking the previous value and adding the reverse
binary representation to get a new value.
"""
import matplotlib.pyplot as plt
from math import sqrt, log
def to_binary(num: int) -> str:
    """Binary digits of *num* as a string (without the '0b' prefix)."""
    binary_repr = bin(num)
    return binary_repr[2:]
def reverse_digit(num: int) -> int:
    """Return the int whose binary digits are those of *num* reversed."""
    bits = bin(num)[2:]
    return int(bits[::-1], 2)
def binary_image(num: int, initial_digit=2, width=100, step=1):
    """Save a plot of a reverse-and-add binary sequence.

    Rows are the binary digits of successive values; each next value adds
    the bit-reversed form of the current one. `step` scales how many
    sequence indices each plotted row represents.
    """
    grid = []
    digit = initial_digit
    for _ in range(num // step):
        row = [0] * width
        for col, char in enumerate(to_binary(digit)):
            row[col] = float(char)
        grid.append(row)
        digit = digit + reverse_digit(digit)
    fig1 = plt.figure(figsize=(100, 100))
    plt.pcolormesh(grid[::-1], cmap='Greens')
    plt.axis('off')
    plt.show()
    plt.draw()
    fig1.savefig(f'initial_digit={initial_digit}.png', dpi=100)
for i in range(50):
binary_image(250, initial_digit=i, width=190, step=1)
for i in range(64, 512, 16):
binary_image(250, initial_digit=i, width=190, step=1)
# When the initial digit is between 1 and 15, the only variations are in the
# first few rows. The patterns all quickly simplify to the same pattern.
#for i in range(1, 16):
# binary_image(200, initial_digit=i, width=110, step=1)
# When the initial digit is 16, you can see the pattern eventually terminates.
#binary_image(325, initial_digit=16, width=200, step=1)
# Changin the stepsize to 5 does not change the underlying pattern.
#binary_image(325*5, initial_digit=16, width=200, step=5)
# But using a diferent initial digit, 512, creates a different pattern which
# is complex and seemingly endless.
#binary_image(1125, initial_digit=512, width=750, step=1)
| {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,467 | M-110/learning-python-complexity | refs/heads/master | /complexity/physical_modeling/diffusion.py | """Simulation of diffusion using the Cell2D parent class.
The example creates a smaller square as the initial conditions as well saves
the result as a gif which shows the red square diffuse until it has disappeared.
"""
import numpy as np
from scipy.signal import correlate2d
from complexity.game_of_life.cell_2d import Cell2D
class Diffusion(Cell2D):
    """Diffusion CA: every step each cell exchanges value with its
    4-connected neighbors at the given rate.

    Args:
        rows: side length of the square grid.
        rate: diffusion constant applied per step.
    """
    # Discrete Laplacian kernel.
    kernel = np.array([[0, 1, 0],
                       [1, -4, 1],
                       [0, 1, 0]])

    def __init__(self, rows, rate=0.1):
        super().__init__(rows)
        # FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin float is the documented replacement.
        self.array = np.zeros((rows, rows), float)
        self.rate = rate

    def step(self):
        """Apply one diffusion update across the whole grid."""
        correlation = correlate2d(self.array, self.kernel, mode='same')
        self.array += self.rate * correlation
if __name__ == '__main__':
diffusion = Diffusion(15)
diffusion.set_plot_options(cmap='Reds', vmax=1)
diffusion.add_cells(7, 7, ['111', '111', '111'])
diffusion.save_gif(filename='simple_diffusion', frames=200, interval=0.1, fps=20)
| {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,468 | M-110/learning-python-complexity | refs/heads/master | /complexity/small_world_graphs/dijkstra.py | """Creates a ring lattice and uses the Dijkstra algorithm to find the shortest
path from node 0 to each of the 10 nodes."""
from collections import deque
import networkx as nx
from typing import Dict
from complexity.graphs.ring_lattice import RingLattice
def shortest_path_dijkstra(graph: 'nx.Graph', source: int) -> Dict[int, int]:
    """Hop-count shortest distance from `source` to every reachable node.

    Implemented as breadth-first search, so it is correct for unweighted
    graphs only. Works with any mapping node -> iterable of neighbors
    (including nx.Graph); the annotation is quoted so the function no
    longer requires networkx at definition time.

    Args:
        graph: adjacency structure supporting `graph[node]` lookup.
        source: starting node.

    Returns:
        dict mapping each reachable node to its distance from source.
    """
    dist = {source: 0}
    queue = deque([source])
    while queue:
        node = queue.popleft()
        new_dist = dist[node] + 1
        # Only neighbors not yet assigned a distance are discovered here.
        neighbors = set(graph[node]) - set(dist.keys())
        for n in neighbors:
            dist[n] = new_dist
        queue.extend(neighbors)
    return dist
if __name__ == "__main__":
graph = RingLattice(10, 4)
result = shortest_path_dijkstra(graph.graph, 0)
print(result)
| {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,469 | M-110/learning-python-complexity | refs/heads/master | /--Wolfram/complex_string_sequence.py | """
This module is meant to demonstrate how even a simple function like the
string's replace method can lead to complex, random patterns given certain
simple rules and a simple initial condition.
"""
def build_complex_text_patterns(initial_string: str, n: int,
                                rules: dict, file_name: str):
    """Iteratively apply string-replacement rules and save all rows to a file.

    Args:
        initial_string: row 0 of the pattern.
        n: number of iterations to run.
        rules: mapping old -> new; the first rule whose key occurs in the
            current row is applied (all occurrences replaced).
        file_name: output path without the '.txt' extension.
    """
    rows: list = [initial_string]
    for i in range(n):
        # Apply the first applicable rule then break
        for old, new in rules.items():
            if old in rows[i]:
                rows.append(rows[i].replace(old, new))
                break
        else:
            # FIX: if no rule matched, nothing was appended and the next
            # iteration raised IndexError on rows[i]; the string is a fixed
            # point, so carry it forward unchanged.
            rows.append(rows[i])
    with open(file_name + '.txt', 'w') as f:
        for row in rows:
            f.write(row + '\n')
    print(f'Saved as {file_name}.txt')
rules_a = {' X ': ' X',
' ': ' X '}
rules_b = {'X ': ' XXXX',
'X XX': ' X ',
' ': ' XX'}
rules_c = {' X': 'X ',
' ': 'XX',
'X': ' X'}
rules_d = {'XXX': 'X ',
' ': ' X',
' ': 'X '}
# This rule creates a simple repetitive pattern.
build_complex_text_patterns(' ', 1000, rules_a, 'rule_a_pattern_simple')
# This slightly different rule creates a complex pattern
build_complex_text_patterns(' ', 400, rules_b, 'rules_b_pattern_complex')
# This creates a more dynamic pattern, but not complex
build_complex_text_patterns('X', 1000, rules_c, 'rules_c_pattern_complex')
# This creates a complex pattern
build_complex_text_patterns(' ', 1000, rules_d, 'rules_d_pattern_complex')
| {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,470 | M-110/learning-python-complexity | refs/heads/master | /--Wolfram/binary_grapher.py | """This module saves visual representations of binary sequences.
The sequences are done through constant addition.
"""
import matplotlib.pyplot as plt
from math import sqrt, log
import os
from random import randint
class BinaryGrapher:
    """Renders an integer sequence as an image of its binary expansions.

    Args:
        rows: how many sequence values to render.
        starting_value: first value of the sequence.
        step_size: stride through the row range passed to step_function.
    """

    def __init__(self, rows: int, starting_value: int = 1, step_size: int = 1):
        self.rows = rows
        self.starting_value = starting_value
        self.step_size = step_size
        self.grid = []

    @staticmethod
    def int_to_binary_string(num: int) -> str:
        """Convert int to its binary equivalent and return as string."""
        return bin(num)[2:]

    def generate_grid(self):
        """Build the grid of binary-digit rows from the sequence."""
        self.grid = []
        n = self.starting_value
        for i in range(0, self.rows, self.step_size):
            self.grid.append([float(char) for char in self.int_to_binary_string(n)])
            n = self.step_function(n, i)
        self.fill_grid()

    def fill_grid(self):
        """Right-pad every row with zeros to the width of the last (widest) row."""
        width = len(self.grid[-1])
        for i in range(len(self.grid)):
            padding = width - len(self.grid[i])
            self.grid[i] += [0] * padding

    def draw_graph(self, cmap='Greens', figsize=(50, 100)):
        """Draw the grid (generating it first if needed)."""
        # FIX: `== []` replaced with idiomatic truthiness check.
        if not self.grid:
            self.generate_grid()
        self.fig = plt.figure(figsize=figsize)
        plt.pcolormesh(self.grid[::-1], cmap=cmap)

    def save_image(self, filename, folder='', cmap='Greens', figsize=(50, 100),
                   dpi=100):
        """Draw the graph and save it as `<folder>/<filename>.png`."""
        self.draw_graph(cmap=cmap, figsize=figsize)
        self.fig.savefig(os.path.join(folder, filename + '.png'), dpi=dpi)

    def step_function(self, n: int, i: int):
        """Default rule: double and add the row index.

        Subclasses may override. (FIX: an unreachable NotImplementedError
        raise after the return — dead code — was removed.)
        """
        return n * 2 + i
if __name__ == "__main__":
grapher = BinaryGrapher(34, step_size=1)
grapher.draw_graph()
#for i in range(1, 10):
# grapher = BinaryGrapher(1000*i, step_size=i)
# grapher.save_image(f'step_size_{i}', 'test', cmap='Greens') | {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,471 | M-110/learning-python-complexity | refs/heads/master | /complexity/graphs/complete_graph.py | """Complete network of nodes with a printed circular graph."""
import networkx as nx
from typing import Tuple, Iterator, List, Set
import matplotlib.pyplot as plt
Node = int
Edge = Tuple[Node, Node]
class CompleteGraph:
    """Graph with all nodes connected."""

    def __init__(self, n: int):
        self.graph: nx.Graph = self.create_graph(n)

    def pair_all(self, nodes: Tuple[Node, ...]) -> Iterator[Edge]:
        """Generate every unordered pair of nodes exactly once."""
        return ((later, earlier)
                for i, later in enumerate(nodes)
                for j, earlier in enumerate(nodes)
                if i > j)

    def create_graph(self, n: int) -> nx.Graph:
        """Create a complete graph with n nodes."""
        graph = nx.Graph()
        node_tuple = tuple(range(n))
        graph.add_nodes_from(node_tuple)
        graph.add_edges_from(self.pair_all(node_tuple))
        return graph

    def reachable_nodes(self, starting_node: Node) -> Set[Node]:
        """Collect every node reachable from starting_node in the graph."""
        visited = set()
        pending: List[Node, ...] = [starting_node]
        while pending:
            current: Node = pending.pop()
            if current in visited:
                continue
            visited.add(current)
            pending.extend(self.graph.neighbors(current))
        return visited

    def is_connected(self) -> bool:
        """True if all nodes in the graph are reachable from one another."""
        origin: Node = next(iter(self.graph))
        return len(self.reachable_nodes(origin)) == len(self.graph)

    def draw_circular_graph(self, node_color='pink', node_size=1000, save=False, filename='output'):
        """Draw a circular layout of the graph and optionally save it as PNG."""
        nx.draw_circular(self.graph, node_color=node_color, node_size=node_size, with_labels=True)
        if save:
            plt.savefig(filename)
            print(f'Saved graph as {filename!r}')
if __name__ == "__main__":
# Create a graph with 10 nodes and a 34% probability that any two pairs of nodes are connected.
cg = CompleteGraph(10)
cg.draw_circular_graph(save=True, filename="complete_graph.png")
| {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,472 | M-110/learning-python-complexity | refs/heads/master | /complexity/game_of_life/game_of_life.py | """An implementation of Conway's game of life using the Cell2D parent class.
The initial state used in the example below is a 'glider gun' which is an endless
looping pattern that keeps producing moving objects that look like gliders.
"""
import numpy as np
from scipy.signal import correlate2d
from complexity.game_of_life.cell_2d import Cell2D
class Life(Cell2D):
    """Conway's Game of Life implementation.

    Args:
        rows: number of rows in the grid
        cols: number of columns in the grid (defaults to rows)
    """

    def __init__(self, rows, cols=None):
        super().__init__(rows, cols)
        # Center weight of 10 lets one correlation encode both the cell's
        # own state and its live-neighbor count in a single value.
        self.kernel = np.array([[1, 1, 1],
                                [1, 10, 1],
                                [1, 1, 1]])
        # Rule table indexed by the correlation result:
        # 3 -> dead cell with exactly 3 neighbors is born;
        # 12, 13 -> live cell with 2 or 3 neighbors survives.
        self.table = np.zeros(20, dtype=np.uint8)
        self.table[[3, 12, 13]] = 1

    def step(self):
        """Advance the board one generation using the rule table."""
        neighbor_code = correlate2d(self.array, self.kernel, mode='same')
        self.array = self.table[neighbor_code]
if __name__ == "__main__":
size = 100
life = Life(size)
toad = ["0111",
"1110"]
glider_gun = ["000000000000000000000000010000000000",
"000000000000000000000011110000100000",
"000000000000010000000111100000100000",
"000000000000101000000100100000000011",
"000000000001000110000111100000000011",
"110000000001000110000011110000000000",
"110000000001000110000000010000000000",
"000000000000101000000000000000000000",
"000000000000010000000000000000000000"
]
life.add_cells(10, 10, glider_gun)
life.save_gif("glider_gun", frames=150, fps=15)
| {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,473 | M-110/learning-python-complexity | refs/heads/master | /complexity/graphs/odd_regular_graph.py | """Creates a regular graph with 12 nodes and 3 neighbors per node."""
import networkx as nx
import matplotlib.pyplot as plt
class OddRegularGraph:
    """Creates a regular graph with k edges.

    Args:
        n: number of nodes, must be even
        k: number of neighbors, must be odd and less than n
    """
    def __init__(self, n: int, k: int):
        if k % 2 == 0:
            raise ValueError('k must be odd')
        if n % 2:
            raise ValueError('n must be even')
        # NOTE(review): this only rejects k > n, so k == n passes despite
        # the docstring requiring k < n — confirm intent.
        if k > n:
            raise ValueError('k must be less than n')
        self.graph: nx.Graph = self.create_graph(n, k)
        self.generate_edges(n, k)
    def create_graph(self, n: int, k: int) -> nx.Graph:
        """Generate the graph."""
        graph = nx.Graph()
        nodes = range(n)
        graph.add_nodes_from(nodes)
        return graph
    def generate_edges(self, n: int, k: int):
        """Generates k edges for n nodes."""
        # First pass: give every currently edge-less node one edge by
        # pairing it with another edge-less node.
        for x in self.graph:
            for y in range(n + 1):
                y %= n
                if x == y:
                    continue
                if len(self.graph[x]) < 1 and len(self.graph[y]) < 1:
                    self.graph.add_edge(x, y)
        # Second pass: connect nodes at increasing circular offsets until
        # each endpoint has (at most) k neighbors.
        for i in range(n // k, n // k + k):
            for x in self.graph:
                if len(self.graph[x]) < k and len(self.graph[(x + i) % n]) < k:
                    self.graph.add_edge(x, (x + i) % n)
    def draw_circular_graph(self):
        """Draw and save the graph."""
        nx.draw_circular(self.graph, node_color="pink", node_size=1000, with_labels=True)
        plt.savefig("odd_regular_graph.png")
if __name__ == "__main__":
org = OddRegularGraph(12, 3)
org.draw_circular_graph()
| {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,474 | M-110/learning-python-complexity | refs/heads/master | /--Wolfram/continuous_ca_test.py | import matplotlib.pyplot as plt
import numpy as np
from binascii import a2b_base64
from time import sleep
class Cell1D:
    """1D cellular automaton.

    Args:
        rows: number of rows for the graph
        columns: number of columns for the graph (defaults to 2*rows + 1)
    """
    # Window that encodes a 3-cell neighborhood as a value in 0..7.
    WINDOW = [4, 2, 1]

    def __init__(self, rows: int, columns: int = None):
        if columns is None:
            columns = 2 * rows + 1
        self.rows = rows
        self.columns = columns
        self.array = np.zeros((rows, columns), dtype=np.uint8)
        self.table = self.make_table()
        self._current_row = 0

    def start_single(self):
        """Start with one cell in the top center."""
        self.array[0, self.columns // 2] = 1
        self._current_row = 1

    def start_random(self):
        """Start with each top-row cell active with probability 0.5."""
        self.array[0] = np.random.random(self.columns).round()
        self._current_row = 1

    def start_string(self, string_: str):
        """Seed the top row from the binary expansion of a string."""
        binary_string = ''.join(format(ord(char), 'b') for char in string_)
        string_to_int = [int(x) for x in binary_string]
        for i in range(min(len(self.array[0]), len(string_to_int))):
            self.array[0, i] = string_to_int[i]
        self._current_row = 1

    def make_table(self, rule: int = 0):
        """Build, store, and return the lookup table for the given CA rule.

        FIX: np.unpackbits requires uint8 input (float64 raised TypeError),
        and the method never returned the table, so __init__ stored None.
        """
        rule_array = np.array([rule], dtype=np.uint8)
        table = np.unpackbits(rule_array)[::-1]
        self.table = table
        return table

    def loop(self, steps: int = 1):
        """Execute a given number of steps."""
        for _ in range(steps):
            self.step()

    def step(self):
        """Compute the next row in the array from the previous one.

        NOTE(review): multiplying by .4 truncates to 0 when stored into the
        uint8 array — kept from the original 'continuous' experiment;
        confirm intent. (A per-step debug print of the array was removed.)
        """
        c = np.correlate(self.array[self._current_row - 1], self.WINDOW,
                         mode='same')
        self.array[self._current_row] = self.table[c] * .4
        self._current_row += 1

    def plot_show(self):
        """Show the plot of the array."""
        plt.imshow(self.array, cmap='Blues', alpha=0.7)
        plt.xticks([])
        plt.yticks([])

    def draw_rows(self, rule: int = 110, n: int = None):
        """Build the rule table, run up to n steps, and plot the result."""
        if n is None:
            n = self.rows - 1
        else:
            n = min(self.rows - 1, n)
        self.make_table(rule)
        self.loop(n)
        self.plot_show()
a = Cell1D(100)
a.start_random()
a.draw_rows(64)
| {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,475 | M-110/learning-python-complexity | refs/heads/master | /complexity/cellular_automata/simple_1d_ca_functional.py | """Simple function implementation of a 1d cellular automaton.
Saves an image of the resulting CA.
"""
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import seaborn as sns
from random import randint
def generate_array(rows, cols):
    """Allocate a rows x cols grid of zeros (uint8)."""
    return np.zeros((rows, cols), dtype=np.uint8)
def step(array, i):
    """Fill row i from row i-1: each cell is the parity of its 3 upper neighbors."""
    _, cols = array.shape
    above = array[i - 1]
    for j in range(1, cols):
        # Slice grabs up to 3 neighbors above (fewer at the right edge).
        window = above[j - 1:j + 2]
        array[i, j] = window.sum() % 2
def alt_step(array, i):
    """Fill row i from row i-1 using a five-cell upper neighborhood.

    Each cell becomes the parity of the sum of the (up to) five cells
    centred above it; the window is clipped at the grid edges.

    Bug fixed: `previous_row[j - 2:j + 3]` produced a *negative* slice
    start at j == 1, which Python interprets as counting from the end
    of the row, yielding a wrong (usually empty) neighborhood there.
    """
    _, cols = array.shape
    previous_row = array[i - 1]
    for j in range(1, cols):
        # Clamp the slice start so it never wraps around the row.
        neighbors = previous_row[max(j - 2, 0):j + 3]
        array[i, j] = sum(neighbors) % 2
def fast_step(array, i, window=(1, 1, 1)):
    """Compute row i from row i-1 in one vectorized pass.

    np.correlate sums each cell's neighborhood (weighted by `window`),
    and the result mod 2 gives the new row.

    Fix: the default `window` is now an immutable tuple — a mutable
    list default is shared across calls (classic Python pitfall).
    """
    previous_row = array[i - 1]
    correlation = np.correlate(previous_row, window, mode='same')
    array[i] = correlation % 2
def step_over_all_rows(array, step_function):
    """Apply step_function to every row after the first, then plot the
    grid and save it as 'simple_function_ca.png'."""
    n_rows = array.shape[0]
    for row in range(1, n_rows):
        step_function(array, row)
    plt.imshow(array)
    plt.savefig("simple_function_ca.png")
if __name__ == "__main__":
    rows = 50
    cols = 100
    # Create an array
    array = generate_array(rows, cols)
    # Set initial conditions: one live cell in the middle of the first row
    array[0, 50] = 1
    # Step through all rows of array and save the plot
    step_over_all_rows(array, fast_step)
| {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,476 | M-110/learning-python-complexity | refs/heads/master | /--Wolfram/complexity_binary_addition_sequence_representation.py | """This module saves visual representations of binary sequences.
The sequences are done through constant addition.
"""
import matplotlib.pyplot as plt
from math import sqrt, log
def to_binary(num: int) -> str:
    """Render num as a binary digit string, dropping the '0b' prefix."""
    prefixed = bin(num)
    return prefixed[2:]
def binary_image(num: int, step=1):
    """Plot the binary expansions of 0, step, 2*step, ... (below num).

    Each image row holds one number's binary digits, written
    left-aligned into a fixed-width row; rows are stacked so the
    sequence grows upward in the plot.  The figure is saved to disk.
    """
    width = int(log(num, 2) + 1)
    grid = []
    for i in range(num // step):
        row = [0] * width
        for j, digit in enumerate(to_binary(i * step)):
            row[j] = float(digit)
        grid.append(row)
    fig1 = plt.figure(figsize=(25,100))
    plt.pcolormesh(grid[::-1], cmap='Greens')
    plt.axis('off')
    plt.show()
    plt.draw()
    fig1.savefig(f'binary_image_addition_range({num})_stepsize({step}).png', dpi=100)
# The visual pattern remains the same regardless of the stepsize you use.
# Evenly spaced sequences in binary seem to have a universal visual pattern.
# NOTE(review): these run on import and each saves a very large figure.
binary_image(500, step=1)
binary_image(500*3, step=3)
binary_image(500*13, step=13)
binary_image(500*25, step=25)
binary_image(500*87, step=87) | {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,477 | M-110/learning-python-complexity | refs/heads/master | /Graphs/Old/graph_exercise.py | """
1. Write a function called m_pairs that takes a list of nodes and the number
of edges, m, and returns a random selection of m edges. A simple way to do
that is to generate a list of all possible edges and use random.sample.
2. Write a function called make_m_graph that takes n and m and returns a
random graph with n nodes and m edges.
3. Make a version of prob_connected that uses make_m_graph instead of
make_random_graph.
4. Compute the probability of connectivity for a range of values of m.
"""
import networkx as nx
import random
import numpy as np
import matplotlib.pyplot as plt
def create_graph(n):
    """Return a graph holding nodes 0..n-1 and no edges."""
    g = nx.Graph()
    g.add_nodes_from(range(n))
    return g
def all_possible_edges(nodes):
    """Yield every unordered node pair once, as (later, earlier) by position."""
    indexed = list(enumerate(nodes))
    return ((a, b)
            for i, a in indexed
            for j, b in indexed
            if i > j)
# NOTE(review): despite the name, this has 10 nodes and *no* edges —
# create_graph only adds nodes.
complete_graph = create_graph(10)
def m_pairs(nodes, m):
    """Pick m distinct edges uniformly at random from all possible pairs."""
    candidates = list(all_possible_edges(nodes))
    return random.sample(candidates, m)
def make_m_graph(n, m):
    """Return a graph on n nodes with m randomly chosen edges."""
    g = create_graph(n)
    g.add_edges_from(m_pairs(range(n), m))
    return g
def is_connected(g):
    """Return True if every node of g is reachable from an arbitrary start.

    Works on any mapping of node -> iterable of neighbors (including
    networkx graphs).

    Bug fixed: the original called `reachable_nodes`, which is not
    defined or imported anywhere in this file, so every call raised
    NameError.  Replaced with an equivalent local depth-first search.
    """
    start = next(iter(g))
    seen = {start}
    stack = [start]
    while stack:
        node = stack.pop()
        for neighbor in g[node]:
            if neighbor not in seen:
                seen.add(neighbor)
                stack.append(neighbor)
    return len(seen) == len(g)
def prob_connected(n, m, iters=1000):
    """Estimate the probability that a random n-node, m-edge graph is connected."""
    trials = [is_connected(make_m_graph(n, m)) for _ in range(iters)]
    return np.mean(trials)
# nx.draw_circular(make_m_graph(10, 15), node_color="pink", node_size=1000, with_labels=True)
# Estimate the connectivity probability for m in [8, 24].
x = list(range(8,25))
y = [prob_connected(10, i) for i in x]
plt.plot(x,y) | {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,478 | M-110/learning-python-complexity | refs/heads/master | /Self-Organizing Criticality/game_of_life_helper.py | import numpy as np
import matplotlib.pyplot as plt
from random import randint
from matplotlib import animation
from scipy.signal import convolve2d, correlate2d
class Life:
    """Conway's Game of Life implementation.

    Args:
        rows: number of rows in the grid
        cols: number of columns in the grid
    """
    def __init__(self, rows, cols=None):
        # Default to square grid if cols not provided
        if cols is None:
            cols = rows
        # Initialize the empty grid
        self.grid = np.zeros((rows, cols), np.uint8)
        # Centre weight 10 lets one correlation value encode both the
        # cell's own state and its live-neighbor count.
        self.kernel = np.array([[1, 1, 1],
                                [1, 10, 1],
                                [1, 1, 1]])
        # Rule table indexed by that value: 3 = birth, 12/13 = survival.
        # Create an empty array of length 20
        self.table = np.zeros(20, dtype=np.uint8)
        # Set the 3rd, 12th and 13th value to 1
        self.table[[3, 12, 13]] = 1
        self.step_count = 0
        self.image = None
        # Previous generation, kept to detect period-1/2 stability.
        self.prev_grid = np.zeros((rows, cols), np.uint8)
        # Number of cells that changed on each step (avalanche sizes).
        self.change_sequence = []

    def step(self):
        """Execute 1 step using the game's rules.

        Returns False when the grid matches the grid from two steps ago
        (stable or period-2 oscillation), True otherwise.
        """
        self.step_count += 1
        correlation = correlate2d(self.grid, self.kernel, mode='same')
        prev_prev_grid = self.prev_grid
        self.prev_grid = self.grid
        self.grid = self.table[correlation]
        changed_cells = np.sum(self.grid != self.prev_grid)
        #print(changed_cells)
        self.change_sequence.append(changed_cells)
        if (self.grid == prev_prev_grid).all():
            #print(f'Stabalized at {self.step_count}')
            return False
        return True

    def make_life(self, row, col, *strings):
        """Create grid squares beginning at position (row, col) using strings
        of '0'/'1' characters, one string per row."""
        for i, string in enumerate(strings):
            self.grid[row + i, col:(col + len(string))] = np.array(
                [int(char) for char in string])

    def draw(self):
        """Draw the grid."""
        g = self.grid.copy()
        cmap = plt.get_cmap('Greens')
        options = dict(interpolation='nearest', alpha=0.8, vmin=0, vmax=1, origin='upper')
        # Get width/height of grid for plotting
        x, y = g.shape
        plt.axis([0, x, 0, y])
        plt.xticks([])
        plt.yticks([])
        self.image = plt.imshow(g, cmap, **options)

    def run_until_stable(self):
        """Step until step() reports stability (capped at 10000 steps);
        return the number of steps taken.

        NOTE(review): the local name `stable` actually means "still
        changing" — step() returns False once stable.
        """
        stable = True
        step_count = 0
        while stable:
            step_count += 1
            stable = self.step()
            if step_count > 10000:
                break
        return step_count

    def flip_random_cell(self):
        """Toggle one randomly chosen cell (the perturbation in the
        self-organized-criticality experiment).

        NOTE(review): both coordinates draw from shape[0] — wrong for
        non-square grids; the column should use shape[1]. Confirm.
        """
        random_x = randint(0, self.grid.shape[0]-1)
        random_y = randint(0, self.grid.shape[0]-1)
        self.grid[random_x, random_y] = abs(self.grid[random_x, random_y] - 1)

    def run_n_iterations(self, n):
        """Run n perturb-then-relax cycles.

        Returns (steps-to-stability per cycle, per-step change counts).
        """
        steps = []
        for i in range(n):
            step_count = self.run_until_stable()
            self.flip_random_cell()
            steps.append(step_count)
        return steps, self.change_sequence

    def animate(self):
        """Return a matplotlib animation that advances the grid every 200 ms."""
        fig = plt.gcf()
        self.draw()
        a = animation.FuncAnimation(fig, self.animate_function,
                                    init_func=self.init_func,
                                    frames=1000,
                                    interval=200)
        return a

    def init_func(self):
        # Called once by FuncAnimation before the first frame.
        print('starting animation')

    def animate_function(self, i):
        # Frame 0 shows the initial grid; later frames advance one step.
        if i > 0:
            stable = self.step()
        self.image.set_array(self.grid)
        return (self.image,)
if __name__ == "__main__":
    size = 100
    life = Life(size)
    # Seed every cell with a random 0/1 state, one cell at a time.
    for i in range(size):
        for j in range(size):
            life.make_life(i,j,str(randint(0,1)))
life.run_n_iterations(500) | {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,479 | M-110/learning-python-complexity | refs/heads/master | /Self-Organizing Criticality/gol_soc_plot.py | from game_of_life_helper import Life
| {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,480 | M-110/learning-python-complexity | refs/heads/master | /Graphs/Old/regular_graph_with_odd_k.py | import networkx as nx
def make_odd_regular_graph(n, k):
    """
    Returns a graph with n nodes and each node having k neighbors.
    k must be odd
    n must be even
    k must be less than n
    """
    if k%2 == 0:
        raise ValueError('k must be odd')
    if n%2:
        raise ValueError('n must be even')
    if k > n:
        raise ValueError('k must be less than n')
    graph = nx.Graph()
    nodes = range(n)
    graph.add_nodes_from(nodes)
    # First pass: give every node exactly one edge (a perfect matching) —
    # each still-isolated node is paired with the first other isolated node.
    for x in graph:
        for y in range(n+1):
            y %= n
            if x == y:
                continue
            if len(graph[x]) < 1 and len(graph[y]) < 1:
                graph.add_edge(x, y)
    # Second pass: add ring-offset edges (offsets n//k .. n//k + k - 1)
    # until nodes reach degree k.
    # NOTE(review): it is not obvious this reaches exact k-regularity for
    # every valid (n, k) — verify before reuse.
    for i in range(n//k, n//k + k):
        for x in graph:
            if len(graph[x]) < k and len(graph[(x+i)%n]) < k:
                graph.add_edge(x, (x+i)%n)
    return graph
# Demo: build and draw an 8-node, degree-3 regular graph (runs on import).
graph = make_odd_regular_graph(8, 3)
nx.draw_circular(graph, node_color="pink", node_size=1000, with_labels=True)
| {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,481 | M-110/learning-python-complexity | refs/heads/master | /Agents/sugarscape.py | from typing import Tuple
from random import randint
from cell import Cell2D
import numpy as np
from nptyping import NDArray
import matplotlib.pyplot as plt
def distances_from(n: int, i: int, j: int) -> NDArray:
    """Return an n x n array of Euclidean distances from location (i, j)."""
    rows, cols = np.indices((n, n))
    return np.hypot(rows - i, cols - j)
def make_locs(n, m) -> NDArray:
    """Return an array of all [row, col] locations in an n x m grid."""
    pairs = [[i, j] for i in range(n) for j in range(m)]
    return np.array(pairs)
def find_visible_locs(vision: int) -> NDArray:
    """Offsets of the cells visible at distances 1..vision along the four
    cardinal directions, shuffled within each distance band."""
    def offsets_at(dist):
        block = np.array([[-dist, 0], [dist, 0], [0, -dist], [0, dist]])
        np.random.shuffle(block)
        return block
    bands = [offsets_at(d) for d in range(1, vision + 1)]
    return np.vstack(bands)
class Agent:
    """Sugarscape agent with attributes randomized within the parameters.

    Fix: `vision` was drawn with randint(1, max_vision + 1); randint is
    inclusive on both ends, so agents could exceed the stated maximum
    (inconsistent with `metabolism`, which uses randint(1, max)).  It is
    now drawn in [1, max_vision].
    """
    # Default parameters (overridable per-instance via the params dict).
    max_vision = 6
    max_metabolism = 4
    min_lifespan = 10_000
    max_lifespan = 10_000
    min_sugar = 5
    max_sugar = 25

    def __init__(self, loc: Tuple[int, int], env, params: dict):
        """loc: starting cell; env: owning Sugarscape; params: attribute overrides."""
        self.loc = tuple(loc)
        self.env = env
        self.age = 0
        # Update default parameters with any passed to init.
        for key, value in params.items():
            setattr(self, key, value)
        # randint is inclusive on both bounds.
        self.vision = randint(1, self.max_vision)
        self.metabolism = randint(1, self.max_metabolism)
        self.lifespan = randint(self.min_lifespan, self.max_lifespan)
        self.sugar = randint(self.min_sugar, self.max_sugar)

    def step(self, env):
        """Move within vision, harvest the new cell, pay metabolism, age.

        NOTE(review): the `env` argument is ignored — self.env is used.
        """
        self.loc = self.env.look_and_move(self.loc, self.vision)
        self.sugar += self.env.harvest(self.loc) - self.metabolism
        self.age += 1

    @property
    def is_starving(self) -> bool:
        # Ran out of sugar this tick.
        return self.sugar < 0

    @property
    def is_old(self) -> bool:
        # Exceeded its randomized lifespan.
        return self.age > self.lifespan
class Sugarscape(Cell2D):
    """Epstein & Axtell style sugarscape on an n x n torus."""
    def __init__(self, n, **params):
        self.n = n
        self.params = params
        # Population size recorded after each step.
        self.agent_count_seq = []
        self.capacity = self.make_capacity()
        self.grid = self.capacity.copy()
        self.make_agents()

    def make_capacity(self) -> NDArray:
        """Capacity map: two sugar peaks centred at (15, 15) and (35, 35)."""
        dist1 = distances_from(self.n, 15, 15)
        dist2 = distances_from(self.n, 35, 35)
        dist = np.minimum(dist1, dist2)
        # Capacity 0..4 by distance band from the nearer peak.
        bins = [21, 16, 11, 6]
        return np.digitize(dist, bins)

    def make_agents(self):
        """Scatter the starting agents over distinct random cells."""
        n, m = self.params.get('starting_box', self.grid.shape)
        locs = make_locs(n, m)
        np.random.shuffle(locs)
        num_agents = self.params.get('num_agents', 400)
        self.agents = [Agent(locs[i], self, self.params)
                       for i in range(num_agents)]
        # Cells currently occupied by an agent.
        self.occupied = {agent.loc for agent in self.agents}

    def grow(self):
        """Regrow sugar everywhere, capped by each cell's capacity."""
        grow_rate = self.params.get('grow_rate', 1)
        self.grid = np.minimum(self.grid + grow_rate, self.capacity)

    def look_and_move(self, center, vision) -> Tuple[int, int]:
        """Returns closest to center cell with the most sugar within vision
        distance.
        """
        locs = find_visible_locs(vision)
        # Wrap candidate cells around the torus.
        locs = (locs + center) % self.n
        locs = [tuple(loc) for loc in locs]
        empty_locs = [loc for loc in locs if loc not in self.occupied]
        if len(empty_locs) == 0:
            return center
        # argmax returns the first maximum; candidates are ordered
        # nearest-first, so ties favor the closest cell.
        t = [self.grid[loc] for loc in empty_locs]
        closest_i = np.argmax(t)
        return empty_locs[closest_i]

    def harvest(self, loc: Tuple[int, int]):
        """Removes and returns sugar for loc."""
        sugar = self.grid[loc]
        self.grid[loc] = 0
        return sugar

    def step(self):
        """One tick: move agents in random order, cull dead ones, regrow."""
        replace = self.params.get('replace', False)
        random_order = np.random.permutation(self.agents)
        for agent in random_order:
            self.occupied.remove(agent.loc)
            agent.step(self)
            if agent.is_starving or agent.is_old:
                self.agents.remove(agent)
                if replace:
                    self.add_agent()
            else:
                self.occupied.add(agent.loc)
        self.agent_count_seq.append(len(self.agents))
        self.grow()
        return len(self.agents)

    def add_agent(self) -> Agent:
        """Add an agent at a random unoccupied cell.

        NOTE(review): Agent takes (loc, env, params) — this call passes
        only two arguments and will raise TypeError when replace=True.
        """
        new_agent = Agent(self.random_loc(), self.params)
        self.agents.append(new_agent)
        self.occupied.add(new_agent.loc)
        return new_agent

    def random_loc(self) -> Tuple[int, int]:
        """Return a uniformly random unoccupied cell."""
        while True:
            loc = tuple(np.random.randint(self.n, size=2))
            if loc not in self.occupied:
                return loc

    def draw(self):
        """Plot agents as red dots over the sugar grid."""
        fig = plt.figure()
        x, y = self.get_coords()
        self.points = plt.plot(x, y, '.', color='red')[0]
        self.image = self.draw_grid(self.grid, origin='lower')
        return fig

    def get_coords(self) -> Tuple[int, int]:
        """Agent plot coordinates, offset by 0.5 to cell centres."""
        rows, cols = np.transpose([agent.loc for agent in self.agents])
        return cols + .5, rows + .5
# Demo: run a 50x50 sugarscape and save an animation (runs on import).
env = Sugarscape(50, num_agents=400)
env.save_gif('babycat')
| {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,482 | M-110/learning-python-complexity | refs/heads/master | /complexity/graphs/ring_lattice.py | """Create and save a ring lattice graph with 10 nodes and 3 degrees."""
from typing import Tuple, List
import networkx as nx
import matplotlib.pyplot as plt
class RingLattice:
    """A ring lattice: n nodes in a circle, each joined to its k nearest
    neighbours."""

    def __init__(self, n: int, k: int):
        self.graph = self.create_ring_lattice(n, k)

    def create_ring_lattice(self, n: int, k: int) -> nx.Graph:
        """Create a ring lattice with n nodes and k degrees."""
        nodes = tuple(range(n))
        lattice = nx.Graph()
        lattice.add_nodes_from(nodes)
        lattice.add_edges_from(self.adjacent_pairs(nodes, k))
        return lattice

    def adjacent_pairs(self, nodes: Tuple[int], k: int) -> List[Tuple[int, int]]:
        """List the edges joining each node to its next k // 2 neighbours
        around the ring (wrapping at the end)."""
        count = len(nodes)
        half = k // 2
        pairs = []
        for i, u in enumerate(nodes):
            for j in range(i + 1, i + 1 + half):
                pairs.append((u, nodes[j % count]))
        return pairs

    def print_graph(self, filename='', save=False):
        """Draw the lattice; save it to filename when save is True."""
        nx.draw_circular(self.graph, node_color='pink', node_size=1000, with_labels=True)
        if not save:
            plt.show()
        else:
            plt.savefig(filename)
            print(f'Saved graph as {filename!r}')
if __name__ == '__main__':
    # Build a 10-node, degree-4 lattice and save the drawing.
    ring_lattice = RingLattice(10, 4)
    ring_lattice.print_graph('ring_lattice.png', save=True)
| {"/complexity/graphs/random_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/graphs/random_m_graph.py": ["/complexity/graphs/complete_graph.py"], "/complexity/small_world_graphs/dijkstra.py": ["/complexity/graphs/ring_lattice.py"]} |
76,483 | M-110/learning-python-complexity | refs/heads/master | /complexity/game_of_life/game_of_life_steps.py | """A simple functional implementation of Conway's Game of Life."""
import numpy as np
from scipy.signal import correlate2d
def create_grid(x, y):
    """Return an x by y grid of uniformly random 0/1 cells (uint8)."""
    shape = (x, y)
    return np.random.randint(2, size=shape, dtype=np.uint8)
# Module-level demo grid. NOTE(review): created as a side effect on import.
grid = create_grid(10, 10)
def step_verbose(grid):
    """One Game of Life step without correlate2d.

    Returns the next generation as a new array; the input grid is not
    modified.  Border cells (first/last row and column) are left dead,
    matching the original loop bounds.

    Bug fixed: the computed `new_grid` was never returned — the
    function built the next generation and then discarded it.
    """
    new_grid = np.zeros_like(grid)
    # Iterate through each interior cell of the grid.
    for i in range(1, grid.shape[0] - 1):
        for j in range(1, grid.shape[1] - 1):
            current_state = grid[i, j]
            # 3x3 neighborhood including the cell itself.
            neighbors = grid[i - 1:i + 2, j - 1:j + 2]
            # Live-neighbor count, excluding the cell.
            k = np.sum(neighbors) - current_state
            if current_state:
                # Survival: 2 or 3 live neighbors.
                if k == 2 or k == 3:
                    new_grid[i, j] = 1
            else:
                # Birth: exactly 3 live neighbors.
                if k == 3:
                    new_grid[i, j] = 1
    return new_grid
def step(grid):
    """One Game of Life step using correlate2d.

    The kernel weights the centre cell by 10, so the correlation value
    encodes both the cell's state and its live-neighbor count:
    3 -> dead cell with 3 neighbors (birth); 12/13 -> live cell with
    2 or 3 neighbors (survival).  Returns the next generation (uint8).

    Bugs fixed: the kernel rows were passed as separate positional
    arguments to np.array (TypeError at runtime); the rule table was
    built but never used; and the result was never returned.
    """
    kernel = np.array([[1, 1, 1],
                       [1, 10, 1],
                       [1, 1, 1]])
    # Rule table indexed by correlation value (max possible value is 18).
    table = np.zeros(20, dtype=np.uint8)
    table[[3, 12, 13]] = 1
    # Cross-correlation of the grid with the kernel (zero-filled borders).
    correlation = correlate2d(grid, kernel, mode='same')
    return table[correlation]
76,504 | qingjiangshui/GotoX | refs/heads/master | /local/compat/__init__.py | # coding:utf-8
import os
import sys
from local.path import py_dir, packages

# This section adds the bundled dependency path; keep it in this position.
# The bundled Windows release Python already has it — skip re-adding.
if os.path.dirname(sys.executable) != py_dir:
    import glob
    # Appended last, so modules installed in the running Python take priority.
    sys.path.append(packages)
    sys.path.extend(glob.glob(os.path.join(packages, '*.egg')))

from local import clogging as logging
logging.replace_logging()
logging.addLevelName(15, 'TEST', logging.COLORS.GREEN)

try:
    import gevent
    import gevent.monkey
    gevent.monkey.patch_all(os=False, signal=False, subprocess=False, Event=True)
except ImportError:
    logging.warning('无法找到 gevent 或者与 Python 版本不匹配,请安装 gevent-1.0.0 以上版本,或将相应 .egg 放到 %r 文件夹!\n正在退出……', packages)
    sys.exit(-1)
except TypeError:
    # Older gevent lacks the Event keyword; retry with the minimal patch set.
    gevent.monkey.patch_all(os=False)
    logging.warning('警告:请更新 gevent 至 1.0.0 以上版本!')

try:
    import OpenSSL
except ImportError:
    logging.exception('无法找到 pyOpenSSL,请安装 pyOpenSSL-16.0.0 以上版本,或将相应 .egg 放到 %r 文件夹!\n正在退出……', packages)
    sys.exit(-1)

try:
    import dnslib
except ImportError:
    logging.error(u'无法找到 dnslib,请安装 dnslib-0.8.3 以上版本,或将相应 .egg 放到 %r 文件夹!', packages)
    sys.exit(-1)

_ver = sys.version_info
PY3 = _ver[0] == 3
#PY35 = PY3 and _ver[1] == 5
if not PY3:
    logging.error('请使用 Python 3 系列版本运行本程序!\n正在退出……')
    sys.exit(-1)

# Py3 module aliases kept under their Py2 names for the rest of the codebase.
import queue as Queue
import _thread as thread
import http.server as BaseHTTPServer
import http.client as httplib
import urllib.request as urllib2
import urllib.parse as urlparse
import socketserver as SocketServer
from configparser import RawConfigParser, ConfigParser

# Drop the default lower() so option names keep their case.
RawConfigParser.optionxform = lambda s, opt: opt
# Default config-file encoding to UTF-8.
_read = ConfigParser.read
ConfigParser.read = lambda s, f, encoding='utf8': _read(s, f, encoding)
# Classes in this project override __getattr__, which breaks the builtin
# hasattr.  NOTE(review): this rebinds the *builtin* name module-wide.
hasattr = lambda o, a: getattr(o, a, None) != None
| {"/local/compat/__init__.py": ["/local/path.py"], "/local/GAEUpdate.py": ["/local/path.py", "/local/compat/__init__.py", "/local/GAEFinder.py"], "/local/GAEFinder.py": ["/local/path.py", "/local/common/__init__.py", "/local/compat/openssl.py"], "/local/common/__init__.py": ["/local/compat/__init__.py"]} |
76,505 | qingjiangshui/GotoX | refs/heads/master | /local/compat/openssl.py | # coding:utf-8
'''OpenSSL Connection Wrapper'''
import socket
import errno
from OpenSSL import SSL
from select import select
zero_errno = errno.ECONNABORTED, errno.ECONNRESET, errno.ENOTSOCK
zero_EOF_error = -1, 'Unexpected EOF'
class SSLConnection:
'''API-compatibility wrapper for Python OpenSSL's Connection-class.'''
def __init__(self, context, sock):
self._context = context
self._sock = sock
self._connection = SSL.Connection(context, sock)
self._io_refs = 0
def __del__(self):
if self._sock:
self._sock.close()
self._sock = None
def __getattr__(self, attr):
return getattr(self._connection, attr)
def __iowait(self, io_func, *args, **kwargs):
timeout = self._sock.gettimeout()
fd = self._sock
while self._connection:
try:
return io_func(*args, **kwargs)
except (SSL.WantReadError, SSL.WantX509LookupError):
#exc_clear()
rd, _, ed = select([fd], [], [fd], timeout)
if ed:
raise socket.error(ed)
if not rd:
raise socket.timeout('The read operation timed out')
except SSL.WantWriteError:
#exc_clear()
_, wd, ed = select([], [fd], [fd], timeout)
if ed:
raise socket.error(ed)
if not wd:
raise socket.timeout('The write operation timed out')
except SSL.SysCallError as e:
if e.args[0] == errno.EWOULDBLOCK:
#exc_clear()
rd, wd, ed = select([fd], [fd], [fd], timeout)
if ed:
raise socket.error(ed)
if not rd and not wd:
raise socket.timeout('The socket operation timed out')
elif e.args[0] == errno.EAGAIN:
continue
else:
raise e
def accept(self):
sock, addr = self._sock.accept()
client = SSLConnection(self._context, sock)
client.set_accept_state()
return client, addr
def do_handshake(self):
self.__iowait(self._connection.do_handshake)
def connect(self, addr):
self.__iowait(self._connection.connect, addr)
def send(self, data, flags=0):
if data:
return self.__iowait(self._connection.send, data)
else:
return 0
write = send
def sendall(self, data, flags=0):
total_sent = 0
total_to_send = len(data)
if not hasattr(data, 'tobytes'):
data = memoryview(data)
while total_sent < total_to_send:
sent = self.send(data[total_sent:total_sent + 32768]) # 32K
total_sent += sent
def recv(self, bufsiz, flags=None):
pending = self._connection.pending()
if pending:
return self._connection.recv(min(pending, bufsiz))
try:
return self.__iowait(self._connection.recv, bufsiz, flags)
except SSL.ZeroReturnError as e:
if self._connection.get_shutdown() == SSL.RECEIVED_SHUTDOWN:
return b''
raise e
except SSL.SysCallError as e:
if e.args == zero_EOF_error:
return b''
elif e.args[0] in zero_errno:
return b''
raise e
read = recv
def recv_into(self, buffer, nbytes=None, flags=None):
pending = self._connection.pending()
if pending:
return self._connection.recv_into(buffer)
try:
return self.__iowait(self._connection.recv_into, buffer, nbytes, flags)
except SSL.ZeroReturnError as e:
if self._connection.get_shutdown() == SSL.RECEIVED_SHUTDOWN:
return 0
raise e
except SSL.SysCallError as e:
if e.args == zero_EOF_error:
return 0
elif e.args[0] in zero_errno:
return 0
raise e
readinto = recv_into
def close(self):
if self._io_refs < 1:
self._connection = None
if self._sock:
self._sock.close()
self._sock = None
else:
self._io_refs -= 1
#if PY3:
# def makefile(self, *args, **kwargs):
# return socket.socket.makefile(self, *args, **kwargs)
#else:
# def makefile(self, mode='r', bufsize=-1):
# self._io_refs += 1
# return socket._fileobject(self, mode, bufsize, close=True)
def makefile(self, *args, **kwargs):
return socket.socket.makefile(self, *args, **kwargs)
| {"/local/compat/__init__.py": ["/local/path.py"], "/local/GAEUpdate.py": ["/local/path.py", "/local/compat/__init__.py", "/local/GAEFinder.py"], "/local/GAEFinder.py": ["/local/path.py", "/local/common/__init__.py", "/local/compat/openssl.py"], "/local/common/__init__.py": ["/local/compat/__init__.py"]} |
76,506 | qingjiangshui/GotoX | refs/heads/master | /local/GAEUpdate.py | # coding:utf-8
'''Auto check and update GAE IP'''
import threading
import logging
from time import time, sleep, strftime
from .path import config_dir
from .compat import thread, Queue
from .GlobalConfig import GC
from .ProxyServer import network_test
from .HTTPUtil import http_gws
from .GAEFinder import (
g as finder,
g_comdomain as comdomain,
timeToDelay,
writebytes,
gae_finder,
getgaeip,
savestatistics,
#savebadlist
)
lLock = threading.Lock()
tLock = threading.Lock()
sLock = threading.Lock()
class testip:
    # Shared state for the IP-testing machinery (used as a namespace,
    # never instantiated).
    running = False                    # probe in progress flag (False/1/9)
    queobj = Queue.Queue()             # results from connection probes
    lastactive = lastupdate = time()   # last use / last list refresh
    lasttest = lastupdate - 30         # timestamp of the last probe
    # Pre-connect cache keys for the GAE front-end and plain gws services.
    cachekey = 'google_fe:443', 'google_gws|:443'
    ncachekey = 0                      # round-robin index into cachekey
def getcachekey():
    """Round-robin through testip.cachekey so successive probes alternate
    between the GAE front-end and the plain gws cache keys."""
    with sLock:
        testip.ncachekey += 1
        if testip.ncachekey >= len(testip.cachekey):
            testip.ncachekey = 0
    return testip.cachekey[testip.ncachekey]
def removeip(ip):
    """Drop `ip` from every google_* list in GC.IPLIST_MAP.

    Fix: catch only ValueError (raised by list.remove when the ip is
    absent) instead of a bare `except:` that would also swallow
    KeyboardInterrupt/SystemExit and real bugs.
    """
    with lLock:
        for name in GC.IPLIST_MAP:
            if name.startswith('google_'):
                try:
                    GC.IPLIST_MAP[name].remove(ip)
                except ValueError:
                    pass
def _refreship(gaeip):
    """Prepend the freshly found IPs to each matching list in GC.IPLIST_MAP
    (google_com is managed separately) and record the refresh time.

    Fix: the original compared strings with `is` (`name is 'google_com'`),
    which relies on CPython string interning and is not a guaranteed
    equality test; use `==`.
    """
    with lLock:
        for name in gaeip:
            if name == 'google_com':
                continue
            GC.IPLIST_MAP[name][:] = gaeip[name] + GC.IPLIST_MAP[name]
    testip.lastupdate = time()
# Header written at the top of the auto-maintained ip.use file.
ipuse_h = ('#coding: utf-8\n'
           '#此文件由 GotoX 自动维护,请不要修改。\n'
           '[iplist]\n').encode()

def refreship(needgws=None, needcom=None):
    """Worker thread: probe the network, find new GAE IPs, merge them into
    the live lists, and persist the lists to ip.use."""
    threading.current_thread().setName('Find GAE')
    # Check current connectivity, then search for new IPs.
    network_test()
    if needgws is None:
        needgws = countneedgws()
    if needcom is None:
        needcom = countneedcom()
    gaeip = getgaeip(GC.IPLIST_MAP['google_gws'], needgws, needcom)
    # Merge the newly found IPs into the live lists.
    if gaeip and gaeip['google_gws']:
        _refreship(gaeip)
        # Rewrite ip.use with the merged lists.
        with open(GC.CONFIG_IPDB, 'wb') as f:
            write = writebytes(f.write)
            f.write(ipuse_h)
            for name in gaeip:
                write(name)
                f.write(b' = ')
                write('|'.join(GC.IPLIST_MAP[name]))
                f.write(b'\n')
        logging.test('GAE IP 更新完毕')
    if len(GC.IPLIST_MAP['google_gws']) < GC.FINDER_MINIPCNT:
        logging.warning('没有检测到足够数量符合要求的 GAE IP,请重新设定参数!')
    # Refresh finished — clear the in-progress flag.
    updateip.running = False
def updateip(needgws=None, needcom=None):
    """Kick off an asynchronous IP refresh unless one is already running."""
    with tLock:
        if updateip.running:  # a refresh is already in flight
            return
        updateip.running = True
        thread.start_new_thread(refreship, (needgws, needcom))
    # NOTE(review): clearing the flag here, immediately after spawning
    # the worker, defeats the guard above — refreship() also clears it
    # when it finishes.  Looks unintentional; confirm before changing.
    updateip.running = False
# Base probe timeout in ms: at least 400, or 1.3x the finder's maximum.
timeoutb = max(GC.FINDER_MAXTIMEOUT*1.3, 400)

def gettimeout():
    """Probe timeout (ms): base + list-size bonus + hour-of-day delay."""
    timeout = timeoutb + min(len(GC.IPLIST_MAP['google_gws']), 20)*10 + timeToDelay[strftime('%H')]
    return int(timeout)
def countneedgws():
    """How many more gws IPs are needed to reach FINDER_MINIPCNT."""
    return max(GC.FINDER_MINIPCNT - len(GC.IPLIST_MAP['google_gws']), 0)

# Target size of the google_com list: a third of the minimum, clamped to [1, 5].
ipcomcnt = max(min(GC.FINDER_MINIPCNT//3, 5), 1)

def countneedcom():
    """How many more google_com IPs are needed to reach ipcomcnt."""
    return max(ipcomcnt - len(GC.IPLIST_MAP['google_com']), 0)
def testipuseable(ip):
    """Re-probe a single IP; if it no longer serves GAE, remove and block it.

    Returns True when the IP still serves GAE.
    """
    _, _, isgaeserver = gae_finder.getipinfo(ip)
    if not isgaeserver:
        removeip(ip)
        addtoblocklist(ip)
        logging.warning('IP:%r 暂时不可用,已经删除', ip)
    return isgaeserver
if GC.GAE_TESTGWSIPLIST:
def addtoblocklist(ip):
timesdel = finder.baddict[ip][2] if ip in finder.baddict else 0
finder.baddict[ip] = GC.FINDER_TIMESBLOCK+1, int(time()), timesdel+1
for ipdict in finder.statistics:
if ip in ipdict:
ipdict[ip] = -1, 0
#finder.reloadlist = True
def testallgaeip(force=False):
with tLock:
if updateip.running:
return
elif force:
if testip.running == 9:
return
while testip.running == 1:
sleep(0.1)
elif testip.running:
return
testip.running = 9
thread.start_new_thread(_testallgaeip, ())
return True
else:
def dummy(*args, **kwargs):
pass
addtoblocklist = testallgaeip = dummy
def _testallgaeip():
iplist = GC.IPLIST_MAP['google_gws']
if not iplist:
testip.running = False
return updateip()
badip = set()
timeout = gettimeout()
timeoutl = timeout + 1000
logging.test('连接测试开始,超时:%d 毫秒', timeout)
network_test()
testip.queobj.queue.clear()
for ip in iplist:
if ip in GC.IPLIST_MAP['google_com']:
_timeout = timeoutl
else:
_timeout = timeout
thread.start_new_thread(http_gws._create_ssl_connection, ((ip, 443), getcachekey(), None, testip.queobj, _timeout/1000))
for _ in iplist:
result = testip.queobj.get()
if isinstance(result, Exception):
ip = result.xip[0]
logging.warning('测试失败 %s:%s' % ('.'.join(x.rjust(3) for x in ip.split('.')), result.args[0]))
badip.add(ip)
else:
logging.test('测试连接 %s: %d' %('.'.join(x.rjust(3) for x in result[0].split('.')), int(result[1]*1000)))
#删除 bad IP
nbadip = len(badip)
if nbadip > 0:
for ip in badip:
removeip(ip)
logging.test('连接测试完毕%s', ',Bad IP 已删除' if nbadip > 0 else '')
testip.lastactive = testip.lasttest = time()
testip.running = False
#刷新开始
needgws = countneedgws()
needcom = countneedcom()
if needgws > 0 or needcom > 0:
updateip(needgws, needcom)
def testonegaeip():
with tLock:
if updateip.running or testip.running:
return
testip.running = 1
iplist = GC.IPLIST_MAP['google_gws']
if not iplist:
testip.running = False
return updateip()
ip = iplist[-1]
timeout = gettimeout()
if ip in GC.IPLIST_MAP['google_com'] and len(GC.IPLIST_MAP['google_com']) < len(iplist):
iplist.insert(0, iplist.pop())
testip.running = False
return
badip = False
statistics = finder.statistics
network_test()
testip.queobj.queue.clear()
http_gws._create_ssl_connection((ip, 443), getcachekey(), None, testip.queobj, timeout/1000)
result = testip.queobj.get()
if isinstance(result, Exception):
logging.warning('测试失败(超时:%d 毫秒)%s:%s,Bad IP 已删除' % (timeout, '.'.join(x.rjust(3) for x in ip.split('.')), result.args[0]))
removeip(ip)
badip = True
ipdict, ipdicttoday = statistics
if ip in ipdict:
good, bad = ipdict[ip]
#失败次数超出预期,设置 -1 表示删除
s = bad/max(good, 1)
if s > 2 or (s > 0.4 and bad > 10):
ipdict[ip] = ipdicttoday[ip] = -1, 0
else:
ipdict[ip] = good, bad + 1
if ip in ipdicttoday:
good, bad = ipdicttoday[ip]
else:
good = bad = 0
ipdicttoday[ip] = good, bad + 1
#加入统计
else:
ipdict[ip] = ipdicttoday[ip] = 0, 1
else:
logging.test('测试连接(超时:%d 毫秒)%s: %d' %(timeout, '.'.join(x.rjust(3) for x in result[0].split('.')), int(result[1]*1000)))
iplist.insert(0, iplist.pop())
#调高 com 权重
addn = 2 if ip in GC.IPLIST_MAP['google_com'] else 1
baddict = finder.baddict
for ipdict in statistics:
if ip in ipdict:
good, bad = ipdict[ip]
good += addn
ipdict[ip] = good, bad
#当天通过测试次数达到条件后重置容忍次数
if ipdict is statistics[1] and ip in baddict:
s = bad/max(good, 1)
if s < 0.1:
del baddict[ip]
#加入统计
else:
ipdict[ip] = addn, 0
savestatistics()
testip.lasttest = time()
testip.running = False
#刷新开始
needgws = countneedgws()
needcom = countneedcom()
if needgws > 0 or needcom > 0:
updateip(needgws, needcom)
elif badip:
testonegaeip()
def testipserver():
#启动时全部测一遍
iplist = GC.IPLIST_MAP['google_gws']
testallgaeip()
looptime = max(90, GC.GAE_KEEPTIME) + min(10, GC.FINDER_MINIPCNT) * 20
while True:
now = time()
lasttest = now - testip.lasttest
try:
if ((now - testip.lastactive > 6 or # X 秒钟未使用
lasttest > 30) and #强制 X 秒钟检测
#and not GC.PROXY_ENABLE #无代理
lasttest > looptime/(len(iplist) or 1)): #强制 x 秒间隔
testonegaeip()
except Exception as e:
logging.exception(' IP 测试守护线程错误:%r', e)
sleep(1)
def checkgooglecom():
def _checkgooglecom(lastcheck=None):
nonlocal ssldomain, costtime, isgaeserver
if isgaeserver and ssldomain == comdomain:
ssldomain = '*.google.com'
with lLock:
if ip not in google_com:
google_com.append(ip)
else:
with lLock:
try:
google_com.remove(ip)
except:
pass
log = logging.warning if lastcheck and not isgaeserver else logging.test
log('固定 GAE IP 列表检测,IP:%s,可用证书:%s,耗时:%d 毫秒,支持 GAE:%s',
ip, ssldomain, costtime, isgaeserver)
google_com = GC.IPLIST_MAP['google_com']
retrylist = []
retrytimes = 2
for ip in GC.IPLIST_MAP[GC.GAE_IPLIST]:
ssldomain, costtime, isgaeserver = gae_finder.getipinfo(ip)
_checkgooglecom()
if not isgaeserver and (ssldomain is None or 'google' in ssldomain):
retrylist.append(ip)
for i in range(retrytimes):
sleep(5)
for ip in retrylist.copy():
ssldomain, costtime, isgaeserver = gae_finder.getipinfo(ip, 3, 3, 4)
if i == retrytimes - 1:
_checkgooglecom(True)
else:
if isgaeserver:
retrylist.remove(ip)
_checkgooglecom()
countcom = len(google_com)
if countcom < 3:
logging.error('检测出固定 GAE IP 列表 [%s] 中包含的可用 GWS IP 数量过少:%d 个,请增加。', GC.GAE_IPLIST, countcom)
| {"/local/compat/__init__.py": ["/local/path.py"], "/local/GAEUpdate.py": ["/local/path.py", "/local/compat/__init__.py", "/local/GAEFinder.py"], "/local/GAEFinder.py": ["/local/path.py", "/local/common/__init__.py", "/local/compat/openssl.py"], "/local/common/__init__.py": ["/local/compat/__init__.py"]} |
76,507 | qingjiangshui/GotoX | refs/heads/master | /local/path.py | # coding:utf-8
import os
# Resolve symlinks so all paths are anchored at the real install location.
__file__ = os.path.abspath(__file__)
if os.path.islink(__file__):
    __file__ = getattr(os, 'readlink', lambda x: x)(__file__)
# The app root is the directory two levels above this module.
app_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
cert_dir = os.path.join(app_root, 'cert')
config_dir = os.path.join(app_root, 'config')
data_dir = os.path.join(app_root, 'data')
launcher_dir = os.path.join(app_root, 'launcher')
py_dir = os.path.join(app_root, 'python')
web_dir = os.path.join(app_root, 'web')
icon_gotox = os.path.join(app_root, 'gotox.ico')
# Bundled third-party packages live under python/site-packages.
packages = os.path.join(py_dir, 'site-packages')
| {"/local/compat/__init__.py": ["/local/path.py"], "/local/GAEUpdate.py": ["/local/path.py", "/local/compat/__init__.py", "/local/GAEFinder.py"], "/local/GAEFinder.py": ["/local/path.py", "/local/common/__init__.py", "/local/compat/openssl.py"], "/local/common/__init__.py": ["/local/compat/__init__.py"]} |
76,508 | qingjiangshui/GotoX | refs/heads/master | /local/clogging.py | # coding:utf-8
'''
A simple colorful logging class for console or terminal output.
'''
import sys, os, threading, time, traceback
__all__ = ['CRITICAL', 'DEBUG', 'ERROR', 'FATAL', 'INFO', 'NOTSET', 'WARN',
           'WARNING', 'COLORS', 'addLevelName', 'basicConfig', 'getLogger',
           'setLevel', 'disable', 'critical', 'debug', 'error', 'exception',
           'fatal', 'info', 'log', 'warn', 'warning']
# Numeric levels, mirroring the stdlib logging module.
CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0
# level -> canonical display name
_levelToName = {
    CRITICAL: 'CRITICAL',
    ERROR: 'ERROR',
    WARNING: 'WARNING',
    INFO: 'INFO',
    DEBUG: 'DEBUG',
    NOTSET: 'NOTSET',
}
# name (including aliases) -> level
_nameToLevel = {
    'CRITICAL' : CRITICAL,
    'FATAL' : CRITICAL,
    'ERROR' : ERROR,
    'WARN' : WARNING,
    'WARNING' : WARNING,
    'INFO' : INFO,
    'DEBUG' : DEBUG,
    'NOTSET' : NOTSET,
}
class _colors(object):
    """Console color table.

    Win32 console attribute codes on NT, ANSI escape strings on POSIX.
    Unknown attribute lookups fall back to DEFAULT so a missing level color
    never raises.
    """
    if os.name == 'nt':
        RESET = 0x07
        BLACK = 0x00
        RED = 0x0c
        GREEN = 0x02
        YELLOW = 0x06
        BLUE = 0x01
        MAGENTA = 0x05
        CYAN = 0x03
        SILVER = 0x07
        GRAY = 0x08
        WHITE = 0x0f
    elif os.name == 'posix':
        RESET = '\033[0m'
        BLACK = '\033[30m'
        RED = '\033[31m'
        GREEN = '\033[32m'
        YELLOW = '\033[33m'
        BLUE = '\033[34m'
        MAGENTA = '\033[35m'
        CYAN = '\033[36m'
        SILVER = '\033[37m'
        GRAY = '\033[1;30m'
        WHITE = '\033[1;37m'
    # Semantic aliases used by Logger.log().
    # NOTE(review): on a platform that is neither 'nt' nor 'posix' the base
    # names referenced below are undefined and this class body would raise.
    CRITICAL = RED
    ERROR = RED
    WARNING = YELLOW
    INFO = SILVER
    DEBUG = GREEN
    HEAD = CYAN
    DEFAULT = RESET
    def __getattr__(self, key):
        # Fall back to DEFAULT for unknown color names.
        try:
            return self.__getattribute__(key)
        except:
            return self.DEFAULT
    def __getitem__(self, key):
        return self.__getattr__(key)
    def __setitem__(self, key, value):
        self.__setattr__(key, value)
# Singleton palette plus module-wide registries.
COLORS = _colors()
_lock = threading.RLock()
_addedLevelNames = {}   # lowercase level name -> method factory (see addLevelName)
_handlerList = []       # apparently unused; kept for stdlib-logging API parity
def addLevelName(level, levelName, color=None, force=False):
    """Register a custom log level with an optional display color.

    Also injects module-level helpers: ``LEVELNAME`` (the int) and
    ``levelname(fmt, *args)`` bound to the root logger; per-logger methods
    are created lazily through Logger.__getattr__.
    """
    with _lock:
        levelName = levelName.upper()
        # NOTE(review): operator precedence means *force* only overrides a
        # numeric-level clash, never an existing name — confirm intent.
        if not force and level in _levelToName or levelName in _nameToLevel:
            return
        _levelToName[level] = levelName
        _nameToLevel[levelName] = level
        if color:
            COLORS[levelName] = color
        g = globals()
        g[levelName] = level
        def wrapper(logger):
            # Build a log method for *logger* fixed at this level.
            def wrap(fmt, *args, **kwargs):
                logger.log(level, fmt, *args, **kwargs)
            return wrap
        levelName = levelName.lower()
        _addedLevelNames[levelName] = wrapper
        g[levelName] = root.__getattr__(levelName)
def _checkLevel(level):
    """Normalize *level* to its numeric value; accepts numbers or known names."""
    with _lock:
        if isinstance(level, (int, float)):
            return level
        if str(level) == level:
            if level in _nameToLevel:
                return _nameToLevel[level]
            raise ValueError("Unknown level: %r" % level)
        raise TypeError("Level not an integer or a valid string: %r" % level)
def _write(msg, file=None, color=None, reset=None):
if file is None:
file = sys.stderr or sys.stdout
if file is None:
return
try:
colors = color and file in (sys.stderr, sys.stdout)
if colors:
_setColor(color)
file.write(msg)
if hasattr(file, 'flush'):
file.flush()
if colors and reset:
_setColor('RESET')
except OSError:
pass
# Pick a color backend: the Win32 console API on NT ttys, ANSI escapes on
# POSIX, and a no-op everywhere else.
if os.name == 'nt' and hasattr(sys.stderr, 'isatty') and sys.stderr.isatty():
    import ctypes
    _SetCTA = ctypes.windll.kernel32.SetConsoleTextAttribute
    _StdHandle = ctypes.windll.kernel32.GetStdHandle(-12)  # -12 == STD_ERROR_HANDLE
    _setColor = lambda color: _SetCTA(_StdHandle, COLORS[color])
elif os.name == 'posix':
    _setColor = lambda color: _write(COLORS[color])  # emit the ANSI escape
else:
    _setColor = lambda x: None
class Logger(object):
    """Named logger with per-name instance caching (mimics logging.getLogger)."""
    loggerDict = {}   # name -> cached Logger instance
    _disable = 0      # class-wide "disabled up to this level" threshold
    logName = True    # include the logger name in the message head
    stream = None     # apparently unused; kept for API parity
    def __new__(cls, name, level=NOTSET):
        # Return the cached instance for *name* when one exists.
        with _lock:
            if name in cls.loggerDict:
                return cls.loggerDict[name]
            else:
                self = super(Logger, cls).__new__(cls)
                cls.loggerDict[name] = self
                return self
    def __init__(self, name, level=NOTSET):
        # NOTE(review): __init__ also runs for cached instances returned by
        # __new__, resetting their level — confirm this is intended.
        self.name = name
        self.level = _checkLevel(level)
    def __getattr__(self, attr):
        # Lazily materialize log methods for levels added via addLevelName().
        try:
            return self.__getattribute__(attr)
        except Exception as e:
            try:
                log = _addedLevelNames[attr](self)
                self.__setattr__(attr, log)
                return log
            except:
                raise e
    @staticmethod
    def getLogger(name=None):
        """Return the named logger, or the root logger when *name* is falsy."""
        if name:
            return Logger(name)
        else:
            return root
    def setLevel(self, level):
        self.level = _checkLevel(level)
    def disable(self, level):
        # Per-instance disable threshold (checked alongside the class-wide one).
        self._disable = _checkLevel(level)
    def isEnabledFor(self, level):
        if self.__class__._disable >= level or self._disable >= level:
            return False
        return level >= self.level
    def log(self, level, fmt, *args, exc_info=None, **kwargs):
        """Emit ``fmt % args`` with a colored ``HH:MM:SS L name`` head.

        *exc_info* may be an exception instance, a (type, value, tb) tuple,
        or any other truthy value (meaning: use sys.exc_info()).
        """
        with _lock:
            if self.isEnabledFor(level):
                levelName = _levelToName[level]
                head = '%s %s ' % (time.strftime('%H:%M:%S'), levelName[0])
                if self.logName:
                    head = '%s%s ' % (head, self.name)
                _write(head, color='HEAD')
                _write('%s\n' % (fmt % args), color=levelName, reset=True)
                if exc_info:
                    if isinstance(exc_info, BaseException):
                        exc_info = (type(exc_info), exc_info, exc_info.__traceback__)
                    elif not isinstance(exc_info, tuple):
                        exc_info = sys.exc_info()
                    _write(''.join(traceback.format_exception(*exc_info)))
                    _write('\n')
    def debug(self, fmt, *args, **kwargs):
        self.log(DEBUG, fmt, *args, **kwargs)
    def info(self, fmt, *args, **kwargs):
        self.log(INFO, fmt, *args, **kwargs)
    def warning(self, fmt, *args, **kwargs):
        self.log(WARNING, fmt, *args, **kwargs)
    warn = warning
    def error(self, fmt, *args, **kwargs):
        self.log(ERROR, fmt, *args, **kwargs)
    def exception(self, fmt, *args, exc_info=True, **kwargs):
        # Like error(), but appends the current traceback by default.
        self.error(fmt, *args, exc_info=exc_info, **kwargs)
    def critical(self, fmt, *args, **kwargs):
        self.log(CRITICAL, fmt, *args, **kwargs)
    fatal = critical
def basicConfig(*args, **kwargs):
    """Partial stand-in for logging.basicConfig: only the *level* kwarg is honored."""
    warning('Unable to format, the only format is "%%H:%%M:%%S" + level code '
            '+ logger name in head.')
    warning('Use setLevel(level) to set output level.')
    level = kwargs.get('level', INFO)
    root.level = _checkLevel(level)
# Module-level convenience API mirroring the stdlib ``logging`` surface,
# all bound to the root logger.
getLogger = Logger.getLogger
root = Logger('root', WARNING)
root.logName = False
Logger.root = root
setLevel = root.setLevel
disable = root.disable
log = root.log
debug = root.debug
info = root.info
warning = warn = root.warning
error = root.error
exception = root.exception
critical = fatal = root.critical
def replace_logging():
    """Install this module as a drop-in replacement for stdlib ``logging``."""
    this_module = sys.modules[__name__]
    sys.modules['logging'] = this_module
| {"/local/compat/__init__.py": ["/local/path.py"], "/local/GAEUpdate.py": ["/local/path.py", "/local/compat/__init__.py", "/local/GAEFinder.py"], "/local/GAEFinder.py": ["/local/path.py", "/local/common/__init__.py", "/local/compat/openssl.py"], "/local/common/__init__.py": ["/local/compat/__init__.py"]} |
76,509 | qingjiangshui/GotoX | refs/heads/master | /local/GAEFinder.py | # coding:utf-8
'''
根据 checkgoogleip 代码重新编写整合到 GotoX
从一个较大的可用 GAE IP 列表中快速筛选优质 IP
'''
import os
import sys
import threading
import socket
import ssl
import select
import random
import OpenSSL
import logging
from shutil import copyfile
from time import time, localtime, strftime
from .path import cert_dir, data_dir
from .common import NetWorkIOError, isip, isipv4, isipv6
from .common.internet_active import internet_v4, internet_v6
from .compat.openssl import zero_EOF_error
from .ProxyServer import network_test
from .GlobalConfig import GC
exists = os.path.exists
# Module-wide read-only tuning data (values come from GlobalConfig).
# Maximum acceptable IP latency, in milliseconds.
g_maxhandletimeout = GC.FINDER_MAXTIMEOUT
# How many usable IPs a scan should yield.
g_maxgaeipcnt = GC.FINDER_MINIPCNT
# Maximum number of scanning threads.
g_maxthreads = GC.FINDER_MAXTHREADS
# Tolerated failures before temporarily blocking a bad IP.
g_timesblock = GC.FINDER_TIMESBLOCK
# Cumulative tolerated failures before permanent deletion.
g_timesdel = GC.FINDER_TIMESDEL
# Temporary-block duration, hours converted to seconds.
g_blocktime = GC.FINDER_BLOCKTIME * 3600
# Connection timeouts, in seconds.
g_timeout = 4
g_conntimeout = 1
g_handshaketimeout = 1.5
# IP-prefix blacklist (may be obsolete with the current probe method).
g_block = GC.FINDER_BLOCK #('74.125.', '173.194.', '203.208.', '113.171.')
# SNI host name used while scanning and the certificate CN it must match (paired).
g_servername = GC.FINDER_SERVERNAME
g_comdomain = GC.FINDER_COMDOMAIN
# Data files and their backups.
g_ipfile = os.path.join(data_dir, 'ip.txt')
g_ipfilebak = os.path.join(data_dir, 'ipbak.txt')
g_ipexfile = os.path.join(data_dir, 'ipex.txt')
g_ipexfilebak = os.path.join(data_dir, 'ipexbak.txt')
g_badfile = os.path.join(data_dir, 'ip_bad.txt')
g_badfilebak = os.path.join(data_dir, 'ip_badbak.txt')
g_delfile = os.path.join(data_dir, 'ip_del.txt')
g_delfilebak = os.path.join(data_dir, 'ip_delbak.txt')
g_statisticsfilebak = os.path.join(data_dir, 'statisticsbak')
# Extra latency allowance (ms) added per hour of day.
timeToDelay = {
    '01': 0, '09': 50, '17': 100,
    '02': 0, '10': 50, '18': 100,
    '03': 0, '11': 50, '19': 150,
    '04': 0, '12': 100, '20': 150,
    '05': 0, '13': 100, '21': 150,
    '06': 0, '14': 100, '22': 50,
    '07': 0, '15': 50, '23': 50,
    '08': 0, '16': 50, '00': 0
}
gLock = threading.Lock()
def PRINT(fmt, *args, **kwargs):
    """Log at the project's custom 'test' level, tagged with the worker thread name."""
    tname = threading.current_thread().name
    logging.test('[%s] %s' % (tname, fmt), *args, **kwargs)
def WARNING(fmt, *args, **kwargs):
    """Log low-priority diagnostics (debug level), tagged with the thread name."""
    tname = threading.current_thread().name
    logging.debug('[%s] %s' % (tname, fmt), *args, **kwargs)
def writebytes(write):
    """Wrap a binary *write* callable so it accepts ``str`` input.

    Returns a function that UTF-8-encodes its text argument before
    delegating to *write*.
    """
    def newwrite(text):  # renamed from ``str``, which shadowed the builtin
        write(text.encode())
    return newwrite
# Select an IP-family filter matching the configured link profile;
# ``ipnotuse(ip)`` is True for addresses that must be skipped.
if GC.LINK_PROFILE == 'ipv4':
    ipnotuse = lambda x: not isipv4(x)
elif GC.LINK_PROFILE == 'ipv6':
    ipnotuse = lambda x: not isipv6(x)
elif GC.LINK_PROFILE == 'ipv46':
    ipnotuse = lambda x: False
# NOTE(review): no fallback branch — an unexpected LINK_PROFILE value leaves
# ``ipnotuse`` undefined; confirm GC validates the setting upstream.
def readstatistics():
    """Load per-IP (good, bad) counters from the recent statistics files.

    Returns ``(ipdict, ipdicttoday)``: counters accumulated over the last
    FINDER_STATDAYS days, and a snapshot of just today's counters.
    """
    def getnames():
        # Build the statistics file names inside the retention window and
        # delete any statistics files that fall outside it (backup kept).
        now = time()
        names = []
        for i in range(GC.FINDER_STATDAYS):
            n = strftime('%y%j', localtime(now-3600*24*i))
            name = os.path.join(data_dir, 'statistics'+n)
            names.append(name)
        sfiles = [g_statisticsfilebak,]
        sfiles.extend(names)
        for file in os.listdir(data_dir):
            if file.startswith('statistics'):
                isdel = True
                for sfile in sfiles:
                    if sfile.endswith(file):
                        isdel = False
                        break
                if isdel:
                    os.remove(os.path.join(data_dir, file))
        return tuple(names)
    ipdict = {}
    ipdicttoday = None
    deledipset = set()
    delset = g.delset
    g.statisticsfiles = statisticsfiles = getnames()
    for file in statisticsfiles:
        if exists(file):
            with open(file, 'r') as fd:
                for line in fd:
                    try:
                        ip, good, bad = (x.strip() for x in line.split('*'))
                    except:
                        pass
                    else:
                        # Skip permanently banned or prefix-blocked IPs.
                        if ip in delset or ip.startswith(g_block):
                            continue
                        good = int(good)
                        bad = int(bad)
                        # A negative good count marks a deleted IP.
                        if good < 0:
                            deledipset.add(ip)
                        if ip in ipdict:
                            if ip in deledipset:
                                continue
                            cgood, cbad = ipdict[ip]
                            good += cgood
                            bad += cbad
                        ipdict[ip] = good, bad
        # Snapshot today's data after the first (newest) file.
        if ipdicttoday is None:
            ipdicttoday = ipdict.copy()
    return ipdict, ipdicttoday
def savestatistics(statistics=None):
    """Persist today's per-IP counters (best IPs first), backing up the old file."""
    statisticsfile = g.statisticsfiles[0]
    backupfile(statisticsfile, g_statisticsfilebak)
    statistics = statistics or g.statistics[1]
    statistics = [(ip, stats1, stats2) for ip, (stats1, stats2) in statistics.items()]
    # Best first: high good count and low bad count float to the top.
    statistics.sort(key=lambda x: -(x[1]+0.01)/(x[2]**2+0.1))
    with open(statisticsfile, 'wb') as f:
        write = writebytes(f.write)
        for ip, good, bad in statistics:
            write(str(ip).rjust(15))
            f.write(b' * ')
            write(str(good).rjust(3))
            f.write(b' * ')
            write(str(bad).rjust(3))
            f.write(b'\n')
# Read the candidate lists produced by checkgoogleip (ip.txt / ipex.txt).
def readiplist(otherset):
    """Reload/merge candidate IP lists, applying and persisting ban bookkeeping.

    *otherset* collects IPs to exclude (in use, good-listed, over-blocked).
    Returns ``(ipexlist, iplist, weaklist)`` as plain lists for the scanners.
    """
    now = time()
    otherset.update(g.goodlist)
    baddict = g.baddict
    delset = g.delset
    ipexset = g.ipexset
    ipset = g.ipset
    source_ipexset = g.source_ipexset
    source_ipset = g.source_ipset
    weakset = set()
    deledset = set()
    # Classify currently blocked IPs.
    # NOTE(review): ``is 0`` works only via CPython small-int caching and
    # emits a SyntaxWarning on modern Pythons; should be ``== 0``.
    for ip, (timesblock, blocktime, timesdel) in baddict.copy().items():
        if timesblock is 0:
            continue
        if timesdel > g_timesdel:
            # Cumulative tolerance exceeded: promote to permanent deletion.
            del baddict[ip]
            deledset.add(ip)
            continue
        if now - blocktime > g_blocktime:
            # Temporary block expired: reset the counter, keep the history.
            baddict[ip] = 0, 0, timesdel
            continue
        if timesblock > g_timesblock:
            # Still blocked: exclude from this scan.
            otherset.add(ip)
            continue
        if not ip.startswith(g_block):
            weakset.add(ip)
    # Load the extra candidate list (once per cache lifetime).
    if not ipexset and exists(g_ipexfile):
        with open(g_ipexfile, 'r') as fd:
            for line in fd:
                ip = line.strip()
                source_ipexset.add(ip)
                if not line.startswith(g_block):
                    ipexset.add(ip)
    # Load the main list and detect duplicate entries.
    if not ipset and exists(g_ipfile):
        source_ipcnt = 0
        with open(g_ipfile, 'r') as fd:
            for line in fd:
                source_ipcnt += 1
                ip = line.strip()
                source_ipset.add(ip)
                if not line.startswith(g_block):
                    ipset.add(ip)
        hasdupl = source_ipcnt - len(source_ipset)
        if hasdupl:
            PRINT('从主列表发现重复 IP,数量:%d。', hasdupl)
    else:
        hasdupl = False
    # Strip permanently banned IPs from the main list.
    if deledset:
        ipset -= deledset
        source_ipset -= deledset
    # Merge IPs that are new in the extra list into the main list.
    addset = source_ipexset - source_ipset
    if addset:
        ipset |= ipexset
        source_ipset |= addset
        PRINT('检测到新添加 IP,数量:%d。', len(addset))
        backupfile(g_ipfile, g_ipfilebak)
        # Newly (re)added IPs are un-banned.
        adddeledset = deledset & addset
        if adddeledset:
            deledset -= adddeledset
    # Remove banned IPs from the extra list and save it when it changed.
    deledexset = source_ipexset & deledset
    if deledexset:
        ipexset -= deledexset
        source_ipexset -= deledexset
        saveipexlist()
    # Apply / revoke permanent bans and persist the ban list when changed.
    issavedellist = False
    restoreset = delset & source_ipset
    if restoreset:
        delset -= restoreset
        issavedellist = True
        PRINT('检测到被撤销永久屏蔽的 IP,数量:%d。', len(restoreset))
    if deledset:
        delset |= deledset
        issavedellist = True
        logging.warning('检测到新的永久屏蔽 IP,数量:%d。', len(deledset))
    if issavedellist:
        savedellist()
        logging.warning('已保存永久屏蔽列表文件,数量:%d。', len(delset))
    # Persist the main list when its contents changed.
    source_ipsetlen = len(source_ipset)
    if source_ipsetlen < g_maxgaeipcnt:
        logging.warning('IP 列表文件 "%s" 包含 IP 过少,请添加。', g_ipfile)
    if hasdupl or deledset or addset:
        saveiplist()
        PRINT('已保存主列表文件,数量:%d。', source_ipsetlen)
    # Exclude blocked/in-use/good IPs; plain assignment keeps the cached
    # sets intact (augmented assignment would mutate them).
    ipexset = ipexset - otherset
    ipset = ipset - otherset
    ipset -= ipexset
    # Drop weak IPs left over from a different configuration.
    weakset = weakset - otherset
    weakset &= ipset
    ipset -= weakset
    g.halfweak = len(weakset)/2
    g.readtime = now
    return list(ipexset), list(ipset), list(weakset)
def saveipexlist(ipexset=None):
    """Write the extra IP list, preserving its mtime (the auto-removal clock)."""
    items = ipexset or g.source_ipexset
    savelist(items, g_ipexfile)
    mtime = g.ipexmtime
    os.utime(g_ipexfile, (mtime, mtime))
def saveiplist(ipset=None):
    """Write the main IP list and remember its new modification time."""
    items = ipset or g.source_ipset
    savelist(items, g_ipfile)
    g.ipmtime = os.path.getmtime(g_ipfile)
def savelist(items, file):
    """Write one entry per line (UTF-8 encoded) to *file*.

    The first parameter was named ``set``, shadowing the builtin; every
    call site in this module passes it positionally, so the rename is safe.
    """
    with open(file, 'wb') as f:
        write = writebytes(f.write)
        for ip in items:
            write(ip)
            f.write(b'\n')
def readbadlist():
    """Parse the bad-IP file into ``{ip: (timesblock, blocktime, timesdel)}``.

    Accepts both the current 4-field and the legacy 3-field line formats.
    The original code compared lengths with ``is``, which only works via
    CPython's small-int caching; use ``==``/``elif`` instead.
    """
    ipdict = {}
    if exists(g_badfile):
        with open(g_badfile, 'r') as fd:
            for line in fd:
                entry = line.strip().split('*')
                entrylen = len(entry)
                if entrylen == 4:
                    ip, timesblock, blocktime, timesdel = entry
                elif entrylen == 3:
                    # Legacy format lacks the cumulative-deletion counter.
                    ip, timesblock, blocktime = entry
                    timesdel = timesblock
                if entrylen > 2:
                    ipdict[ip] = int(timesblock), int(blocktime), int(timesdel)
    return ipdict
def savebadlist(baddict=None):
    """Back up then rewrite the bad-IP file as ``ip*timesblock*blocktime*timesdel`` lines."""
    baddict = baddict or g.baddict
    backupfile(g_badfile, g_badfilebak)
    with open(g_badfile, 'wb') as f:
        write = writebytes(f.write)
        for ip in baddict:
            timesblock, blocktime, timesdel = baddict[ip]
            write(ip)
            f.write(b'*')
            write(str(timesblock))
            f.write(b'*')
            write(str(blocktime))
            f.write(b'*')
            write(str(timesdel))
            f.write(b'\n')
def readdellist():
    """Load the permanently banned IP set from disk (empty set when absent)."""
    if not exists(g_delfile):
        return set()
    with open(g_delfile, 'r') as fd:
        return {line.strip() for line in fd}
def savedellist(delset=None):
    """Back up then rewrite the permanent-ban list."""
    entries = delset or g.delset
    backupfile(g_delfile, g_delfilebak)
    savelist(entries, g_delfile)
def backupfile(file, bakfile):
    """Move *file* onto *bakfile*, replacing any previous backup.

    Uses os.replace, which overwrites the destination atomically and avoids
    the remove-then-rename window of the original implementation (where a
    crash between the two steps lost the backup).
    """
    if exists(file):
        os.replace(file, bakfile)
def clearzerofile(file):
    """Delete *file* when it exists but holds no data."""
    if not exists(file):
        return
    if os.path.getsize(file) == 0:
        os.remove(file)
def makegoodlist(nowgaeset=()):
    """Rebuild g.goodlist from the statistics, excluding IPs in *nowgaeset*.

    Rolls the statistics files over first when the date has changed.
    """
    if not g.statisticsfiles[0].endswith(strftime('%y%j')):
        savestatistics()
        g.statistics = readstatistics()
    # Ordering by the statistics alone is enough for goodlist; baddict is
    # deliberately not used to exclude IPs here, otherwise heavy interference
    # could throw away too many usable IPs — it only filters unseen IPs
    # elsewhere to cut down on retries.
    statistics = g.statistics[0]
    statistics = [(ip, stats1, stats2) for ip, (stats1, stats2) in statistics.items() if ip not in nowgaeset and stats1 >= 0]
    # Sort worst-first (bad desc, good asc) so list.pop() yields the best IP.
    statistics.sort(key=lambda x: (x[1]+0.01)/(x[2]**2+0.1))
    g.goodlist = [ip[0] for ip in statistics]
# Mutable global state shared by the finder threads.
class g:
    """Namespace class holding the finder's shared mutable state."""
    ipexset = set()          # filtered extra candidates
    ipset = set()            # filtered main candidates
    source_ipexset = set()   # raw extra list as read from disk
    source_ipset = set()     # raw main list as read from disk
    running = False          # guards against concurrent getgaeip() runs
    #reloadlist = False
    ipmtime = 0              # mtime of ip.txt at last load
    ipexmtime = 0            # mtime of ipex.txt at last load
    baddict = readbadlist()  # ip -> (timesblock, blocktime, timesdel)
    delset = readdellist()   # permanently banned IPs
    ipexlist = []
    iplist = []
    weaklist = []
# Startup housekeeping: drop zero-byte list files, seed/restore backups,
# then build the initial statistics and good list.
clearzerofile(g_ipfile)
clearzerofile(g_ipfilebak)
clearzerofile(g_badfile)
clearzerofile(g_badfilebak)
# Back up the main list at startup...
if exists(g_ipfile):
    if not exists(g_ipfilebak):
        copyfile(g_ipfile, g_ipfilebak)
# ...or restore it from backup (startup only).
elif exists(g_ipfilebak):
    copyfile(g_ipfilebak, g_ipfile)
else:
    logging.error('未发现 IP 列表文件 "%s",请创建!', g_ipfile)
# Restore the bad list from backup (startup only).
if not exists(g_badfile) and exists(g_badfilebak):
    os.rename(g_badfilebak, g_badfile)
g.statistics = readstatistics()
makegoodlist()
from .HTTPUtil import http_gws
class GAE_Finder:
    """Probes a single IP to decide whether it fronts a usable GAE server."""
    # Minimal request used to fingerprint the server.
    httpreq = (
        b'HEAD / HTTP/1.1\r\n'
        b'Host: www.appspot.com\r\n'
        b'Connection: Close\r\n\r\n'
    )
    # A genuine GAE frontend answers with this redirect tail.
    redirect_res = (
        b'302 Found\r\n'
        b'Location: https://console.cloud.google.com/appengine'
    )
    def __init__(self):
        pass
    def getipinfo(self, ip, conntimeout=g_conntimeout, handshaketimeout=g_handshaketimeout, timeout=g_timeout, retry=None):
        """Return ``(ssl_domain, cost_ms, is_gae)`` for *ip*.

        ssl_domain is the certificate CN (None on failure); cost_ms is the
        total probe time in milliseconds; is_gae tells whether the server
        responded like a GAE frontend.
        """
        if ipnotuse(ip):
            return None, 0, False
        start_time = time()
        costtime = 0
        domain = None
        sock = None
        ssl_sock = None
        try:
            sock = socket.socket(socket.AF_INET if ':' not in ip else socket.AF_INET6)
            http_gws.set_tcp_socket(sock, set_buffer=False)
            ssl_sock = http_gws.get_ssl_socket(sock, g_servername)
            ssl_sock.settimeout(conntimeout)
            ssl_sock.connect((ip, 443))
            ssl_sock.settimeout(handshaketimeout)
            ssl_sock.do_handshake()
            ssl_sock.settimeout(timeout)
            handshaked_time = time() - start_time
            # Reject handshakes that only succeeded after the deadline.
            if handshaked_time > handshaketimeout:
                raise socket.error('handshake cost %dms timed out' % int(handshaked_time*1000))
            cert = http_gws.google_verify(ssl_sock)
            domain = cert.get_subject().CN
            if not domain:
                raise ssl.SSLError('%s 无法获取 commonName:%s ' % (ip, cert))
        except NetWorkIOError as e:
            # NOTE(review): if socket creation itself failed, ``sock`` is None
            # here and .close() would raise AttributeError — confirm upstream.
            sock.close()
            ssl_sock = None
            # Retry once on a zero-length-EOF handshake failure.
            if not retry and e.args == zero_EOF_error:
                return self.getipinfo(ip, conntimeout, handshaketimeout, timeout, True)
            WARNING('getipinfo %r', e)
        is_gae = self.check_gae_status(ssl_sock, sock, ip) if ssl_sock else False
        costtime = int((time()-start_time)*1000)
        return domain, costtime, is_gae
    def check_gae_status(self, conn, sock, ip):
        """Send the probe request and match the expected GAE redirect; always closes *sock*."""
        try:
            conn.send(self.httpreq)
            return conn.read(72)[-63:] == self.redirect_res
        except NetWorkIOError as e:
            WARNING('从 %s 获取服务器信息时发生错误:%r', ip, e)
        finally:
            sock.close()
gae_finder = GAE_Finder()
def runfinder(ip):
    """Probe one IP and update the shared state; return True when scanning may stop."""
    with gLock:
        # Re-verify connectivity every 50 probes so a dead network does not
        # poison the statistics.
        if g.testedcnt >= 50:
            network_test()
            g.testedcnt = 0
    ssldomain, costtime, isgaeserver = gae_finder.getipinfo(ip)
    statistics = g.statistics
    baddict = g.baddict
    with gLock:
        g.testedcnt += 1
        g.pingcnt -= 1
        remain = len(g.goodlist) + len(g.ipexlist) + len(g.iplist) + len(g.weaklist) + g.pingcnt
    # Usable GAE server?
    if isgaeserver:
        with gLock:
            g.testedok += 1
        if ip in baddict:  # reset a bad IP that had not hit its tolerance limit
            _, _, timesdel = baddict[ip]
            baddict[ip] = 0, 0, timesdel
        com = ssldomain == g_comdomain
        if com:
            ssldomain = '*.google.com'
            # NOTE(review): this early return skips the PRINT/quota updates
            # below for *.google.com matches — placement reconstructed from a
            # whitespace-mangled source; confirm against upstream history.
            return
        PRINT('剩余:%s,%s,%sms,%s', str(remain).rjust(4), ip.rjust(15),
              str(costtime).rjust(4), ssldomain)
        # Fast enough?
        if costtime < g.maxhandletimeout:
            g.gaelist.append((ip, costtime, com))
            with gLock:
                # Quota bookkeeping.
                g.needgwscnt -= 1
                if com:
                    g.needcomcnt -= 1
        else:
            # Keep as backup; a slow-but-valid IP loses one good point.
            g.gaelistbak.append((ip, costtime, com))
            for ipdict in statistics:
                if ip in ipdict:
                    good, bad = ipdict[ip]
                    ipdict[ip] = max(good - 1, 0), bad
    else:
        timesdel = 0
        if ip in baddict:  # bump the bad-IP tolerance counters
            timesblock, _, timesdel = baddict[ip]
            baddict[ip] = timesblock + 1, int(time()), timesdel + 1
        else:  # record when this IP was first seen as bad
            baddict[ip] = 1, int(time()), 1
        badb = baddict[ip][0]
        ipdict, ipdicttoday = statistics
        if ip in ipdict:
            # Cumulative tolerance exceeded: -1 marks deletion.
            if timesdel >= g_timesdel:
                ipdict[ip] = ipdicttoday[ip] = -1, 0
            else:
                good, bad = ipdict[ip]
                if good >= 0:
                    # Failure ratio exceeded: -1 marks deletion.
                    s = bad/max(good, 1)
                    if s > 2 or (s > 0.4 and bad > 10) or (s > 0.15 and badb > 10):
                        ipdict[ip] = ipdicttoday[ip] = -1, 0
                    else:
                        ipdict[ip] = max(good - 2, 0), bad + 1
                        if ip in ipdicttoday:
                            good, bad = ipdicttoday[ip]
                        else:
                            good = bad = 0
                        ipdicttoday[ip] = max(good - 2, 0), bad + 1
    # Stop after enough probes, or once both quotas are satisfied.
    if g.testedok > g.testok or g.needgwscnt < 1 and g.needcomcnt < 1:
        return True
class Finder(threading.Thread):
    """Worker thread: keeps probing random candidate IPs until told to stop."""
    def run(self):
        candidate = randomip()
        while candidate:
            # Only probe families whose internet connectivity is confirmed.
            usable = (internet_v6.last_stat and isipv6(candidate)) or \
                     (internet_v4.last_stat and isipv4(candidate))
            if usable:
                if runfinder(candidate):
                    break
            else:
                with gLock:
                    g.pingcnt -= 1
            candidate = randomip()
def _randomip(iplist):
cnt = len(iplist)
#a = random.randint(0, cnt - 1)
#b = int(random.random() * (cnt - 0.1))
#if random.random() > 0.7: #随机分布概率偏向较小数值
# n = max(a, b)
#else:
# n = min(a, b)
n = int(random.random() * (cnt - 0.1))
ip = iplist[n]
del iplist[n]
return ip
def randomip():
    """Return the next candidate IP, preferring the good list every 20 picks.

    Falls back through extra list, main list, remaining good list, then the
    weak list; returns None (and undoes the pingcnt bump) when exhausted.
    """
    with gLock:
        g.getgood += 1
        g.pingcnt += 1
        if g.goodlist and g.getgood >= 20:
            g.getgood = 0
            return g.goodlist.pop()
        for pool in (g.ipexlist, g.iplist):
            if pool:
                return _randomip(pool)
        if g.goodlist:
            return g.goodlist.pop()
        if g.weaklist:
            return _randomip(g.weaklist)
        g.pingcnt -= 1
def getgaeip(nowgaelist, needgwscnt, needcomcnt):
    """Run a full scan for GAE IPs until the quotas are met.

    nowgaelist: IPs already in use (excluded from the scan).
    needgwscnt / needcomcnt: how many gws / *.google.com IPs are wanted.
    Returns ``{'google_gws': [...], 'google_com': [...]}``, or None when a
    scan is already running or nothing is needed.
    """
    if g.running or needgwscnt == needcomcnt == 0:
        return
    g.running = True
    # Capture parameters.
    nowgaeset = set(nowgaelist)
    g.needgwscnt = needgwscnt
    g.needcomcnt = needcomcnt
    threads = min(needgwscnt + needcomcnt*2 + 1, g_maxthreads)
    # Rebuild the good list.
    makegoodlist(nowgaeset)
    # Invalidate cached lists when the files changed on disk.
    ipmtime = ipexmtime = 0
    if exists(g_ipfile):
        ipmtime = os.path.getmtime(g_ipfile)
        if ipmtime > g.ipmtime:
            copyfile(g_ipfile, g_ipfilebak)
            g.source_ipset.clear()
            g.ipset.clear()
    else:
        logging.error('未发现 IP 列表文件 "%s",请创建!', g_ipfile)
    if exists(g_ipexfile):
        ipexmtime = os.path.getmtime(g_ipexfile)
        if ipexmtime > g.ipexmtime:
            copyfile(g_ipexfile, g_ipexfilebak)
            g.source_ipexset.clear()
            g.ipexset.clear()
    now = time()
    if ipmtime > g.ipmtime or ipexmtime > g.ipexmtime:
        # IP lists were updated on disk: reload.
        g.ipmtime = ipmtime
        g.ipexmtime = ipexmtime
        g.ipexlist, g.iplist, g.weaklist = readiplist(nowgaeset)
    elif (len(g.weaklist) < g.halfweak or  # too many weak IPs consumed since last load
            now - g.readtime > 8*3600 or   # force a reload every n hours
            #g.reloadlist or
            len(g.ipexlist) == len(g.iplist) == len(g.weaklist) == 0):
        g.ipexlist, g.iplist, g.weaklist = readiplist(nowgaeset)
    if ipexmtime:
        passtime = now - ipexmtime
        # Remove the extra list two hours after exhaustion, twelve at latest.
        if passtime > 43200 or len(g.ipexlist) == 0 and passtime > 7200:
            os.remove(g_ipexfile)
            ipexmtime = now
    # Per-scan counters.
    g.getgood = 0
    g.gaelist = []
    g.gaelistbak = gaelistbak = []
    g.pingcnt = 0
    g.testedok = 0
    g.testedcnt = 0
    g.testok = max(needgwscnt * 8, g.needcomcnt * 16)
    g.maxhandletimeout = g_maxhandletimeout + timeToDelay[strftime('%H')]
    PRINT('==================== 开始查找 GAE IP ====================')
    PRINT('需要查找 IP 数:%d/%d,待检测 IP 数:%d + %d',
          needcomcnt,
          max(needgwscnt, needcomcnt),
          len(g.ipexlist) + len(g.iplist) + len(g.weaklist),
          len(g.goodlist))
    # Spawn the worker threads and wait for all of them.
    # NOTE: setDaemon/setName are deprecated aliases on modern Pythons.
    threadiplist = []
    for i in range(threads):
        ping_thread = Finder()
        ping_thread.setDaemon(True)
        ping_thread.setName('Finder%s' % str(i+1).rjust(2, '0'))
        ping_thread.start()
        threadiplist.append(ping_thread)
    for p in threadiplist:
        p.join()
    # Persist the outcome.
    savebadlist()
    savestatistics()
    # Top up missing entries from the backup list, *.google.com first.
    m = int(g.needcomcnt)
    if m > 0 and gaelistbak:
        gaelistbak.sort(key=lambda x: (-x[2], x[1]))
        comlistbak = gaelistbak[:m]
        gaelistbak = gaelistbak[m:]
        g.gaelist.extend(comlistbak)
        m = len(comlistbak)
    else:
        m = 0
    n = int(g.needgwscnt - m)
    if n > 0 and gaelistbak:
        gaelistbak.sort(key=lambda x: x[1])
        gwslistbak = gaelistbak[:n]
        g.gaelist.extend(gwslistbak)
        n = len(gwslistbak)
    else:
        n = 0
    gaelist = {'google_gws':[], 'google_com':[]}
    for ip in g.gaelist:
        gaelist['google_gws'].append(ip[0])
        if ip[2]:
            gaelist['google_com'].append(ip[0])
    if m > 0 or n > 0:
        PRINT('未找到足够的优质 GAE IP,添加 %d 个备选 IP:\n %s',
              m + n,
              ' | '.join(gaelist['google_gws']))
    else:
        PRINT('已经找到 %d 个新的优质 GAE IP:\n %s',
              len(gaelist['google_gws']),
              ' | '.join(gaelist['google_gws']))
    PRINT('==================== GAE IP 查找完毕 ====================')
    g.running = False
    return gaelist
| {"/local/compat/__init__.py": ["/local/path.py"], "/local/GAEUpdate.py": ["/local/path.py", "/local/compat/__init__.py", "/local/GAEFinder.py"], "/local/GAEFinder.py": ["/local/path.py", "/local/common/__init__.py", "/local/compat/openssl.py"], "/local/common/__init__.py": ["/local/compat/__init__.py"]} |
76,510 | qingjiangshui/GotoX | refs/heads/master | /local/common/__init__.py | # coding:utf-8
import os
import sys
import re
import ssl
import errno
import socket
import string
import threading
import collections
import ipaddress
import logging
import OpenSSL
from time import time, sleep
from local.compat import thread
# Exception tuple treated as an ordinary network error throughout the project;
# OpenSSL errors are included only when pyOpenSSL imported successfully.
NetWorkIOError = (socket.error, ssl.SSLError, OSError, OpenSSL.SSL.Error) if OpenSSL else (socket.error, ssl.SSLError, OSError)
# Windows: errno.WSAENAMETOOLONG = 10063
reset_errno = errno.ECONNRESET, 10063, errno.ENAMETOOLONG
closed_errno = errno.ECONNABORTED, errno.ECONNRESET, errno.EPIPE
pass_errno = -1, errno.ECONNABORTED, errno.ECONNRESET, errno.EPIPE
# Sentinel for "no key given" (None may be a legitimate key).
NONEKEY = object()
class LRUCache:
# Modified from http://pypi.python.org/pypi/lru/
#最近最少使用缓存,支持过期时间设置
failobj = object()
def __init__(self, max_items, expire=None):
self.cache = {}
self.max_items = int(max_items)
self.expire = expire
self.key_expire = {}
self.key_noexpire = set()
self.key_order = collections.deque()
self.lock = threading.Lock()
if expire:
thread.start_new_thread(self._cleanup, ())
def __delitem__(self, key):
with self.lock:
if key in self.cache:
self.key_order.remove(key)
if key in self.key_expire:
del self.key_expire[key]
if key in self.key_noexpire:
del self.key_noexpire[key]
del self.cache[key]
else:
raise KeyError(key)
def __setitem__(self, key, value):
self.set(key, value)
def __getitem__(self, key):
value = self.get(key, self.failobj)
if value is self.failobj:
raise KeyError(key)
else:
return value
def __contains__(self, key):
with self.lock:
self._expire_check(key)
return key in self.cache
def __len__(self):
with self.lock:
return len(self.key_order)
def set(self, key, value, expire=False, noexpire=False):
with self.lock:
if noexpire:
expire = False
self.key_noexpire.add(key)
elif key in self.key_noexpire:
expire = False
else:
expire = expire or self.expire
if expire:
self.key_expire[key] = int(time()) + expire
elif key in self.key_expire:
del self.key_expire[key]
self._mark(key)
self.cache[key] = value
def get(self, key, value=None):
with self.lock:
self._expire_check(key)
if key in self.cache:
self._mark(key)
return self.cache[key]
else:
return value
def getstate(self, key):
with self.lock:
contains = key in self.cache
value = self.cache.get(key)
self._expire_check(key)
expired = key not in self.cache
return contains, expired, value
def pop(self, key=NONEKEY):
with self.lock:
if key is not NONEKEY:
self._expire_check(key)
if key in self.cache:
self._mark(key)
value = self.cache[key]
self.key_order.remove(key)
if key in self.key_expire:
del self.key_expire[key]
if key in self.key_noexpire:
del self.key_noexpire[key]
del self.cache[key]
return value
else:
raise KeyError(key)
#未指明 key 时不检查抛出项是否过期,慎用!
#返回元组 (key, value)
if self.key_order:
key = self.key_order.pop()
value = self.cache[key]
if key in self.key_noexpire:
del self.key_noexpire[key]
if key in self.key_expire:
del self.key_expire[key]
del self.cache[key]
return key, value
else:
raise IndexError('pop from empty LRUCache')
def _expire_check(self, key):
key_expire = self.key_expire
if key in key_expire:
now = int(time())
timeleft = key_expire[key] - now
if timeleft <= 0:
self.key_order.remove(key)
del key_expire[key]
del self.cache[key]
elif timeleft < 8:
#为可能存在的紧接的调用保持足够的反应时间
key_expire[key] = now + 8
def _mark(self, key):
key_order = self.key_order
cache = self.cache
if key in cache:
key_order.remove(key)
key_order.appendleft(key)
while len(key_order) > self.max_items:
key = key_order.pop()
if key in self.key_noexpire:
key_order.appendleft(key)
else:
if key in self.key_expire:
del self.key_expire[key]
del cache[key]
def _cleanup(self):
#按每秒一个的频率循环检查并清除靠后的 l/m 个项目中的过期项目
lock = self.lock
key_order = self.key_order
key_expire = self.key_expire
key_noexpire = self.key_noexpire
cache = self.cache
max_items = self.max_items
m = 4
n = 1
while True:
sleep(1)
with lock:
l = len(key_order)
if l:
if l // m < n:
n = 1
key = key_order[-n]
if key in key_noexpire:
del key_order[-n]
key_order.appendleft(key)
elif key_expire[key] <= int(time()):
del key_order[-n]
del key_expire[key]
del cache[key]
n += 1
def clear(self):
with self.lock:
self.cache.clear()
self.key_expire.clear()
self.key_noexpire.clear()
self.key_order.clear()
class LimiterEmpty(OSError):
    """Raised by Limiter.pop() when no slot is held (non-blocking or timed out)."""
    pass
class LimiterFull(OSError):
    """Raised by Limiter.push() when all slots are taken (non-blocking or timed out)."""
    pass
class Limiter:
    'A queue.Queue-like class use for count and limit.'
    # Counts abstract slots: push() acquires one (blocks or raises when
    # maxsize is reached), pop() releases one (blocks or raises when none
    # are held).
    def __init__(self, maxsize=1):
        if maxsize < 1:
            raise ValueError('The maxsize can not be less than 1.')
        self.maxsize = maxsize
        self.mutex = threading.Lock()
        # Two conditions sharing one mutex, mirroring queue.Queue.
        self.not_empty = threading.Condition(self.mutex)
        self.not_full = threading.Condition(self.mutex)
        self.__qsize = 0
    def qsize(self):
        with self.mutex:
            return self.__qsize
    def empty(self):
        with self.mutex:
            return not self.__qsize
    def full(self):
        with self.mutex:
            return self.maxsize <= self.__qsize
    def push(self, block=True, timeout=None):
        """Acquire one slot; raises LimiterFull when non-blocking or timed out."""
        with self.not_full:
            if self.maxsize > 0:
                if not block:
                    if self.__qsize >= self.maxsize:
                        raise LimiterFull(-1)
                elif timeout is None:
                    while self.__qsize >= self.maxsize:
                        self.not_full.wait()
                elif timeout < 0:
                    raise ValueError("'timeout' must be a non-negative number")
                else:
                    endtime = time() + timeout
                    while self.__qsize >= self.maxsize:
                        remaining = endtime - time()
                        if remaining <= 0.0:
                            raise LimiterFull(-1)
                        self.not_full.wait(remaining)
            self.__qsize += 1
            self.not_empty.notify()
    def pop(self, block=True, timeout=None):
        """Release one slot; raises LimiterEmpty when non-blocking or timed out."""
        with self.not_empty:
            if not block:
                if self.__qsize <= 0:
                    raise LimiterEmpty(-1)
            elif timeout is None:
                while self.__qsize <= 0:
                    self.not_empty.wait()
            elif timeout < 0:
                raise ValueError("'timeout' must be a non-negative number")
            else:
                endtime = time() + timeout
                while self.__qsize <= 0:
                    remaining = endtime - time()
                    if remaining <= 0.0:
                        raise LimiterEmpty(-1)
                    self.not_empty.wait(remaining)
            self.__qsize -= 1
            self.not_full.notify()
# HTML page template for proxy-generated messages; the placeholders $title,
# $banner and $detail are filled in by message_html() below.
MESSAGE_TEMPLATE = '''
<html><head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<title>$title</title>
<style><!--
body {font-family: arial,sans-serif}
div.nav {margin-top: 1ex}
div.nav A {font-size: 10pt; font-family: arial,sans-serif}
span.nav {font-size: 10pt; font-family: arial,sans-serif; font-weight: bold}
div.nav A,span.big {font-size: 12pt; color: #0000cc}
div.nav A {font-size: 10pt; color: black}
A.l:link {color: #6f6f6f}
A.u:link {color: green}
//--></style>
</head>
<body text=#000000 bgcolor=#ffffff>
<table border=0 cellpadding=2 cellspacing=0 width=100%>
<tr><td bgcolor=#3366cc><font face=arial,sans-serif color=#ffffff><b>Message</b></td></tr>
<tr><td> </td></tr></table>
<blockquote>
<H1>$banner</H1>
$detail
<p>
</blockquote>
<table width=100% cellpadding=0 cellspacing=0><tr><td bgcolor=#3366cc><img alt="" width=1 height=4></td></tr></table>
</body></html>
'''
# Rebind to the substitute method so callers can invoke it directly.
MESSAGE_TEMPLATE = string.Template(MESSAGE_TEMPLATE).substitute
def message_html(title, banner, detail=''):
    """Fill the generic HTML message template."""
    page = MESSAGE_TEMPLATE(title=title, banner=banner, detail=detail)
    return page
import random
# Building blocks for random_hostname(): character classes (consonants,
# vowels, digits), a weighted class-selection table favouring letters, and
# pools of common subdomains and generic TLDs.
dchars = ['bcdfghjklmnpqrstvwxyz', 'aeiou', '0123456789']
pchars = [0, 0, 0, 1, 2, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1]
subds = [
    'www', 'img', 'pic', 'js', 'game', 'mail', 'static', 'ajax', 'video', 'lib',
    'login', 'player', 'image', 'api', 'upload', 'download', 'cdnjs', 'cc', 's',
    'book', 'v', 'service', 'web', 'forum', 'bbs', 'news', 'home', 'wiki', 'it'
]
gtlds = ['org', 'com', 'net', 'gov', 'edu', 'xyz','info']
def random_hostname(wildcard_host=None):
    """Generate a plausible random hostname.

    With a wildcard pattern such as ``*.example.com`` the ``*`` is replaced
    by a random word; a ``{N}`` marker in the pattern fixes the word length.
    Without a pattern a full ``sub.word.tld`` name is fabricated.
    """
    replace_wildcard = wildcard_host and '*' in wildcard_host
    if replace_wildcard and '{' in wildcard_host:
        # NOTE(review): if parsing the ``{N}`` marker fails, word_length is
        # left unbound and the loop below would raise — confirm inputs are
        # always well-formed.
        try:
            a = wildcard_host.find('{')
            b = wildcard_host.find('}')
            word_length = int(wildcard_host[a + 1:b])
            wildcard_host = wildcard_host[:a] + wildcard_host[b + 1:]
        except:
            pass
    else:
        word_length = random.randint(5, 12)
    # Per-class budgets so the word keeps a letter-heavy composition.
    maxcl = word_length * 2 // 3 or 1
    maxcv = word_length // 2 or 1
    maxd = word_length // 6
    chars = []
    for _ in range(word_length):
        # Redraw until a class with remaining budget comes up.
        while True:
            n = random.choice(pchars)
            if n == 0 and maxcl:
                maxcl -= 1
                break
            elif n == 1 and maxcv:
                maxcv -= 1
                break
            elif n == 2 and maxd:
                maxd -= 1
                break
        chars.append(random.choice(dchars[n]))
    random.shuffle(chars)
    # Occasionally place a hyphen inside longer words.
    if word_length > 7 and not random.randrange(3):
        if replace_wildcard:
            if '-' not in wildcard_host:
                chars[random.randint(5, word_length - 4)] = '-'
        else:
            chars.insert(random.randint(5, word_length - 3), '-')
    sld = ''.join(chars)
    if replace_wildcard:
        return wildcard_host.replace('*', sld)
    else:
        subd = random.choice(subds)
        gtld = random.choice(gtlds)
        return '.'.join((subd, sld, gtld))
def isip(ip):
    """Dispatch to the v4 or v6 checker based on the separator present."""
    if '.' in ip:
        return isipv4(ip)
    if ':' in ip:
        return isipv6(ip)
    return False
def isipv4(ip, inet_aton=socket.inet_aton):
    """Return True when *ip* is a dotted IPv4 literal accepted by inet_aton.

    The bare ``except`` of the original is narrowed to OSError, the error
    inet_aton raises for an invalid address string.
    """
    if '.' not in ip:
        return False
    try:
        inet_aton(ip)
    except OSError:
        return False
    else:
        return True
def isipv6(ip, AF_INET6=socket.AF_INET6, inet_pton=socket.inet_pton):
    """Return True when *ip* is a valid IPv6 literal (must contain ':').

    The bare ``except`` of the original is narrowed to OSError, the error
    inet_pton raises for an invalid address string.
    """
    if ':' not in ip:
        return False
    try:
        inet_pton(AF_INET6, ip)
    except OSError:
        return False
    else:
        return True
#isipv4 = re.compile(r'^(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}$').match
#isipv6 = re.compile(r'^(?!:[^:]|.*::.*::)'
# r'(?:[0-9a-f]{0,4}(?:(?<=::)|(?<!::):)){7}'
# r'([0-9a-f]{1,4}'
# r'|(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})$', re.I).match
def get_parent_domain(host):
    """Strip the left-most label from *host* when it looks like a sub-domain.

    IP literals and names that already look registrable (e.g. "a.b.cc"
    with a 2-char TLD and short second-level label) are returned whole.
    """
    if isip(host):
        return host
    labels = host.split('.')
    count = len(labels)
    if count > 3 or (count == 3 and (len(labels[-1]) > 2 or len(labels[-2]) > 3)):
        return '.'.join(labels[1:])
    return host
def get_main_domain(host):
    """Reduce *host* to its registrable ("main") domain.

    IP literals are returned unchanged.  A final label longer than two
    characters (e.g. "com") means two labels suffice; otherwise
    ("com.cn"-style) three labels are kept unless the second-level label
    is longer than three characters.
    """
    if isip(host):
        return host
    labels = host.split('.')
    if len(labels[-1]) > 2:
        return '.'.join(labels[-2:])
    if len(labels) > 2:
        keep = 2 if len(labels[-2]) > 3 else 3
        return '.'.join(labels[-keep:])
    return host
from local.GlobalConfig import GC
from local.compat import urllib2

# A bare urllib2 opener used for direct (non-proxied) requests: only the
# basic HTTP(S), redirect and error-processing handlers are installed.
direct_opener = urllib2.OpenerDirector()
handler_names = ['UnknownHandler', 'HTTPHandler', 'HTTPSHandler',
                 'HTTPDefaultErrorHandler', 'HTTPRedirectHandler',
                 'HTTPErrorProcessor']
for handler_name in handler_names:
    # getattr with a None default: a handler missing from the compat
    # urllib2 shim is skipped instead of failing at import time.
    klass = getattr(urllib2, handler_name, None)
    if klass:
        direct_opener.add_handler(klass())
def get_wan_ipv4():
    """Query the configured IP-echo APIs and return our public IPv4, if any.

    Tries each URL in GC.DNS_IP_API in order; returns the first response
    that parses as an IP address, else falls through and logs a warning.
    """
    for url in GC.DNS_IP_API:
        response = None
        try:
            response = direct_opener.open(url, timeout=10)
            content = response.read().decode().strip()
        except:
            # Network/decode failure for this API: try the next one.
            continue
        finally:
            if response:
                response.close()
        if isip(content):
            logging.test('当前 IPv4 公网出口 IP 是:%s', content)
            return content
    logging.warning('获取 IPv4 公网出口 IP 失败,请增加更多的 IP-API')
def get_wan_ipv6():
    """Best-effort discovery of our global IPv6 address.

    Connecting a UDP socket sends no packet; it only asks the kernel to
    pick the outgoing source address for the route.  Returns an
    IPv6Address when it is global (or a Teredo mapping), else None.
    """
    sock = None
    try:
        sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
        sock.connect(('2001:4860:4860::8888', 80))
        addr6 = ipaddress.IPv6Address(sock.getsockname()[0])
        return addr6 if (addr6.is_global or addr6.teredo) else None
    except:
        # No IPv6 connectivity (or unparsable local address): report none.
        return None
    finally:
        if sock:
            sock.close()
# A list subclass with no extra behaviour — presumably used as a marker
# type so callers can distinguish these lists via isinstance; confirm
# against its users elsewhere in the project.
class classlist(list): pass
def spawn_later(seconds, target, *args, **kwargs):
    """Run target(*args, **kwargs) once on a new thread after *seconds*.

    Exceptions from *target* are logged as warnings, never propagated.
    """
    def delayed(*a, **kw):
        sleep(seconds)
        try:
            target(*a, **kw)
        except Exception as e:
            logging.warning('%s.%s 错误:%s', target.__module__, target.__name__, e)
    thread.start_new_thread(delayed, args, kwargs)
def spawn_loop(seconds, target, *args, **kwargs):
    """Run target(*args, **kwargs) forever on a new thread.

    Sleeps *seconds* before every call; exceptions from *target* are
    logged as warnings and the loop continues.
    """
    def looper(*a, **kw):
        while True:
            sleep(seconds)
            try:
                target(*a, **kw)
            except Exception as e:
                logging.warning('%s.%s 错误:%s', target.__module__, target.__name__, e)
    thread.start_new_thread(looper, args, kwargs)
| {"/local/compat/__init__.py": ["/local/path.py"], "/local/GAEUpdate.py": ["/local/path.py", "/local/compat/__init__.py", "/local/GAEFinder.py"], "/local/GAEFinder.py": ["/local/path.py", "/local/common/__init__.py", "/local/compat/openssl.py"], "/local/common/__init__.py": ["/local/compat/__init__.py"]} |
76,514 | cabbagewww/keras-MVCNN | refs/heads/main | /input.py | import cv2
import random
import numpy as np
import time
import globals as g_
# Edge length in pixels every view image is resized to before cropping.
W = H = 256
class Shape:
    """One multi-view sample: a class label plus V rendered view images.

    The list file format is: label on line 1, view count V on line 2,
    then one image path per remaining line.
    """

    def __init__(self, list_file):
        with open(list_file) as f:
            self.label = int(f.readline())
            self.V = int(f.readline())
            view_files = [line.strip() for line in f.readlines()]
        self.views = self._load_views(view_files, self.V)
        self.done_mean = False

    def _load_views(self, view_files, V):
        """Read and resize every view; returns a float32 array (V, W, H, 3)."""
        loaded = []
        for path in view_files:
            im = cv2.imread(path)
            im = cv2.resize(im, (W, H))
            # Inputs are expected to already be 3-channel BGR.
            assert im.shape == (W,H,3), 'BGR!'
            loaded.append(im.astype('float32'))
        return np.asarray(loaded)

    def subtract_mean(self):
        """Subtract the per-channel BGR mean in place, at most once."""
        if self.done_mean:
            return
        mean_bgr = (104., 116., 122.)
        for channel, mean in enumerate(mean_bgr):
            self.views[:, :, :, channel] -= mean
        self.done_mean = True

    def crop_center(self, size=(224, 224)):
        """Center-crop all views to *size* (227x227 for AlexNet; 224x224 for VGG-M/ResNet)."""
        dim1, dim2 = self.views.shape[1], self.views.shape[2]
        crop1, crop2 = size
        start1 = int(dim1 / 2 - crop1 / 2)
        start2 = int(dim2 / 2 - crop2 / 2)
        self.views = self.views[:, start1:start1 + crop1, start2:start2 + crop2, :]
class Dataset:
    """Iterates multi-view shape samples listed in per-shape list files.

    :param listfiles: paths of Shape list files (see Shape)
    :param labels: integer class label per list file
    :param subtract_mean: subtract the BGR channel mean from every view
    :param V: number of views per shape
    """

    def __init__(self, listfiles, labels, subtract_mean, V):
        self.listfiles = listfiles
        self.labels = labels
        self.shuffled = False
        self.subtract_mean = subtract_mean
        self.V = V
        print ('dataset inited')
        print (' total size:', len(listfiles))

    def shuffle(self):
        """Shuffle listfiles and labels together, keeping them aligned."""
        z = list(zip(self.listfiles, self.labels))
        random.shuffle(z)
        self.listfiles, self.labels = [list(l) for l in zip(*z)]
        self.shuffled = True

    def _batches(self, batch_size):
        """Yield (x, y) batches.

        x has shape (b, V, 224, 224, 3) and y shape (b,), where b equals
        batch_size except possibly for the final, shorter batch.
        """
        listfiles = self.listfiles
        n = len(listfiles)
        for i in range(0, n, batch_size):
            starttime = time.time()
            lists = listfiles[i : i+batch_size]
            # Size the arrays to the actual slice length: the original code
            # always allocated batch_size rows, so a short final batch was
            # padded with all-zero images labelled 0, corrupting metrics.
            b = len(lists)
            # VGG-M / ResNet input size (AlexNet would use 227x227).
            x = np.zeros((b, self.V, 224, 224, 3))
            y = np.zeros(b)
            for j, l in enumerate(lists):
                s = Shape(l)
                s.crop_center()
                if self.subtract_mean:
                    s.subtract_mean()
                x[j, ...] = s.views
                y[j] = s.label
            # print ('load batch time:', time.time()-starttime, 'sec')
            yield x, y

    def size(self):
        """ size of listfiles (if splitted, only count 'train', not 'val')"""
        return len(self.listfiles)
76,515 | cabbagewww/keras-MVCNN | refs/heads/main | /globals.py | NUM_CLASSES = 40
NUM_VIEWS = 12                               # rendered views per shape
TRAIN_LOL = 'data_sample/train_lists.txt'    # list-of-lists file: training
VAL_LOL = 'data_sample/val_lists.txt'        # list-of-lists file: validation
TEST_LOL = 'data_sample/test_lists.txt'      # list-of-lists file: testing
MODEL_PATH = 'model/mvcnn_model.h5'          # where the best checkpoint is saved
BATCH_SIZE = 4
INIT_LEARNING_RATE = 0.0001
VAL_PERIOD = 400                             # validate every N training steps
76,516 | cabbagewww/keras-MVCNN | refs/heads/main | /re_test.py | import numpy as np
import tensorflow as tf
import time
from datetime import datetime
import os
from tensorflow import keras
from input import Dataset
import globals as g_
import re_model
import sklearn.metrics as metrics
from tensorflow.keras.models import load_model
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def read_lists(list_of_lists_file):
    """Parse a "list of lists" file: one "<listfile_path> <label>" pair per line.

    :return: (listfiles, labels) as parallel tuples
    """
    # ndmin=2 keeps a single-line file as a 1-row 2-D array; without it,
    # np.loadtxt returns a 1-D array and the comprehension below would
    # iterate over the two strings' characters instead of over rows.
    listfile_labels = np.loadtxt(list_of_lists_file, dtype=str, ndmin=2).tolist()
    listfiles, labels = zip(*[(l[0], int(l[1])) for l in listfile_labels])
    return listfiles, labels
def data_load():
    """Build and return the test Dataset described by g_.TEST_LOL."""
    started = time.time()
    print ('start loading data')
    listfiles_test, labels_test = read_lists(g_.TEST_LOL)
    dataset_test = Dataset(listfiles_test, labels_test, subtract_mean=False, V=g_.NUM_VIEWS)
    print ('done loading data, time=', time.time() - started)
    return dataset_test
def test(dataset_test):
    """Evaluate the saved MVCNN checkpoint on *dataset_test*, printing loss/accuracy."""
    print ('test() called')
    V = g_.NUM_VIEWS
    save_path = g_.MODEL_PATH
    batch__size = g_.BATCH_SIZE
    dataset_test.shuffle()
    data_size = dataset_test.size()
    print ('testing size:', data_size)
    # model = re_model.MVCNN_model((227, 227, 3),V,g_.NUM_CLASSES)
    # compile=False loads weights only; optimizer/loss are attached by hand below.
    model = load_model(save_path, custom_objects={'tf': tf},compile=False)
    model.optimizer = keras.optimizers.Adam(lr=g_.INIT_LEARNING_RATE,decay=0.1)
    model.loss_func = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    steps = 0
    save_acc = 0
    predictions = np.array([])   # all predicted class ids, in batch order
    test_losses = []             # per-batch mean losses
    test_y =[]                   # all ground-truth labels, in batch order
    for batch_x,batch_y in dataset_test._batches(batch__size):
        # (batch, V, H, W, C) -> (V, batch, H, W, C): the model takes a
        # list of V per-view tensors, one per input branch.
        x = tf.transpose(batch_x, perm=[1, 0, 2, 3, 4])
        x = tf.cast(x,dtype=tf.float32)
        t_x = []
        for j in range(V):
            t_x.append(tf.gather(x, j))
        pred = model(t_x)
        softmax = tf.nn.softmax(pred)
        test_pred = tf.argmax(softmax,1)
        test_loss = model.loss_func(batch_y,pred)
        test_losses.append(test_loss)
        predictions = np.hstack((predictions, test_pred))
        test_y.extend(batch_y)
        test_acc = metrics.accuracy_score(batch_y,np.array(test_pred))
        print('loss = %.4f,acc = %.4f'%(test_loss,test_acc))
    test_loss = np.mean(test_losses)
    # NOTE(review): test_y is truncated to predictions' length defensively —
    # presumably guarding against a padded final batch; confirm both always align.
    acc = metrics.accuracy_score(test_y[:predictions.size], np.array(predictions))
    print ('%s: test loss=%.4f, test acc=%f' %(datetime.now(), test_loss, acc*100.))
76,517 | cabbagewww/keras-MVCNN | refs/heads/main | /re_model.py | import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Input,Flatten, Dense,Dropout,Lambda
from tensorflow.keras.models import Model
# from tensorflow.keras.applications.resnet50 import ResNet50
import numpy as np
def reshape(a):
    """Flatten tensor *a* to 2-D (batch, features), keeping the batch dimension."""
    dim = np.prod(a.get_shape().as_list()[1:])
    return tf.reshape(a, [-1, dim])
def f1(a):
    """Prepend a length-1 view axis so per-view tensors can be concatenated on axis 0."""
    return tf.expand_dims(a, 0)
def f2(a):
    """View pooling: element-wise max across the view axis (axis 0)."""
    return tf.reduce_max(a, [0], name='view_pool')
def MVCNN_model(views_shape,n_views, n_classes):
    """Build the MVCNN: a shared CNN trunk per view, max view-pooling, FC head.

    :param views_shape: per-view input shape, e.g. (224, 224, 3)
    :param n_views: number of views, i.e. number of model inputs
    :param n_classes: size of the output logits (no softmax; train with
        from_logits=True)
    :return: keras Model taking a list of n_views input tensors
    """
    # AlexNet trunk (alternative):
    # digit_input = Input(shape=views_shape)
    # conv1 = Conv2D(96,(11,11),strides=(4,4),padding='valid',activation='relu',use_bias=True)(digit_input)
    # pool1 = MaxPooling2D(pool_size=(3,3),strides=(2,2),padding='valid')(conv1)
    # conv2 = Conv2D(256,(5,5),strides=(1,1),padding='same',activation='relu',use_bias=True)(pool1)
    # pool2 = MaxPooling2D(pool_size=(3,3),strides=(2,2),padding='valid')(conv2)
    # conv3 = Conv2D(384,(3,3),padding='same',activation='relu',use_bias=True)(pool2)
    # conv4 = Conv2D(384,(3,3),padding='same',activation='relu',use_bias=True)(conv3)
    # conv5 = Conv2D(256,(3,3),padding='same',activation='relu',use_bias=True)(conv4)
    # pool5 = MaxPooling2D(pool_size=(3,3),strides=(2,2),padding='valid')(conv5)
    #VGG-M
    digit_input = Input(shape=views_shape)
    conv1 = Conv2D(96,(7,7),strides=(2,2),padding='valid',activation='relu',use_bias=True)(digit_input)
    pool1 = MaxPooling2D(pool_size=(3,3),strides=(2,2),padding='valid')(conv1)
    conv2 = Conv2D(256,(5,5),strides=(2,2),padding='same',activation='relu',use_bias=True)(pool1)
    pool2 = MaxPooling2D(pool_size=(3,3),strides=(2,2),padding='valid')(conv2)
    conv3 = Conv2D(512,(3,3),padding='same',activation='relu',use_bias=True)(pool2)
    conv4 = Conv2D(512,(3,3),padding='same',activation='relu',use_bias=True)(conv3)
    conv5 = Conv2D(512,(3,3),padding='same',activation='relu',use_bias=True)(conv4)
    pool5 = MaxPooling2D(pool_size=(3,3),strides=(2,2),padding='valid')(conv5)
    #ResNet trunk (alternative):
    # digit_input = Input(shape=views_shape)
    # pool5 = ResNet50(include_top=False,input_shape=views_shape)(digit_input)
    # Shared trunk applied to every view.  This line was commented out in
    # the original, which made the CNN1_model reference below a NameError.
    CNN1_model = Model(digit_input,pool5)
    digit_inputs = []
    for i in range(n_views):
        digit_input = Input(shape=views_shape)
        digit_inputs.append(digit_input)
        # Same trunk instance for every view -> weights are shared.
        CNN_out = CNN1_model(digit_inputs[i])
        CNN_out = Lambda(reshape)(CNN_out)
        if i == 0:
            CNN_all = Lambda(f1)(CNN_out)
        else:
            cnn_out = Lambda(f1)(CNN_out)
            CNN_all = keras.layers.concatenate([CNN_all,cnn_out],axis=0)
    # Element-wise max over the view axis ("view pooling").
    pool_vp = Lambda(f2)(CNN_all)
    dense1 = Dense(1024,activation='relu',use_bias=True)(pool_vp)
    dropout1 = Dropout(0.5)(dense1)
    dense2 = Dense(512,activation='relu',use_bias=True)(dropout1)
    dropout2 = Dropout(0.5)(dense2)
    out_put = Dense(n_classes,activation=None,use_bias=True)(dropout2)
    model_out = Model(digit_inputs, out_put)
    return model_out
76,518 | cabbagewww/keras-MVCNN | refs/heads/main | /re_train.py | import numpy as np
import tensorflow as tf
import time
from datetime import datetime
import os
from tensorflow import keras
from input import Dataset
import globals as g_
import re_model
import sklearn.metrics as metrics
import matplotlib.pyplot as plt
from tensorflow.keras.models import load_model
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def train(dataset_train,dataset_val):
    """Train the MVCNN on *dataset_train*, validating on *dataset_val*.

    Runs 40 epochs with a custom GradientTape loop; every g_.VAL_PERIOD
    steps the model is evaluated on the validation set and saved to
    g_.MODEL_PATH whenever validation accuracy improves.  Loss/accuracy
    curves are plotted at the end.
    """
    print ('train() called')
    V = g_.NUM_VIEWS
    save_path = g_.MODEL_PATH
    batch__size = g_.BATCH_SIZE
    dataset_train.shuffle()
    dataset_val.shuffle()
    data_size = dataset_train.size()
    print ('training size:', data_size)
    model = re_model.MVCNN_model((224, 224, 3),V,g_.NUM_CLASSES)
    model.optimizer = keras.optimizers.Adam(lr=g_.INIT_LEARNING_RATE,decay=0.1)
    model.loss_func = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    # model.summary()
    steps = 0
    save_acc = 0          # best validation accuracy seen so far
    all_val_loss = []
    all_val_acc = []
    all_epo_loss = []
    all_epo_acc = []
    for i in range(40):
        print('epoch:',i)
        all_pred = np.array([])
        all_losses = []
        all_y =[]
        for x,y in dataset_train._batches(batch__size):
            # (batch, V, H, W, C) -> (V, batch, H, W, C): the model takes a
            # list of V per-view tensors, one per input branch.
            x = tf.transpose(x, perm=[1, 0, 2, 3, 4])
            x = tf.cast(x,dtype=tf.float32)
            t_x = []
            for j in range(V):
                t_x.append(tf.gather(x, j))
            with tf.GradientTape() as tape:
                pred = model(t_x)
                loss = model.loss_func(y,pred)
            gradients = tape.gradient(loss,model.trainable_variables)
            model.optimizer.apply_gradients(zip(gradients,model.trainable_variables))
            steps += 1
            pred = tf.nn.softmax(pred)
            pred = tf.argmax(pred,1)
            all_losses.append(loss)
            all_pred = np.hstack((all_pred, pred))
            all_y.extend(y)
            acc = metrics.accuracy_score(y,np.array(pred))
            print('loss=%.4f , acc=%.4f'%(loss,acc*100))
            # validation
            if steps % g_.VAL_PERIOD == 0:
                val_losses = []
                predictions = np.array([])
                val_y = []
                for val_batch_x,val_batch_y in dataset_val._batches(batch__size):
                    val_batch_x = tf.transpose(val_batch_x, perm=[1, 0, 2, 3, 4])
                    val_batch_x = tf.cast(val_batch_x,dtype=tf.float32)
                    v_x = []
                    for j in range(V):
                        v_x.append(tf.gather(val_batch_x, j))
                    pred = model(v_x)
                    softmax = tf.nn.softmax(pred)
                    val_pred = tf.argmax(softmax,1)
                    val_loss = model.loss_func(val_batch_y,pred)
                    val_losses.append(val_loss)
                    predictions = np.hstack((predictions, val_pred))
                    val_y.extend(val_batch_y)
                val_loss = np.mean(val_losses)
                all_val_loss.append(val_loss)
                acc = metrics.accuracy_score(val_y[:predictions.size], np.array(predictions))
                all_val_acc.append(acc)
                print ('%s: step %d, val loss=%.4f, val acc=%f' %(datetime.now(), steps, val_loss, acc*100.))
                # Save the checkpoint whenever validation accuracy improves.
                # (The original never saved the very first checkpoint, so a
                # run whose accuracy never improved produced no model file.)
                if save_acc == 0 or save_acc < acc:
                    save_acc = acc
                    model.save(save_path)
                    print('model已保存,val_acc=%.4f'%save_acc)
        mean_loss = np.mean(all_losses)
        all_epo_loss.append(mean_loss)
        mean_acc = metrics.accuracy_score(all_y[:all_pred.size],np.array(all_pred))
        all_epo_acc.append(mean_acc)
        print('epoch %d: mean_loss=%.4f, mean_acc=%.4f'%(i,mean_loss,mean_acc*100))
    # Plot the loss/accuracy curves.  The x axes are sized to the data
    # actually collected (the original hard-coded np.arange(1, 80), which
    # raises unless exactly 79 validation points were recorded).
    x = np.arange(1, len(all_val_acc) + 1)
    y = np.array(all_val_acc)
    plt.plot(x,y,label="val_acc")
    plt.legend()
    plt.xlabel("step")
    plt.ylabel("acc")
    plt.show()
    x = np.arange(1, len(all_epo_acc) + 1)
    y = np.array(all_epo_acc)
    plt.plot(x,y,label="epo_acc")
    plt.legend()
    plt.xlabel("epoch")
    plt.ylabel("acc")
    plt.show()
def read_lists(list_of_lists_file):
    """Parse a "list of lists" file: one "<listfile_path> <label>" pair per line.

    :return: (listfiles, labels) as parallel tuples
    """
    # ndmin=2 keeps a single-line file as a 1-row 2-D array; without it,
    # np.loadtxt returns a 1-D array and the comprehension below would
    # iterate over the two strings' characters instead of over rows.
    listfile_labels = np.loadtxt(list_of_lists_file, dtype=str, ndmin=2).tolist()
    listfiles, labels = zip(*[(l[0], int(l[1])) for l in listfile_labels])
    return listfiles, labels
def data_load():
    """Load and return the training and validation Datasets named in globals."""
    started = time.time()
    print ('start loading data')
    train_files, train_labels = read_lists(g_.TRAIN_LOL)
    val_files, val_labels = read_lists(g_.VAL_LOL)
    dataset_train = Dataset(train_files, train_labels, subtract_mean=False, V=g_.NUM_VIEWS)
    dataset_val = Dataset(val_files, val_labels, subtract_mean=False, V=g_.NUM_VIEWS)
    print ('done loading data, time=', time.time() - started)
    return dataset_train,dataset_val
76,519 | cabbagewww/keras-MVCNN | refs/heads/main | /make_label.py | import cv2
import os
import random
def txt_make(path,save_path):
    """Generate one Shape list txt per object under save_path/{train,test}/<class>/.

    Each txt contains: class index, view count, then one view-image path
    per line.  Views of the same object are grouped by their filename
    prefix (the name with its last 8 characters stripped — presumably a
    "_NNN.ext" suffix; confirm against the dataset layout).
    """
    listfiles = os.listdir(path)
    if '.ipynb_checkpoints' in listfiles:
        listfiles.remove('.ipynb_checkpoints')
    for i,j in enumerate(listfiles):
        for t_t in ('train','test'):
            # Output folder for this class/split; create it if missing.
            view_s_path = save_path+'/'+t_t+'/'+j
            isExists = os.path.exists(view_s_path)
            if not isExists:
                os.makedirs(view_s_path)
                print(view_s_path+' 文件夹创建成功')
            view_files = path+'/'+j+'/'+t_t
            view_lists = os.listdir(view_files)
            if '.ipynb_checkpoints' in view_lists:
                view_lists.remove('.ipynb_checkpoints')
            txt_files = os.listdir(view_s_path)
            # Object name per view: filename minus its last 8 characters.
            view_n = []
            for index,view_list in enumerate(view_lists):
                view_name = view_list[:-8]
                view_n.append(view_name)
            num_ = []
            txt_num = 0
            # Consume view_n group by group: take the first remaining name,
            # collect the indices of all views sharing it, write one txt.
            while len(view_n) != 0:
                view_name = view_n[0]
                num_ = [index for index,view_num in enumerate(view_n) if view_num == view_name]
                txt_path = view_s_path+'/'+view_name+'.txt'
                with open(txt_path,'w') as f:
                    f.write(str(i)+'\n')          # class index
                    f.write(str(len(num_))+'\n')  # number of views
                txt_ = open (txt_path,'a+')
                sub_ = 0
                for num in num_:
                    # NOTE(review): num-sub_ compensates for the removals
                    # below shifting later indices left — this assumes
                    # view_n and view_lists stay index-aligned; verify.
                    view_ = view_lists[num-sub_]
                    txt_.write(view_files+'/'+view_+'\n')
                    view_n.remove(view_name)
                    view_lists.remove(view_)
                    sub_ += 1
                txt_.close()
                txt_num += 1
            print(view_s_path+'里的txt文件制作成功','txt文件数量:',txt_num)
    print('txt创建成功')
def txt_write(file_path,txt_files,s_p,t_t):
    """Append "<txt_path> <class_index>" lines to the list-of-lists files.

    For t_t == 'train' every per-shape txt goes to train_lists.txt;
    otherwise the shuffled txts are split half/half between
    val_lists.txt and test_lists.txt.
    """
    if '.ipynb_checkpoints' in txt_files:
        txt_files.remove('.ipynb_checkpoints')
    for class_idx, class_dir in enumerate(txt_files):
        txt_f_path = file_path+'/'+class_dir
        txt_lists = os.listdir(txt_f_path)
        if '.ipynb_checkpoints' in txt_lists:
            txt_lists.remove('.ipynb_checkpoints')
        if t_t == 'train':
            with open(s_p+'train_lists.txt','a+') as out:
                for entry in txt_lists:
                    if entry != '.ipynb_checkpoints':
                        out.write(txt_f_path+'/'+entry+' '+str(class_idx)+'\n')
        else:
            total = len(txt_lists)
            random.shuffle(txt_lists)
            half = int(total/2)
            with open(s_p+'val_lists.txt','a+') as out:
                for k in range(0, half):
                    out.write(txt_f_path+'/'+txt_lists[k]+' '+str(class_idx)+'\n')
            with open(s_p+'test_lists.txt','a+') as out:
                for k in range(half, total):
                    out.write(txt_f_path+'/'+txt_lists[k]+' '+str(class_idx)+'\n')
76,524 | asmallleaf/DoorAppTest | refs/heads/master | /app/route/usrapi/signin.py | from app_2_0_0.app.route.usrapi.login import usrapi
from app_2_0_0.app.support import dbSupportTool,verifySupportTool
from app_2_0_0.app.support.verifySupportTool import Type
from flask import request,jsonify
@usrapi.route('/fdsignin',methods=['POST','GET'])
def fdsignin():
    """Handle a sign-in request: verify it, load the database, answer JSON."""
    checker = verifySupportTool.VerifySupportToolFactory().build()
    db = dbSupportTool.dbSupportToolFactory().build()
    checker.setDbSupport(db).setRequest(value=request)
    if checker.verifySignin() is False:
        # Verification failed: report the user error payload with 404.
        return jsonify(checker.getError(Type.user)), 404
    db.load()
    return jsonify(checker.getSuccess(Type.user)), 200
| {"/app/toolbox/iotool.py": ["/app/toolbox/basetool.py"], "/app/main/launch.py": ["/app/main/createapp.py"], "/app/toolbox/verifytool.py": ["/app/toolbox/basetool.py"], "/app/toolbox/dbtool.py": ["/app/toolbox/basetool.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.