index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
67,991 | vk-vinay/nsenest | refs/heads/master | /api/views.py | from rest_framework.viewsets import GenericViewSet
from utils.CustomResponse import success, notfound
from rest_framework.decorators import action
from utils.RedisConnection import RedisInstance
import json
# Create your views here.
class Nifty(GenericViewSet):
    """Read-only endpoints serving cached NIFTY movers out of Redis."""

    @action(detail=False, methods=['GET'])
    def gainers(self, request):
        """Top gainers cached by the celery task under key 'gainers'."""
        return self._from_cache('gainers')

    @action(detail=False, methods=['GET'])
    def loosers(self, request):
        """Top losers cached under key 'loosers' (NSE's own spelling)."""
        return self._from_cache('loosers')

    def _from_cache(self, key):
        # Shared lookup: decode the cached JSON blob or answer 404 when
        # the key is missing/expired. Not an @action, so not routed.
        cached = RedisInstance().get(key=key)
        if cached:
            return success(data=json.loads(cached))
        return notfound()
| {"/api/views.py": ["/utils/CustomResponse.py", "/utils/RedisConnection.py"], "/api/tasks.py": ["/utils/APIBridge.py", "/utils/RedisConnection.py", "/utils/constants.py"], "/api/urls.py": ["/api/views.py"], "/api/utils.py": ["/utils/APIBridge.py", "/utils/constants.py", "/utils/RedisConnection.py"]} |
67,992 | vk-vinay/nsenest | refs/heads/master | /utils/constants.py | """ Nse endpoints contents"""
class NseAPI:
    """Endpoint URLs for NSE's live-analysis-variations API."""
    Base = 'https://www.nseindia.com/api/live-analysis-variations?index='
    # NSE's endpoint itself uses the spelling 'loosers'
    Loosers = f'{Base}loosers'
    Gainers = f'{Base}gainers'
| {"/api/views.py": ["/utils/CustomResponse.py", "/utils/RedisConnection.py"], "/api/tasks.py": ["/utils/APIBridge.py", "/utils/RedisConnection.py", "/utils/constants.py"], "/api/urls.py": ["/api/views.py"], "/api/utils.py": ["/utils/APIBridge.py", "/utils/constants.py", "/utils/RedisConnection.py"]} |
67,993 | vk-vinay/nsenest | refs/heads/master | /api/tasks.py | from __future__ import absolute_import, unicode_literals
import json
from celery import shared_task
from utils.APIBridge import APIBridge
from utils.RedisConnection import RedisInstance
from utils.constants import NseAPI
@shared_task(name="update_gainers")
def niftygainers():
    """Fetch NIFTY gainers from NSE and cache them in Redis under 'gainers'."""
    response = APIBridge(url=NseAPI.Gainers).get()
    if not response:
        # request failed (already logged by APIBridge); leave cache untouched
        return None
    gainers = response.json()['NIFTY'].get('data', None)
    return RedisInstance().set(key='gainers', value=json.dumps(gainers))
@shared_task(name="update_loosers")
def niftyloosers():
    """Fetch NIFTY losers from NSE and cache them in Redis under 'loosers'."""
    response = APIBridge(url=NseAPI.Loosers).get()
    if not response:
        # request failed (already logged by APIBridge); leave cache untouched
        return None
    loosers = response.json()['NIFTY'].get('data', None)
    return RedisInstance().set(key='loosers', value=json.dumps(loosers))
| {"/api/views.py": ["/utils/CustomResponse.py", "/utils/RedisConnection.py"], "/api/tasks.py": ["/utils/APIBridge.py", "/utils/RedisConnection.py", "/utils/constants.py"], "/api/urls.py": ["/api/views.py"], "/api/utils.py": ["/utils/APIBridge.py", "/utils/constants.py", "/utils/RedisConnection.py"]} |
67,994 | vk-vinay/nsenest | refs/heads/master | /utils/RedisConnection.py | import logging
import redis.exceptions
from django.conf import settings
# connect to our Redis instance
class RedisInstance:
    """Thin facade over a StrictRedis connection (db 0) with error logging."""

    def __init__(self):
        self.instance = redis.StrictRedis(
            host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=0
        )

    def _call(self, op, **kwargs):
        # Funnel every redis operation through one handler: on any Redis
        # error, log it and fall through to an implicit None.
        try:
            return op(**kwargs)
        except redis.exceptions.RedisError as er:
            logging.error('Something went wrong!' + repr(er))

    def set(self, key, value):
        """Store `value` under `key`; returns None when Redis errors."""
        return self._call(self.instance.set, name=key, value=value)

    def get(self, key):
        """Fetch raw bytes for `key`; None on cache miss or Redis error."""
        return self._call(self.instance.get, name=key)
| {"/api/views.py": ["/utils/CustomResponse.py", "/utils/RedisConnection.py"], "/api/tasks.py": ["/utils/APIBridge.py", "/utils/RedisConnection.py", "/utils/constants.py"], "/api/urls.py": ["/api/views.py"], "/api/utils.py": ["/utils/APIBridge.py", "/utils/constants.py", "/utils/RedisConnection.py"]} |
67,995 | vk-vinay/nsenest | refs/heads/master | /nsenest/views.py | from django.shortcuts import render
from django.views.generic import base
class Index(base.View):
    """Landing page: renders the static index.html template."""

    def get(self, request):
        # No context needed; the template is self-contained.
        return render(request, template_name='index.html')
| {"/api/views.py": ["/utils/CustomResponse.py", "/utils/RedisConnection.py"], "/api/tasks.py": ["/utils/APIBridge.py", "/utils/RedisConnection.py", "/utils/constants.py"], "/api/urls.py": ["/api/views.py"], "/api/utils.py": ["/utils/APIBridge.py", "/utils/constants.py", "/utils/RedisConnection.py"]} |
67,996 | vk-vinay/nsenest | refs/heads/master | /utils/APIBridge.py | import requests
import logging
class APIBridge:
    """Minimal `requests` wrapper with browser-like default headers.

    NSE rejects requests without a real browser User-Agent, hence the
    Firefox header set.
    """

    def __init__(self, url, token=None, payload=None):
        self.url = url
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:89.0) Gecko/20100101 Firefox/89.0',
            'Accept': '*/*',
            'Accept-Language': 'en-US,en;q=0.5',
            'Content-Type': 'application/json;charset=utf-8',
        }
        if token:
            # DRF-style token auth header
            self.headers['Authorization'] = f'Token {token}'
        self.payload = payload

    def get(self):
        """GET self.url; return the Response on 2xx, None on any failure."""
        try:
            resp = requests.get(url=self.url, headers=self.headers)
            resp.raise_for_status()
        except requests.exceptions.RequestException as er:
            logging.error('Something went wrong!' + repr(er))
        else:
            return resp

    def post(self):
        # Not implemented yet.
        pass
| {"/api/views.py": ["/utils/CustomResponse.py", "/utils/RedisConnection.py"], "/api/tasks.py": ["/utils/APIBridge.py", "/utils/RedisConnection.py", "/utils/constants.py"], "/api/urls.py": ["/api/views.py"], "/api/utils.py": ["/utils/APIBridge.py", "/utils/constants.py", "/utils/RedisConnection.py"]} |
67,997 | vk-vinay/nsenest | refs/heads/master | /utils/CustomResponse.py | from rest_framework.response import Response
from rest_framework import status
def success(data=None, message='Success'):
    """200 OK response with the standard {'message', 'data'} envelope."""
    payload = {'message': message, 'data': data}
    return Response(payload, status=status.HTTP_200_OK)
def notfound(data=None, message='Not Found'):
    """404 response with the standard {'message', 'data'} envelope."""
    payload = {'message': message, 'data': data}
    return Response(payload, status=status.HTTP_404_NOT_FOUND)
| {"/api/views.py": ["/utils/CustomResponse.py", "/utils/RedisConnection.py"], "/api/tasks.py": ["/utils/APIBridge.py", "/utils/RedisConnection.py", "/utils/constants.py"], "/api/urls.py": ["/api/views.py"], "/api/utils.py": ["/utils/APIBridge.py", "/utils/constants.py", "/utils/RedisConnection.py"]} |
67,998 | vk-vinay/nsenest | refs/heads/master | /api/urls.py | from django.urls import path, include
from rest_framework import routers
from .views import Nifty
# Router-based wiring: the Nifty viewset's @actions become
# /nifty/gainers/ and /nifty/loosers/ under the app root.
router = routers.DefaultRouter()
router.register('nifty', Nifty, basename='nifty')

urlpatterns = [path('', include(router.urls))]
| {"/api/views.py": ["/utils/CustomResponse.py", "/utils/RedisConnection.py"], "/api/tasks.py": ["/utils/APIBridge.py", "/utils/RedisConnection.py", "/utils/constants.py"], "/api/urls.py": ["/api/views.py"], "/api/utils.py": ["/utils/APIBridge.py", "/utils/constants.py", "/utils/RedisConnection.py"]} |
67,999 | vk-vinay/nsenest | refs/heads/master | /api/utils.py | from utils.APIBridge import APIBridge
from utils.constants import NseAPI
from utils.RedisConnection import RedisInstance
import json
| {"/api/views.py": ["/utils/CustomResponse.py", "/utils/RedisConnection.py"], "/api/tasks.py": ["/utils/APIBridge.py", "/utils/RedisConnection.py", "/utils/constants.py"], "/api/urls.py": ["/api/views.py"], "/api/utils.py": ["/utils/APIBridge.py", "/utils/constants.py", "/utils/RedisConnection.py"]} |
68,006 | alenprastya/Mask-Detection-With-Tracker | refs/heads/main | /src/models/stream/stream.py | import cv2
import imutils
import datetime
from src.apis.sound import Sound
from src.views.views import Views
from imutils.video import VideoStream
from src.apis.randomName import Generator
from src.models.captures.image_capture import Image
from src.apis.detectNPredict import detect_and_predict_mask
from src.logging.logging import log
class Stream(Views, Image) :
    """Main capture loop: detects faces each frame, classifies mask usage,
    draws overlays (Views mixin), saves captures (Image mixin), plays an
    audio cue and logs the status.

    NOTE(review): MASK_VISITORS/NOMASK_VISITORS/TOTAL_VISITORS increment once
    per frame in which a face is classified, not once per distinct person —
    confirm the intended 'visitors' semantics.
    """

    def __init__(self) :
        super().__init__()

    def video_stream(self, face_net, mask_net) :
        """Run the annotated webcam loop until 'q' is pressed.

        :param face_net: OpenCV DNN face detector passed to detect_and_predict_mask
        :param mask_net: mask/no-mask classifier passed to detect_and_predict_mask
        """
        # initialize the video stream (default camera, threaded reader)
        print(f"[ INFO ] starting video stream...")
        vs = VideoStream(0).start()
        """
        Visitors index
        """
        MASK_VISITORS = 0
        NOMASK_VISITORS = 0
        TOTAL_VISITORS = 0
        # loop over the frames from the video stream
        while True:
            # grab the frame from the threaded video stream and resize it
            frame = vs.read()
            frame = imutils.resize( frame, width=1024 )
            # detect faces in the frame and determine if they are wearing a
            # face mask or not
            (locs, preds) = detect_and_predict_mask(frame, face_net, mask_net)
            # Per-frame render state; "response": False means no face handled.
            PERCENTAGES = {
                "response" : False
            }
            # NOTE(review): each detected face overwrites PERCENTAGES, so only
            # the LAST face in the frame drives the capture/overlay below.
            for (box, pred) in zip(locs, preds):
                # unpack the bounding box and predictions
                (startX, startY, endX, endY) = box
                (mask, withoutMask) = pred
                # choose label/color/sound/capture-path from the winning class
                if mask > withoutMask :
                    PERCENTAGES = {
                        "response" : True,
                        "capture" : f'documentations/capture/mask/{Generator().generate()}.jpg',
                        "label" : "silahkan.... Anda boleh masuk",
                        "status" : "MASK",
                        "sound" : "src/asset/sound/mask.mp3",
                        "color" : (0, 255, 0),
                        "percentages" : f"{int(max(mask, withoutMask) * 100)} %",
                        "ends" : (endX, endY),
                        "starts" : (startX, startY - 10)
                    }
                    MASK_VISITORS += 1
                    TOTAL_VISITORS += 1
                elif mask < withoutMask :
                    PERCENTAGES = {
                        "response" : True,
                        "capture" : f'documentations/capture/no_mask/{Generator().generate()}.jpg',
                        "label" : "PAKAI MASKER DAHULU!!!",
                        "status" : "NOMASK",
                        "sound" : "src/asset/sound/nomask.mp3",
                        "color" : (0, 0, 255),
                        "percentages" : f"{int(max(mask, withoutMask) * 100)} %",
                        "ends" : (endX, endY),
                        "starts" : (startX, startY - 10)
                    }
                    NOMASK_VISITORS += 1
                    TOTAL_VISITORS += 1
            if PERCENTAGES["response"] :
                """ image capture """
                self.capture(
                    frame = frame,
                    location = PERCENTAGES["capture"]
                )
                """ face detect view """
                self.face_detectView(
                    frame = frame,
                    label = PERCENTAGES["label"],
                    starts = PERCENTAGES["starts"],
                    ends = PERCENTAGES["ends"],
                    color = PERCENTAGES["color"]
                )
                """ percentage view """
                self.percentagesView(
                    frame = frame,
                    status = PERCENTAGES["status"],
                    percentage = PERCENTAGES["percentages"],
                    color = PERCENTAGES["color"]
                )
                """ sound """
                # NOTE(review): playsound appears to block — this likely stalls
                # the frame loop for the duration of the clip; confirm.
                Sound( source = PERCENTAGES["sound"] )
                """ logging """
                log( message = PERCENTAGES["status"] )
            else :
                # no face classified this frame
                self.noHumanView( frame = frame )
            # visitors counter panel (bottom-left)
            self.visitorsView(
                frame = frame,
                mask_visitors = MASK_VISITORS,
                nomask_visitors = NOMASK_VISITORS,
                total_visitors = TOTAL_VISITORS
            )
            # wall-clock HH:MM:SS (top-right)
            self.timerView( frame = frame )
            # connection status badge (bottom-right)
            self.connectionView( frame = frame )
            # show the output frame
            cv2.imshow("ZEIPER", frame)
            key = cv2.waitKey(1) & 0xFF
            # if the `q` key was pressed, break from the loop
            if key == ord("q"):
                break
        # do a bit of cleanup
        cv2.destroyAllWindows()
        vs.stop()
| {"/src/models/stream/stream.py": ["/src/apis/sound.py", "/src/views/views.py", "/src/apis/randomName.py", "/src/models/captures/image_capture.py", "/src/logging/logging.py"], "/detector.py": ["/src/models/stream/stream.py"]} |
68,007 | alenprastya/Mask-Detection-With-Tracker | refs/heads/main | /src/models/captures/image_capture.py | import cv2
class Image :
    """Frame persistence helper (mixed into Stream)."""

    def capture(self, frame, location) :
        # Persist the current BGR frame as an image file at `location`.
        # NOTE(review): cv2.imwrite fails silently (returns False) when the
        # target directory does not exist — confirm the capture dirs are created.
        cv2.imwrite( filename = location, img = frame )
| {"/src/models/stream/stream.py": ["/src/apis/sound.py", "/src/views/views.py", "/src/apis/randomName.py", "/src/models/captures/image_capture.py", "/src/logging/logging.py"], "/detector.py": ["/src/models/stream/stream.py"]} |
68,008 | alenprastya/Mask-Detection-With-Tracker | refs/heads/main | /src/apis/randomName.py | import string
import random
class Generator:
    """Random hyphen-grouped identifier factory, e.g. 'aB3d-Ef5g-Hj7k-Lm9n'."""

    def __init__(self):
        # total number of random alphanumeric characters (hyphens excluded)
        self.LENGTH = 16

    def generate(self):
        """Return a random ID of self.LENGTH alphanumeric chars, hyphenated
        every 4 characters.

        Rewritten from the original index-walking loop, which duplicated the
        character append in both branches and hard-coded the break positions
        4/8/12; chunk-and-join produces the identical result for LENGTH=16
        and generalizes to any LENGTH.
        """
        alphabet = string.ascii_letters + string.digits
        chars = "".join(random.choice(alphabet) for _ in range(self.LENGTH))
        # 'abcdefghijklmnop' -> 'abcd-efgh-ijkl-mnop'
        return "-".join(chars[i:i + 4] for i in range(0, len(chars), 4))
68,009 | alenprastya/Mask-Detection-With-Tracker | refs/heads/main | /src/logging/logging.py | import datetime
def log(message):
    """Print a timestamped status line for a detection event (MASK/NOMASK)."""
    stamp = datetime.datetime.now().strftime('%H:%M:%S')
    print(f"[ LOG ] status : {message} --> {stamp}")
68,010 | alenprastya/Mask-Detection-With-Tracker | refs/heads/main | /src/apis/sound.py | from playsound import playsound
def Sound (source) :
    """Play the audio file at `source` via playsound.

    NOTE(review): playsound blocks by default until playback finishes —
    confirm callers tolerate the pause.
    """
    playsound(source)
| {"/src/models/stream/stream.py": ["/src/apis/sound.py", "/src/views/views.py", "/src/apis/randomName.py", "/src/models/captures/image_capture.py", "/src/logging/logging.py"], "/detector.py": ["/src/models/stream/stream.py"]} |
68,011 | alenprastya/Mask-Detection-With-Tracker | refs/heads/main | /detector.py | #!/bin/env python3
import cv2
from tensorflow.keras.models import load_model
from src.models.stream.stream import Stream
class detector(Stream) :
    """Wires the serialized face detector and mask classifier into Stream."""

    def __init__ (self) :
        # load our serialized face detector model from disk
        # NOTE(review): backslash raw-string paths are Windows-only — confirm
        # the target OS, or switch to forward slashes / pathlib.
        self.prototxtPath = r"src\models\detector_models\deploy.prototxt"
        self.weightsPath = r"src\models\detector_models\res10_300x300_ssd_iter_140000.caffemodel"
        self.faceNet = cv2.dnn.readNet(self.prototxtPath, self.weightsPath)
        # load the face mask detector model from disk (relative to CWD)
        self.maskNet = load_model("mask_detector.model")
        super().__init__()

    def running(self) :
        """Start the annotated video stream with the loaded networks."""
        self.video_stream(
            face_net = self.faceNet,
            mask_net = self.maskNet
        )
if __name__ == "__main__":
    # Entry point: constructing the detector loads both models from disk.
    # (Bound to `app` rather than rebinding/shadowing the class name.)
    app = detector()
    app.running()
| {"/src/models/stream/stream.py": ["/src/apis/sound.py", "/src/views/views.py", "/src/apis/randomName.py", "/src/models/captures/image_capture.py", "/src/logging/logging.py"], "/detector.py": ["/src/models/stream/stream.py"]} |
68,012 | alenprastya/Mask-Detection-With-Tracker | refs/heads/main | /src/views/views.py | import cv2
import datetime
class Views:
    """cv2 text/box overlay painters for the detector window.

    Pixel coordinates are tuned for the 1024-wide resized frame used by
    Stream.video_stream.
    """

    def face_detectView(self, frame, label, starts, ends, color):
        """Draw the face bounding box with its label just above it."""
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.rectangle(frame, starts, ends, color, 2)
        cv2.putText(frame, label, starts, font, 0.45, color, 2)

    def percentagesView(self, frame, status, percentage, color):
        """Top-left panel: classification status and its confidence."""
        font = cv2.FONT_HERSHEY_SIMPLEX
        white = (255, 255, 255)
        cv2.putText(frame, status, (170, 50), font, 0.70, color, 2)
        cv2.putText(frame, "status :", (20, 50), font, 0.70, white, 2)
        cv2.putText(frame, percentage, (170, 75), font, 0.70, color, 2)
        cv2.putText(frame, "persentase :", (20, 75), font, 0.70, white, 2)

    def visitorsView(self, frame, mask_visitors, nomask_visitors, total_visitors):
        """Bottom-left tally of masked / unmasked / total counts."""
        font = cv2.FONT_HERSHEY_SIMPLEX
        white = (255, 255, 255)
        rows = [
            (f"MASK : {mask_visitors} orang", (20, 595)),
            (f"NOMASK : {nomask_visitors} orang", (20, 625)),
            ("------------- +", (20, 645)),
            (f"TOTAL : {total_visitors} orang", (20, 670)),
        ]
        for text, origin in rows:
            cv2.putText(frame, text, origin, font, 0.70, white, 2)

    def timerView(self, frame):
        """Top-right wall-clock HH:MM:SS."""
        now = datetime.datetime.now().strftime('%H:%M:%S')
        cv2.putText(frame, now, (860, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.90, (255, 255, 255), 2)

    def connectionView(self, frame):
        """Bottom-right connection badge (hard-coded OFFLINE)."""
        cv2.putText(frame, "OFFLINE", (860, 670), cv2.FONT_HERSHEY_SIMPLEX, 0.90, (0, 0, 255), 2)

    def noHumanView(self, frame):
        """Top-left placeholder when no face was classified this frame."""
        cv2.putText(frame, "NO HUMAN", (20, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.90, (255, 255, 255), 2)
| {"/src/models/stream/stream.py": ["/src/apis/sound.py", "/src/views/views.py", "/src/apis/randomName.py", "/src/models/captures/image_capture.py", "/src/logging/logging.py"], "/detector.py": ["/src/models/stream/stream.py"]} |
68,013 | AutumnSun1996/ML | refs/heads/master | /celery_tasks/Estimators.py | from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier
| {"/data_process.py": ["/config.py"], "/base.py": ["/config.py"], "/compare.py": ["/base.py", "/config.py"], "/trial.py": ["/base.py", "/config.py", "/data_loader.py"]} |
68,014 | AutumnSun1996/ML | refs/heads/master | /data_process.py | import numpy as np
import pandas as pd
from config import log
class Processor:
    """Records and replays a sequence of DataFrame preprocessing steps.

    Every transformation appends {'name', 'args'} to self.records, so the
    identical pipeline (including captured statistics such as train-set
    means) can be re-applied to another DataFrame via redo().
    """

    def __init__(self, data, records=None):
        self.data = data.copy()
        self.records = [] if records is None else records

    def one_hot_encoding(self, columns=None, record=True):
        """One-hot encode object columns.

        When `columns` is given, conform the frame to exactly that column
        set (used by redo() so the test set matches the train set's dummies).

        BUGFIX: the original called `.fillna(0, columns=columns)` — fillna
        has no `columns` keyword and raised TypeError. reindex adds the
        missing dummy columns and fills them with 0 in one step.
        """
        self.data = pd.get_dummies(self.data)
        if columns is None:
            columns = list(self.data.columns)
        else:
            self.data = self.data.reindex(columns=columns, fill_value=0)
        if record:
            self.records.append({'name': 'one_hot_encoding', 'args': {'columns': columns}})

    def add_column(self, name, source, func, record=True):
        """Add column `name` as numpy function `func` applied to `source`."""
        self.data[name] = self.data[source].apply(getattr(np, func))
        if record:
            self.records.append({'name': 'add_column', 'args': {'name': name, 'source': source, 'func': func}})

    def drop(self, names, record=True):
        """Drop the given columns in place."""
        self.data.drop(names, axis=1, inplace=True)
        if record:
            self.records.append({'name': 'drop', 'args': {'names': names}})

    def normalize(self, norm=None, record=True):
        """Standardize numeric columns.

        `norm` is a (stds, means) pair; computed from the current data when
        omitted and recorded so redo() reuses the same statistics.
        """
        num_cols = self.data.columns[self.data.dtypes != "object"]
        if norm is None:
            norm = (list(self.data[num_cols].std()), list(self.data[num_cols].mean()))
        self.data[num_cols] = (self.data[num_cols] - norm[1]) / norm[0]
        if record:
            self.records.append({'name': 'normalize', 'args': {'norm': norm}})

    def redo(self):
        """Replay every recorded step on the current data (without re-recording)."""
        for each in self.records:
            log(0x16, 'Processor.redo:', each['name'])
            # getattr with a default makes the unknown-step branch reachable
            # (the original __getattribute__ raised AttributeError first)
            func = getattr(self, each['name'], None)
            if func:
                func(**each['args'], record=False)
            else:
                raise NameError("Unknown Function %s" % each['name'])

    def fill_missing(self, fill_with='mean', record=True):
        """Fill NaNs in numeric columns.

        `fill_with` may be a number, a {column: value} dict, or the name of
        a Series method such as 'mean'/'median'. Directly after normalize(),
        'mean' degrades to 0 since the data is already centered.

        BUGFIX: guard the history peek — `self.records[-1]` raised
        IndexError whenever fill_missing was the first step recorded.
        """
        columns = self.data.columns[self.data.dtypes != 'object']
        if self.records and self.records[-1]['name'] == 'normalize' and fill_with == 'mean':
            fill_with = 0
        fill = {}
        if isinstance(fill_with, str):
            for column in columns:
                fill[column] = getattr(self.data[column], fill_with)()
        elif isinstance(fill_with, (int, float)):
            fill = {column: fill_with for column in columns}
        elif not isinstance(fill_with, dict):
            raise TypeError('fill_with Should be number or dict or method name, get {!r}'.format(fill_with))
        else:
            fill = fill_with
        for column in fill:
            # assignment instead of chained inplace fillna: safe on copies
            self.data[column] = self.data[column].fillna(fill.get(column))
        if record:
            self.records.append({'name': 'fill_missing', 'args': {'fill_with': fill}})
| {"/data_process.py": ["/config.py"], "/base.py": ["/config.py"], "/compare.py": ["/base.py", "/config.py"], "/trial.py": ["/base.py", "/config.py", "/data_loader.py"]} |
68,015 | AutumnSun1996/ML | refs/heads/master | /config.py | # 1:data_process.py, 2:config.py
# 1:fatal error, 2:error, 3:output, 4-6:important information, 7-A:normal information, B-F:extra information
# Codes enabled for output; see the level legend in the comments above.
log_level = {
    0x01, 0x02,
    0x11, 0x12, 0x13, 0x14, 0x15, 0x16,
    0x21, 0x22, 0x25, 0x26,
    0x31, 0x32,
    0x41, 0x42,
}


def log(level, *messages, **kwargs):
    """Print `messages` when `level` is enabled in log_level.

    Severities 1 (fatal) and 2 (error) default to stderr unless the caller
    already supplied a `file` keyword.
    """
    severity = level & 0x0F
    if severity < 0x03 and not kwargs.get('file'):
        from sys import stderr
        kwargs['file'] = stderr
    if level in log_level:
        print(*messages, **kwargs)
# Baseline hyper-parameter search configuration shared by every estimator.
default_setting = {
    'params': {
        'max_depth': (1, 5),
        'learning_rate': (1e-05, 1, 'log-uniform'),
    },
    'cv': {
        'cv': 3,
        'n_jobs': -1,
        'method': 'predict_proba'
    },
    'scoring': 'log_loss',
    'gp': {
        'n_calls': 10,
        'random_state': 0,
        'verbose': True
    },
}

# Per-estimator overrides (CatBoost names its depth parameter 'depth').
settings = {
    'CatBoostClassifier': {
        'params': {
            'depth': (1, 5),
            'learning_rate': (1e-05, 1, 'log-uniform'),
        }
    },
    'CatBoostRegressor': {
        'params': {
            'depth': (1, 5),
            'learning_rate': (1e-05, 1, 'log-uniform'),
        }
    },
}


def get_setting(name):
    """Return the tuning configuration for estimator `name`.

    Starts from `default_setting` and shallow-merges the per-estimator
    override, if any.

    BUGFIX: a deep copy is returned — the previous shallow `.copy()` shared
    the nested 'params'/'cv'/'gp' dicts with the module-level defaults, so a
    caller mutating its returned setting silently corrupted every later call.
    """
    import copy  # local import keeps the module's import surface unchanged
    setting = copy.deepcopy(default_setting)
    setting.update(copy.deepcopy(settings.get(name, {})))
    return setting
| {"/data_process.py": ["/config.py"], "/base.py": ["/config.py"], "/compare.py": ["/base.py", "/config.py"], "/trial.py": ["/base.py", "/config.py", "/data_loader.py"]} |
68,016 | AutumnSun1996/ML | refs/heads/master | /data_loader.py | import datetime
import numpy as np
import pandas as pd
import scipy.sparse as sp
# Module import side effect: every import of this module creates a fresh
# timestamped log file. NOTE(review): the 'log/' directory must already
# exist or this open() raises FileNotFoundError at import time — confirm.
log_name = 'log/{}.log'.format(datetime.datetime.now())
log_file = open(log_name, 'w', -1, 'utf8')  # line-unbuffered not needed; log() flushes
print('Log To:', log_name)
def log(level, *messages, **kwargs):
    """Print a log line to stdout and mirror it to the module-level log_file.

    `level` is a hex code (high nibble = source module, low nibble =
    severity) rendered as two hex digits.
    """
    timestamp = datetime.datetime.now()
    print('LOG: %02X' % level, timestamp, *messages, **kwargs)
    # mirror to the file, forcing flush so lines survive a crash
    kwargs.update({'file': log_file, 'flush': True})
    print('%02X' % level, timestamp, *messages, **kwargs)
def process_data(all_data, target, frac=0.5):
    """Split `all_data` into train/test partitions as sparse feature matrices.

    :param all_data: DataFrame containing features plus the `target` column
    :param target: name of the label column
    :param frac: fraction of rows sampled (seeded) into the train split
    :return: {'train': {'X': csc_matrix, 'y': ndarray}, 'test': {...}}
    """
    def to_xy(frame, tag):
        # log the split's shape, then convert to sparse X / dense y
        log(0x25, tag, frame.shape)
        y = frame[target].values
        X = sp.csc_matrix(frame.drop(target, axis=1).values)
        return {'X': X, 'y': y}

    train = all_data.sample(frac=frac, random_state=0)
    result = {'train': to_xy(train, 'Train Data:')}
    remaining = all_data.drop(train.index)
    del train  # free the dense train frame before building the test split
    result['test'] = to_xy(remaining, 'Test Data:')
    del remaining
    return result
def load_orange(label='appetency', frac=0.5):
    """Load the kdd-cup-2009 (Orange) dataset from data/orange/.

    :param label: {'appetency', 'upselling'}. Default='appetency'
    :param frac: Wanted fraction of train data. Default=0.5
    :return: dict{dataset} as produced by process_data
    """
    log(0x24, 'Use Data: orange_{}'.format(label))
    data = pd.read_csv('data/orange/train.data', sep='\t', )
    # drop columns that are entirely missing
    data = data.dropna(axis=1, how='all')
    # standardize the numeric columns in place
    # NOTE(review): relies on DataFrame.mean()/std() restricting themselves to
    # numeric columns — that default changed across pandas versions; confirm.
    mean_val = data.mean()
    indices = mean_val.index
    data[indices] = (data[indices] - mean_val) / data[indices].std()
    # one-hot encode remaining categoricals; fill all NaNs with 0
    data = pd.get_dummies(data, sparse=True).fillna(0)
    # labels file holds -1/1 per row; map to 0/1
    data['Target'] = pd.read_csv('data/orange/train_{}.labels'.format(label), header=None)[0].apply(
        lambda a: 1 if a > 0 else 0)
    return process_data(data, 'Target', frac=frac)
def load_Amazon(frac=0.5):
    """Load the Amazon employee-access training set, fully one-hot encoded.

    Every column is read as categorical so get_dummies expands the ID-valued
    features; the ACTION label is restored to an integer.
    """
    log(0x24, 'Use Data: Amazon')
    train = pd.read_csv('data/Amazon/train.csv', dtype='category')
    train['ACTION'] = train['ACTION'].astype('int32')
    return process_data(pd.get_dummies(train), 'ACTION', frac)
def load_adult():
    """Load the UCI Adult dataset from data/adult/, returning dense arrays.

    Column names/dtypes are parsed out of the schema text below (copied from
    adult.names). The test file's labels carry a trailing '.' which is
    stripped so its dummy columns align with the train set's.

    BUGFIX: the schema regex is now a raw string — `'\\s'` inside a plain
    string literal is an invalid escape (SyntaxWarning on modern Python).
    Dead commented-out debug code removed; the local `settings` dict renamed
    to `read_opts` to avoid shadowing the project-wide settings name.

    :return: {'train': {'X': ndarray, 'y': ndarray}, 'test': {...}} with
             binary y (1 means income >50K).
    """
    log(0x24, 'Use Data: Adult')
    import re
    items = re.findall(r'(?m)^\s*(.+): (.+)$', '''
    age: continuous.
    workclass: Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, Local-gov, State-gov, Without-pay, Never-worked.
    fnlwgt: continuous.
    education: Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm, Assoc-voc, 9th, 7th-8th, 12th, Masters, 1st-4th, 10th, Doctorate, 5th-6th, Preschool.
    education-num: continuous.
    marital-status: Married-civ-spouse, Divorced, Never-married, Separated, Widowed, Married-spouse-absent, Married-AF-spouse.
    occupation: Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, Prof-specialty, Handlers-cleaners, Machine-op-inspct, Adm-clerical, Farming-fishing, Transport-moving, Priv-house-serv, Protective-serv, Armed-Forces.
    relationship: Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried.
    race: White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other, Black.
    sex: Female, Male.
    capital-gain: continuous.
    capital-loss: continuous.
    hours-per-week: continuous.
    native-country: United-States, Cambodia, England, Puerto-Rico, Canada, Germany, Outlying-US(Guam-USVI-etc), India, Japan, Greece, South, China, Cuba, Iran, Honduras, Philippines, Italy, Poland, Jamaica, Vietnam, Mexico, Portugal, Ireland, France, Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala, Nicaragua, Scotland, Thailand, Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands.
    ''')
    items.append(('Target', ''))
    names = []
    types = {}
    for col_name, col_kind in items:
        names.append(col_name)
        # continuous columns become float32; everything else stays object
        types[col_name] = np.float32 if col_kind == 'continuous.' else 'object'
    read_opts = dict(names=names, dtype=types, skiprows=1, header=0, index_col=False,
                     na_values=['?'], skipinitialspace=True, )
    train = pd.read_csv('data/adult/adult.data', **read_opts, )
    train_dummy = pd.get_dummies(train)
    # keep only the positive-class indicator; '<=50K' is its complement
    train_dummy = train_dummy.drop('Target_<=50K', axis=1)
    columns = train_dummy.columns
    test = pd.read_csv('data/adult/adult.test', **read_opts)
    # test labels look like '>50K.' — strip the trailing dot
    test['Target'] = test['Target'].apply(lambda a: a.strip('.'))
    test_dummy = pd.get_dummies(test)
    # align test columns with train; categories absent from test become 0
    test_dummy = test_dummy.loc[:, columns].fillna(0)
    target = 'Target_>50K'
    log(0x25, 'Train Data:', train_dummy.shape)
    log(0x25, 'Test Data:', test_dummy.shape)
    return {'train': {'X': np.array(train_dummy.drop(target, axis=1)), 'y': np.ravel(train_dummy[[target]])},
            'test': {'X': np.array(test_dummy.drop(target, axis=1)), 'y': np.ravel(test_dummy[[target]])}, }
| {"/data_process.py": ["/config.py"], "/base.py": ["/config.py"], "/compare.py": ["/base.py", "/config.py"], "/trial.py": ["/base.py", "/config.py", "/data_loader.py"]} |
68,017 | AutumnSun1996/ML | refs/heads/master | /base.py | from sklearn.linear_model.logistic import LogisticRegression as MetaEstimator
import numpy as np
from sklearn.model_selection import cross_val_score, cross_val_predict, KFold
import sklearn.metrics
from skopt import gp_minimize
from config import log
# from catboost import CatBoostRegressor, CatBoostClassifier
#
#
# class MyCatBoostRegressor(CatBoostRegressor):
# def set_params(self, **params):
# for k in params:
# if isinstance(params[k], np.integer):
# params[k] = int(params[k])
# elif isinstance(params[k], np.floating):
# params[k] = float(params[k])
# elif isinstance(params[k], np.ndarray):
# params[k] = params[k].tolist()
# super().set_params(**params)
#
#
# class MyCatBoostClassifier(CatBoostClassifier):
# def set_params(self, **params):
# for k in params:
# if isinstance(params[k], np.integer):
# params[k] = int(params[k])
# elif isinstance(params[k], np.floating):
# params[k] = float(params[k])
# elif isinstance(params[k], np.ndarray):
# params[k] = params[k].tolist()
# super().set_params(**params)
#
def tuning(predictor, X, y, params, cv, gp, scoring='log_loss'):
    """Bayesian hyper-parameter search via skopt's gp_minimize.

    :param predictor: sklearn-compatible estimator (set_params/fit)
    :param X, y: training features and labels
    :param params: {param_name: skopt search dimension}
    :param cv: kwargs forwarded to cross_val_predict (cv, n_jobs, method, ...)
    :param gp: kwargs forwarded to gp_minimize (n_calls, random_state, ...)
    :param scoring: name of a function in sklearn.metrics used as the loss;
                    gp_minimize MINIMIZES it, so it must be a loss (log_loss)
    :return: the skopt OptimizeResult; as a side effect `predictor` is
             refit on the full (X, y) with the best parameters found
    """
    log(0x25, 'tuning: predictor=', predictor)
    log(0x25, 'tuning: x=', type(X), X.shape)
    log(0x25, 'tuning: y=', type(y), y.shape)
    log(0x25, 'tuning: params=', params)
    log(0x25, 'tuning: cv=', cv)
    log(0x25, 'tuning: gp=', gp)
    # split the params dict into parallel name / search-space lists for skopt
    space = []
    names = []
    for key in params:
        names.append(key)
        space.append(params[key])

    def objective(p):
        # coerce numpy scalars back to builtins: some estimators reject them
        for k, v in enumerate(p):
            if isinstance(v, np.integer):
                p[k] = int(v)
            elif isinstance(v, np.floating):
                p[k] = float(v)
        print('Set:', dict(zip(names, p)))
        predictor.set_params(**dict(zip(names, p)))
        # NOTE(review): X[:]/y[:] look like defensive slice-copies — confirm intent
        prediction = cross_val_predict(predictor, X[:], y[:], **cv)
        # resolve the loss function by name from sklearn.metrics
        return sklearn.metrics.__getattribute__(scoring)(y[:], prediction)

    result = gp_minimize(objective, space, **gp)
    # refit on the full data with the best parameter vector found
    predictor.set_params(**dict(zip(names, result.x)))
    predictor.fit(X, y)
    return result
class Ensemble:
    """Two-level stacking: out-of-fold probabilities from the base
    estimators feed a logistic-regression meta-estimator (MetaEstimator)."""

    def __init__(self, base_estimators=None, random_state=0):
        self.base_estimators = base_estimators
        self.estimator = MetaEstimator()
        self.random_state = random_state

    def fit(self, X, y):
        """Fit the meta-estimator on 5-fold out-of-fold base predictions,
        then refit every base estimator on the full data for inference."""
        cv = KFold(n_splits=5, shuffle=True, random_state=self.random_state)
        predictions = []
        for estimator in self.base_estimators:
            prediction = cross_val_predict(estimator, X, y, cv=cv, method='predict_proba')
            print('prediction of', estimator.__class__.__name__)
            print(prediction)
            # only the first probability column (P of class 0 in the binary
            # case) is used as the meta-feature for this base estimator
            # predictions.extend(prediction.T)
            predictions.append(prediction.T[0])
        print('all predictions')
        print(np.array(predictions), y)
        # rows = samples, columns = one meta-feature per base estimator
        self.estimator.fit(np.array(predictions).T, y)
        for estimator in self.base_estimators:
            estimator.fit(X, y)

    def predict(self, X, margin):
        # NOTE(review): compares the meta-estimator's full probability matrix
        # against `margin`, yielding a boolean array with one column per
        # class — confirm callers expect that shape rather than labels.
        return np.array(self.predict_proba(X)) > margin

    def predict_proba(self, X):
        """Meta-estimator probabilities from the base estimators' first
        probability columns (must mirror the features built in fit)."""
        predictions = []
        for estimator in self.base_estimators:
            # predictions.extend(estimator.predict_proba(X).T)
            predictions.append(estimator.predict_proba(X).T[0])
        return self.estimator.predict_proba(np.array(predictions).T)
#
#
# class Ensemble:
# def __init__(self, base_estimators=None, random_state=0):
# self.base_estimators = base_estimators
# self.estimator = LogisticRegression()
# self.random_state = random_state
#
# def fit(self, X, y):
# folds = KFold(n_splits=len(self.base_estimators), shuffle=True, random_state=self.random_state)
# predictions = []
# idx = 0
# for train, test in folds.split(X):
# estimator = self.base_estimators[idx]
# idx += 1
# estimator.fit(X[train], y[train])
# prediction = estimator.predict_proba(X)
# print('prediction of', estimator.__class__.__name__)
# print(prediction)
# predictions.extend(prediction.T)
# print('all predictions')
# print(np.array(predictions), y)
# self.estimator.fit(np.array(predictions).T, y)
# for estimator in self.base_estimators:
# estimator.fit(X, y)
#
# def predict(self, X, margin):
# return np.array(self.predict_proba(X)) > margin
#
# def predict_proba(self, X):
# predictions = []
# for estimator in self.base_estimators:
# predictions.extend(estimator.predict_proba(X).T)
# return self.estimator.predict_proba(np.array(predictions).T)
| {"/data_process.py": ["/config.py"], "/base.py": ["/config.py"], "/compare.py": ["/base.py", "/config.py"], "/trial.py": ["/base.py", "/config.py", "/data_loader.py"]} |
68,018 | AutumnSun1996/ML | refs/heads/master | /compare.py | import datetime
import time
import pandas as pd
import numpy as np
from sklearn.metrics.scorer import mean_squared_error as error_func
from sklearn.datasets import load_iris
from catboost import CatBoostRegressor, CatBoostClassifier
from lightgbm import LGBMRegressor, LGBMClassifier
from xgboost import XGBRegressor, XGBClassifier
from base import tuning
from config import get_setting
regressor_classes = [CatBoostRegressor, LGBMRegressor, XGBRegressor]
classifier_classes = [
LGBMClassifier,
XGBClassifier,
CatBoostClassifier,
]
compare_data = {'Amazon': 'ACTION'}
def log(level, *messages, **kwargs):
    """Print a log line to stdout and mirror it to `log_file`.

    NOTE(review): `log_file` is a module-level global opened inside the
    __main__ guard below — calling log() before that (e.g. on import)
    raises NameError; confirm this module is only run as a script.
    """
    timestamp = datetime.datetime.now()
    print('LOG: %02X' % level, timestamp, *messages, **kwargs)
    # mirror to the file, forcing flush so lines survive a crash
    kwargs.update({'file': log_file, 'flush': True})
    print('%02X' % level, timestamp, *messages, **kwargs)
def check_result(y_true, y_pred):
    """Print summary statistics of true vs. predicted values, then return
    the module's error score for the pair.

    BUGFIX: the original `pd.DataFrame([y_true, y_pred], columns=[...])`
    laid the two arrays out as ROWS, so the two column labels only matched
    when len(y) == 2 and it raised for any other length. Build the frame
    column-wise from a dict instead.
    """
    print(pd.DataFrame({'True': y_true, 'Pred': y_pred}).describe())
    return error_func(y_true, y_pred)
def process_data(all_data, target, frac=0.8):
    """Deterministically split `all_data` into train/test dicts of ndarrays.

    :param all_data: DataFrame with features plus the `target` column
    :param target: label column name
    :param frac: fraction of rows sampled (seeded) into the train split
    :return: {'train': {'X', 'y'}, 'test': {'X', 'y'}} as numpy arrays
    """
    sampled = all_data.sample(frac=frac, random_state=0)
    splits = {'train': sampled, 'test': all_data.drop(sampled.index)}
    result = {}
    for part, frame in splits.items():
        result[part] = {'X': np.array(frame.drop(target, axis=1)),
                        'y': np.array(frame[target])}
    return result
# return train_x[:], train_y[:], test_x[:], test_y[:]
def _make_estimator(estimator_class, data):
    """Instantiate *estimator_class*; CatBoostClassifier needs multiclass args."""
    if estimator_class.__name__ == 'CatBoostClassifier':
        return estimator_class(loss_function='MultiClass',
                               classes_count=len(set(data['train']['y'])))
    return estimator_class()


def check(estimator_class, data):
    """Benchmark *estimator_class* on *data* with default and tuned settings.

    Fits once with defaults and once after hyper-parameter ``tuning``,
    logging the test-set error (``error_func``) and wall-clock time of each.
    """
    # The construction logic was duplicated verbatim for both runs in the
    # original; it now lives in _make_estimator.
    estimator = _make_estimator(estimator_class, data)
    log('~Fit With Default Setting~', estimator_class.__name__)
    tick1 = time.time()
    estimator.fit(**data['train'])
    score = error_func(data['test']['y'], estimator.predict(data['test']['X']))
    tick2 = time.time()
    log('Score:', score)
    log('Time Usage:', tick2 - tick1)

    # Fresh instance so tuning starts from scratch rather than a fitted model.
    estimator = _make_estimator(estimator_class, data)
    log('~Tuning~', estimator_class.__name__)
    tick1 = time.time()
    tuning(estimator, **data['train'], **get_setting(estimator_class.__name__))
    score = error_func(data['test']['y'], estimator.predict(data['test']['X']))
    tick2 = time.time()
    log('Score:', score)
    log('Time Usage:', tick2 - tick1)
if __name__ == '__main__':
    # Global sink used by log(); open for the whole comparison run.
    log_file = open('./compare.log', 'w')
    # Iris serves as a small multiclass benchmark dataset.
    iris_x, iris_y = load_iris(return_X_y=True)
    iris = pd.DataFrame(iris_x)
    iris['y'] = iris_y
    data = process_data(iris, 'y')
    # Benchmark each configured classifier (default vs. tuned settings).
    for each in classifier_classes:
        check(each, data)
    log_file.close()
| {"/data_process.py": ["/config.py"], "/base.py": ["/config.py"], "/compare.py": ["/base.py", "/config.py"], "/trial.py": ["/base.py", "/config.py", "/data_loader.py"]} |
68,019 | AutumnSun1996/ML | refs/heads/master | /trial.py | import time
from sklearn.metrics.scorer import log_loss as error_func
from sklearn.linear_model.logistic import LogisticRegression as MetaEstimator
from sklearn.model_selection import cross_val_predict, KFold
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from lightgbm import LGBMRegressor, LGBMClassifier
from catboost import CatBoostClassifier, CatBoostRegressor
from xgboost import XGBRegressor, XGBClassifier
from base import tuning
from config import get_setting
from data_loader import *
# from config import log
class Ensemble:
    """Stacking ensemble: a meta-estimator over base class-0 probabilities.

    Each base estimator contributes its predicted probability for the first
    class as one meta-feature; ``MetaEstimator`` (LogisticRegression) is
    trained on those out-of-fold features.
    """

    def __init__(self, base_estimators=None, random_state=0, cv=3):
        self.base_estimators = base_estimators
        self.estimator = MetaEstimator()
        self.random_state = random_state
        self.fit_cv = cv

    def fit(self, X, y):
        """Fit the meta-estimator on out-of-fold base predictions, then refit
        every base estimator on the full training data."""
        splitter = KFold(n_splits=self.fit_cv, shuffle=True,
                         random_state=self.random_state)
        meta_features = []
        for base in self.base_estimators:
            cls_name = base.__class__.__name__
            log(0x25, 'cross_val_predict start', cls_name)
            oof = cross_val_predict(base, X, y, cv=splitter,
                                    method='predict_proba')
            log(0x25, 'cross_val_predict end', cls_name)
            log(0x25, 'CV Score', cls_name, check_result(y, oof))
            # Keep only the class-0 probability column as a meta-feature.
            meta_features.append(oof.T[0])
        self.estimator.fit(np.array(meta_features).T, y)
        for base in self.base_estimators:
            cls_name = base.__class__.__name__
            log(0x25, 'fit start', cls_name)
            base.fit(X, y)
            log(0x25, 'fit end:', cls_name)

    def predict(self, X, margin):
        """Boolean prediction: True where the class-0 probability > *margin*."""
        return np.array(self.predict_proba(X)[:, 0]) > margin

    def predict_proba(self, X):
        """Meta-estimator probabilities from the bases' class-0 columns."""
        features = [base.predict_proba(X).T[0]
                    for base in self.base_estimators]
        return self.estimator.predict_proba(np.array(features).T)
def check_result(y_true, y_pred):
    """Return the log-loss of probability predictions against *y_true*.

    ``error_func`` is sklearn's ``log_loss`` (module-level import).
    """
    # Stale commented-out debug prints removed.
    return error_func(y_true, y_pred)
def check(estimator, data, tune=True, fit=True):
    """Evaluate *estimator* on the train/test dicts in *data*.

    Logs fit/predict wall time and the log-loss score; when *tune* is True
    the estimator is additionally tuned via ``tuning`` and re-scored.
    Set *fit* False for estimators already fitted elsewhere.
    """
    log(0x25, '~Default Setting~', estimator.__class__.__name__)
    if fit:
        tick = time.time()
        estimator.fit(**data['train'])
        log(0x25, 'Fit in:', time.time() - tick)
    # Extra diagnostics for the stacking Ensemble: its base estimators and
    # the meta-estimator's learned weights.  ('Ceof' typo is a runtime log
    # string and is left untouched here.)
    if estimator.__class__.__name__ == 'Ensemble':
        log(0x25, 'Base Estimators:', ', '.join(['%s' % e.__class__.__name__ for e in estimator.base_estimators]))
        log(0x25, 'Ceof:', estimator.estimator.coef_, 'intercept:', estimator.estimator.intercept_)
    tick = time.time()
    prediction = estimator.predict_proba(data['test']['X'])
    log(0x25, 'Predict in:', time.time() - tick)
    score = check_result(data['test']['y'], prediction)
    log(0x25, 'Score:', score)
    if not tune:
        return
    log(0x25, '~Tuned~', estimator.__class__.__name__)
    tick = time.time()
    # Presumably tuning() refits the estimator in place — note the
    # commented-out explicit refit below; confirm against base.tuning.
    tuning(estimator, **data['train'], **get_setting(estimator.__class__.__name__))
    # estimator.fit(**data['train'])
    score = check_result(data['test']['y'], estimator.predict_proba(data['test']['X']))
    log(0x25, 'Params:', estimator.get_params())
    log(0x25, 'Time:', time.time() - tick)
    log(0x25, 'Score:', score)
if __name__ == '__main__':
    # Earlier experiments kept for reference:
    # check(LGBMClassifier, load_Amazon())
    # data = load_adult()
    # data = load_Amazon()
    data = load_orange()
    random_state = 0
    cv = 5
    log(0x24, 'random_state:', random_state, 'cv:', cv)
    # Stacking ensemble over a mix of tree-based and linear classifiers;
    # two candidates are currently disabled.
    ensemble = Ensemble(
        base_estimators=[
            RandomForestClassifier(random_state=random_state),
            # GradientBoostingClassifier(random_state=random_state),
            LGBMClassifier(seed=random_state),
            # XGBClassifier(seed=random_state),
            CatBoostClassifier(random_seed=random_state),
            LogisticRegression(random_state=random_state),
        ],
        random_state=random_state,
        cv=cv,
    )
    # Fit and score the ensemble, then score each (already fitted) base
    # estimator on its own for comparison.
    check(ensemble, data, tune=False)
    for estimator in ensemble.base_estimators:
        check(estimator, data, tune=False, fit=False)
| {"/data_process.py": ["/config.py"], "/base.py": ["/config.py"], "/compare.py": ["/base.py", "/config.py"], "/trial.py": ["/base.py", "/config.py", "/data_loader.py"]} |
68,020 | AutumnSun1996/ML | refs/heads/master | /FTP.py | from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import FTPServer
# Instantiate the virtual-user authorizer; this is the prerequisite for FTP
# authentication.
authorizer = DummyAuthorizer()
path = r'/home/autumnsun/Downloads'
# Register a user with its permissions and home directory
# (arguments: username, password, home directory, permission string).
authorizer.add_user('autumnsun', 'AutumnSun', path, perm='elradfmw')
# Register an anonymous user; only the home directory is needed.
authorizer.add_anonymous(path)
# Initialize the FTP handler and attach the authorizer.
handler = FTPHandler
handler.authorizer = authorizer
# Listen on this IP and port; non-root users on Linux cannot bind port 21,
# so port 2121 is used instead.
server = FTPServer(('192.168.1.122', 2121), handler)
# Start serving (blocks forever).
server.serve_forever()
68,023 | firegreen/adventofcode2020 | refs/heads/main | /password_philosophy_input.py | input = [
"1-5 k: kkkkhkkkkkkkkkk",
"5-7 k: blkqhtxfgktdkxzkksk",
"15-16 x: xxxxxxxxxxxxxxlf",
"3-5 j: fjpvj",
"17-20 x: zsxjrxkgxxxxxxxmxgxf",
"5-6 m: swjzmmmlx",
"2-4 v: vqdn",
"8-12 t: thllsbqtgdsf",
"10-17 h: vpbrjcbhnwqhhphxjk",
"8-9 p: zpwpppkqbpkpppp",
"5-6 t: wtxxts",
"3-8 v: vfsvhgvvhh",
"1-3 v: kvvvm",
"8-16 w: bwkqpdgwrbwjxrtqlwbw",
"3-8 s: sssssssbs",
"6-9 w: wwwwwcwwww",
"5-10 r: rvwrrlxbrjhp",
"1-4 r: rbnlkkrjphnnxpw",
"7-9 r: zxhrpmsrrxrlr",
"2-3 w: dwcvwb",
"2-4 v: vlvvvv",
"3-12 h: hbvdhhhhhqhhlzhs",
"8-10 t: tbgfvwbtvbnghfbhxz",
"4-8 m: dmvgkltn",
"4-6 x: xfxxxdcxxqr",
"1-2 p: pkcpmh",
"2-7 f: fkfffflf",
"9-11 m: mmmmbmkmmmmm",
"9-10 b: bbbbbbbbbz",
"6-7 z: zzzzzcp",
"4-7 c: cqccltd",
"3-4 w: wbvwcwllww",
"4-10 k: kkkmkkdkkkxkfh",
"5-6 x: xqftxz",
"9-13 r: rrbrrrrrxrrqrjrr",
"5-10 t: hjcxthjtvdd",
"8-17 c: bckccxqcmccxrckcl",
"16-18 b: jwxpkkmnbqftknzjkbr",
"5-6 r: rrrmrr",
"1-2 k: lkbhbkstth",
"17-18 j: jjjjjjjjjjzjjjjjxsj",
"4-12 t: tttkttttttttttttj",
"7-9 k: kkkkkkwkk",
"3-5 q: pjlql",
"9-14 t: tptjdnnzkvjqbthm",
"2-8 q: qwqqqqqqq",
"5-8 p: ppwwrphp",
"6-7 h: hhhhhtr",
"2-3 w: wvtfwwh",
"6-13 d: ddddjdmddcdkjd",
"2-4 k: xkhf",
"4-6 s: fdksdsq",
"2-3 x: xxjsx",
"1-11 b: ksbjfhgqbbbbvpcbg",
"5-12 f: fgfffffffffwfjsfzfs",
"5-7 p: ppppppq",
"10-14 z: zzjzfzzzzfzzzj",
"5-9 p: qxxmkfpspnpk",
"6-7 s: shqgnsq",
"10-15 v: vvvvvvvvvnvvvvvv",
"1-3 g: gxchngg",
"5-8 v: vvvvvvvcvv",
"9-14 s: sssflshssssssssssfs",
"4-7 v: kvlvdkvfn",
"9-10 x: xxxxxxxxxdxx",
"3-4 n: nnnnn",
"3-7 z: zzzzzznz",
"2-5 v: hnkgvxqqfq",
"3-9 m: vmbmbqrmks",
"5-8 g: ghggggjfggfd",
"13-15 m: tvhsdwphwtpkmlm",
"14-15 h: hhhghhhhshhhhch",
"15-18 s: bssssssvssssjszsss",
"6-7 h: hhfrhhmhhh",
"5-10 x: xxgxxxdxmmxrx",
"2-5 c: cztcgcwct",
"2-4 l: lllw",
"12-13 x: xxxxxxxxxxxgx",
"4-5 k: kckgkk",
"1-3 p: gqppndp",
"4-6 c: cdcnfr",
"6-7 v: vvmvvvfv",
"2-4 c: cdzc",
"7-8 x: xxxxxxxxxxwxx",
"2-5 z: zzzznns",
"6-7 k: kkkkkkk",
"13-15 t: ttttttftntttttxttttt",
"4-5 k: mckkmkxthk",
"3-6 h: hhchlhh",
"10-14 x: xxxxxxxxxsxxxxxx",
"1-2 n: nknnnz",
"1-14 r: prrrrrrrrrrrrrr",
"1-3 p: gpppp",
"1-15 x: xxxxdxxwxxxxxcxr",
"9-10 f: fffffffffk",
"4-5 m: mmmwmm",
"10-14 q: dtbtqwkspvxbtq",
"4-7 w: wzsxmpqgxjwsw",
"9-16 z: qftzzzfztnczwzzzjzzp",
"6-11 r: grrqvcjkhrrdrsrrrqr",
"8-11 g: gjbmwgggbgnggltgz",
"5-7 c: cccctcdcc",
"11-13 j: jjtzdlfrvjcjsjj",
"3-11 p: wmmrmblfbcppjvvfp",
"2-3 m: zgzrsf",
"9-13 t: tttttttttsttttt",
"2-4 m: skmm",
"3-4 l: nllx",
"5-6 h: hhhhhx",
"7-9 l: mlflqllgl",
"6-7 r: rrqcrnrlrrcr",
"1-4 x: xbxbbcxx",
"11-19 f: ddfzffnfffqffffffff",
"2-7 w: wtwwwthw",
"10-14 z: zzlzhzzzzkzzzzkzzzr",
"10-12 p: pplvpppwpnqpprspspp",
"12-14 g: gxghmjgggggjgggggg",
"4-5 w: whlmwx",
"1-10 g: cggzggbsgzr",
"2-5 p: ppppzjp",
"4-6 b: bbbfbbbbxbbbbbbbbbb",
"12-14 r: rrrrrrrrrrrhrr",
"9-10 f: fvdkjxfgpf",
"4-7 v: vvvnpqv",
"4-14 r: rrrwcrxrqmrrrrrrrrr",
"10-15 z: zszjzjzzmzzzzzd",
"8-11 v: vvvvvvvvvvv",
"10-11 v: vvvvlvvvlrvv",
"4-7 j: fbbbhjjj",
"1-5 d: ldddd",
"2-5 p: flkszkmgp",
"4-11 h: gclqbxbphsj",
"2-4 h: jthhhh",
"8-9 f: fqfffgfqfffffff",
"2-9 v: znfvsxrdw",
"12-14 c: cccccccrccmhccc",
"2-5 k: tkwkk",
"3-4 f: ffqffff",
"3-5 j: jkjjm",
"7-10 t: kvggzgqjttrg",
"8-17 h: whhhhhhhhgvhkhhhs",
"2-16 s: spsssgssssssszsssvhp",
"6-7 s: jssssbk",
"7-8 f: fhfffqfn",
"6-12 q: pmfgxqdcwsnqvfpbxqt",
"2-3 x: khqxfx",
"4-11 z: rzzzzzvzzzz",
"2-4 x: xrsx",
"3-13 g: ggggqmggggggglg",
"2-4 c: vccs",
"3-4 s: szvsfs",
"11-17 x: csxxxzxxrkzqxxzxxxbx",
"13-16 h: zhjhhhhbhhhfshhhqhh",
"16-20 s: zjljhcpwdzgbkdsssgrs",
"6-7 t: ttqtttbxtt",
"3-4 w: wlswk",
"8-9 l: lllllglrllll",
"8-9 d: ddddddqdnd",
"8-10 c: cccccccccbcc",
"2-6 n: fnxlng",
"6-10 g: gggggggggt",
"4-13 p: pmptpjpppkpppvpp",
"12-13 b: hbwpjmnxcwvfl",
"7-10 c: tccccclcccc",
"16-17 n: mnnjnmrnjnnnnnnnn",
"3-4 r: wxrr",
"9-16 x: xxxxxxxxxxxxxxxnx",
"1-5 v: vxvgm",
"7-11 l: lllnlhllnlljcll",
"4-9 s: sdhtjsdfsstl",
"9-11 n: nnbszpzwkmf",
"15-16 k: kpvfgsrkptkkbdkj",
"3-5 j: nfrjffxjh",
"6-9 j: dxrrhnwwjm",
"9-10 x: xxxxxxxxpxx",
"3-4 n: nndnnnnlnnsnnnnn",
"2-4 n: mgnc",
"16-18 g: gggggggggggggggggsg",
"8-19 p: lbmcxkxfhwltkbjzppp",
"6-8 l: lllllllbnllflz",
"1-2 g: zggggg",
"10-12 n: vxddjhnnvnrr",
"1-3 f: sftff",
"12-13 q: qqqqxqqqqqqqcq",
"1-3 r: trqrrrrrr",
"5-15 g: ggggfgggggggggdggg",
"5-6 l: zpzcklk",
"3-8 j: jjcjjjjjj",
"11-15 k: kkmkkkkkkkljkkk",
"3-4 x: hxdb",
"8-13 g: vggggfgdpggghmg",
"6-9 q: qqqqqcqqqqqqqqq",
"5-6 z: zzszbz",
"8-15 k: knfkxkzjvbqqdkvc",
"13-16 n: xbngntvnqdnfnnfd",
"1-9 s: sshstsssv",
"5-7 s: szsslsss",
"5-7 n: nnxhdfnnr",
"2-5 m: mfbdm",
"10-13 l: qnzhxkkwflllrllz",
"4-8 h: mnhtnbhxw",
"4-5 b: wbwbp",
"4-13 s: jttsbbszhssvt",
"8-12 r: rrrrrrrhrrrrr",
"3-6 w: sxvnksbvdwlg",
"11-14 x: xxrxxxnxbxxxkx",
"16-17 s: szssssssssssssssc",
"5-8 l: hlllclllllllllllll",
"3-4 l: lllhl",
"3-12 s: ntsmzknclldtlsq",
"5-10 m: mmmmmmmmmlmmmm",
"7-10 x: jxkhxkxbbvlmn",
"4-6 t: tttttwt",
"18-19 l: nlbllqqvqqlmpllxldc",
"1-2 m: mpmmmgqm",
"3-7 t: ttfttqgtttttt",
"3-6 r: brrrrfrt",
"9-14 d: sddddddxdnxddzdd",
"5-6 c: szcxsc",
"3-9 m: cprvpmmmm",
"12-16 p: ppppppppppptpppp",
"3-7 q: qqsqqqbq",
"2-11 g: bgndtnltsgg",
"5-6 c: kctccckcxcc",
"8-9 q: qfzbfnbjzljzcvhpswgt",
"17-18 j: jjjjjjnjjpdjxwjqwjfb",
"2-5 g: dhkldg",
"4-6 s: rwssbs",
"11-12 g: ggmggcgpgmgn",
"1-2 f: flcnv",
"10-11 c: ccccccccccrc",
"14-19 h: hhhhhhhnhhhhhbhhhhhh",
"9-10 v: bnvvzctvvw",
"3-7 v: gdhvpqv",
"6-8 c: ccnccfcc",
"6-9 j: zjdjlfnjjjjnfj",
"8-9 q: qqqqqqqfq",
"12-14 t: ptgsttgttttttkttst",
"9-12 p: ppzppppkjpppqppd",
"3-4 g: gggfggng",
"4-9 z: zzzqqdzrrzzz",
"5-7 f: bjbftff",
"12-13 w: wvwwwwwwwwwmwzfwww",
"1-4 h: htnd",
"5-10 m: nlbtmgfmpqzfv",
"11-14 j: jcjjgjjjfjjjjzjfj",
"9-10 n: nnnnnnnnnznq",
"3-5 t: ttttmt",
"7-9 n: nnntnnbnnnn",
"7-14 l: btlsjhlgglhrlkllgrb",
"4-5 g: hctgg",
"4-6 b: ksbbjbljnfsd",
"2-3 h: hhsx",
"4-10 h: pgmhbbhmpsvxnhdtsh",
"5-11 q: pqrsdqqqqqqqq",
"1-4 s: sssq",
"6-7 r: shpvlsr",
"1-2 f: fsff",
"8-14 z: zgzhbwkzdjtglgxzh",
"10-14 j: djjjjgrgjjjjjkj",
"2-12 g: ggxtdhcjpfdg",
"1-2 f: frfffffmffft",
"18-20 d: ddpkddbzdkdkxzddhrdx",
"4-7 b: bwdbrbkq",
"8-10 d: dhddddddmt",
"7-9 z: znzzzzzzt",
"4-11 r: jtcrcsfjrkrfnttm",
"4-5 k: hkkkkk",
"3-19 m: vtxnccshqhlvfkwrxtnw",
"8-9 h: hcvhghzfhrhh",
"5-6 r: bqdcjrkrrwqvrr",
"6-12 r: rprmrnrfkbrrwm",
"19-20 f: fffffffffffffffffgff",
"6-8 d: dmdddhddd",
"4-11 w: mwwwwdwkwwfwww",
"9-10 r: rrrqhvfrrhrftrprr",
"9-11 p: ppppdpsghpp",
"2-5 m: mmnmmtttwvk",
"6-10 q: qlfqkqdbqb",
"8-9 r: rrrrfrrrprrrrrrfr",
"4-6 l: nwnklltxl",
"3-7 b: fzvpjtbjb",
"3-7 r: rrsrxzrbrrrqx",
"2-4 n: cnbg",
"6-8 t: ttqtttfjhtxtgdrtn",
"4-5 g: gghgggg",
"7-10 s: rgdqsqstvwl",
"4-11 w: wwwsjwwwbwjwwww",
"3-6 q: qkpqhfqq",
"3-5 l: xbhmlrlllvq",
"6-7 v: gltvcvkn",
"3-4 t: ttthttt",
"11-13 n: nnnrdndnngffnnnnnn",
"1-4 t: nttvt",
"4-6 h: hhhhhl",
"4-6 l: rlgmlqmtlptdf",
"9-16 x: bqkxsfdhxlhwmzrqn",
"9-10 m: mmmsmmmmmvmcm",
"4-6 b: bbbbbd",
"11-15 g: ggggggggggggggng",
"1-2 v: bvvssfblvnxqp",
"5-6 v: pvtjvpvvv",
"1-4 c: cccnccccccccccccc",
"8-11 s: sssssssjsxxsss",
"5-8 d: ddddgddd",
"6-13 z: jxzxzzhzznzqw",
"6-12 r: drzdgkrctcnd",
"11-12 k: kkkkkckrkkkwbtbkvkk",
"16-18 t: swtvgprhjnzlgsjjst",
"5-11 k: kkkkkkkkkkpkkkk",
"4-7 j: jjjjjjjjjjj",
"6-8 d: kdddkdtt",
"1-5 c: cccsk",
"10-17 n: nnnnnnnnnrnnnnnnfn",
"5-9 z: zpzznzzzzzvzzzzz",
"5-20 t: tltttztnttttttvtttct",
"2-14 l: lrlllllllllllll",
"1-12 h: hhhhwhhhhkhshh",
"2-4 p: spbpppkppnjfx",
"11-15 d: ddddzdcdddddshdw",
"9-10 h: qhcwgthzsh",
"2-4 t: bhdtttbfwjpthntmrq",
"3-4 z: jzdz",
"7-16 k: hkvxwzkqrkbsltgcx",
"3-4 v: vcqv",
"2-7 x: xdtgjjxfh",
"15-17 c: ccnzdlcpzrccbcccf",
"3-7 g: zsgrmgrh",
"3-7 z: zqjzzlzcvzzzls",
"6-9 f: cnzdlfnqvzkq",
"2-5 m: jmfhgwmmc",
"7-9 r: rrrrrrrws",
"4-9 g: gdsgggbkdgg",
"11-13 l: ztrzlltljglzcr",
"10-13 g: ggggdjgtgggzrgg",
"12-16 k: zpfdzxwbzfkptwqkn",
"15-16 p: ppppmpppppppppwpp",
"8-14 b: bmhgtpdbbsrkbgvwd",
"10-12 k: ktkkkkkkrnkjkksk",
"2-10 d: dhdwdffhdqngs",
"4-7 g: ggglggggggggggg",
"1-10 f: xfffffffffff",
"6-11 m: mxmnvsmwmmmww",
"15-16 k: qskxqzknqkfhtlbkhx",
"4-5 b: hbbbt",
"11-14 k: kvkkdkkvkdskrkk",
"1-6 b: bjvbqs",
"2-3 r: dxzsvqtqvxrbv",
"6-8 c: cscrxrcklcscvkmbccc",
"4-8 d: dddjddddmd",
"4-13 x: dprmsnhxzxnxmv",
"5-6 j: jhkjjz",
"5-8 z: mzzjzhzbwdzl",
"9-11 x: xxxxxxxxlxx",
"4-5 d: dxdvndddpddddddddvnl",
"2-5 v: vvvvlv",
"5-11 m: cmmgmmmmmmmmmtc",
"13-14 r: rrrrrrrrrrrwjrrr",
"4-14 w: rlvwwwglzlxggwhkhk",
"17-18 d: dddddddddvddddhddldm",
"3-6 r: gmrprm",
"5-6 m: fpmjlmmzjb",
"12-14 j: jjjjjjjjjjcjjq",
"12-14 f: ffffwfffffbdfq",
"9-10 j: jjjjjjjjbjjj",
"1-3 z: dzzzkz",
"1-2 w: swtcxgdqgnlj",
"1-2 v: wvvv",
"1-3 h: rhrr",
"2-6 x: wxxmtbmcsr",
"6-7 d: ddndtfdd",
"8-13 j: cjqjjwwhjcswbthcj",
"3-4 x: xhxxppxnwmwkhdx",
"7-9 j: ggrjjbjjjnjvjjj",
"3-9 g: tzghddllprgs",
"11-13 r: jhdvbchsrzlxrwjrz",
"12-13 g: gwgggggggggglg",
"2-14 m: kmmcwwhprtskhwwvg",
"1-4 r: rzrssthbbhjdmnx",
"5-12 f: ffffgffffffff",
"19-20 j: jjjtjjjjjjjjjjjjjjjj",
"5-6 n: gnfnnnn",
"4-10 b: bbbmbbbbbbbbb",
"1-2 x: xsxs",
"17-18 r: rjrrrrrrrrrrrrrrzr",
"12-16 b: bgpvgbkbkhbqwbvbtbvg",
"1-11 q: qqqqqqqqqqxq",
"8-11 l: lllllfllllgll",
"3-4 h: hrtc",
"9-10 l: llvllnllkbwlslldllm",
"6-8 j: jjjjzsjj",
"3-6 n: mnxqjn",
"2-3 g: ghgg",
"2-16 q: qkqqqqrqqqqqqqqqqqqq",
"2-6 n: nnngrbfqshlmnnskdwpw",
"4-13 f: fffffffffffffff",
"4-5 s: nvcstplm",
"7-14 n: nzcnntnsdnbnngnnnpnk",
"16-18 v: vmvvvvvvvvvvvvvcvv",
"5-6 l: llllhq",
"5-10 d: ddddddbddqd",
"12-14 h: hhhhhhhhhhhzhhhh",
"11-12 b: bbbbqblbbwbnbbblbt",
"3-4 n: nwnqbtn",
"6-10 v: vgvvpvvdvbzq",
"10-11 m: mmmmmmmmmmmmm",
"6-8 b: bbbvjpvb",
"5-7 c: rcccccs",
"5-7 n: nqjksznntnzgnznmjj",
"7-8 r: rrrrrrrfrrrrrr",
"7-10 c: ccmccszccsmcbccggclc",
"1-8 s: sssssssxs",
"3-8 j: jxjjjxjj",
"7-10 z: zzzzzzczzzzzz",
"1-4 g: gdzgg",
"8-9 b: sqbbbbvhb",
"16-17 s: clssssssssflvssss",
"10-11 h: vhjhhhwhhpsh",
"3-5 z: jczzmzz",
"1-8 p: cztfpjcpp",
"4-7 w: twwprwwcww",
"2-3 x: xsxx",
"4-11 m: mswmmdkmmmqmwmfnqmt",
"8-11 b: lpbbbbbbbgb",
"4-14 n: nnnnnnnnnnnnnwnn",
"5-7 m: mrxsvmnwbmq",
"3-4 w: wwtjw",
"4-5 x: xxxxsxx",
"7-10 v: jkxhvtvfcvcsfvrbczkv",
"6-8 v: vvvvvtdvfjpc",
"11-12 h: hxqhhhhhmhnh",
"1-3 h: chhh",
"4-9 n: nnncnnnnnn",
"14-20 z: zrzzhwzmtzfktvrwqzxq",
"11-13 j: jkjjdgjjvpjjmkj",
"3-4 n: rcrfknnn",
"3-8 c: ccsccrccc",
"10-16 n: nnnnnnnnntnnnnnnn",
"8-13 p: tpvzplvlppphpzppjp",
"3-5 r: srrrsrhr",
"18-19 g: gggggdnfggqgwgggsgpg",
"13-18 m: mcjmmhmsvswcrmkmmmm",
"3-9 p: bjpgzprcplf",
"6-9 g: ggkgsgggj",
"1-7 v: vvvvvvmzv",
"6-16 h: hhhhrhhhthhhhhhq",
"2-3 v: tfvq",
"1-9 v: svvbwcvvvvl",
"10-11 d: dfdddddddhdddd",
"7-9 v: sscbdsvvr",
"3-6 q: wvrrvqrqczg",
"7-9 v: gmqzvpvbvd",
"2-20 v: tbmpjzvhqbklqtsllcfv",
"7-11 v: vvvvvvvvvvv",
"11-13 s: kknsbrcpsjblhrsbkh",
"2-7 n: fnnnmpstcklhwzmwx",
"6-14 d: kdmdhlddddddmv",
"17-18 z: zzzzzzzzzzzzzzzzrr",
"2-4 k: hmkfkklq",
"11-14 n: nnnnnnnnnnznnn",
"6-10 k: hnvpcckbxkpks",
"1-5 d: dmdpvjgdcglnssx",
"6-9 k: xzhqkpxvkkdtskkkwk",
"7-9 p: ppppcpppppp",
"4-5 r: qnvrrhvshwrsrxc",
"8-9 l: lllllllzk",
"9-10 s: sssssssssks",
"8-9 d: ddqzdddsd",
"4-16 k: kkkkkkksfwkkfkkhkk",
"10-11 n: rtpdqfmrnknr",
"5-6 z: zfzzztzxzzv",
"8-19 f: hvtkcwpfdgqwwqfwwvvb",
"5-7 v: vtmvtvwd",
"1-2 s: hpsp",
"10-12 d: jddddddddddc",
"8-10 s: tvsxfdgjts",
"6-7 x: xdbkbjxd",
"3-16 m: plmmxkwpcjgqgbrm",
"2-7 c: bqrdnscbkc",
"2-3 m: mnpmshpnqwz",
"4-7 g: kngcjgg",
"1-5 l: llllcl",
"4-11 k: kkkrkkkmkkkkkt",
"8-9 t: tttstvttsttt",
"11-12 c: cxzcjcccclnt",
"1-3 w: wwpss",
"2-7 p: zpsmqpx",
"4-9 x: whdsxdmncxwplxbh",
"17-19 p: qpqpppjppppfppppppp",
"1-2 b: bwbwxrbgb",
"5-6 n: nnpnbhpn",
"5-12 m: pmmmsmvmmmmmmm",
"5-9 q: vqpqqqqqbqqqfs",
"12-15 m: mmmmfmmmbmrwmmqmm",
"1-5 w: wtnflwwr",
"5-6 j: zdhhnrqpbcjkj",
"9-16 r: fmrzrrrrrrrrrsrfr",
"2-3 s: dmsq",
"1-18 w: wwwwwwwwwwwwwlwwws",
"2-9 c: cswcrbrccccqj",
"15-17 m: mmmmmmjzmmmmmmmmhm",
"1-9 n: nnnpnfkxnnlrpssnxbv",
"7-9 x: kvgqdhxhxvbm",
"5-8 l: ljllmclllftjzqk",
"9-12 j: jjjjjjjjjjjkj",
"12-13 h: hhhhhhhhhhhrh",
"11-13 f: ffffvfrfffmfff",
"3-13 n: dhnnndnnmwndrnnnhnnj",
"1-4 w: qswww",
"11-12 r: rjrcrrrcrrpkrr",
"1-5 k: kkkkjbs",
"3-4 r: vzrr",
"2-3 k: kmkkkkkkkk",
"3-4 v: vkdg",
"2-5 j: vtjpv",
"7-10 t: ttttmttjntqttz",
"2-3 x: xkzxxt",
"4-7 w: qhwzvswkhw",
"1-2 l: wqwnl",
"6-12 r: fzrrhrgrrzfxr",
"3-4 d: dddtdfbddd",
"12-16 g: gggggggggggcgggg",
"11-14 h: hhhkfhcshhchhhxhhhh",
"1-2 f: zfff",
"6-7 z: zzzzjgzz",
"12-19 k: tkgkkkkfkgkxgkqgkkn",
"12-13 c: ccccccccccczcc",
"16-17 z: zzzzzzzzzzzzzzzzn",
"4-5 b: bbbpbbzbbb",
"17-18 c: nvfkfcwxxjlrrslwcpc",
"7-10 v: kwcfvvvxvx",
"9-13 q: qqqqqqqqqqqqwq",
"1-4 g: pjgdhgnmzggd",
"7-11 d: ddddddzdbdd",
"17-20 j: npdnjjjjjjjjkzcfjslf",
"5-10 q: zkthrrkzqg",
"6-14 j: jjjjjjjjjjjjjkj",
"5-10 t: tbtttfbmztq",
"3-4 s: sssv",
"6-8 w: wwwlzwwwwwdk",
"1-11 l: vffmrflltflkwxldl",
"8-10 j: jjpjvjjjjb",
"6-7 p: hpqhvpzpphdkpp",
"3-4 m: mmtv",
"9-15 l: lllllgllllmllll",
"10-12 m: mmmmvhmmmmgtvg",
"2-6 w: wwwwwgwbt",
"7-13 m: mmmmmmmmmmmmm",
"1-5 r: gjcskhrz",
"5-12 s: bvggstbnhjfdfwz",
"7-8 c: ctxcfppcccc",
"12-14 j: jjjjjjjjjjjhjp",
"14-17 t: ttxttttptttttttttttt",
"1-8 l: lllllltf",
"7-8 r: qrrrrrrsvrhm",
"1-13 q: qqqqqrqqqqqqgqqqsq",
"4-9 n: qpnmnzpnnnnmxtr",
"6-11 z: qczxzzkzbkznzrvnvkz",
"7-13 b: bbbbbbkbbbbbsbb",
"6-9 c: ccfqncjszc",
"1-5 l: lrlsl",
"2-8 f: pfbtbvpjmzwpccsbm",
"2-4 d: wnjd",
"10-11 n: nnnnnnnnnxndnnnn",
"3-4 c: cccc",
"8-9 r: rflrrrvrh",
"3-4 b: thbs",
"3-9 q: zcqqbdrws",
"1-6 d: dlmncvgd",
"2-3 n: bmnfknkt",
"9-13 n: nzbpvpmfnwdnn",
"7-14 f: wpntqfsttpvflf",
"13-15 w: cwwwzspwwwswwlxp",
"4-7 j: jjjmjjj",
"10-14 m: bnrnlmhdfwnhfmdc",
"2-4 z: kzntlwtbzx",
"11-13 w: wwwwwwwrbwwww",
"6-7 g: pwggggv",
"5-9 h: sbdxfjffznxghlwtc",
"10-11 c: ccccccccccpccc",
"2-4 h: hnhbc",
"4-5 z: fjblqlpdwbl",
"1-5 d: dddddd",
"6-7 n: nnnfjnlvnxvnnn",
"4-5 m: mmzvmm",
"13-18 w: nxxwkwbwwpzgwwkwwfww",
"8-9 g: ggghkwbgc",
"9-10 v: vvhpjzvvvvfcnlvvgvq",
"1-15 k: kpprkqmkthtkwvd",
"16-17 b: rrhdbfwpntvjphvbb",
"16-19 k: kkkkkkqknkkkvkkfwkkk",
"5-9 d: dwddddddddzd",
"10-14 g: gggggdgggrgkpzgk",
"9-12 x: bffxxdjwsxcx",
"3-10 b: nbgbzbbbbbbbbbbdb",
"15-17 x: xxxxxzxxxxxxxxxxt",
"1-5 h: xhthhdq",
"5-16 j: spfjjmjjjjjjjjjjjjjj",
"3-6 b: hzbbbbmbkdrb",
"11-13 m: mwmmmmrmmmmmmmmmm",
"16-17 v: vvvvvvvvvvvvvbvvv",
"17-19 g: ggggggggggggggggggg",
"2-3 h: gfhvhjwh",
"4-6 f: fjgzwf",
"1-5 x: kjwhxntxhxfcxlfgb",
"6-7 s: snssplq",
"7-13 x: kxgxxxxxmxglhxx",
"3-5 j: ljjvdhp",
"13-14 c: gjtrlcblfbqvcw",
"13-15 k: kkkkkkkkkkkkkkk",
"5-8 t: vtfvttttmszk",
"9-11 q: qjtsqqqqfbq",
"3-8 f: fftfrvfcfqfff",
"6-8 f: fffffffdf",
"3-8 l: lhllwvlltzlw",
"7-14 p: hpxrnnbpqrcjtpvjpgq",
"1-12 z: lzzzzpzgzzgz",
"4-5 c: ccgqcnc",
"10-11 x: tpmddsxqjdx",
"8-10 p: pnpfppppqpcpf",
"1-6 q: qqzkqh",
"1-4 g: wggc",
"16-17 f: fffhfxqffftfffffhf",
"7-8 c: dcvckvbc",
"7-12 z: zzkzzzhzzjvz",
"2-12 z: dkghplwzdnpzq",
"3-4 r: gdkvhr",
"3-4 t: ttbx",
"8-13 s: sqzsssstsgssslsss",
"1-4 s: csssss",
"5-7 j: jfzjjvjjkjzwjrjtjw",
"2-12 s: sgcssssszfqsssn",
"8-13 p: pzlhpxpsphppz",
"2-12 m: mllzwvmzkgbmns",
"17-20 k: kkzkkkknkkkkkkkkkkkh",
"11-13 j: jxhqjjpwjljjj",
"17-18 z: zzzzzzzrzzzzzzzzzl",
"2-7 c: vhrqfckjrh",
"7-11 p: pppppnpppppj",
"9-11 x: xxnxxvxbxxxxgrxmxxw",
"12-13 h: psltjhhhvmthh",
"1-2 q: hsxqqlqxwqm",
"6-14 b: bzdnbblbbsbbnbb",
"3-17 z: wlwfbbxnztpvfgdzzl",
"5-6 z: zzszzrmkplrzz",
"12-14 z: zzzzzzzzkzzzvvmzzz",
"16-17 x: xpxxbxxxxxxxxhckxn",
"6-11 w: wkwhmgwwwwwwwww",
"9-14 f: ffffffffzffffff",
"3-4 q: qqjqsq",
"13-14 z: zzzclzzzzrzzzzzzzz",
"1-3 x: xxxxxxxx",
"9-11 w: wvwrwwwdfpwwxwdw",
"2-10 s: zssqsxqqtm",
"3-10 b: bbbbbbbbbxb",
"4-7 l: llllllb",
"10-11 j: jjjjjjgcjqjjj",
"12-13 t: tttsptxtttttvbktmt",
"5-6 z: zzzzpzzzz",
"10-11 q: qqqqqqqqqhq",
"6-12 w: drkfkwwwnvccmxdfwx",
"3-4 h: rsht",
"10-12 m: mmjmlmhmmmmnm",
"4-6 s: sssssss",
"2-3 z: mwccbpff",
"2-16 t: tzttttttttttttttt",
"1-2 j: sjjjjjjjjj",
"10-11 b: wbbbbbbbbbdbwq",
"6-9 r: rngrdrlrvmrbrr",
"8-9 t: hplttzttt",
"18-19 v: vvvvvvvvvvvvvvvvvck",
"11-16 p: pxnnsvpmfpxvbpnpppc",
"9-11 b: bbhsbbnbwbbbbwvbb",
"6-11 r: rmngrbbmrwrprjwrr",
"5-7 p: kpjlppjp",
"2-9 m: mtmmmmmmhm",
"1-3 l: llknl",
"5-6 s: sssssvs",
"1-3 v: vvbvvvvvvvvvmvvvv",
"3-4 f: ffffff",
"10-12 w: gjnwbwwwhdww",
"10-13 w: wwwwwwwwwkwwww",
"3-4 r: rrrrrw",
"4-5 n: nlnnm",
"1-15 b: bbbbbqbbbbbtlbbbb",
"3-4 l: glgl",
"4-5 k: bkkkkkk",
"2-8 j: dwtxvrdj",
"6-10 x: mwrznjhznxxx",
"14-15 d: dldddddddddddddd",
"4-14 s: rsssszsnzscdhrgp",
"1-8 m: kmmmmmmxmmmmmt",
"4-9 m: svkmmdmxm",
"2-7 f: fpffffffff",
"8-10 h: scpsvhkhjcp",
"7-11 x: xmzxxxxxxxtxxxx",
"13-14 p: dswdpggstpqjpc",
"8-14 j: jdjjjjfjsbjjxwjjbjj",
"3-6 m: mmlmmqkwmmmmmm",
"4-5 k: kkkpkk",
"2-7 t: tbttzxtm",
"3-5 n: jtnrlnbjnkztvknwnj",
"2-4 r: rrrwfghwrxpg",
"7-9 f: cffffffff",
"4-6 l: cmdsllz",
"2-4 z: rzzr",
"2-3 r: rzrrk",
"2-4 d: rddb",
"5-6 d: pvdddwdgkgdx",
"2-3 j: sjvfpftdkjkfg",
"5-6 s: vzdmgsk",
"13-14 d: wdddddddddddxbd",
"6-9 l: llllldcllll",
"3-4 r: rrjrn",
"7-11 f: ffffffffkfj",
"5-8 p: pxnmcppspxp",
"8-10 l: vplxllngbdl",
"7-18 w: wwwwwwwnwmwwwwwwwmww",
"2-4 x: mxxx",
"4-13 g: nnbxkmcrgpskgqg",
"3-7 l: llllllz",
"5-10 h: mgksbvscjbcfrmhgplw",
"2-13 g: mbgphwnkdndsg",
"15-18 x: xxzxxxxxxxxxxxxxxxx",
"1-3 x: xxrxxx",
"10-11 l: llslllllllp",
"1-5 n: nnrnpnnn",
"4-5 h: rhhlv",
"4-6 p: fvkrppswpdpp",
"5-12 d: dlrkxwdxgdjdwj",
"11-13 j: jjjjjjjjjjjjj",
"9-10 w: wwkwwwwwwvw",
"3-4 p: xzpjwvzmkppsppjz",
"3-11 r: vtrhfmdhpsqrrbwrlmc",
"8-15 d: hdcddddqdwfdddtdddd",
"2-4 d: mdkt",
"11-18 k: kjbtldlkfndkcbjsrk",
"3-14 h: hbhjlhlhhhvhhqh",
"4-6 x: zlkxxfrxmsl",
"7-8 g: gmmgtqgknbqzg",
"10-12 v: vvvvvvvfvxvvvv",
"7-11 n: nnnnnnnnnnc",
"2-5 t: btqcb",
"3-4 j: kgjsjvj",
"4-6 h: skhnbt",
"1-6 x: snxxxxxx",
"3-15 c: hccccjkbnctnxcc",
"12-17 s: ltsssshssqstssxsk",
"12-19 r: rkrbrrrrrxqcrrrggrsr",
"6-8 p: ppppprpjp",
"4-5 v: vvvvvxsrv",
"3-4 k: gskskzwmwfltkkgzxllz",
"3-10 h: qmvhhdqthhhshthhd",
"4-6 q: qfjklq",
"11-13 f: fmfffffffffhh",
"8-14 s: vlqlskgsjvgmfhgvzfft",
"5-8 d: dbrdwdkdddddd",
"4-5 b: bbwmr",
"18-19 d: cddsddvrddndwdpddgdd",
"4-5 l: llsls",
"4-6 h: mhszhh",
"15-16 g: gggggggggggggzggg",
"2-4 x: bxcxwflqrnrrrb",
"3-17 s: xhgvmzncgctsqdpks",
"4-18 p: rprpfcxprrmncmmwqj",
"1-2 v: vzvwvvvvbv",
"8-9 m: mmfmmhvbq",
"10-11 z: vzqjzzlzkzv",
"1-4 r: rrrsrn",
"3-12 k: kctvqmgbbxskbww",
"1-6 s: fsssgmsqss",
"5-6 k: kkkkkv",
"1-7 m: mvmmqmq",
"3-8 b: skbfbwhbbgwfcgnmjsk",
"7-9 s: sssssslsn",
"4-6 n: nnnnzbnn",
"2-4 x: kxnnqrvxtgch",
"13-17 w: xshwjqwgsnwrwwcww",
"12-13 l: lllljlwllllllll",
"2-3 k: kkkk",
"16-19 s: sssssssssssssssmssss",
"5-12 c: cfccpccpccchccc",
"9-13 j: vjjjjgjljhjjbjjfj",
"10-14 r: jrrrrrrrrkrrrrrvrr",
"6-10 b: bxbbkbbbbn",
"17-18 f: tvxfdgwgfgjdsqcppqkq",
"11-17 m: mmzmmrmhmmpmmmmmmm",
"8-9 v: vvrvvvkvkvv",
"3-8 g: wgcgpgghgpgdggfx",
"5-10 r: rrrrgrrrrk",
"7-15 l: sllnpklfqllglld",
"1-2 z: wzzwg",
"9-10 v: vvqvvvvkvbv",
"2-8 s: sljssddnsscsns",
"4-5 g: snzgpngj",
"1-6 c: ccccgtclgqchvcx",
"12-14 n: nnnnhnnnqpnpnnnn",
"11-13 p: wpdmppdpppfpp",
"3-11 b: bjbbndmbbbnbbbbbbb",
"9-13 x: qxxxxxxxkxxxxxlxxm",
"4-6 p: pqpnmp",
"1-5 j: jjckjjpmjbcmc",
"4-6 q: qqsdqqkkqdqg",
"7-11 l: qslplhlvqxqm",
"14-15 g: vggggvggpggggprggg",
"6-7 t: ttthtttttttt",
"2-3 g: qglgg",
"11-14 t: kdjwqgbtnzcdltt",
"10-11 c: cccclccccck",
"15-16 r: rrrrrrrrrrrrrrrh",
"5-7 m: vjbfbcxxdwxcgrfzmzhd",
"6-7 t: tttzftmt",
"14-16 n: nnnnnnnnnnnnnpnn",
"4-12 s: rgmktzvjlwssx",
"8-12 v: vvvvvvvvvvvsvvvbvvvv",
"2-3 w: kvwqmw",
"12-13 s: dlqsssssmvfskss",
"7-8 k: lpkfkkklbkkk",
"3-5 f: nvftfntf",
"5-7 t: qtfkhtxttqctmttvc",
"11-13 b: blbbbbbbzhvbb",
"4-5 w: wwwwxs",
"11-12 n: nnnnnnnnnnpnnn",
"11-16 t: ttttttttvtkttttnt",
"12-16 j: wjjjdwjjmjjjjjjd",
"5-6 q: bqqqvq",
"1-2 s: kssx",
"1-3 j: qmcj",
"5-10 v: ckjtsqsdvvvhctvfvhkv",
"5-8 l: kllhlrlzljll",
"9-10 j: lgvgjbccjrn",
"4-5 j: jbhhvjbcjggjhwx",
"5-6 m: mmmmqms",
"3-7 v: hvpvmmt",
"5-9 z: zzzlbzzjzrvxvkhxhzdj",
"3-5 f: fkfmknbzmwbcmh",
"8-10 n: nvjnklncnnp",
"4-10 p: ppptpfpppgppp",
"14-17 z: zzzczzzrzzzzzzzzncz",
"1-5 w: wwwwlwh",
"19-20 q: wzrqqqdmqqqzqqqvqqqq",
"2-4 f: tflf",
"4-5 t: tjqtltrtf",
"17-18 t: ttttttttttttttttdtjt",
"6-12 s: mhfslsfndcsvkr",
"7-16 r: rdprghmtcrrmrrlv",
"12-13 h: hhhhhhhhhhhph",
"12-15 k: kkkkkkkkkfkkdkbnk",
"16-17 h: hhscfhvhxvklqshhhmh",
"1-7 w: whwwdwwdz",
"2-3 b: bbpbb",
"8-9 n: frnnnnncmn",
"2-4 b: hbpbpsnj",
"11-19 f: qhggmffvmfjptmksfvf",
"8-10 b: rbbldmcbbxb",
"1-3 t: tjxfsmmttw",
"3-5 v: vvvvdvvvn",
"5-8 m: jmhmmvmjkx",
"7-9 x: xxkbxxxvxx",
"10-12 f: ffrffcfxnffrnfvffff",
"6-8 t: sttghztn",
"2-5 l: cmrllltb",
"8-9 f: sskbzzxgfhvc",
"4-7 k: kpkljpqfkkhkfkk",
"19-20 r: rfrrrhmrrrnrrtvrsrrr",
"3-4 x: sxxzn",
"5-7 k: zlkgpwk",
"3-12 k: drqkqbzgkhqkqb",
"3-4 g: qgfg",
"3-4 z: bkfx",
"6-12 f: bffxfnkfffgf",
"3-12 n: xhrlptrqvzfnmptvtjcb",
"4-6 z: kzhzrp",
"1-2 n: ngnnnknjnnn",
"7-9 n: nnnnnnvnnnnnnnn",
"10-12 s: sgssssmssssss",
"2-11 l: lmlllslthwlldn",
"13-16 b: bbbbbqmbbzbbbbbhb",
"4-6 p: pppbpp",
"2-6 n: nnxnpnnn",
"9-13 b: bbbbbbbbbbbblbbb",
"11-13 h: hhkghxdhfhhdkchjr",
"6-10 w: wwwwwhwwwwwrpww",
"6-7 w: mgpwwsw",
"10-11 n: njnnnntsnfntcqnnn",
"7-9 f: ffffffrfng",
"2-6 x: xxjgxnqxxkqf",
"6-11 n: nwnfvzndsdnn",
"16-18 c: cgccccccccccccxcct",
"11-17 t: wtktgqtcpttqkqfgznt",
"4-5 w: wwwwc",
"2-5 k: gqkck",
"15-16 k: kzkkkkkkmkkkdkkpkvkg",
"3-4 s: cslss",
"1-8 w: hwgwwwwwzsbwfwwwg",
"7-14 m: hrkqfmpfzmjqqmmgrdcm",
"2-8 r: sqrrrrrr",
"11-15 l: cphdwnphlkcbvllvw",
"4-7 f: vsxgfnf",
"8-10 q: qqqqqqqtqq",
"6-8 l: llllzqlll",
"9-14 j: kjjjjjjjvmjvjz",
"4-6 x: pxxxbx",
"10-16 h: hkhjhghhsghhhhhhhhhn",
"8-10 x: xchbxtgxqrrxvmzw",
"18-19 n: nnnnnnnnnnnnnnnnnnn",
"1-12 n: nnnnnnnnnnnnn",
"5-12 l: xmklnkmpqlfllwhrm",
"6-7 b: kfhbpbvbfbpbbtsxk",
"8-18 n: nnnnnnzznnnnnnnnnznn",
"10-16 h: hhhhhhhhhhhhjhhchh",
"12-13 l: lnlllllgklllx",
"5-7 x: xxxxlxxxxxx",
"3-4 g: gggn",
"3-14 g: gjgxgcggvdggggqgngqg",
"9-10 g: gggggggggt",
"3-6 r: rrntrrrrbrs",
"3-10 g: ggkbdgggkgg",
"3-4 k: twjqtkt",
"12-13 m: mzvnqmmpcbvmk",
"3-8 p: wxflqbvppqq",
"3-6 w: ljdlfwkzwmgkwxghbqv",
"11-15 p: nplwjprspcppvsvpfppp",
"5-16 v: vqvcmvjvnvvprvnqvnpc",
"10-11 g: ggggggggggng",
"13-16 b: sbbbbxqdbdblrbbbb",
"1-4 k: czlk",
"6-7 f: rffffsff",
"8-9 w: wdrnwtqmwqdpx",
"2-3 r: rxrr",
"3-4 r: hrrmrvj",
"10-15 v: cvffvvsvvpvvvnvvgjvv",
"12-19 b: bbbbbbbbbbbrbbbbbbbb",
"3-5 n: nvnhz",
"2-4 d: hddl",
"2-3 v: fvclhvp",
"3-15 h: xwhsnhndxgcqjfqtvm",
"1-3 h: rhhhh",
"15-18 f: ffffffmfffthfftfjf",
"1-4 h: hrxw",
"1-4 v: kvvv",
"6-12 r: crxrrzrnprrr",
"7-8 q: qqqfqqwqqqqq",
"3-4 l: lljllll",
"1-3 h: hhshh",
"2-3 q: qvqb",
"13-20 p: plpqtpppsgpppppppppz",
"9-15 w: wwwwwnwbbfwwwwwsw",
"11-12 k: kkkxqkksdlkg",
"8-14 k: kkkgkkkkgdjkxnktxd",
"17-18 s: sbsmssnssssstnsssq",
"4-10 s: sssbhfcssssxx",
"3-4 l: drbl",
"8-13 j: tjjtjjhljjjnjd",
"11-12 q: nrsqrqcmzqql",
"7-11 x: wxxlxxgwvxx",
"13-16 n: nnnnnnnnnknnjnnn",
"15-16 l: dwvlwglrhvqllllvvl",
"3-7 b: bbbbbbm",
"11-12 v: vvqlvvvvbvvktvvw",
"1-8 n: nwnnmpvx",
"7-8 f: zskgxljq",
"7-14 b: qxbpwbgvbgqdlbtfjdbq",
"3-9 v: vbxvrvhqvq",
"2-3 c: czfc",
"7-10 t: mtttnzvcttttjtt",
"6-14 l: lllllklllhlglll",
"3-4 b: wgzbpwpbj",
"1-6 z: mzzzbrzz",
"5-10 t: tttjtftttdttttttlt",
"9-10 z: hzzzvzzzmzz",
"14-19 n: lppspwbnhtzxcnrkgpn",
"5-7 n: bnnnnnt",
"11-12 v: hkkhvgvtvvvvqvjxv",
"2-11 w: wwjpfvqwnhwcmg",
"9-15 b: zbqndbbbbbbbbsk",
"11-13 d: dddddddddblddmddk",
"1-11 n: ngnnfnvnbkjpn",
"1-5 l: llllkllvl",
"17-19 g: gtggggcggfggzvgqgzn",
"9-10 g: ggggsslgxg",
"7-12 r: qrdrnrgdrrrrrrlr",
"1-3 h: hmkgg",
"4-9 t: jngjtmkms",
"2-5 p: qqfrpmnnppgpvpmwdfpp",
"10-11 m: mmmlsmmmmbqm",
"2-7 c: cpbdvfccxdzczsptncnn",
"6-7 d: mtdqrdd",
"10-12 v: vvvvvvvvvsvvvvvvv",
"13-14 q: gqqqqqqqqqqqqz",
"1-4 m: fmmmmm",
"4-10 b: bklbbzmdsnq",
"3-5 w: tjwwhkbbpwtt",
"4-5 l: llldv",
"3-4 t: ttttt",
"8-16 t: xlqhtttlhssjhxtt",
"12-14 m: mmmmmmtmmmmmmzm",
"1-2 c: cccc",
"1-4 x: mxkxcndbsfvfnxjwxh",
"9-13 n: nglxnhnnrktnnfznn"] | {"/2_password_philosophy.py": ["/password_philosophy_input.py"]} |
68,024 | firegreen/adventofcode2020 | refs/heads/main | /6_Custom_Customs.py | from Custom_Customs_input import input
# Day 6 part two: for every group, count the answers given by *every* person
# in the group, and sum those counts over all groups.
total = 0
for group in input.split("\n\n"):
    person_answers = group.split('\n')
    # Start from the first person's answers and intersect with each person.
    shared = set(person_answers[0])
    for person in person_answers:
        shared = shared & set(person)
        if not shared:
            # Nothing left in common: no point intersecting further.
            break
    print(shared)
    total += len(shared)
print(total)
| {"/2_password_philosophy.py": ["/password_philosophy_input.py"]} |
68,025 | firegreen/adventofcode2020 | refs/heads/main | /4_passport_processing.py | from passport_processing_input import input
import re
# Day 4 part two: count passports whose required fields are present AND valid.
required = ["ecl", "pid", "eyr", "hcl", "byr", "iyr", "hgt"]

passports = input.split('\n\n')
result = 0
for passport in passports:
    # Fields are "key:value" pairs separated by spaces or newlines.
    entries = passport.replace('\n', ' ').split(' ')
    entries = dict((entry.split(':') for entry in entries))
    if any((key not in entries for key in required)):
        continue
    try:
        byr = int(entries["byr"])
        if byr < 1920 or byr > 2002:
            continue
        iyr = int(entries["iyr"])
        if iyr < 2010 or iyr > 2020:
            continue
        eyr = int(entries["eyr"])
        if eyr < 2020 or eyr > 2030:
            continue
        height = entries["hgt"]
        height_value = int(height[:-2])
        if height.endswith("cm"):
            if height_value < 150 or height_value > 193:
                continue
        elif height.endswith("in"):
            if height_value < 59 or height_value > 76:
                continue
        else:
            continue
        # BUG FIX: the original pattern r"#[0-9a-f]" only checked the first
        # hex digit (and did not anchor the end); the rules require '#'
        # followed by exactly six hex characters.
        if not re.fullmatch(r"#[0-9a-f]{6}", entries["hcl"]):
            continue
        if entries["ecl"] not in ("amb", "blu", "brn", "gry", "grn", "hzl", "oth"):
            continue
        # pid must be exactly nine digits (leading zeros allowed); int() alone
        # would also accept signs/whitespace.
        if not (len(entries["pid"]) == 9 and entries["pid"].isdigit()):
            continue
    except ValueError as e:
        # Any non-numeric field fails validation; keep the diagnostic print.
        print(e)
    else:
        result += 1
print(len(passports), result)
| {"/2_password_philosophy.py": ["/password_philosophy_input.py"]} |
68,026 | firegreen/adventofcode2020 | refs/heads/main | /8_Handheld_Halting.py | from Handheld_Halting_input import input
# Day 8 part two, brute force: for each candidate index j, run the program
# with instruction j's jmp/nop flipped; print the accumulator of the run
# that terminates (i.e. steps past the last instruction).
instructions = input.split("\n")
j=0
while j < len(instructions):
    # 'acc' instructions are never the corrupted one; skip them as flip
    # candidates.
    while instructions[j].startswith("acc"):
        j += 1
    accumulator = 0
    i = 0
    executed = set()  # indices already executed, for loop detection
    while i < len(instructions):
        if i in executed:
            # Revisiting an instruction means an infinite loop: give up on j.
            break
        executed.add(i)
        type, value = instructions[i].split(" ", 2)
        # NOTE(review): eval() parses the signed operand ('+4', '-2');
        # int(value) would suffice — eval on puzzle input assumed trusted.
        value = eval(value)
        if type == "acc":
            accumulator += value
            i += 1
        elif (type == "jmp" and i!=j) or (type == "nop" and i==j):
            # Follow the jump; at index j a nop is treated as jmp (the flip).
            i = i + value
        else:
            # Plain nop, or the flipped jmp at index j behaving as nop.
            i += 1
    if i >= len(instructions):
        # Execution ran off the end: flipping instruction j fixed the program.
        print(accumulator)
        break
    else:
        # Loop detected; try flipping the next instruction.
        j += 1
| {"/2_password_philosophy.py": ["/password_philosophy_input.py"]} |
68,027 | firegreen/adventofcode2020 | refs/heads/main | /2_password_philosophy.py | from password_philosophy_input import input
# Day 2 part two: a password is valid when the policy character appears at
# exactly one (XOR) of the two 1-indexed positions.
result = 0
for line in input:
    rule, password = line.split(": ")
    positions, char = rule.split(" ")
    # Convert the 1-indexed puzzle positions to 0-indexed.  The original
    # bound these to 'min' and 'max', shadowing the builtins; renamed.
    first, second = [int(value) - 1 for value in positions.split('-')]
    if (password[first] == char) != (password[second] == char):
        result += 1
print(result)
| {"/2_password_philosophy.py": ["/password_philosophy_input.py"]} |
68,028 | firegreen/adventofcode2020 | refs/heads/main | /7_Handy_Haversacks.py | from Handy_Haversacks_input import input
bags = dict()
for bag in input.split('\n'):
bag_color, definition = bag.split(" bags contain ")
sub_bags = []
for sub_bag in definition.split(', '):
number, sub_bag = sub_bag.split(' ', 1)
if number == "no":
break
sub_bag = ' '.join(sub_bag.split(' ', 3)[:2])
sub_bags.append((int(number), sub_bag))
bags[bag_color] = sub_bags
visited = dict()
def contains_shiny_gold(bag_name):
    # Memoized DFS: True iff `bag_name` transitively contains a shiny gold bag.
    # Seeding visited[bag_name] = False before recursing also guards against
    # cycles in the rules.
    if bag_name not in visited:
        visited[bag_name] = False
        for number, sub_bag in bags[bag_name]:
            if sub_bag == "shiny gold":
                found = True
            else:
                found = contains_shiny_gold(sub_bag)
            if found:
                visited[bag_name] = found
                break  # one positive child is enough
    return visited[bag_name]
# memo: bag color -> total number of bags nested inside it
counts = dict()
def count_bags(bag_name):
    # Memoized recursion: each child contributes itself (number) plus
    # number copies of everything inside it.
    if bag_name not in counts:
        counts[bag_name] = 0
        for number, sub_bag in bags[bag_name]:
            counts[bag_name] += number + number * count_bags(sub_bag)
    return counts[bag_name]
for bag in bags:
    contains_shiny_gold(bag)
# part 1: how many colors can hold shiny gold; part 2: bags inside shiny gold
print(sum(visited.values()), visited)
print(count_bags("shiny gold"))
| {"/2_password_philosophy.py": ["/password_philosophy_input.py"]} |
68,029 | firegreen/adventofcode2020 | refs/heads/main | /5_Binary_Boarding.py | from Binary_Boarding_input import input
# AoC 2020 day 5: each seat code is 7 F/B chars (row, binary partition of
# 0-127) followed by 3 L/R chars (column, partition of 0-7).
max_id = 0
seats = []
for seat in input.split('\n'):
    start_y, end_y = 0, 127
    start_x, end_x = 0, 7
    for char in seat[:7]:
        middle = int((end_y+start_y)/2)
        if char == "F":
            end_y = middle  # keep the lower half
        else:
            start_y = middle  # keep the upper half
    for char in seat[7:]:
        middle = int((end_x+start_x)/2)
        if char == "L":
            end_x = middle
        else:
            start_x = middle
    # NOTE(review): uses the end bound after convergence as the row/column —
    # appears correct for both all-F and all-B codes, but verify the rounding
    seat_id = end_y*8+end_x
    seats.append(seat_id)
    max_id = max(max_id, seat_id)
seats = sorted(seats)
# part 2: your seat is the single gap in the sorted list of occupied IDs
print([a+1 for a,b in zip(seats, seats[1:]) if b-a>1])
print(sorted(seats))
print(max_id)
| {"/2_password_philosophy.py": ["/password_philosophy_input.py"]} |
68,030 | firegreen/adventofcode2020 | refs/heads/main | /3_toboggan_trajectory.py | import sys
from toboggan_trajectory_input import input
# AoC 2020 day 3 part 2: count trees hit on each slope and multiply the counts.
# NOTE(review): `map` shadows the builtin.
map = []
for i, line in enumerate(input.split("\n")):
    map.append([1 if char == '#' else 0 for char in line])
rows = len(map)
columns = len(map[0])
# debug render of the parsed grid
for i in range(rows):
    print(''.join(('#' if map[i][j] else ' ' for j in range(columns))))
x, y = 0, 0
result = 1
for inc_x, inc_y in [(1,1), (3,1), (5,1), (7,1), (1,2)]:
    sub_result=0
    x=0
    for y in range(inc_y, rows, inc_y):
        x += inc_x
        # the pattern repeats horizontally, hence the modulo on x
        if map[y][x%columns]:
            sub_result += 1
    print(sub_result)
    result = result*sub_result
print(result)
68,031 | reckbo/pytask | refs/heads/master | /test_pytask.py | from plumbum import local
from pytasks import ExternalTask, Pipeline, TaskGenerator
@TaskGenerator
def create_text_file(contents, output):
    """Task: write *contents* verbatim to the *output* path."""
    output.write(contents)
@TaskGenerator
def hello(filepath, output):
    """Task: read *filepath* and write 'hello <contents>' to *output*."""
    inputs = filepath.read()
    output.write('hello ' + inputs)
def make_pipeline(working_dir):
    """Build the two-task fixture pipeline: source.txt -> out/hello.txt."""
    with Pipeline(working_dir) as pipeline:
        # tasks register themselves on the active pipeline context
        source = create_text_file(contents='world', output='source.txt')
        hello(filepath=source, output='out/hello.txt')
    return pipeline
def test_pipeline_first_run():
    """A fresh pipeline run creates both outputs with the expected contents."""
    with local.tempdir() as tmpdir:
        pipeline = make_pipeline(tmpdir)
        source_txt = tmpdir / 'source.txt'
        hello_txt = tmpdir / 'out/hello.txt'
        assert not source_txt.exists()
        assert not hello_txt.exists()
        pipeline.run()
        assert source_txt.read() == 'world'
        assert hello_txt.read() == 'hello world'
def test_pipeline_creates_missing():
    """Re-running after deleting an output regenerates only what is missing."""
    with local.tempdir() as tmpdir:
        pipeline = make_pipeline(tmpdir)
        source_txt = tmpdir / 'source.txt'
        hello_txt = tmpdir / 'out/hello.txt'
        pipeline.run()
        source_txt.delete()
        pipeline.run()
        assert source_txt.read() == 'world'
        assert hello_txt.read() == 'hello world'
def test_pipeline_with_external_task():
    """An ExternalTask whose path is missing should make run() raise."""
    with local.tempdir() as tmpdir:
        with Pipeline(tmpdir) as pipeline:
            ExternalTask('external.txt')
        pipeline.run()
def test_pipeline_no_duplicate_outputs():
    """Registering two tasks with the same output keeps only the first."""
    with Pipeline() as pipeline:
        create_text_file(contents='world', output='source.txt')
        create_text_file(contents='world', output='source.txt')
    assert len(pipeline.tasks) == 1
| {"/pytask/__init__.py": ["/pytask/task.py"]} |
68,032 | reckbo/pytask | refs/heads/master | /pytask/task.py | import logging
import tabulate
import toolz
from plumbum import local
log = logging.getLogger(__name__)
_CONTEXT_MANAGER_DAG = None
# TODO rename and generalize
def value(elem):
    """Recursively resolve *elem* for task execution.

    Objects exposing an ``output()`` method (i.e. Tasks) are replaced by its
    result; lists, tuples and dicts are rebuilt element-wise; every other
    value passes through unchanged.
    """
    if hasattr(elem, 'output'):
        return elem.output()
    kind = type(elem)
    if kind is list:
        return [value(item) for item in elem]
    if kind is tuple:
        return tuple(value(item) for item in elem)
    if kind is dict:
        return {key: value(val) for key, val in elem.items()}
    return elem
class Task(object):
    """A deferred call of ``f(*args, **kwargs)`` that materializes a file.

    The 'output' keyword is mandatory: it names the file the task produces
    and doubles as the task's identity inside a Pipeline. Arguments may
    themselves be Tasks; they are resolved to their outputs at run time.
    """
    def __init__(self, f, *args, **kwargs):
        if 'output' not in kwargs.keys():
            raise TypeError('missing key "output"')
        self.f = f
        self.args = args
        self.kwargs = kwargs
        self.name = '%s.%s' % (f.__module__, f.__name__)
        # TODO will pipeline ever be explicit?
        # NOTE: pop() also removes 'pipeline' from self.kwargs (same dict
        # object), so it is never forwarded to f — intentional.
        pipeline = kwargs.pop('pipeline', None)
        if not pipeline and _CONTEXT_MANAGER_DAG:
            pipeline = _CONTEXT_MANAGER_DAG
        if pipeline:
            output = kwargs['output']
            # anchor relative string outputs under the pipeline's working dir
            if pipeline.working_dir and isinstance(output, str):
                self.kwargs['output'] = pipeline.working_dir / output
            pipeline.add_task(self)
    def __str__(self):
        """String representation: dependency Tasks shown by their outputs."""
        def to_output(t):
            if isinstance(t, Task):
                return t.kwargs['output']
            return t
        kwargs = {k: to_output(v) for k, v in self.kwargs.items()}
        kwargs = toolz.dissoc(kwargs, 'output')
        if self.args:
            args = [to_output(arg) for arg in self.args]
            return f'{self.name}({args}, {kwargs})'
        else:
            return f'{self.name}({kwargs})'
    def __repr__(self):
        """Detailed representation (same as __str__)."""
        return self.__str__()
    def __call__(self):
        self.run()
    def run(self):
        """Execute the task unless its output file already exists."""
        # TODO add atomicity
        output = self.output()
        if output.exists():
            print(f'{output} exists, skipping')
            return
        args = value(self.args)
        kwargs = value(self.kwargs)
        kwargs_show = [f'{k}={str(v)}' for k, v in kwargs.items()]
        # str() each positional arg: join() raises TypeError on non-strings
        signature = ", ".join([str(a) for a in args] + kwargs_show)
        print(f'Running: {self.name}({signature})')
        # assumes plumbum's mkdir creates intermediate dirs — TODO confirm
        if not output.parent.exists():
            output.parent.mkdir()
        self.f(*args, **kwargs)
    def output(self):
        """Return the path this task produces (the mandatory 'output' kwarg)."""
        return self.kwargs['output']
    def parameters(self):
        """Return plain-value kwargs: no dependency Tasks and no 'output'."""
        params = toolz.valfilter(lambda x: not hasattr(x, 'output'), self.kwargs)
        # fix: '!=' instead of "is not 'output'" — identity comparison with a
        # string literal depends on interning and is a SyntaxWarning on
        # modern Python
        params = toolz.keyfilter(lambda x: x != 'output', params)
        return params
    def dependencies(self):
        """Yield every Task found (recursively) inside args/kwargs containers."""
        queue = [self.args, self.kwargs.values()]
        while queue:
            deps = queue.pop()
            for dep in deps:
                if isinstance(dep, Task):
                    yield dep
                elif isinstance(dep, (list, tuple)):
                    queue.append(dep)
                elif isinstance(dep, dict):
                    queue.append(iter(dep.values()))
class ExternalTask(Task):
    """A pre-existing input file: never produced, only checked for existence.

    Deliberately does NOT call Task.__init__ — it sets the same attributes
    directly and has no callable or parameters.
    """
    def __init__(self, path):
        if isinstance(path, str):
            path = local.path(path)
        self.args = []
        self.kwargs = dict(output=path)
        # the path itself serves as the display name
        self.name = path
        if _CONTEXT_MANAGER_DAG:
            pipeline = _CONTEXT_MANAGER_DAG
            pipeline.add_task(self)
    def __str__(self):
        return f'ExternalTask({self.output()})'
    def __repr__(self):
        return f'ExternalTask({self.output()})'
    def run(self):
        # external inputs cannot be created — fail loudly when missing
        if not self.output().exists():
            raise Exception(f'{self}: path does not exist')
    def parameters(self):
        # no configurable parameters, by definition
        return None
class TaskGenerator(object):
    """Decorator turning a function into a Task factory: calling the
    decorated function builds a Task instead of executing immediately."""
    def __init__(self, f):
        self.f = f
    def __call__(self, *args, **kwargs):
        return Task(self.f, *args, **kwargs)
class Pipeline(object):
    """Collects Tasks (via the context-manager DAG global) and runs them
    in dependency order. Nested pipelines are supported via a stack of the
    previously active context managers."""
    def __init__(self, working_dir=None):
        self.tasks = []
        self._old_context_manager_dags = []
        if isinstance(working_dir, str):
            working_dir = local.path(working_dir)
        self.working_dir = working_dir
    def __enter__(self):
        # make this pipeline the active registration target
        global _CONTEXT_MANAGER_DAG
        self._old_context_manager_dags.append(_CONTEXT_MANAGER_DAG)
        _CONTEXT_MANAGER_DAG = self
        return self
    def __exit__(self, _type, _value, _tb):
        # restore whichever pipeline (or None) was active before
        global _CONTEXT_MANAGER_DAG
        _CONTEXT_MANAGER_DAG = self._old_context_manager_dags.pop()
    def add_task(self, task):
        # deduplicate by output path: first registration wins
        if task.kwargs['output'] in [t.output() for t in self.tasks]:
            return # TODO warn if the dag id differs, i.e. two tasks are writing to the same file
        self.tasks.append(task)
    def run(self):
        # topological order is computed once and cached on the instance
        if not hasattr(self, '_topological_tasks'):
            self._topological_tasks = topological_sort(self.tasks)
        for task in self._topological_tasks:
            task.run()
    def status(self):
        # pretty-print each task's name, parameters, output path and existence
        rows = [(t.name, t.parameters(), t.output(), t.output().exists()) for t in self.tasks]
        table = tabulate.tabulate(rows, tablefmt='fancy_grid', headers=['Name', 'Parameters', 'Filepath', 'Exists'])
        print(table)
def topological_sort(tasks):
    '''
    Sorts a list of tasks topologically. The list is sorted when
    there is never a dependency between tasks[i] and tasks[j] if i < j.

    Depth-first post-order: each task is appended only after all of its
    dependencies have been appended.
    '''
    # fix: renamed locals 'sorted' and 'next' — they shadowed the builtins
    ordered = []
    unvisited = set(tasks)
    def dfs(task):
        for dep in task.dependencies():
            if dep in unvisited:
                unvisited.remove(dep)
                dfs(dep)
        ordered.append(task)
    while unvisited:
        dfs(unvisited.pop())
    return ordered
| {"/pytask/__init__.py": ["/pytask/task.py"]} |
68,033 | reckbo/pytask | refs/heads/master | /pytask/hash.py | import task
def hash_update(M, elems):
    '''
    M = hash_update(M, elems)
    Update the hash object ``M`` with the sequence ``elems``.
    Parameters
    ----------
    M : hashlib object
        An object on which the update method will be called
    elems : sequence of 2-tuples
    Returns
    -------
    M : hashlib object
        This is the same object as the argument
    '''
    from six.moves import cPickle as pickle
    from six.moves import map
    import six
    # numpy support is optional
    try:
        import numpy as np
    except ImportError:
        np = None
    for n, e in elems:
        M.update(pickle.dumps(n))
        if isinstance(e, task.Task):
            M.update(e.hash())
        elif type(e) in (list, tuple):
            # type is folded in so [1] and (1,) hash differently
            M.update(repr(type(e)).encode('utf-8'))
            hash_update(M, enumerate(e))
        elif type(e) == set:
            M.update(six.b('set'))
            # With randomized hashing, different runs of Python might result in
            # different orders, so sort. We cannot trust that all the elements
            # in the set will be comparable, so we convert them to their hashes
            # beforehand.
            items = list(map(hash_one, e))
            items.sort()
            hash_update(M, enumerate(items))
        elif type(e) == dict:
            M.update(six.b('dict'))
            # order-insensitive: sort entries by the hash of their key
            items = [(hash_one(k), v) for k, v in e.items()]
            items.sort(key=(lambda k_v: k_v[0]))
            hash_update(M, items)
        elif np is not None and type(e) == np.ndarray:
            M.update(six.b('np.ndarray'))
            M.update(pickle.dumps(e.dtype))
            M.update(pickle.dumps(e.shape))
            try:
                buffer = e.data
                M.update(buffer)
            except:  # NOTE(review): bare except — non-contiguous arrays fall back to a copy
                M.update(e.copy().data)
        else:
            # any other value: hash its pickle serialization
            M.update(pickle.dumps(e))
    return M
def hash_one(obj):
    '''
    hvalue = hash_one(obj)
    Compute a hash from a single object
    Parameters
    ----------
    obj : object
        Hashable object
    Returns
    -------
    hvalue : str
    '''
    import hashlib
    h = hashlib.sha1()
    # wrap the object in a single labelled pair and reuse hash_update
    hash_update(h, [('hash1', obj)])
    return h.hexdigest().encode('utf-8')
| {"/pytask/__init__.py": ["/pytask/task.py"]} |
68,034 | reckbo/pytask | refs/heads/master | /pytask/__init__.py | from .task import Task, ExternalTask, TaskGenerator, Pipeline
| {"/pytask/__init__.py": ["/pytask/task.py"]} |
68,035 | adbelniak/wind-forecast | refs/heads/master | /preprocess/synop_preprocess.py | import pandas as pd
import tensorflow.keras as keras
import numpy as np
from preprocess.fetch_synop_data import FEATURES
from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator
def normalize(data):
    """Standardize *data* column-wise to zero mean and unit variance."""
    mu = data.mean(axis=0)
    sigma = data.std(axis=0)
    return (data - mu) / sigma
def split_features_into_arrays(data, train_split, past_len, future_offset, y_column_name="velocity"):
    """Split *data* into a training feature matrix and an offset label frame.

    x: values of the first *train_split* rows (labels 0..train_split-1).
    y: the *y_column_name* column shifted forward by past_len + future_offset,
    taking *train_split* rows.
    """
    offset = past_len + future_offset
    x_data = data.loc[:train_split - 1].values
    y_data = data.iloc[offset: offset + train_split][[y_column_name]]
    return x_data, y_data
def prepare_synop_dataset(file_path):
    """Load a SYNOP CSV, add a combined datetime column and standardize FEATURES."""
    data = pd.read_csv(file_path)
    # pandas builds a datetime from the year/month/day/hour component columns
    data["date"] = pd.to_datetime(data[['year', 'month', 'day', 'hour']])
    # NOTE: FEATURES includes the date components, so they are normalized too
    data[FEATURES] = normalize(data[FEATURES].values)
    print(data.head())
    return data
if __name__ == '__main__':
    prepare_synop_dataset("synop_data/135_data.csv")
| {"/preprocess/synop_preprocess.py": ["/preprocess/fetch_synop_data.py"]} |
68,036 | adbelniak/wind-forecast | refs/heads/master | /preprocess/fetch_synop_data.py | import glob
from bs4 import BeautifulSoup
import requests
import re
from pathlib import Path
import os
from zipfile import ZipFile
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import tqdm
import argparse
"""This code obtains SYNOP data from 'https://danepubliczne.imgw.pl/'.
SYNOP file contains meteorological data for single localisation and year.
https://danepubliczne.imgw.pl/data/dane_pomiarowo_obserwacyjne/dane_meteorologiczne/terminowe/synop/s_t_format.txt
- under this url, is available descriptions of file format. For project purpose it's required to process at least three columns,
which contains wind direction, wind velocity and gust of wind which are available at column:
- direction - 23
- velocity - 25
- gust - 27
and date columns:
- year - 2
- month - 3
- day - 4
- hour - 5
"""
YEAR = 2
MONTH = 3
DAY = 4
HOUR = 5
DIRECTION_COLUMN = 23
VELOCITY_COLUMN = 25
GUST_COLUMN = 27
TEMPERATURE = 29
PRESSURE = 41
FEATURES = ['year', 'month', 'day', 'hour', 'direction', 'velocity', 'gust', 'temperature', 'pressure']
def download_list_of_station(dir: str):
    """Download the IMGW station list CSV into *dir* unless it already exists.

    Network side effect: fetches 'wykaz_stacji.csv' from danepubliczne.imgw.pl.
    """
    file_name = 'wykaz_stacji.csv'
    target = os.path.join(dir, file_name)
    if not os.path.isfile(target):
        url = 'https://danepubliczne.imgw.pl/data/dane_pomiarowo_obserwacyjne/dane_meteorologiczne/' + file_name
        file = requests.get(url, stream=True)
        # fix: context manager guarantees the handle is closed even if write fails
        with open(target, 'wb') as opened_file:
            opened_file.write(file.content)
def get_localisation_id(localisation_name: str, dir='synop_data'):
    """Look up a station's meteo code by city name in the downloaded station list."""
    loc_data = pd.read_csv(os.path.join(dir, 'wykaz_stacji.csv'), encoding="ISO-8859-1",
                           names=['unknown', 'city_name', 'meteo_code'])
    row = loc_data.loc[loc_data['city_name'] == localisation_name]
    if row.shape[0] == 0:
        raise Exception("Location does not exists")
    # first match wins if the name is ambiguous
    return row.iloc[0]['meteo_code']
def get_synop_data(localisation_code: str, year: str, dir: str):
    """Download all SYNOP zip archives for one station and year.

    Scrapes the IMGW directory listing for *year* and saves every zip whose
    name contains *localisation_code* into <dir>/<year>/download.
    """
    dir_per_year = os.path.join(dir, year, 'download')
    Path(dir_per_year).mkdir(parents=True, exist_ok=True)
    url = "https://dane.imgw.pl/data/dane_pomiarowo_obserwacyjne/dane_meteorologiczne/terminowe/synop/" + year
    page = requests.get(url)
    soup = BeautifulSoup(page.content, 'html.parser')
    zip_rows = soup.find_all('tr')
    for row in zip_rows:
        td_link = row.find('a')
        if td_link:
            # NOTE(review): the '.' before 'zip' is unescaped in the regex — matches any char
            contains_zip_file = re.match(rf'^(.+?){localisation_code}(.+?).zip$', td_link['href'])
            if contains_zip_file:
                file = requests.get(url + '/' + td_link['href'], stream=True)
                opened_file = open(os.path.join(dir_per_year, td_link['href']), 'wb')
                opened_file.write(file.content)
                opened_file.close()
def extract_zip_files(year: str, dir: str):
    """Extract every archive in <dir>/<year>/download into <dir>/<year>."""
    dir_with_zip = os.path.join(dir, year, 'download')
    data_directory = os.path.join(dir, year)
    for file in os.listdir(dir_with_zip):
        # fix: 'archive' instead of shadowing the builtin 'zip'
        with ZipFile(os.path.join(dir_with_zip, file), 'r') as archive:
            archive.extractall(path=data_directory)
def read_data(localisation_code: str, year: str, number_column, columns, dir='synop_data'):
    """Read the extracted CSVs for a station/year and keep only *number_column* columns.

    Returns a DataFrame whose columns are renamed to *columns*.
    """
    station_data = pd.DataFrame(columns=columns)
    for filepath in glob.iglob(rf'{dir}/{year}/*{localisation_code}*.csv', recursive=True):
        synop_data = pd.read_csv(filepath, encoding="ISO-8859-1", header=None)
        required_data = synop_data[number_column]
        # NOTE(review): this assignment REPLACES the frame's columns on every
        # iteration — with several matching files only the last one survives.
        # Presumably one file matches per year; verify before relying on it.
        station_data[columns] = required_data
    return station_data
def plot_scatter_data_for_year(localisation_code: str, year: int, dir='synop_data'):
    """Scatter-plot wind velocity over time for one station and year."""
    station_data = pd.read_csv(os.path.join(dir, localisation_code + '_data.csv'))
    one_year_data = station_data.loc[station_data['year'] == year]
    # NOTE(review): assigning into a .loc slice triggers SettingWithCopyWarning
    one_year_data['date_time'] = pd.to_datetime(one_year_data[['year', 'month', 'day', 'hour']])
    one_year_data.plot(kind='scatter', x='date_time', y='velocity', color='red')
    plt.show()
def plot_each_month_in_year(localisation_code: str, year: int, dir='synop_data'):
    """Box-plot pressure per month for one station and year."""
    station_data = pd.read_csv(os.path.join(dir, localisation_code + '_data.csv'))
    one_year_data = station_data.loc[station_data['year'] == year]
    # NOTE(review): date_time is computed but unused by the boxplot below
    one_year_data['date_time'] = pd.to_datetime(one_year_data[['year', 'month', 'day', 'hour']])
    sns.boxplot(x='month', y="pressure",
                data=one_year_data, palette="Set3")
    plt.show()
def plot_box_all_data(localisation_code: str, dir='synop_data'):
    """Box-plot wind velocity per year across the whole processed dataset."""
    station_data = pd.read_csv(os.path.join(dir, localisation_code + '_data.csv'))
    print(station_data.head())
    sns.boxplot(x='year', y="velocity",
                data=station_data, palette="Set3")
    plt.show()
def process_all_data(from_year, until_year, localisation_code, dir='synop_data'):
    """Download, extract and merge SYNOP data for [from_year, until_year).

    Writes the combined frame to <dir>/<localisation_code>_data.csv.
    """
    columns = FEATURES
    station_data = pd.DataFrame(columns=columns)
    # column indices in the raw SYNOP CSV matching FEATURES, in order
    number_column = [YEAR, MONTH, DAY, HOUR, DIRECTION_COLUMN, VELOCITY_COLUMN, GUST_COLUMN, TEMPERATURE, PRESSURE]
    for year in tqdm.tqdm(range(from_year, until_year)):
        get_synop_data(localisation_code, str(year), dir)
        extract_zip_files(str(year), dir)
        processed_wind_data = read_data(localisation_code, str(year), number_column, columns, dir)
        # NOTE(review): DataFrame.append was removed in pandas 2.0 — pin pandas<2 or migrate to pd.concat
        station_data = station_data.append(processed_wind_data)
    station_data.to_csv(os.path.join(dir, localisation_code + '_data.csv'), index=False)
if __name__ == "__main__":
    # CLI entry point: resolve the station code, fetch/process all years,
    # then draw the per-month box plot for one year.
    parser = argparse.ArgumentParser()
    parser.add_argument('--dir', help='Working directory', default='')
    parser.add_argument('--out', help='Directory where to save synop files', default='synop_data')
    parser.add_argument('--localisation_name', help='Localisation name for which to get data', default='HEL', type=str)
    parser.add_argument('--start_year', help='Start date for fetching data', type=int, default=2001)
    parser.add_argument('--end_year', help='End date for fetching data', type=int, default=2020)
    # NOTE(review): "fow which" typo in the help text below
    parser.add_argument('--plot_box_year', help='Year fow which create box plot for each month', type=int, default=2019)
    args = parser.parse_args()
    # download_list_of_station(args.dir)
    localisation_code = get_localisation_id(args.localisation_name, args.dir)
    # NOTE(review): process_all_data uses its default dir, not args.out — confirm intended
    process_all_data(args.start_year, args.end_year, str(localisation_code))
    plot_each_month_in_year(str(localisation_code), args.plot_box_year, args.out)
| {"/preprocess/synop_preprocess.py": ["/preprocess/fetch_synop_data.py"]} |
68,037 | adbelniak/wind-forecast | refs/heads/master | /gfs-archive-0-25/gfs_processor/own_logger.py | import logging
import sys
def get_logger():
    """Return the module logger, writing INFO+ to stdout and 'gfs_prcessor.log'.

    Idempotent: repeated calls return the same logger without stacking
    duplicate handlers (the original added two new handlers per call).
    """
    logger = logging.getLogger(__name__)
    if not logger.handlers:  # fix: configure only once
        formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
        console_handler = logging.StreamHandler(sys.stdout)
        console_handler.setFormatter(formatter)
        handler = logging.FileHandler('gfs_prcessor.log')
        handler.setLevel(logging.INFO)
        handler.setFormatter(formatter)
        logger.addHandler(console_handler)
        logger.addHandler(handler)
        logger.setLevel(logging.INFO)
    return logger
logger = get_logger() | {"/preprocess/synop_preprocess.py": ["/preprocess/fetch_synop_data.py"]} |
68,038 | adbelniak/wind-forecast | refs/heads/master | /gfs-archive-0-25/gfs_processor/gfs_prcessor.py | import pandas as pd
import schedule
from rda_request_sender import RequestStatus, REQ_ID_PATH
import rdams_client as rc
from modified_rda_download_client import download
from own_logger import logger
from pathlib import Path
import os
def read_pseudo_rda_request_db():
    """Load the CSV acting as a tiny request-tracking database (REQ_ID_PATH)."""
    return pd.read_csv(REQ_ID_PATH, index_col=0)
def create_dir_by_location_and_request(req_id: str, latitude: str, longitute: str):
    """Ensure '<lat>_<lon>/<req_id>' directories exist (dots become underscores).

    Returns the (location_path, request_path) pair.
    """
    location_path = "_".join((latitude.replace('.', '_'), longitute.replace('.', '_')))
    request_path = os.path.join(location_path, req_id)
    for directory in (location_path, request_path):
        Path(directory).mkdir(parents=True, exist_ok=True)
    return location_path, request_path
def download_request(req_id: int, latitude: str, longitute: str):
    """Download a completed RDA request into its per-location/per-request dir.

    Re-raises any download failure after logging it so the caller can keep
    the request marked as pending.
    """
    _, request_path = create_dir_by_location_and_request(str(req_id), latitude, longitute)
    logger.info("start downloading")
    try:
        download(req_id, request_path)
    except Exception as e:
        logger.error("Downloading failed", exc_info=True)
        raise e
def purge(req_id: int):
    """Delete a finished request from the RDA server to free quota."""
    rc.purge_request(req_id)
def processor():
    """Poll pending RDA requests; download, mark completed and purge each one.

    The request CSV is rewritten at the end so successful statuses persist
    even if some requests fail.
    """
    logger.info("Start processor")
    request_db = read_pseudo_rda_request_db()
    not_completed = request_db[request_db["status"] == RequestStatus.SENT.value]
    logger.info("{} requests is pending".format(len(not_completed)))
    for index, request in not_completed.iterrows():
        req_id = request['req_id']
        res = rc.get_status(req_id)
        request_status = res['result']['status']
        if request_status == 'Completed':
            logger.info("Request id: {} is completed".format(req_id))
            try:
                download_request(req_id, str(request["latitude"]), str(request["longitude"]))
                # update the original frame (not the filtered view) by label
                request_db.loc[index,"status"] = RequestStatus.COMPLETED.value
                purge(req_id)
            except Exception as e:
                # keep going: a single failed download must not stop the batch
                logger.error(e, exc_info=True)
    request_db.to_csv(REQ_ID_PATH)
def scheduler():
    """Register processor() to run every 3 hours via `schedule`.

    NOTE(review): this only registers the job — without a loop calling
    schedule.run_pending() the job never actually executes. Confirm a
    driver loop exists elsewhere or add one.
    """
    try:
        schedule.every(3).hours.do(processor)
    except Exception as e:
        logger.error(e, exc_info=True)
if __name__ == '__main__':
    # run one processing pass immediately when invoked as a script
    processor()
| {"/preprocess/synop_preprocess.py": ["/preprocess/fetch_synop_data.py"]} |
68,043 | yuckey8/kotobank_search | refs/heads/master | /main.py | import sys
import webbrowser as web
from kivy.app import App
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.scrollview import ScrollView
from kivy.uix.textinput import TextInput
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.widget import Widget
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.properties import ObjectProperty
from kivy.uix.button import Button
from kivy.core.window import Window
from kivy.properties import StringProperty
import os.path
import re
from urllib.parse import quote
from kivy.resources import resource_add_path
from voice import record_voice
from voice import get_text_from_voice
from voice import gererate_voice_data
from voice import play_wav
import asyncio
import subprocess
if hasattr(sys, "_MEIPASS"):
resource_add_path(sys._MEIPASS)
class AskQuestionScreen(BoxLayout, Screen):
    """Feedback screen: asks the user yes/no and shows a thank-you message.

    Only the first press counts; further presses show an 'already answered'
    message. The Japanese UI strings are intentional and must not change.
    """
    # Kivy property bound to the label in the .kv layout
    answer_message = StringProperty()
    # class-level default; shadowed by the instance attribute set in __init__
    press_count = 0
    def __init__(self, **kwargs):
        super(AskQuestionScreen, self).__init__(**kwargs)
        self.answer_message = ''
        self.press_count = 0
        pass
    def select_yes(self):
        # "Yes" button handler: thank the user, but only once
        if self.press_count == 1:
            self.answer_message = '既に感想を送ってくれたみたいだよ。\nありがとう!'
            return
        self.answer_message = 'ありがとう!\nこれからももっと使ってね!'
        self.press_count += 1
    def select_no(self):
        # "No" button handler: apologize, but only once
        if self.press_count == 1:
            self.answer_message = '既に感想を送ってくれたみたいだよ。\nありがとうね〜'
            return
        self.answer_message = 'そうなのか…ごめんね(´・ω・`)\nこれからもっと頑張るね(´・ω・`)'
        self.press_count += 1
class InputKeywordScreen(BoxLayout, Screen):
    """Main screen: look up a word on kotobank.jp and show/speak its meaning.

    Scrapes kotobank via curl/os.system (see NOTE in get_data_from_kotobank),
    formats the definition for the UI label, synthesizes it to speech, and
    supports voice input. Japanese UI strings are runtime data — do not edit.
    """
    # Kivy properties bound in the .kv layout
    input_word = ObjectProperty(None)
    result_msg = StringProperty()
    address_to_kotobank = StringProperty("https://kotobank.jp")
    URL_message = StringProperty("コトバンクへ")
    def __init__(self, **kwargs):
        super(InputKeywordScreen, self).__init__(**kwargs)
        self.result_msg = 'ここに意味が表示されるよ'
    def get_input_msg(self):
        """Return the TextInput widget holding the search word."""
        return self.input_word
    def get_data_from_kotobank(self):
        """Fetch the kotobank page for the entered word.

        Returns (raw_html, URL); on failure both are set to the exception's
        class name, which callers check as a sentinel.
        NOTE(review): shells out to curl via os.system — the word is passed
        through urllib quote(), but building shell strings remains fragile;
        prefer the requests library already available in this project.
        """
        try:
            # HEAD request: kotobank redirects /word/<term> to the real page
            commands = "curl -kI https://kotobank.jp/word/" + quote(self.input_word.text) + " -o header"
            req = os.system(commands)
            f = open('header')
            header_info = f.read()  # read the whole response to EOF
            # print(tmp)
            hit_list = re.findall("Location:.*", header_info)
            # strip the "Location: " prefix to get the redirect target path
            URL = "https://kotobank.jp" + hit_list[0][10:]
            commands = "curl -k " + URL + " > raw.html"
            req = os.system(commands)
            f = open('raw.html')
            raw_html = f.read()  # read the whole page to EOF
            commands = "rm -f header raw.html"
            req = os.system(commands)
        except ValueError:
            URL = raw_html = "ValueEror"
            print('ValueError')
        except KeyError:
            URL = raw_html = "KeyError"
            print("KeyError")
        except TypeError:
            URL = raw_html = "TypeError"
            print("TypeError")
        except IndexError:
            # no Location header found → word does not exist on kotobank
            URL = raw_html = "IndexError"
            print("IndexError")
        return raw_html, URL
    def normalize_html(self, raw_html):
        """Strip all newlines, spaces and tabs so regexes can match flatly."""
        raw_html = raw_html.replace("\n", "")
        raw_html = raw_html.replace(" ", "")
        html = raw_html.replace("\t", "")
        return html
    def shape_from_html(self, html):
        """Extract the description section text from the normalized HTML.

        Returns the plain-text definition, or the exception class name as a
        sentinel string on failure (AttributeError means the regex missed).
        """
        try:
            # the pattern matches the whitespace-stripped markup from normalize_html
            hit_list = re.search("<sectionclass=\"description\">.*</section></div><!--/.ex解説--><pclass=\"source\">", html)
            hit_data_from_html = hit_list.group()
            cut_end_point = hit_data_from_html.find('</section>')
            hit_data_from_html = hit_data_from_html[:cut_end_point]
            # drop all remaining tags, keeping only the text
            hit_data_from_html = re.sub("<[^>]*?>", "", hit_data_from_html)
        except ValueError:
            hit_data_from_html = "ValueEror"
            print('ValueError')
        except KeyError:
            hit_data_from_html = "KeyError"
            print("KeyError")
        except TypeError:
            hit_data_from_html = "TypeError"
            print("TypeError")
        except IndexError:
            hit_data_from_html = "IndexError"
            print("IndexError")
        except AttributeError:
            hit_data_from_html = "AttributeError"
            print("AttributeError")
        return hit_data_from_html
    def shape_result(self, hit_data_from_html):
        """Wrap the definition to 31 chars/line, capped at 250 chars plus '…'."""
        CHAR_MAX_NUM = 250
        CHAR_NUM_IN_LINE = 31
        cut_data = hit_data_from_html[:CHAR_MAX_NUM]
        #print("a:", cut_data)
        shaped_result = '\n'
        for i in range(int(len(cut_data)/CHAR_NUM_IN_LINE)+1):
            shaped_result += cut_data[CHAR_NUM_IN_LINE*i : CHAR_NUM_IN_LINE*i+CHAR_NUM_IN_LINE] + '\n'
            pass
        #print(len(cut_data))
        if len(cut_data) == CHAR_MAX_NUM:
            # truncated: replace the final newline with an ellipsis marker
            shaped_result = shaped_result[:-1] + '…\n'
        # print(b)
        return shaped_result
    def set_sent_from_text(self):
        """Search the entered word, update the UI and speak the result aloud."""
        if self.input_word.text == "":
            self.result_msg = "なにか入力してよ〜〜"
            self.address_to_kotobank = "https://kotobank.jp"
            self.URL_message = 'コトバンクへ'
            return
        raw_html, URL = self.get_data_from_kotobank()
        if (raw_html == "IndexError"):
            # word not found on kotobank
            self.result_msg = "ごめんね、単語が見つからなかったよ"
            self.address_to_kotobank = "https://kotobank.jp"
            self.URL_message = 'コトバンクへ'
            return
        html = self.normalize_html(raw_html)
        hit_data_from_html = self.shape_from_html(html)
        if (hit_data_from_html == "AttributeError"):
            # page exists but the description section could not be parsed
            self.result_msg = "ちょっと下のボタンで検索してみて"
            self.address_to_kotobank = URL
            self.URL_message = 'コトバンクで調べてみよう'
            return
        msg = self.shape_result(hit_data_from_html)
        # print(msg)
        self.result_msg = msg
        self.address_to_kotobank = URL
        self.URL_message = 'コトバンクで調べてみよう'
        #print(self.URL_message)
        #print(self.result_msg)
        # speak the definition, then clean up the temporary wav file
        gererate_voice_data("\"" + self.result_msg + "\"", "res.wav")
        play_wav("res.wav")
        commands = "rm -f res.wav"
        proc = subprocess.Popen(
            commands,
            shell = True,
            stdin = subprocess.PIPE,
            stdout = subprocess.PIPE,
            stderr = subprocess.PIPE)
        stdout_data, stderr_data = proc.communicate()  # wait for the delete to finish
        # loop = asyncio.get_event_loop()
        # loop.run_until_complete(self.pass_msg())
        pass
    def set_sent_from_voice(self):
        """Record speech, transcribe it, and run a text search with the result."""
        print("音声入力中")
        record_voice()
        # strip Japanese punctuation the recognizer may insert
        voice_text = get_text_from_voice().replace("、", "").replace("。", "")
        # self.result_msg = "終わり"
        print("音声入力終了")
        self.input_word.text = voice_text
        self.set_sent_from_text()
    def access_to_kotobank(self):
        """Open the current kotobank URL in Google Chrome."""
        browser = web.get('"/usr/bin/google-chrome" %s')
        browser.open(self.address_to_kotobank)
class TellMeApp(App):
    """Kivy application root: a two-screen manager (search + feedback)."""
    def build(self):
        sm = ScreenManager()
        sm.add_widget(InputKeywordScreen(name='input'))
        sm.add_widget(AskQuestionScreen(name='question'))
        return sm
if __name__ == '__main__':
TellMeApp().run()
| {"/main.py": ["/voice.py"]} |
68,044 | yuckey8/kotobank_search | refs/heads/master | /voice.py | import pyaudio
import wave
import requests
import json
import configparser
import subprocess
def gererate_voice_data(speak_str, wavefile_path):
    """Synthesize *speak_str* into *wavefile_path* using the open_jtalk CLI.

    Newlines are stripped from the text, which is echoed through a shell
    pipe into open_jtalk with the bundled "mei" voice and NAIST dictionary.
    """
    jtalk_path = "open_jtalk"
    voicemodel_path = "hts-voice/mei/mei_normal.htsvoice"
    dictmodel_path = "naist-jdic"
    # voicemodel_path = "/usr/share/hts-voice/mei/mei_normal.htsvoice"
    # dictmodel_path = "/var/lib/mecab/dic/open-jtalk/naist-jdic"
    flattened_text = str(speak_str).replace("\n", "")
    commands = (
        "echo " + flattened_text + "|" + jtalk_path
        + " -m " + voicemodel_path
        + " -x " + dictmodel_path
        + " -ow " + wavefile_path
    )
    # NOTE(review): shell=True with interpolated text is shell-injection prone
    # if speak_str ever contains metacharacters — confirm inputs are trusted
    proc = subprocess.Popen(
        commands,
        shell=True,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    proc.communicate()  # block until synthesis finishes
def play_wav(wavefile_path):
    """Play *wavefile_path* through ALSA's aplay and block until it finishes."""
    player = subprocess.Popen(
        "aplay " + wavefile_path,
        shell = True)
    player.communicate()  # wait for playback to end
def record_voice():
    """Record 3 seconds of 16 kHz mono audio to 'voice_input.wav'."""
    CHUNK = 1024
    FORMAT = pyaudio.paInt16  # 16-bit samples
    CHANNELS = 1  # mono (stereo would be 2)
    RATE = 16000  # 16 kHz sample rate
    RECORD_SECONDS = 3  # record for 3 seconds
    WAVE_OUTPUT_FILENAME = "voice_input.wav"
    p = pyaudio.PyAudio()
    stream = p.open(format=FORMAT,
                    channels=CHANNELS,
                    rate=RATE,
                    input=True,
                    frames_per_buffer=CHUNK)
    print("* recording")
    frames = []
    # pull CHUNK-sized buffers until RECORD_SECONDS worth of audio is captured
    for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
        data = stream.read(CHUNK)
        frames.append(data)
    print("* done recording")
    stream.stop_stream()
    stream.close()
    p.terminate()
    # write the captured frames out as a standard WAV file
    wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
    wf.setnchannels(CHANNELS)
    wf.setsampwidth(p.get_sample_size(FORMAT))
    wf.setframerate(RATE)
    wf.writeframes(b''.join(frames))
    wf.close()
def get_text_from_voice():
    """Send 'voice_input.wav' to the docomo amiVoice API and return the transcript.

    Reads APIKEY from config.ini ([API] section). The temporary wav file is
    deleted after the request completes.
    """
    import os  # local import: the module does not import os at top level
    config = configparser.ConfigParser()
    config.read('config.ini')
    APIKEY = config.get('API', 'APIKEY')
    url = "https://api.apigw.smt.docomo.ne.jp/amiVoice/v1/recognize?APIKEY={}".format(APIKEY)
    # fix: context manager closes the wav handle (the original leaked it);
    # the handle stays open for the duration of the upload
    with open("voice_input.wav", 'rb') as wav:
        files = {"a": wav, "v": "on"}
        r = requests.post(url, files=files)
    # fix: os.remove is portable and avoids spawning a shell just to delete a file
    os.remove("voice_input.wav")
    # print(r.json()['text'])
    print(r)
    return r.json()['text']
# record_voice()
# get_text_from_voice()
#gererate_voice_data("あああ", "tmp.wav")
| {"/main.py": ["/voice.py"]} |
68,053 | prabin544/caesar-cipher | refs/heads/master | /caesar_cipher/caesar_cipher.py |
"""
ord(char) changes letter -> number
chr(num) changes number -> letter
A - 65
Z - 90
a - 97
z - 122
- encrypt(‘abc’,1) would return ‘bcd’
- encrypt(‘abc’, 10) would return ‘klm’
- shifts that exceed 26 should wrap around
- encrypt(‘abc’,27) would return ‘bcd’
def encrypt function
declare result string
iterate through each letter
declare int_char = ord(letter)
check for lower or uppercase
if LOWERCASE
(this means int_char between 97-122)
keyed = int_char + key
if keyed is greater than 122
                find out the difference between keyed and 122
modulo that difference by 26
add that difference to 97
push the result to result string
if UPPERCASE
(this means int_char between 65-90)
keyed = int_char + key
if keyed is greater than 90
find out the difference between keyed and 90
modulo that difference
add the difference to 65
push the result to result string
if its neither
just add the character to result string
"""
def encrypt(plaintext, key):
    """Caesar-shift each ASCII letter in *plaintext* by *key* positions.

    Case is preserved, non-letter characters pass through unchanged, and
    shifts wrap modulo 26 — negative keys and keys greater than 26 both work.
    """
    string = ""
    for char in plaintext:
        if char.islower():
            # normalize to 0-25, shift, wrap, then map back into 'a'..'z';
            # equivalent to the original branchy wrap logic for every key
            string += chr((ord(char) - 97 + key) % 26 + 97)
        elif char.isupper():
            string += chr((ord(char) - 65 + key) % 26 + 65)
        else:
            string += char
    return string
def decrypt(encryptedText, key):
    """Undo a Caesar shift of *key* by encrypting with the opposite shift."""
    return encrypt(encryptedText, key * -1)
# -------------------------------------------------------------------------------
import nltk
nltk.download("words", quiet=True)
nltk.download("names", quiet=True)
from nltk.corpus import words, names
word_list = words.words()
name_list = names.words()
def crack(encrypted_string):
    """Brute-force all 25 Caesar shifts and return the most likely plaintext.

    Each candidate key is scored by how many decrypted words appear in the
    nltk word/name corpora; the highest-scoring key wins. Prints the match
    ratio and the chosen key as a side effect.
    """
    encrypted_words_list = encrypted_string.split()
    # fix: one-time set gives O(1) membership vs O(n) scans of the corpora
    # lists on every word/key combination
    known = set(word_list) | set(name_list)
    highest_word_count = 0
    most_probable_key = 0
    for candidate_key in range(1, 26):
        count = 0
        for word in encrypted_words_list:
            # fix: decrypt each word once per key (was computed twice)
            decoded = decrypt(word, candidate_key)
            # NOTE(review): trailing punctuation (e.g. "times,") never matches
            # the corpus — TODO strip punctuation before the lookup
            if decoded in known:
                count += 1
        if count > highest_word_count:
            highest_word_count = count
            most_probable_key = candidate_key
    probability = highest_word_count / len(encrypted_words_list) * 100
    decrypted_word = decrypt(encrypted_string, most_probable_key)
    print(f"Decryption Probability: {probability}%")
    print(f"Most Probable Key: {most_probable_key}")
    return decrypted_word
if __name__ == "__main__":
    # demo: round-trip a sentence through encrypt and recover it with crack
    # input1 = "ABCD"
    # input2 = "abcd"
    # input3 = "ABab"
    # input4 = "AB ab AB cd dfadf fasdf"
    # input5 = "Hello World. We did it."
    # result1 = encrypt(input5, 900)
    # print(result1)
    # result2 = decrypt(result1, 900)
    # print(result2)
    real_sentence = "It was the best of times, it was the worst of times."
    encrypted = encrypt(real_sentence, 18)
    result6 = crack(encrypted)
    print(result6)
| {"/tests/test_caesar_cipher.py": ["/caesar_cipher/caesar_cipher.py"]} |
68,054 | prabin544/caesar-cipher | refs/heads/master | /tests/test_caesar_cipher.py | import pytest
from caesar_cipher.caesar_cipher import encrypt, decrypt, crack
def test_can_encrypt_with_a_given_shift():
    """Each letter is shifted forward by the given amount."""
    assert encrypt("ABCDE", 2) == "CDEFG"
def test_can_decrypt_previously_encrypted_string_with_the_same_shift():
    """decrypt() is the inverse of encrypt() for the same shift."""
    assert decrypt("CDEFG", 2) == "ABCDE"
def test_encryption_can_handle_upper_and_lower_case_letters():
    """A full-alphabet shift (26) must leave both cases unchanged."""
    assert encrypt("ABC abc mno", 26) == "ABC abc mno"
def test_encryption_should_allow_non_alpha_characters_but_ignore_them_including_white_space():
    """Punctuation and whitespace pass through untouched; letters shift."""
    assert encrypt("ABC def !@#$%^^ abc", 3) == "DEF ghi !@#$%^^ def"
def test_crack_can_decrypt_a_sentence_without_knowing_the_shift():
    """crack() recovers the plaintext with no key (1096 % 26 == 4)."""
    plaintext = "It was the best of times, it was the worst of times"
    assert crack(encrypt(plaintext, 1096)) == plaintext
| {"/tests/test_caesar_cipher.py": ["/caesar_cipher/caesar_cipher.py"]} |
68,056 | nextwallet-svr/ZebraPushAPI | refs/heads/master | /config.py | import os
# Which deployment to talk to; overridable via the ZPUSH_NETWORK env var.
CURR_NET = os.getenv("ZPUSH_NETWORK", "test")
# Per-network connection settings for the ZebraPush server.
PUSH_SERVER = {
    "test": {
        "root_url": "http://testdev.zebrablocklabs.com:5100/",
        "timeout": 10
    },
    "main": {
        # The "main" URL can additionally be overridden via ZPUSH_URL.
        "root_url": os.getenv("ZPUSH_URL", "http://dev.zebrablocklabs.com:5100/"),
        "timeout": 10
    }
}
def get_config():
    """Return the PUSH_SERVER settings dict for the currently selected network."""
    selected = PUSH_SERVER[CURR_NET]
    return selected
| {"/__init__.py": ["/push_client.py"], "/push_client.py": ["/__init__.py"]} |
68,057 | nextwallet-svr/ZebraPushAPI | refs/heads/master | /__init__.py | from .push_client import zpush
| {"/__init__.py": ["/push_client.py"], "/push_client.py": ["/__init__.py"]} |
68,058 | nextwallet-svr/ZebraPushAPI | refs/heads/master | /push_client.py | import logging
import json
from . import config
from simple_rest_client.api import API
from simple_rest_client.resource import Resource
class PushServiceResource(Resource):
    # Maps client action names to the ZebraPush HTTP endpoints; every action
    # POSTs to a path relative to the configured api_root_url.
    actions = {
        "send_mail": {"method": "POST", 'url': 'commonmail'},
        "send_sms": {"method": "POST", "url": "commonsms"},
        "send_notice": {"method": "POST", "url": "commonmsg"}
    }
class InternalServiceCall:
    """Thin client for the ZebraPush service.

    Wraps the REST endpoints declared in PushServiceResource and converts
    transport failures into a {"retcode": -1, ...} dict, so callers never
    have to handle a raised exception.
    """

    def __init__(self, config):
        # config: dict with "root_url" and "timeout" keys (see config.get_config()).
        self.config = config
        self.push_service_api = self.get_push_api()

    def get_push_api(self):
        """Build and return the simple_rest_client API bound to the push service."""
        root_url = self.config.get("root_url")
        timeout = self.config.get('timeout')
        api = API(
            api_root_url=root_url,
            timeout=timeout,
            append_slash=False
        )
        api.add_resource(resource_name='push',
                         resource_class=PushServiceResource)
        return api

    def _post(self, action_name, field_name, info, what):
        """Serialize *info* and POST it via the named resource action.

        action_name: attribute on the 'push' resource ("send_mail", ...).
        field_name:  JSON body key expected by the server for this action.
        what:        human-readable label used in error logs.

        Returns the server's response body, or {"retcode": -1, ...} when
        the request fails for any reason.
        """
        # Serialization happens before the try: a non-serializable payload
        # is a programming error and should propagate (as in the original).
        body = {field_name: json.dumps(info)}
        try:
            action = getattr(self.push_service_api.push, action_name)
            response = action(body=body, params={}, headers={})
        except Exception as e:
            # Lazy %-style args avoid formatting when the record is dropped.
            logging.error("send %s failed with error %s", what, e)
            return {"retcode": -1, "message": "internal server error"}
        return response.body

    def send_mail(self, mail_info):
        """Send *mail_info* (dict) to the ZebraPush mail endpoint."""
        return self._post("send_mail", "mail_info", mail_info, "mail")

    def send_sms(self, sms_info):
        """Send *sms_info* (dict) to the ZebraPush SMS endpoint."""
        return self._post("send_sms", "sms_info", sms_info, "SMS")

    def send_notice(self, push_info):
        """Send *push_info* (dict) to the ZebraPush notification endpoint.

        Bug fix: the original logged "send SMS failed" on notice errors
        (copy-paste from send_sms); the log now names the right action.
        """
        return self._post("send_notice", "push_message", push_info, "notice")
# initiate the object
# Module-level singleton shared by importers (re-exported from the package
# __init__); built once at import time for the network selected in config.
concrete_config = config.get_config()
zpush = InternalServiceCall(concrete_config)
| {"/__init__.py": ["/push_client.py"], "/push_client.py": ["/__init__.py"]} |
68,060 | gen-ko/ynn | refs/heads/master | /script/script_1_6_visual_two_layer.py | # This script is to visualize the parameters of the network
import numpy
import matplotlib.pyplot as plt
import pickle
import os
print('---------- starting script 1 - 3 ----------')
# fix the absolute path of file
full_path = os.path.realpath(__file__)
path, _ = os.path.split(full_path)
# Pickled network produced by an earlier training script.
network_dump_path = os.path.join(path, '../temp/network2-0-0-0.dump')
print('load network dump file...')
with open(network_dump_path, 'rb') as f:
    nn = pickle.load(f)
# First-layer weight matrix; each column is one hidden unit's 784 input
# weights (assumes 28x28 MNIST-style inputs — TODO confirm upstream).
w: numpy.ndarray = nn.layers[0].w
ncol = 10
nrow = int(w.shape[1]/ncol)
fig = plt.figure()
print('ploting', end='')
plt.axis('off')
# Render every hidden unit's weights as a 28x28 image in a grid.
for i in range(w.shape[1]):
    ax = plt.subplot(nrow, ncol, i+1)
    im = ax.imshow(w[:, i].reshape([28, 28]))
    ax.axis('off')
    print('.', end='')
print('\n', end='')
plt.savefig(os.path.join(path, os.pardir, "output", "output-fig4.png"))
| {"/script/script_3_5_visualize.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_10_tanh.py": ["/src/load_data.py", "/src/network.py"], "/script/script_3_2_linear_hidden.py": ["/src/nlp.py", "/src/util/status.py"], "/src/callback.py": ["/src/train.py", "/src/plotter.py"], "/script/script_4_1_pbp_n.py": ["/src/util/status.py"], "/src/train.py": ["/src/network.py", "/src/util_functions.py", "/src/util/status.py"], "/script/script_2_3_sample.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_7_hidden_num.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_3_8_rnn_embedding_size.py": ["/src/nlp.py", "/src/util/status.py"], "/script/script_3_4_inference.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_5_two_hidden.py": ["/src/load_data.py", "/src/network.py"], "/src/network.py": ["/src/util_functions.py"], "/script/script_2_2_k.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_1_naive.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_4_2_plot_bp.py": ["/src/plotter.py"], "/script/script_1_4_learning_rate.py": ["/src/load_data.py", "/src/network.py"], "/script/script_1_1_naive.py": ["/src/callback.py", "/src/train.py", "/src/util/status.py"], "/script/script_3_1_preprocessing.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_5_autoencoder.py": ["/src/load_data.py"], "/script/script_1_9_ReLU.py": ["/src/load_data.py", "/src/network.py"], "/script/script_2_4_pretrain.py": ["/src/load_data.py", "/src/network.py"]} |
68,061 | gen-ko/ynn | refs/heads/master | /src/nlp.py | import numpy
from src import layer
from src import network as nn
'''
class NlpGeneral(nn.NeuralNetwork):
def __init__(self, layers: [layer.Layer], connections: dict):
nn.NeuralNetwork.__init__(self, layers, connections)
self.next_layers: dict = {}
self.prev_layers: dict = {}
self.input_layers: list = []
self.h_out: dict = {}
self.h_in: dict = {}
self.layer_order: list = []
self.output: numpy.ndarray
for key in connections:
if key is 'input':
layer_list: list = []
for name in connections[key]:
layer_list += name_pool[name]
self.input_layers = layer_list
elif key is 'output':
self.output_layer = connections[key][0]
else:
current_layer = name_pool[key]
self.layer_order += current_layer
for name in connections[key]:
next_layer = name_pool[name]
try:
self.next_layers[current_layer] += next_layer
except KeyError:
self.next_layers[current_layer] = [next_layer]
try:
self.prev_layers[next_layer] += current_layer
except KeyError:
self.prev_layers[next_layer] = [current_layer]
return
def fprop(self, x, keep_state: bool=False):
input_layer_size = len(self.input_layers)
h_in: dict = {}
h_out: dict = {}
for i in range(input_layer_size):
current_layer: layer.Layer = self.input_layers[i]
h_in[current_layer] = x[:, i]
for i in range(len(self.layer_order)):
current_layer: layer.Layer = self.layer_order[i]
if len(self.prev_layers[current_layer]) > 1:
for i in range(len(self.prev_layers[current_layer])):
prev_layer = self.prev_layers[current_layer][i]
try:
h_in[current_layer] += h_out[prev_layer]
except NameError:
h_in[current_layer] = h_out[prev_layer]
h_out[current_layer] = current_layer.forward(h_in[current_layer])
h_in[self.next_layers[current_layer]] = h_out[current_layer]
h_out[self.output_layer] = self.output_layer.forward(h_out[self.output_layer])
if keep_state:
self.h_in = h_in
self.h_out = h_out
return h_out[self.output_layer]
def bprop(self, y):
d_h_top: dict = {}
d_h_down: dict = {}
d_h_down[self.output_layer] = self.output_layer.backward(y,
self.h_out[self.output_layer],
self.h_in[self.output_layer])
d_h3 = self.layer3.backward(y, self.h4, self.h3)
d_h2 = self.layer2.backward(d_h3, self.h3, self.h2)
# the gradiants of h2 is the same as h2_0, h2_1 and h2_2, which can be derived
d_h1_2 = self.layer1_2.backward(d_h2, self.h2_2, self.h1_2)
d_h1_1 = self.layer1_1.backward(d_h2, self.h2_1, self.h1_1)
d_h1_0 = self.layer1_0.backward(d_h2, self.h2_0, self.h1_0)
self.layer0.backward(d_h1_2, self.h1_2, self.h0_2)
self.layer0.backward(d_h1_1, self.h1_1, self.h0_1)
self.layer0.backward(d_h1_0, self.h1_0, self.h0_0)
return
def update(self):
self.layer0.update(self.learning_rate)
self.layer1_0.update(self.learning_rate, momentum=self.momentum)
self.layer1_1.update(self.learning_rate, momentum=self.momentum)
self.layer1_2.update(self.learning_rate, momentum=self.momentum)
self.layer2.update(self.learning_rate, momentum=self.momentum)
return
def train(self, x_train, y_train, x_valid, y_valid, epoch=200, batch_size=256, learning_rate=0.1, momentum=0.0):
print('------------------ Start Training -----------------')
print('\tepoch\t|\ttrain loss\t|\ttrain error\t|\tvalid loss\t|\tvalid error\t')
self.momentum = momentum
self.train_error = numpy.zeros(shape=(epoch,), dtype=numpy.float64)
self.valid_error = numpy.zeros(shape=(epoch,), dtype=numpy.float64)
self.train_loss = numpy.zeros(shape=(epoch,), dtype=numpy.float64)
self.valid_loss = numpy.zeros(shape=(epoch,), dtype=numpy.float64)
self.batch_size = batch_size
self.learning_rate = learning_rate
for j in range(epoch):
x_train, y_train = uf.shuffle(x_train, y_train)
train_score = 0.0
for i in range(0, y_train.size, batch_size):
x_batch = x_train[i: i + batch_size]
y_batch = y_train[i: i + batch_size]
output_train = self.fprop(x_batch, keep_state=True)
predict_train = uf.pick_class(output_train)
self.train_loss[j] += uf.cross_entropy_loss(output_train, y_batch, take_average=False)
train_score += numpy.sum(predict_train == y_batch)
self.bprop(y_batch)
self.update()
self.train_loss[j] = self.train_loss[j] / y_train.shape[0]
self.train_error[j] = (1.0 - train_score / y_train.shape[0])
# start a validation
output_valid = self.fprop(x_valid, keep_state=False)
predict_valid = uf.pick_class(output_valid)
self.valid_loss[j] = uf.cross_entropy_loss(output_valid, y_valid, take_average=True)
valid_score = numpy.sum(predict_valid == y_valid)
self.valid_error[j] = (1.0 - valid_score / y_valid.shape[0])
print('\t', j, '\t', sep='', end=' ')
print('\t|\t ', "{0:.5f}".format(self.train_loss[j]),
' \t|\t ', "{0:.5f}".format(self.train_error[j]),
' \t|\t ', "{0:.5f}".format(self.valid_loss[j]),
' \t|\t ', "{0:.5f}".format(self.valid_error[j]),
'\t',
sep='')
return
'''
class NlpL3TypeB(nn.NeuralNetwork):
    """4-gram language model: 3 embedded context words -> tanh hidden -> softmax.

    "Type B" differs from NlpL3TypeA by the extra Tanh non-linearity (l2)
    between the summed per-position projections and the output layer.
    """
    def __init__(self, dict_size: int, embedding_size: int, hidden_units: int):
        # One shared embedding (layer 0) feeds three position-specific linear
        # projections whose outputs are summed before the hidden layer.
        self.layers = [layer.Embedding(dict_size, embedding_size, 'l0'),  # layer 0
                       layer.Linear(embedding_size, hidden_units, 'l1-0'),  # layer 1
                       layer.Linear(embedding_size, hidden_units, 'l1-1'),  # layer 2
                       layer.Linear(embedding_size, hidden_units, 'l1-2'),  # layer 3
                       layer.Tanh(hidden_units, 'l2'),  # layer 4
                       layer.Linear(hidden_units, dict_size, 'l3'),  # layer 5
                       layer.Softmax(dict_size, 'l4')]  # layer 6
        # Activation cache filled by fprop(keep_state=True), read by bprop.
        self.h: list = None
        return

    def fprop(self, x, keep_state: bool=False):
        """Forward pass over a (batch, 3) array of context word ids.

        Returns the (batch, dict_size) softmax distribution (h[-1]).
        Set keep_state=True before calling bprop so activations are cached.
        """
        h = list()
        h.append(x[:, 0])  # h0
        h.append(x[:, 1])  # h1
        h.append(x[:, 2])  # h2
        h.append(self.layers[0].forward(h[0]))  # h3: embedding of word 0
        h.append(self.layers[0].forward(h[1]))  # h4: embedding of word 1
        h.append(self.layers[0].forward(h[2]))  # h5: embedding of word 2
        h.append(self.layers[1].forward(h[3]))  # h6
        h.append(self.layers[2].forward(h[4]))  # h7
        h.append(self.layers[3].forward(h[5]))  # h8
        h.append(h[6] + h[7] + h[8])  # h9: summed projections
        h.append(self.layers[4].forward(h[9]))  # h10: tanh
        h.append(self.layers[5].forward(h[10]))  # h11: output logits
        h.append(self.layers[6].forward(h[11]))  # h12: softmax probs
        if keep_state:
            self.h = h
        return h[-1]

    def bprop(self, y):
        """Backward pass; requires a preceding fprop(..., keep_state=True).

        Gradients flow back through the summed node h9 unchanged into each
        of the three projection branches; the shared embedding (layer 0)
        accumulates gradients from all three backward calls.
        """
        d_h4 = self.layers[6].backward(y, self.h[12], self.h[11])
        d_h3 = self.layers[5].backward(d_h4, self.h[11], self.h[10])
        d_h2 = self.layers[4].backward(d_h3, self.h[10], self.h[9])
        # the gradiants of h2 is the same as h2_0, h2_1 and h2_2, which can be derived
        d_h1_2 = self.layers[3].backward(d_h2, self.h[8], self.h[5])
        d_h1_1 = self.layers[2].backward(d_h2, self.h[7], self.h[4])
        d_h1_0 = self.layers[1].backward(d_h2, self.h[6], self.h[3])
        self.layers[0].backward(d_h1_2, self.h[5], self.h[2])
        self.layers[0].backward(d_h1_1, self.h[4], self.h[1])
        self.layers[0].backward(d_h1_0, self.h[3], self.h[0])
        return
class NlpL3TypeA(nn.NeuralNetwork):
    """4-gram language model without a hidden non-linearity.

    Same structure as NlpL3TypeB but the summed projections go straight
    into the output linear layer (no Tanh).
    """
    def __init__(self, dict_size: int, embedding_size: int, hidden_units: int):
        # Shared embedding + three position-specific projections + output.
        self.layers = [layer.Embedding(dict_size, embedding_size, 'l0'),
                       layer.Linear(embedding_size, hidden_units, 'l1-0'),
                       layer.Linear(embedding_size, hidden_units, 'l1-1'),
                       layer.Linear(embedding_size, hidden_units, 'l1-2'),
                       layer.Linear(hidden_units, dict_size, 'l2'),
                       layer.Softmax(dict_size, 'l3')]
        # Activation cache filled by fprop(keep_state=True), read by bprop.
        self.h: list = None
        return

    def fprop(self, x, keep_state: bool=False):
        """Forward pass over a (batch, 3) array of context word ids.

        Returns the (batch, dict_size) softmax distribution (h[-1]).
        """
        h = list()
        h.append(x[:, 0])  # h0
        h.append(x[:, 1])  # h1
        h.append(x[:, 2])  # h2
        h.append(self.layers[0].forward(h[0]))  # h3: embedding of word 0
        h.append(self.layers[0].forward(h[1]))  # h4: embedding of word 1
        h.append(self.layers[0].forward(h[2]))  # h5: embedding of word 2
        h.append(self.layers[1].forward(h[3]))  # h6
        h.append(self.layers[2].forward(h[4]))  # h7
        h.append(self.layers[3].forward(h[5]))  # h8
        h.append(h[6] + h[7] + h[8])  # h9: summed projections
        h.append(self.layers[4].forward(h[9]))  # h10: output logits
        h.append(self.layers[5].forward(h[10]))  # h11: softmax probs
        if keep_state:
            self.h = h
        return h[-1]

    def bprop(self, y):
        """Backward pass; requires a preceding fprop(..., keep_state=True).

        The shared embedding (layer 0) accumulates gradients from all
        three context positions.
        """
        d_h3 = self.layers[5].backward(y, self.h[11], self.h[10])
        d_h2 = self.layers[4].backward(d_h3, self.h[10], self.h[9])
        # the gradiants of h2 is the same as h2_0, h2_1 and h2_2, which can be derived
        d_h1_2 = self.layers[3].backward(d_h2, self.h[8], self.h[5])
        d_h1_1 = self.layers[2].backward(d_h2, self.h[7], self.h[4])
        d_h1_0 = self.layers[1].backward(d_h2, self.h[6], self.h[3])
        self.layers[0].backward(d_h1_2, self.h[5], self.h[2])
        self.layers[0].backward(d_h1_1, self.h[4], self.h[1])
        self.layers[0].backward(d_h1_0, self.h[3], self.h[0])
        return
class NlpL3TypeR(nn.NeuralNetwork):
    """Recurrent 4-gram model: the three embedded words are fed through one
    Recursive+Tanh cell in sequence (unrolled for exactly 3 steps)."""
    def __init__(self, dict_size: int, embedding_size: int, hidden_units: int):
        self.layers = [layer.Embedding(dict_size, embedding_size, 'l0-Embedding'),
                       layer.Recursive(embedding_size, hidden_units, 'l1-R'),
                       layer.Tanh(hidden_units, 'l2-Tanh'),
                       layer.Linear(hidden_units, dict_size, 'l3-L'),
                       layer.Softmax(dict_size, 'l4-Softmax')]
        # Activation cache filled by fprop(keep_state=True), read by bprop.
        self.h: list = None
        return

    def fprop(self, x, keep_state: bool=False):
        """Unrolled 3-step recurrent forward pass over (batch, 3) word ids.

        s_t = Recursive(embed(x_t), a_{t-1}); a_t = tanh(s_t); the final
        hidden state a3 feeds the output layers. Returns softmax probs.
        """
        h = list()
        h.append(x[:, 0])  # h0
        h.append(x[:, 1])  # h1
        h.append(x[:, 2])  # h2
        h.append(self.layers[0].forward(h[0]))  # h3: embedding of word 0
        h.append(self.layers[0].forward(h[1]))  # h4: embedding of word 1
        h.append(self.layers[0].forward(h[2]))  # h5: embedding of word 2
        num_batch = x.shape[0]
        # Initial hidden state is all zeros; kept on self for bprop's last step.
        self.s0 = numpy.zeros((num_batch, self.layers[1].output_dimension), dtype=numpy.float32)
        s1 = self.layers[1].forward(h[3], self.s0)
        a1 = self.layers[2].forward(s1)
        h.append(s1)  # h6
        h.append(a1)  # h7
        s2 = self.layers[1].forward(h[4], a1)
        a2 = self.layers[2].forward(s2)
        h.append(s2)  # h8
        h.append(a2)  # h9
        s3 = self.layers[1].forward(h[5], a2)
        a3 = self.layers[2].forward(s3)
        h.append(s3)  # h10
        h.append(a3)  # h11
        h.append(self.layers[3].forward(a3))  # h12: output logits
        h.append(self.layers[4].forward(h[12]))  # h13: softmax probs
        if keep_state:
            self.h = h
        return h[-1]

    def bprop(self, y):
        """Full backprop-through-time over the 3 unrolled steps.

        Each Recursive.backward returns (grad wrt input embedding,
        grad wrt previous hidden state); the shared Embedding and
        Recursive layers accumulate gradients across steps.
        """
        d_h12 = self.layers[4].backward(y, self.h[13], self.h[12])
        d_h11 = self.layers[3].backward(d_h12, self.h[12], self.h[11])
        d_h10 = self.layers[2].backward(d_h11, self.h[11], self.h[10])
        d_h5, d_h9 = self.layers[1].backward(d_h10, self.h[10], self.h[5], self.h[9])
        d_h8 = self.layers[2].backward(d_h9, self.h[9], self.h[8])
        d_h4, d_h7 = self.layers[1].backward(d_h8, self.h[8], self.h[4], self.h[7])
        d_h6 = self.layers[2].backward(d_h7, self.h[7], self.h[6])
        d_h3, _ = self.layers[1].backward(d_h6, self.h[6], self.h[3], self.s0)
        self.layers[0].backward(d_h3, self.h[3], self.h[0])
        self.layers[0].backward(d_h4, self.h[4], self.h[1])
        self.layers[0].backward(d_h5, self.h[5], self.h[2])
        return
class NlpL3TypeRC(nn.NeuralNetwork):
    """Variant of NlpL3TypeR with a truncated backward pass.

    Forward pass is identical to NlpL3TypeR; in bprop, backpropagation
    through the earlier time steps is commented out, so only the last
    step's embedding receives a gradient — presumably for a truncated-BPTT
    comparison experiment; confirm intent before reusing.
    """
    def __init__(self, dict_size: int, embedding_size: int, hidden_units: int):
        self.layers = [layer.Embedding(dict_size, embedding_size, 'l0-Embedding'),
                       layer.Recursive(embedding_size, hidden_units, 'l1-R'),
                       layer.Tanh(hidden_units, 'l2-Tanh'),
                       layer.Linear(hidden_units, dict_size, 'l3-L'),
                       layer.Softmax(dict_size, 'l4-Softmax')]
        # Activation cache filled by fprop(keep_state=True), read by bprop.
        self.h: list = None
        if __debug__:
            print('DEBUG MODE ENABLED')
        return

    def fprop(self, x, keep_state: bool=False):
        """Unrolled 3-step recurrent forward pass (same as NlpL3TypeR)."""
        h = list()
        h.append(x[:, 0])  # h0
        h.append(x[:, 1])  # h1
        h.append(x[:, 2])  # h2
        h.append(self.layers[0].forward(h[0]))  # h3: embedding of word 0
        h.append(self.layers[0].forward(h[1]))  # h4: embedding of word 1
        h.append(self.layers[0].forward(h[2]))  # h5: embedding of word 2
        num_batch = x.shape[0]
        self.s0 = numpy.zeros((num_batch, self.layers[1].output_dimension), dtype=numpy.float32)
        s1 = self.layers[1].forward(h[3], self.s0)
        a1 = self.layers[2].forward(s1)
        h.append(s1)  # h6
        h.append(a1)  # h7
        s2 = self.layers[1].forward(h[4], a1)
        a2 = self.layers[2].forward(s2)
        h.append(s2)  # h8
        h.append(a2)  # h9
        s3 = self.layers[1].forward(h[5], a2)
        a3 = self.layers[2].forward(s3)
        h.append(s3)  # h10
        h.append(a3)  # h11
        h.append(self.layers[3].forward(a3))  # h12: output logits
        h.append(self.layers[4].forward(h[12]))  # h13: softmax probs
        if keep_state:
            self.h = h
        return h[-1]

    def bprop(self, y):
        """Truncated backward pass: only the last recurrent step is unrolled;
        earlier-step backprop is deliberately disabled (commented out)."""
        d_h12 = self.layers[4].backward(y, self.h[13], self.h[12])
        d_h11 = self.layers[3].backward(d_h12, self.h[12], self.h[11])
        d_h10 = self.layers[2].backward(d_h11, self.h[11], self.h[10])
        d_h5, d_h9 = self.layers[1].backward(d_h10, self.h[10], self.h[5], self.h[9])
        #d_h8 = self.layers[2].backward(d_h9, self.h[9], self.h[8])
        #d_h4, d_h7 = self.layers[1].backward(d_h8, self.h[8], self.h[4], self.h[7])
        #d_h6 = self.layers[2].backward(d_h7, self.h[7], self.h[6])
        #d_h3, _ = self.layers[1].backward(d_h6, self.h[6], self.h[3], self.s0)
        #self.layers[0].backward(d_h3, self.h[3], self.h[0])
        #self.layers[0].backward(d_h4, self.h[4], self.h[1])
        self.layers[0].backward(d_h5, self.h[5], self.h[2])
        return
'''
def update(self, train_settings: uf.TrainSettings):
for layer_c in self.layers:
layer_c.update(train_settings)
return
'''
| {"/script/script_3_5_visualize.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_10_tanh.py": ["/src/load_data.py", "/src/network.py"], "/script/script_3_2_linear_hidden.py": ["/src/nlp.py", "/src/util/status.py"], "/src/callback.py": ["/src/train.py", "/src/plotter.py"], "/script/script_4_1_pbp_n.py": ["/src/util/status.py"], "/src/train.py": ["/src/network.py", "/src/util_functions.py", "/src/util/status.py"], "/script/script_2_3_sample.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_7_hidden_num.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_3_8_rnn_embedding_size.py": ["/src/nlp.py", "/src/util/status.py"], "/script/script_3_4_inference.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_5_two_hidden.py": ["/src/load_data.py", "/src/network.py"], "/src/network.py": ["/src/util_functions.py"], "/script/script_2_2_k.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_1_naive.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_4_2_plot_bp.py": ["/src/plotter.py"], "/script/script_1_4_learning_rate.py": ["/src/load_data.py", "/src/network.py"], "/script/script_1_1_naive.py": ["/src/callback.py", "/src/train.py", "/src/util/status.py"], "/script/script_3_1_preprocessing.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_5_autoencoder.py": ["/src/load_data.py"], "/script/script_1_9_ReLU.py": ["/src/load_data.py", "/src/network.py"], "/script/script_2_4_pretrain.py": ["/src/load_data.py", "/src/network.py"]} |
68,062 | gen-ko/ynn | refs/heads/master | /script/script_3_5_visualize.py | # required python version: 3.6+
import os
import sys
import src.load_data as load_data
from src import layer
import src.nlp as nlp
import matplotlib.pyplot as plt
import numpy
import os
import pickle
from src import util as uf
from src import callback as cb
from src import train as utf
# resolve file path
path, _ = os.path.split(os.path.realpath(__file__))
dictionary_filename = os.path.join(path,'../output/dump', 'dictionary.dump')
# phase A, train the model
# Toggle: True trains from scratch; False loads a previously trained net.
# NOTE(review): the visualization below references `nn`, which is only
# bound in the else-branch — running with train=True would raise a
# NameError later; confirm before flipping this flag.
train = False
if train:
    # load data
    data_train_filepath = os.path.join(path, '../output/dump', 'train.dump')
    data_valid_filepath = os.path.join(path, '../output/dump', 'valid.dump')
    # load preprocessed data
    with open(data_train_filepath, 'rb+') as f:
        data_train = pickle.load(f)
    with open(data_valid_filepath, 'rb+') as f:
        data_valid = pickle.load(f)
    # Columns 0-2 are the 3 context word ids; column 3 is the target word.
    x_train = data_train[:, 0:3]
    y_train = data_train[:, 3]
    x_valid = data_valid[:, 0:3]
    y_valid = data_valid[:, 3]
    data_store_train = uf.DataStore(x_train, y_train)
    data_store_valid = uf.DataStore(x_valid, y_valid)
    train_settings = uf.TrainSettings(learning_rate=0.1, batch_size=512, momentum=0.0, plot_callback=cb.plot_callback,
                                      loss_callback=cb.loss_callback, filename='script-3-5', epoch=100, prefix='h128')
    # build the neural network
    # embedding_size=2 so the learned embeddings can be scatter-plotted directly
    mynlp = nlp.NlpL3TypeA(dict_size=8000, embedding_size=2, hidden_units=128)
    utf.cross_train(mynlp, data_store_train, data_store_valid, train_settings)
else:
    # load the network
    nn_filename = os.path.join(path, '../output/dump', 'script-3-5-h128-i-s.dump')
    nn = nlp.NlpL3TypeA(8000, 2, 128)
    with open(nn_filename, 'rb') as f:
        nn.load(pickle.load(f))
# load vocabulary
with open(dictionary_filename, 'rb') as f:
    d: dict = pickle.load(f)
# visualize the words
# draw 500 random words from d
# d maps word -> integer id (presumably; verify against preprocessing script).
dv = list(d.values())
dk = list(d.keys())
dk_random = numpy.array(dk)
d_random = numpy.random.choice(dv, size=(500, ))
d_list = list(d_random)
# Rows of the embedding matrix are the 2-D word embeddings.
weights: dict = nn.layers[0].w
y = weights[d_random]
plt.scatter(y[:, 0], y[:, 1])
plt.show()
# Re-plot with each point annotated by its word.
dk_random = dk_random[d_list]
for label, xi, yi in zip(dk_random, y[:, 0], y[:, 1]):
    plt.annotate(label, xy=(xi, yi), xytext=(0, 0), textcoords='offset points')
plt.xlim(min(y[:, 0]), max(y[:, 0]))
plt.ylim(min(y[:, 1]), max(y[:, 1]))
plt.show()
print('hi')
| {"/script/script_3_5_visualize.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_10_tanh.py": ["/src/load_data.py", "/src/network.py"], "/script/script_3_2_linear_hidden.py": ["/src/nlp.py", "/src/util/status.py"], "/src/callback.py": ["/src/train.py", "/src/plotter.py"], "/script/script_4_1_pbp_n.py": ["/src/util/status.py"], "/src/train.py": ["/src/network.py", "/src/util_functions.py", "/src/util/status.py"], "/script/script_2_3_sample.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_7_hidden_num.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_3_8_rnn_embedding_size.py": ["/src/nlp.py", "/src/util/status.py"], "/script/script_3_4_inference.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_5_two_hidden.py": ["/src/load_data.py", "/src/network.py"], "/src/network.py": ["/src/util_functions.py"], "/script/script_2_2_k.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_1_naive.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_4_2_plot_bp.py": ["/src/plotter.py"], "/script/script_1_4_learning_rate.py": ["/src/load_data.py", "/src/network.py"], "/script/script_1_1_naive.py": ["/src/callback.py", "/src/train.py", "/src/util/status.py"], "/script/script_3_1_preprocessing.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_5_autoencoder.py": ["/src/load_data.py"], "/script/script_1_9_ReLU.py": ["/src/load_data.py", "/src/network.py"], "/script/script_2_4_pretrain.py": ["/src/load_data.py", "/src/network.py"]} |
68,063 | gen-ko/ynn | refs/heads/master | /script/script_1_10_tanh.py | # required python version: 3.6+
import os
import sys
import src.load_data as load_data
from src import plot_data
from src import layer
from src.network import NeuralNetwork_Dumpable as NN
import src.network as network
import matplotlib.pyplot as plt
import numpy
import os
# format of data
# disitstrain.txt contains 3000 lines, each line 785 numbers, comma delimited
full_path = os.path.realpath(__file__)
path, filename = os.path.split(full_path)
data_filepath = '../data'
data_train_filename = 'digitstrain.txt'
data_valid_filename = 'digitsvalid.txt'
data_test_filename = 'digitstest.txt'
data_train_filepath = os.path.join(path, data_filepath, data_train_filename)
data_valid_filepath = os.path.join(path, data_filepath, data_valid_filename)
data_test_filepath = os.path.join(path, data_filepath, data_test_filename)
print('start initializing...')
# Seed the framework's RNG for reproducible weight initialization.
network.init_nn(random_seed=1099)
x_train, y_train = load_data.load_from_path(data_train_filepath)
x_valid, y_valid = load_data.load_from_path(data_valid_filepath)
# 784-input MLP with a tanh hidden layer.
# NOTE(review): Linear(100, 100) followed by Softmax(10, 10) looks like a
# dimension mismatch (100 outputs into a 10-unit softmax) — confirm the
# layer constructors' (in, out) semantics before trusting this config.
layers = [layer.Linear(784, 100),
          layer.Tanh(100, 100),
          layer.Linear(100, 100),
          layer.Softmax(10, 10)]
myNN = NN(layers, learning_rate=0.1, regularizer=0.0001, momentum=0.9)
myNN.train(x_train, y_train, x_valid, y_valid, epoch=300, batch_size=32)
68,064 | gen-ko/ynn | refs/heads/master | /script/script_3_2_linear_hidden.py | # required python version: 3.6+
# from src import layer
import src.nlp as nlp
import numpy
import os
import pickle
from src import callback as cb
from src import train as utf
from src.util.status import DataStore
from src.util.status import TrainSettings
# resolve file path
full_path = os.path.realpath(__file__)
path, filename = os.path.split(full_path)
dump_filepath = '../output/dump'
data_train_filename = 'train.dump'
data_valid_filename = 'valid.dump'
data_train_filepath = os.path.join(path, dump_filepath, data_train_filename)
data_valid_filepath = os.path.join(path, dump_filepath, data_valid_filename)
# load preprocessed data
with open(data_train_filepath, 'rb+') as f:
    data_train = pickle.load(f)
with open(data_valid_filepath, 'rb+') as f:
    data_valid = pickle.load(f)
# Columns 0-2 are the 3 context word ids; column 3 is the target word.
x_train = data_train[:, 0:3]
y_train = data_train[:, 3]
x_valid = data_valid[:, 0:3]
y_valid = data_valid[:, 3]
# set the random seed
numpy.random.seed(1099)
data_store_train = DataStore(x_train, y_train)
data_store_valid = DataStore(x_valid, y_valid)
# Hyper-parameter sweep: identical training runs at hidden sizes 128/256/512
# (the prefix tags each run's output files).
train_settings = TrainSettings(learning_rate=0.01, batch_size=512, momentum=0.0, plot_callback=cb.plot_callback,
                               loss_callback=cb.loss_callback, filename='script-3-2', epoch=100, prefix='h128')
# build the neural network
mynlp = nlp.NlpL3TypeA(dict_size=8000, embedding_size=16, hidden_units=128)
utf.cross_train(mynlp, data_store_train, data_store_valid, train_settings)
train_settings = TrainSettings(learning_rate=0.01, batch_size=512, momentum=0.0, plot_callback=cb.plot_callback,
                               loss_callback=cb.loss_callback, filename='script-3-2', epoch=100, prefix='h256')
# build the neural network
mynlp = nlp.NlpL3TypeA(dict_size=8000, embedding_size=16, hidden_units=256)
utf.cross_train(mynlp, data_store_train, data_store_valid, train_settings)
train_settings = TrainSettings(learning_rate=0.01, batch_size=512, momentum=0.0, plot_callback=cb.plot_callback,
                               loss_callback=cb.loss_callback, filename='script-3-2', epoch=100, prefix='h512')
# build the neural network
mynlp = nlp.NlpL3TypeA(dict_size=8000, embedding_size=16, hidden_units=512)
utf.cross_train(mynlp, data_store_train, data_store_valid, train_settings)
| {"/script/script_3_5_visualize.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_10_tanh.py": ["/src/load_data.py", "/src/network.py"], "/script/script_3_2_linear_hidden.py": ["/src/nlp.py", "/src/util/status.py"], "/src/callback.py": ["/src/train.py", "/src/plotter.py"], "/script/script_4_1_pbp_n.py": ["/src/util/status.py"], "/src/train.py": ["/src/network.py", "/src/util_functions.py", "/src/util/status.py"], "/script/script_2_3_sample.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_7_hidden_num.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_3_8_rnn_embedding_size.py": ["/src/nlp.py", "/src/util/status.py"], "/script/script_3_4_inference.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_5_two_hidden.py": ["/src/load_data.py", "/src/network.py"], "/src/network.py": ["/src/util_functions.py"], "/script/script_2_2_k.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_1_naive.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_4_2_plot_bp.py": ["/src/plotter.py"], "/script/script_1_4_learning_rate.py": ["/src/load_data.py", "/src/network.py"], "/script/script_1_1_naive.py": ["/src/callback.py", "/src/train.py", "/src/util/status.py"], "/script/script_3_1_preprocessing.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_5_autoencoder.py": ["/src/load_data.py"], "/script/script_1_9_ReLU.py": ["/src/load_data.py", "/src/network.py"], "/script/script_2_4_pretrain.py": ["/src/load_data.py", "/src/network.py"]} |
68,065 | gen-ko/ynn | refs/heads/master | /src/util/metrics.py | import numpy
# the sum of cross_entropy_loss over a batch
def cross_entropy_loss(predict_prob: numpy.ndarray, y_label: numpy.ndarray) -> float:
    """Summed cross-entropy loss over a batch.

    Args:
        predict_prob: (batch, num_classes) array of predicted probabilities.
        y_label: (batch,) array of integer class labels.

    Returns:
        Sum over the batch of -log(predict_prob[i, y_label[i]]).
    """
    # Fix: the original compared `.ndim is 1` — identity comparison of an
    # int to a literal, which only works via CPython's small-int caching
    # (SyntaxWarning on modern Python). Use value equality.
    assert y_label.ndim == 1
    assert predict_prob.ndim == 2
    assert y_label.shape[0] == predict_prob.shape[0]
    # Vectorized gather of each row's probability for its true class,
    # replacing the original O(n) Python loop; result is identical
    # (and 0.0 for an empty batch).
    picked = predict_prob[numpy.arange(y_label.size), y_label]
    return float(-numpy.sum(numpy.log(picked)))
def perplexity(predict_prob: numpy.ndarray, y_label: numpy.ndarray) -> (float, int):
    """Perplexity of a batch, plus its weight (the batch size).

    Args:
        predict_prob: (batch, num_classes) array of predicted probabilities.
        y_label: (batch,) array of integer class labels.

    Returns:
        (perplexity, batch_size); the weight lets callers average
        perplexity across batches of differing sizes.

    NOTE(review): the per-sample loss uses the natural log while the
    exponentiation uses base 2 (exp2); for textbook perplexity these
    bases should match. Kept as-is to preserve existing results —
    confirm before reusing the numbers elsewhere.
    """
    # Fix: replaced `.ndim is 1` / `is 2` identity comparisons with ==
    # (the originals relied on CPython small-int caching).
    assert y_label.ndim == 1
    assert predict_prob.ndim == 2
    assert y_label.shape[0] == predict_prob.shape[0]
    batch_size = predict_prob.shape[0]
    # Vectorized gather + mean negative log-likelihood (was a Python loop).
    picked = predict_prob[numpy.arange(y_label.size), y_label]
    avg_loss = -numpy.sum(numpy.log(picked)) / batch_size
    perp = numpy.exp2(avg_loss)
    return perp, batch_size
| {"/script/script_3_5_visualize.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_10_tanh.py": ["/src/load_data.py", "/src/network.py"], "/script/script_3_2_linear_hidden.py": ["/src/nlp.py", "/src/util/status.py"], "/src/callback.py": ["/src/train.py", "/src/plotter.py"], "/script/script_4_1_pbp_n.py": ["/src/util/status.py"], "/src/train.py": ["/src/network.py", "/src/util_functions.py", "/src/util/status.py"], "/script/script_2_3_sample.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_7_hidden_num.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_3_8_rnn_embedding_size.py": ["/src/nlp.py", "/src/util/status.py"], "/script/script_3_4_inference.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_5_two_hidden.py": ["/src/load_data.py", "/src/network.py"], "/src/network.py": ["/src/util_functions.py"], "/script/script_2_2_k.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_1_naive.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_4_2_plot_bp.py": ["/src/plotter.py"], "/script/script_1_4_learning_rate.py": ["/src/load_data.py", "/src/network.py"], "/script/script_1_1_naive.py": ["/src/callback.py", "/src/train.py", "/src/util/status.py"], "/script/script_3_1_preprocessing.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_5_autoencoder.py": ["/src/load_data.py"], "/script/script_1_9_ReLU.py": ["/src/load_data.py", "/src/network.py"], "/script/script_2_4_pretrain.py": ["/src/load_data.py", "/src/network.py"]} |
68,066 | gen-ko/ynn | refs/heads/master | /src/callback.py | import src.train as utf
import src.util as uf
import numpy
import src.plotter as plotter
import src.train as utf
from src.util import status
from src.util import metrics
def loss_callback(status: status.Status):
    """Accumulate this batch's metrics (cross-entropy, error rate, perplexity)
    into the per-epoch slots of *status*.

    Each contribution is divided by status.size (dataset size), so after a
    full pass the epoch slot holds a dataset average.

    NOTE(review): the parameter name shadows the imported ``status`` module
    used in its own annotation — renaming the parameter would be safer.
    """
    current_epoch = status.current_epoch
    tmp_cross_entropy = (metrics.cross_entropy_loss(status.soft_prob, status.y_batch) / status.size)
    status.loss[current_epoch] += tmp_cross_entropy
    status.error[current_epoch] += (numpy.sum(status.predict != status.y_batch) / status.size)
    try:
        perp, weight = metrics.perplexity(status.soft_prob, status.y_batch)
        tmp = status.perplexity[current_epoch]
        tmp = tmp + perp * weight / status.size
        status.perplexity[current_epoch] = tmp
    except TypeError:
        # status.perplexity starts as None (see Status.__init__); indexing it
        # raises TypeError, which triggers this lazy-allocation path.
        status.perplexity = numpy.zeros(shape=(status.target_epoch,), dtype=numpy.float32)
        perp, weight = metrics.perplexity(status.soft_prob, status.y_batch)
        tmp = perp * weight / status.size
        status.perplexity[current_epoch] = tmp
    return
def plot_callback(status_train: status.Status, status_valid: status.Status):
    """Render/update the loss and perplexity plots for the two runs."""
    plotter.plot_loss(status_train, status_valid)
    plotter.plot_perplexity(status_train, status_valid)
    return
68,067 | gen-ko/ynn | refs/heads/master | /script/script_4_1_pbp_n.py | # required python version: 3.6+
import numpy
import os
import pickle
# NOTE(review): the local name `src` shadows the project package of the same
# name (the `from src import ...` lines below still resolve the package, not
# this variable) — consider renaming to avoid confusion.
src, file = os.path.split(__file__)
from src import callback as cb
from src import train as utf
from src.util.status import DataStore
from src.util.status import TrainSettings
from src import network
from src import layer
import tensorflow as tf

# set the random seed (weight init and batch shuffling both use numpy.random)
numpy.random.seed(1099)

# MNIST via the (deprecated) tf.contrib loader
mnist = tf.contrib.learn.datasets.load_dataset("mnist")
x_train = mnist.train.images  # 55000 x 784
x_valid = mnist.validation.images  # 5000 x 784
y_train = mnist.train.labels
y_valid = mnist.validation.labels
data_store_train = DataStore(x_train, y_train)
data_store_valid = DataStore(x_valid, y_valid)

#############################
train_settings = TrainSettings(learning_rate=0.01, batch_size=16, momentum=0.0, plot_callback=cb.plot_callback,
                               loss_callback=cb.loss_callback, filename='script-4-1', epoch=100, prefix='e16')

# 784 -> 100 sigmoid -> 10 softmax MLP
layers = [layer.Linear(784, 100),
          # layer.BN(100, 100),
          layer.Sigmoid(100),
          layer.Linear(100, 10),
          layer.Softmax(10)]
mynn = network.MLP(layers)
utf.cross_train(mynn, data_store_train, data_store_valid, train_settings)
| {"/script/script_3_5_visualize.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_10_tanh.py": ["/src/load_data.py", "/src/network.py"], "/script/script_3_2_linear_hidden.py": ["/src/nlp.py", "/src/util/status.py"], "/src/callback.py": ["/src/train.py", "/src/plotter.py"], "/script/script_4_1_pbp_n.py": ["/src/util/status.py"], "/src/train.py": ["/src/network.py", "/src/util_functions.py", "/src/util/status.py"], "/script/script_2_3_sample.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_7_hidden_num.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_3_8_rnn_embedding_size.py": ["/src/nlp.py", "/src/util/status.py"], "/script/script_3_4_inference.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_5_two_hidden.py": ["/src/load_data.py", "/src/network.py"], "/src/network.py": ["/src/util_functions.py"], "/script/script_2_2_k.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_1_naive.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_4_2_plot_bp.py": ["/src/plotter.py"], "/script/script_1_4_learning_rate.py": ["/src/load_data.py", "/src/network.py"], "/script/script_1_1_naive.py": ["/src/callback.py", "/src/train.py", "/src/util/status.py"], "/script/script_3_1_preprocessing.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_5_autoencoder.py": ["/src/load_data.py"], "/script/script_1_9_ReLU.py": ["/src/load_data.py", "/src/network.py"], "/script/script_2_4_pretrain.py": ["/src/load_data.py", "/src/network.py"]} |
68,068 | gen-ko/ynn | refs/heads/master | /src/util/status.py | import numpy
class TrainSettings(object):
    """Bag of hyper-parameters and bookkeeping options for one training run.

    Covers optimiser knobs (learning_rate, momentum, l2, l1, dropout), loop
    control (epoch, batch_size, auto_*), the two user callbacks, and the
    filename components used when dumping results.
    """

    def __init__(self, learning_rate=0.01, momentum=0.0, l2=0.0, l1=0.0, dropout=0.0,
                 epoch=200, batch_size=64, auto_stop=False, auto_plot=False, auto_visualize=False,
                 plot_callback=None, loss_callback=None, filename='f', prefix='p', infix='i', suffix='s'):
        # every constructor argument becomes a same-named attribute
        params = dict(learning_rate=learning_rate, momentum=momentum, l2=l2, l1=l1,
                      dropout=dropout, epoch=epoch, batch_size=batch_size,
                      auto_stop=auto_stop, auto_plot=auto_plot, auto_visualize=auto_visualize,
                      plot_callback=plot_callback, loss_callback=loss_callback,
                      filename=filename, prefix=prefix, infix=infix, suffix=suffix)
        for attr_name, value in params.items():
            setattr(self, attr_name, value)
class DataStore(object):
    """Shuffled mini-batch server over a dataset x and optional labels y.

    When labels are supplied, draw_batch/draw_direct/shuffle are bound to the
    dual (x, y) variants; otherwise to the x-only variants.  draw_batch walks
    the shuffled data and returns True in its first slot whenever the draw
    completed a full pass over the data (reshuffling for the next pass).
    """

    def __init__(self, x: numpy.ndarray, y: numpy.ndarray = None):
        self.x = x
        self.size = x.shape[0]
        self.draw_num = 0  # cursor into the current shuffled ordering
        self.x_shuffled: numpy.ndarray = None
        self.y_shuffled: numpy.ndarray = None
        self.y = y
        # BUG FIX: the original detected the label-less case by letting
        # shuffle_dual raise TypeError while indexing y=None — which also
        # consumed an extra RNG shuffle and left x_shuffled half-initialised
        # before retrying.  An explicit None check expresses the dispatch
        # directly and shuffles exactly once.
        if y is not None:
            self.y_exists = True
            self.draw_batch = self.draw_batch_dual
            self.draw_direct = self.draw_direct_dual
            self.shuffle = self.shuffle_dual
        else:
            self.y_exists = False
            self.draw_batch = self.draw_batch_x
            self.draw_direct = self.draw_direct_x
            self.shuffle = self.shuffle_single
        self.shuffle()
        return

    def shuffle_dual(self):
        """Re-order x and y with one shared random permutation."""
        shuffle_idx = numpy.arange(self.size)
        numpy.random.shuffle(shuffle_idx)
        self.x_shuffled = self.x[shuffle_idx]
        self.y_shuffled = self.y[shuffle_idx]
        return

    def shuffle_single(self):
        """Re-order x with a random permutation."""
        shuffle_idx = numpy.arange(self.size)
        numpy.random.shuffle(shuffle_idx)
        self.x_shuffled = self.x[shuffle_idx]
        return

    def draw_batch_dual(self, batch_size: int) -> (bool, numpy.ndarray, numpy.ndarray):
        """Return (pass_completed, x_batch, y_batch); wraps across passes."""
        leftover: int = batch_size + self.draw_num - self.size
        if leftover > 0:
            # batch spans the pass boundary: take the tail, reshuffle, then
            # take the head of the fresh ordering
            self.shuffle()
            x1 = self.x_shuffled[self.draw_num:self.size]
            y1 = self.y_shuffled[self.draw_num:self.size]
            x2 = self.x_shuffled[0:leftover]
            y2 = self.y_shuffled[0:leftover]
            self.draw_num = leftover
            x = numpy.append(x1, x2, axis=0)
            y = numpy.append(y1, y2, axis=0)
            return True, x, y
        elif leftover == 0:
            # batch ends exactly at the pass boundary
            x = self.x_shuffled[self.draw_num:self.size]
            y = self.y_shuffled[self.draw_num:self.size]
            self.shuffle()
            self.draw_num = 0
            return True, x, y
        else:
            next_draw_num = self.draw_num + batch_size
            x = self.x_shuffled[self.draw_num:next_draw_num]
            y = self.y_shuffled[self.draw_num:next_draw_num]
            self.draw_num = next_draw_num
            return False, x, y

    def draw_batch_x(self, batch_size: int) -> (bool, numpy.ndarray):
        """Return (pass_completed, x_batch); label-less variant."""
        leftover: int = batch_size + self.draw_num - self.size
        if leftover > 0:
            self.shuffle()
            x1 = self.x_shuffled[self.draw_num:self.size]
            x2 = self.x_shuffled[0:leftover]
            self.draw_num = leftover
            x = numpy.append(x1, x2, axis=0)
            return True, x
        elif leftover == 0:
            x = self.x_shuffled[self.draw_num:self.size]
            self.shuffle()
            self.draw_num = 0
            return True, x
        else:
            next_draw_num = self.draw_num + batch_size
            x = self.x_shuffled[self.draw_num:next_draw_num]
            self.draw_num = next_draw_num
            return False, x

    def draw_direct_dual(self, *args) -> (bool, numpy.ndarray, numpy.ndarray):
        """Return the whole (unshuffled) dataset with labels."""
        return True, self.x, self.y

    def draw_direct_x(self, *args) -> (bool, numpy.ndarray):
        """Return the whole (unshuffled) dataset."""
        return True, self.x
class Status(object):
    """Mutable per-run progress record used by cross_train and the callbacks.

    Holds the per-epoch metric arrays (loss, error, lazily-created
    perplexity) together with scratch slots (x_batch, y_batch, soft_prob,
    predict) that iteration() overwrites on every batch.
    """

    def __init__(self, train_settings: TrainSettings, data_store: DataStore, is_train: bool):
        self.target_epoch = train_settings.epoch
        self.current_epoch = 0
        metric_shape = (self.target_epoch,)
        self.error: numpy.ndarray = numpy.zeros(shape=metric_shape, dtype=numpy.float32)
        self.loss: numpy.ndarray = numpy.zeros(shape=metric_shape, dtype=numpy.float32)
        # allocated lazily by loss_callback on first use
        self.perplexity: numpy.ndarray = None
        # per-batch scratch slots
        self.soft_prob: numpy.ndarray = None
        self.predict: numpy.ndarray = None
        self.x_batch: numpy.ndarray = None
        self.y_batch: numpy.ndarray = None
        self.size: int = data_store.size
        self.is_train: bool = is_train
        self.train_settings: TrainSettings = train_settings
        self.data_store: DataStore = data_store
        if self.is_train:
            # training draws mini-batches; evaluation takes the full set
            self.batch_size = self.train_settings.batch_size
            self.draw_batch = self.draw_batch_train
        else:
            self.batch_size = self.size
            self.draw_batch = self.draw_direct

    def draw_batch_train(self):
        """Mini-batch draw, delegated to the underlying DataStore."""
        return self.data_store.draw_batch(self.batch_size)

    def draw_direct(self):
        """Full-dataset draw, delegated to the underlying DataStore."""
        return self.data_store.draw_direct()
| {"/script/script_3_5_visualize.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_10_tanh.py": ["/src/load_data.py", "/src/network.py"], "/script/script_3_2_linear_hidden.py": ["/src/nlp.py", "/src/util/status.py"], "/src/callback.py": ["/src/train.py", "/src/plotter.py"], "/script/script_4_1_pbp_n.py": ["/src/util/status.py"], "/src/train.py": ["/src/network.py", "/src/util_functions.py", "/src/util/status.py"], "/script/script_2_3_sample.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_7_hidden_num.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_3_8_rnn_embedding_size.py": ["/src/nlp.py", "/src/util/status.py"], "/script/script_3_4_inference.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_5_two_hidden.py": ["/src/load_data.py", "/src/network.py"], "/src/network.py": ["/src/util_functions.py"], "/script/script_2_2_k.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_1_naive.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_4_2_plot_bp.py": ["/src/plotter.py"], "/script/script_1_4_learning_rate.py": ["/src/load_data.py", "/src/network.py"], "/script/script_1_1_naive.py": ["/src/callback.py", "/src/train.py", "/src/util/status.py"], "/script/script_3_1_preprocessing.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_5_autoencoder.py": ["/src/load_data.py"], "/script/script_1_9_ReLU.py": ["/src/load_data.py", "/src/network.py"], "/script/script_2_4_pretrain.py": ["/src/load_data.py", "/src/network.py"]} |
68,069 | gen-ko/ynn | refs/heads/master | /src/train.py | from src.network import NeuralNetwork
import src.util_functions as uf
import numpy
from src.util.status import DataStore
from src.util.status import TrainSettings
from src.util.status import Status
import os
import pickle
def print_log(status_train: Status, status_valid: Status):
    """Print one row of the training table for the current epoch."""
    epoch = status_train.current_epoch
    row = (
        f"\t{epoch}\t \t|\t {status_train.loss[epoch]:.5f}"
        f" \t|\t {status_train.error[epoch]:.5f}"
        f" \t|\t {status_valid.loss[epoch]:.5f}"
        f" \t|\t {status_valid.error[epoch]:.5f}\t"
    )
    print(row)
def iteration(nn: NeuralNetwork, status: Status):
    """Process one batch: forward pass, metric bookkeeping and — in training
    mode — a backward pass plus parameter update.

    Returns the flag from draw_batch that signals a completed pass over the
    data; cross_train uses it to advance the epoch counter.
    """
    increase_epoch, status.x_batch, status.y_batch = status.draw_batch()
    status.soft_prob = nn.fprop(status.x_batch, status.is_train)
    status.predict = uf.pick_class(status.soft_prob)
    # the callback accumulates loss/error/perplexity into status
    status.train_settings.loss_callback(status)
    if status.is_train:
        nn.bprop(status.y_batch)
        nn.update(status.train_settings)
    return increase_epoch
def cross_train(nn: NeuralNetwork, data_store_train: DataStore, data_store_valid: DataStore, train_settings: TrainSettings):
    """Train *nn*, evaluating on the validation set at every epoch boundary.

    Batches run until the training DataStore signals a completed pass; then a
    single full-batch validation iteration runs, a table row is logged, and
    every third epoch the plot callback fires.  After the final epoch both
    Status objects are pickled under ../output/metric and the network
    parameters under ../output/dump (paths relative to this module's file).
    """
    print('------------------ Start Training -----------------')
    print('\tepoch\t|\ttrain loss\t|\ttrain error\t|\tvalid loss\t|\tvalid error\t')
    status_train = Status(train_settings, data_store_train, True)
    status_valid = Status(train_settings, data_store_valid, False)
    while status_train.current_epoch < status_train.target_epoch:
        if iteration(nn, status_train):
            # epoch boundary: evaluate the whole validation set once
            iteration(nn, status_valid)
            print_log(status_train, status_valid)
            if status_train.current_epoch % 3 == 2:
                train_settings.plot_callback(status_train, status_valid)
            status_train.current_epoch += 1
            status_valid.current_epoch += 1
    # file name encodes the experiment id: <filename>-<prefix>-<infix>-<suffix>.dump
    filename = train_settings.filename + '-' + train_settings.prefix + '-' + train_settings.infix + '-' + train_settings.suffix + '.dump'
    full_path = os.path.realpath(__file__)
    path, _ = os.path.split(full_path)
    savepath = os.path.join(path, '../output/dump', filename)
    metricpath = os.path.join(path, '../output/metric', filename)
    with open(metricpath, 'wb+') as f:
        pickle.dump([status_train, status_valid], f)
    with open(savepath, 'wb+') as f:
        pickle.dump(nn.dump(), f)
    return
def inference(nn: NeuralNetwork, data_store: DataStore, settings: TrainSettings):
    """Run one full-batch forward pass and return the predicted class labels."""
    run_status = Status(settings, data_store, False)
    iteration(nn, run_status)
    return run_status.predict
def nlp_inference_sentence(nn: NeuralNetwork, head: list, vocab: dict):
    """Greedily extend a three-word sentence head using a trigram model.

    Repeatedly feeds the last three word ids to nn.fprop, appends the argmax
    next word, and stops on the 'END' token or once the sentence reaches 14
    tokens.  The *head* list is mutated in place and also returned.
    """
    inv_vocab = {v: k for k, v in vocab.items()}
    # BUG FIX: `is not` compares object identity; `len(head) is not 3` only
    # works via CPython's small-int caching (SyntaxWarning on 3.8+).
    if len(head) != 3:
        raise ValueError(f'the length of the sentence head should be 3, but is actually {len(head)}')
    # sentinel that is never 'END', just to enter the loop
    word_next = 'START'
    while word_next != 'END' and len(head) <= 13:
        id1 = vocab[head[-3]]
        id2 = vocab[head[-2]]
        id3 = vocab[head[-1]]
        data_input = numpy.array([[id1, id2, id3]])
        id_prob = nn.fprop(data_input)
        id_next = uf.pick_class(id_prob)[0]
        word_next = inv_vocab[id_next]
        head.append(word_next)
    return head
| {"/script/script_3_5_visualize.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_10_tanh.py": ["/src/load_data.py", "/src/network.py"], "/script/script_3_2_linear_hidden.py": ["/src/nlp.py", "/src/util/status.py"], "/src/callback.py": ["/src/train.py", "/src/plotter.py"], "/script/script_4_1_pbp_n.py": ["/src/util/status.py"], "/src/train.py": ["/src/network.py", "/src/util_functions.py", "/src/util/status.py"], "/script/script_2_3_sample.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_7_hidden_num.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_3_8_rnn_embedding_size.py": ["/src/nlp.py", "/src/util/status.py"], "/script/script_3_4_inference.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_5_two_hidden.py": ["/src/load_data.py", "/src/network.py"], "/src/network.py": ["/src/util_functions.py"], "/script/script_2_2_k.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_1_naive.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_4_2_plot_bp.py": ["/src/plotter.py"], "/script/script_1_4_learning_rate.py": ["/src/load_data.py", "/src/network.py"], "/script/script_1_1_naive.py": ["/src/callback.py", "/src/train.py", "/src/util/status.py"], "/script/script_3_1_preprocessing.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_5_autoencoder.py": ["/src/load_data.py"], "/script/script_1_9_ReLU.py": ["/src/load_data.py", "/src/network.py"], "/script/script_2_4_pretrain.py": ["/src/load_data.py", "/src/network.py"]} |
68,070 | gen-ko/ynn | refs/heads/master | /ynn/python/layers/pbp.py | import numpy
import math
import warnings

from src import ytensor
from src.util.status import TrainSettings
warnings.filterwarnings('error')
# registry of every constructed layer, keyed by its (unique) name
name_pool: dict = {}


class Layer(object):
    """Abstract base layer: records dimensions and registers itself (under a
    de-duplicated name) in the module-level name_pool.

    forward/backward must be overridden; update/dump/load are no-ops here.
    """

    def __init__(self, input_dimension, output_dimension, name: str = 'Base'):
        # BUG FIX: the original used name_pool.__sizeof__() (the dict's size
        # in BYTES) both as the id and as the de-duplication counter, and the
        # collision loop never incremented its counter (potential infinite
        # loop).  len(name_pool) is the intended running count.
        self.id = len(name_pool)
        if name in name_pool:
            name_num = len(name_pool)
            new_name = f'{name}_{name_num}'
            while new_name in name_pool:
                name_num += 1
                new_name = f'{name}_{name_num}'
            self.name = new_name
        else:
            self.name = name
        name_pool[self.name] = self
        self.input_dimension = input_dimension
        self.output_dimension = output_dimension

    def forward(self, *args):
        # abstract — must be overridden
        raise ValueError('Calling a virtual function')

    def backward(self, *args):
        # abstract — must be overridden
        raise ValueError('Calling a virtual function')

    def update(self, *args):
        # no trainable parameters by default
        return

    def dump(self):
        # serialisable parameter blob; 0 means "nothing to save"
        return 0

    def load(self, *args):
        return
class Linear(Layer):
    """Fully connected layer: forward computes x @ w + b for x of shape
    (batch, input_dimension); Glorot/Xavier uniform weight init in float32."""

    def __init__(self, input_dimension, output_dimension, name: str = 'Linear'):
        Layer.__init__(self, input_dimension, output_dimension, name)
        # Glorot/Xavier uniform range
        wi = math.sqrt(6.0) / math.sqrt(input_dimension + output_dimension + 0.0)
        self.w = numpy.random.uniform(low=-wi, high=wi, size=(input_dimension, output_dimension)).astype(numpy.float32)
        self.b = numpy.zeros((output_dimension,), dtype=numpy.float32)
        # momentum buffers
        self.delta_w = numpy.zeros(shape=self.w.shape, dtype=numpy.float32)
        self.delta_b = numpy.zeros(shape=self.b.shape, dtype=numpy.float32)
        # gradient buffers (overwritten by backward, cleared by update)
        self.d_w = numpy.zeros(self.w.shape, dtype=numpy.float32)
        self.d_b = numpy.zeros(self.b.shape, dtype=numpy.float32)

    def forward(self, x):
        """Affine map: returns x @ w + b."""
        return numpy.dot(x, self.w) + self.b

    def backward(self, d_top, h_top, h_bottom):
        """Store batch-averaged gradients and return the gradient w.r.t. the
        layer input (d_top @ w.T)."""
        batch_size = d_top.shape[0]
        # batched outer product h_bottom^T @ d_top in one tensordot call
        self.d_w = numpy.tensordot(h_bottom, d_top, axes=(0, 0))
        self.d_b = numpy.sum(d_top, axis=0)
        self.d_w /= batch_size
        self.d_b /= batch_size
        d_bottom = numpy.dot(d_top, self.w.T)
        return d_bottom

    def update(self, train_settings: TrainSettings):
        """SGD step with momentum and L2 regularisation (weights only)."""
        regular = train_settings.l2 * 2.0
        momentum = train_settings.momentum
        learning_rate = train_settings.learning_rate
        tmp = self.d_w + regular * self.w
        self.delta_w = -learning_rate * tmp + momentum * self.delta_w
        self.w += self.delta_w
        tmp = self.d_b
        self.delta_b = -learning_rate * tmp + momentum * self.delta_b
        self.b += self.delta_b
        # BUG FIX: reset the gradient buffers with the same float32 dtype used
        # at init (the original reset silently switched them to float64).
        self.d_w = numpy.zeros(self.w.shape, dtype=numpy.float32)
        self.d_b = numpy.zeros(self.b.shape, dtype=numpy.float32)
        return

    def dump(self):
        """Parameter blob consumed by load()."""
        return [self.w, self.b]

    def load(self, blob):
        self.w = blob[0]
        self.b = blob[1]
        return
class ProbabilisticGaussianLinear(Layer):
    """PBP-style linear layer with Gaussian weight posteriors (mean/variance).

    NOTE(review): this class looks unfinished.  Attributes referenced below
    but never initialised in __init__: b_mean, b_var (w/b properties,
    forward_proba, update), d_z_w_mean, d_z_w_var, d_z_b_mean, d_z_b_var,
    z_var (backward), delta_b_mean, delta_b_var, d_b_mean, d_b_var (update —
    d_b_mean/d_b_var are only created by backward).  Using them before they
    exist raises AttributeError.  Also, the `w` property samples with
    size=(input_dimension, output_dimension) while w_mean/w_var have shape
    (input_dimension + 1, output_dimension) — a shape mismatch.  Confirm the
    intended design before relying on this layer.
    """

    def __init__(self, input_dimension, output_dimension, name: str = 'ProbabilisticGaussianLinear'):
        Layer.__init__(self, input_dimension, output_dimension, name)
        # lambda is a global prior to weights
        self.alpha_0_gamma = 6.0
        self.beta_0_gamma = 6.0
        # scalar hyper-parameters drawn from Gamma(6, 1/6)
        self.gamma = numpy.random.gamma(shape=6.0, scale=1 / 6.0, size=()).item()
        self.lam = numpy.random.gamma(shape=6.0, scale=1 / 6.0, size=()).item()
        self.scale = 1.0 / self.lam
        # W : include bias V_l x (V_{l-1} + 1)
        self.w_mean = numpy.zeros(shape=(input_dimension + 1, output_dimension), dtype=numpy.float32)
        self.w_var = numpy.random.normal(loc=0.0, scale=self.lam, size=(input_dimension + 1, self.output_dimension))
        # momentum buffers
        self.delta_w_mean = numpy.zeros(shape=self.w_mean.shape, dtype=numpy.float32)
        self.delta_w_var = numpy.zeros(shape=self.w_var.shape, dtype=numpy.float32)
        # gradient buffers
        self.d_w_mean = numpy.zeros(self.w_mean.shape, dtype=numpy.float32)
        self.d_w_var = numpy.zeros(self.w_var.shape, dtype=numpy.float32)
        return

    @property
    def w(self):
        # NOTE(review): size mismatches the (input_dimension + 1, ...) shape
        # of w_mean/w_var — see class docstring.
        return numpy.random.normal(loc=self.w_mean, scale=self.w_var,
                                   size=(self.input_dimension, self.output_dimension))

    @property
    def b(self):
        # NOTE(review): b_mean/b_var are never initialised — see class docstring.
        return numpy.random.normal(loc=self.b_mean, scale=self.b_var, size=(self.output_dimension,))

    def forward(self, x):
        """Single stochastic forward pass with sampled weights."""
        return numpy.dot(x, self.w) + self.b

    def forward_proba(self, x_mean, x_var):
        """Propagate an input mean/variance pair through the layer."""
        y_mean = numpy.dot(x_mean, self.w_mean) + self.b_mean / numpy.sqrt(float(self.input_dimension + 1))
        y_var = numpy.dot(x_var, self.w_mean * self.w_mean) + self.w_var + self.b_var
        return y_mean, y_var

    def backward(self, d_top_mean, d_top_var, h_top_mean, h_top_var, h_bottom_mean, h_bottom_var):
        # NOTE(review): d_z_* and z_var are never defined anywhere in this
        # class — see class docstring.
        self.d_w_mean = self.w_var + self.d_z_w_mean
        self.d_w_var = - self.w_var * self.w_var * (self.d_z_w_mean * self.d_z_w_mean - 2 * self.d_z_w_var)
        self.d_b_mean = self.b_var + self.d_z_b_mean
        self.d_b_var = - self.z_var * self.z_var * (self.d_z_b_mean * self.d_z_b_mean - 2 * self.d_z_b_var)
        return

    def update(self, train_settings: TrainSettings):
        """SGD + momentum step over the posterior means/variances."""
        regular = train_settings.l2 * 2.0
        momentum = train_settings.momentum
        learning_rate = train_settings.learning_rate
        tmp = self.d_w_mean + regular * self.w_mean
        self.delta_w_mean = -learning_rate * tmp + momentum * self.delta_w_mean
        self.w_mean += self.delta_w_mean
        tmp = self.d_w_var + regular * self.w_var
        self.delta_w_var = -learning_rate * tmp + momentum * self.delta_w_var
        self.w_var += self.delta_w_var
        tmp = self.d_b_mean
        self.delta_b_mean = -learning_rate * tmp + momentum * self.delta_b_mean
        self.b_mean += self.delta_b_mean
        tmp = self.d_b_var
        self.delta_b_var = -learning_rate * tmp + momentum * self.delta_b_var
        self.b_var += self.delta_b_var
        self.d_w_mean = numpy.zeros(self.w_mean.shape)
        self.d_w_var = numpy.zeros(self.w_var.shape)
        self.d_b_mean = numpy.zeros(self.b_mean.shape)
        self.d_b_var = numpy.zeros(self.b_var.shape)
        return
# vanilla RNN
class Recursive(Layer):
    """Vanilla RNN cell pre-activation: h_t = x_t @ wxh + s_{t-1} @ whh + b.

    Unlike Linear, backward() accumulates gradients with += — presumably one
    call per time step (truncated BPTT) — and only update() clears them.
    """

    def __init__(self, input_dimension, output_dimension, name: str = 'Recursive'):
        Layer.__init__(self, input_dimension, output_dimension, name)
        # Glorot/Xavier uniform range
        wi = math.sqrt(6.0) / math.sqrt(input_dimension + output_dimension + 0.0)
        self.wxh = numpy.random.uniform(low=-wi, high=wi, size=(input_dimension, output_dimension)).astype(
            numpy.float32)
        self.whh = numpy.random.uniform(low=-wi, high=wi, size=(output_dimension, output_dimension)).astype(
            numpy.float32)
        self.b = numpy.zeros((output_dimension,), dtype=numpy.float32)
        # momentum buffers
        self.delta_wxh = numpy.zeros(shape=self.wxh.shape, dtype=numpy.float32)
        self.delta_whh = numpy.zeros(shape=self.whh.shape, dtype=numpy.float32)
        self.delta_b = numpy.zeros(shape=self.b.shape, dtype=numpy.float32)
        # gradient accumulators (cleared in update)
        self.d_wxh = numpy.zeros(self.wxh.shape, dtype=numpy.float32)
        self.d_whh = numpy.zeros(self.whh.shape, dtype=numpy.float32)
        self.d_b = numpy.zeros(self.b.shape, dtype=numpy.float32)
        self.default_d_top: numpy.ndarray = None
        self.default_d_state_top: numpy.ndarray = None

    def forward(self, h_bottom, state_in):
        """Pre-activation output from input h_bottom and previous state."""
        t1 = numpy.dot(h_bottom, self.wxh)
        t2 = numpy.dot(state_in, self.whh)
        h_top = t1 + t2 + self.b
        return h_top

    def backward(self, d_top, h_top, h_bottom, state_bottom):
        """Accumulate batch-averaged gradients; returns (d_input, d_state)."""
        batch_size = d_top.shape[0]
        self.d_wxh += numpy.tensordot(h_bottom, d_top, axes=(0, 0)) / batch_size
        self.d_whh += numpy.tensordot(state_bottom, d_top, axes=(0, 0)) / batch_size
        self.d_b += numpy.sum(d_top, axis=0) / batch_size
        d_bottom = numpy.dot(d_top, self.wxh.T)
        d_state = numpy.dot(d_top, self.whh.T)
        return d_bottom, d_state

    def update(self, train_settings: TrainSettings):
        """SGD + momentum step (L2 on weight matrices only), then clear grads."""
        regular = train_settings.l2 * 2.0
        momentum = train_settings.momentum
        learning_rate = train_settings.learning_rate
        tmp = self.d_wxh + regular * self.wxh
        self.delta_wxh = -learning_rate * tmp + momentum * self.delta_wxh
        self.wxh += self.delta_wxh
        tmp = self.d_whh + regular * self.whh
        self.delta_whh = -learning_rate * tmp + momentum * self.delta_whh
        self.whh += self.delta_whh
        tmp = self.d_b
        self.delta_b = -learning_rate * tmp + momentum * self.delta_b
        self.b += self.delta_b
        # NOTE(review): these resets default to float64 while __init__ used
        # float32, so the accumulators become float64 from here on — confirm
        # that dtype drift is intended.
        self.d_wxh = numpy.zeros(self.wxh.shape)
        self.d_whh = numpy.zeros(self.whh.shape)
        self.d_b = numpy.zeros(self.b.shape)
        return

    def dump(self):
        """Parameter blob consumed by load()."""
        return [self.wxh, self.whh, self.b]

    def load(self, blob):
        self.wxh = blob[0]
        self.whh = blob[1]
        self.b = blob[2]
        return
class RBM(Layer):
    """Binary restricted Boltzmann machine with sigmoid conditionals.

    w is (hidden, visible); biases are column vectors, so inputs are
    presumably column-oriented: x of shape (input_dimension, batch) — confirm
    against callers.
    """

    def __init__(self, input_dimension, output_dimension):
        Layer.__init__(self, input_dimension, output_dimension)
        # Glorot-style uniform range
        b = math.sqrt(6.0) / math.sqrt(input_dimension + output_dimension + 0.0)
        self.w = numpy.random.uniform(low=-b, high=b, size=(output_dimension, input_dimension))
        self.h_bias = numpy.zeros(shape=(output_dimension, 1), dtype=numpy.float64)
        self.x_bias = numpy.zeros(shape=(input_dimension, 1), dtype=numpy.float64)

    def forward(self, x):
        """Hidden activation probabilities: sigmoid(w @ x + h_bias)."""
        tmp = numpy.dot(self.w, x)
        tmp += self.h_bias
        # tmp = numpy.clip(tmp, -500.0, 500.0)
        # tmp = numpy.exp(-tmp) + 1
        # tmp = numpy.reciprocal(tmp)
        tmp = ytensor.sigmoid(tmp)
        return tmp

    def sample_h_given_x(self, x):
        """Draw a binary hidden sample from p(h | x)."""
        h_mean = self.forward(x)
        h_sample = numpy.random.binomial(n=1, p=h_mean, size=h_mean.shape)
        return h_sample

    def sample_x_given_h(self, h):
        """Draw a binary visible sample from p(x | h)."""
        x_mean = self.backward(h)
        x_sample = numpy.random.binomial(n=1, p=x_mean, size=x_mean.shape)
        return x_sample

    def gibbs_xhx(self, x):
        """One Gibbs step starting from the visible units: x -> h -> x'."""
        h_sample = self.sample_h_given_x(x)
        x_sample = self.sample_x_given_h(h_sample)
        return x_sample

    def gibbs_hxh(self, h):
        """One Gibbs step starting from the hidden units: h -> x -> h'."""
        x_sample = self.sample_x_given_h(h)
        h_sample = self.sample_h_given_x(x_sample)
        return h_sample

    def backward(self, h):
        """Visible activation probabilities: sigmoid(w.T @ h + x_bias)."""
        tmp = numpy.dot(self.w.T, h)
        tmp += self.x_bias
        # tmp = numpy.clip(tmp, -500.0, 500.0)
        # tmp = numpy.exp(-tmp) + 1
        # tmp = numpy.reciprocal(tmp)
        tmp = ytensor.sigmoid(tmp)
        return tmp

    def update(self, delta_w, delta_h_bias, delta_x_bias, learning_rate):
        """Apply externally computed parameter deltas, scaled by learning_rate."""
        self.w += learning_rate * delta_w
        self.h_bias += learning_rate * delta_h_bias
        self.x_bias += learning_rate * delta_x_bias
class AutoEncoder(Layer):
    """Tied-weight autoencoder with sigmoid activations on both directions.

    forward encodes x -> h with w; backward decodes h -> x with w.T.
    """

    def __init__(self, input_dimension, output_dimension):
        Layer.__init__(self, input_dimension, output_dimension)
        self.w = numpy.random.normal(0, 0.1, (output_dimension, input_dimension))
        self.h_bias = numpy.zeros(shape=(output_dimension, 1), dtype=numpy.float64)
        self.x_bias = numpy.zeros(shape=(input_dimension, 1), dtype=numpy.float64)

    @staticmethod
    def _sigmoid(pre_activation):
        """Numerically clipped logistic function 1 / (1 + exp(-a))."""
        clipped = numpy.clip(pre_activation, -500.0, 500.0)
        return numpy.reciprocal(numpy.exp(-clipped) + 1)

    def forward(self, x):
        """Encode: sigmoid(w @ x + h_bias)."""
        return self._sigmoid(numpy.dot(self.w, x) + self.h_bias)

    def backward(self, h):
        """Decode: sigmoid(w.T @ h + x_bias)."""
        return self._sigmoid(numpy.dot(self.w.T, h) + self.x_bias)

    def update(self, delta_w, delta_h_bias, delta_x_bias, learning_rate):
        """Apply externally computed parameter deltas, scaled by learning_rate."""
        self.w += learning_rate * delta_w
        self.h_bias += learning_rate * delta_h_bias
        self.x_bias += learning_rate * delta_x_bias
class Dropout(Layer):
    """Zero each activation independently with probability *drop_rate*.

    NOTE(review): kept activations are not rescaled by 1/(1 - drop_rate)
    (no "inverted dropout") and the mask is applied on every forward call,
    including inference — confirm that is the intended behaviour.
    """

    def __init__(self, input_dimension, output_dimension, drop_rate=0.1):
        Layer.__init__(self, input_dimension, output_dimension)
        assert input_dimension == output_dimension, 'input and output dimension is not equal'
        self.drop_rate = drop_rate

    def forward(self, x):
        # PERF FIX: one vectorised mask instead of the original per-element
        # Python double loop; also generalises beyond 2-D inputs.  Multiplying
        # by a boolean mask preserves x's dtype.
        keep_mask = numpy.random.uniform(0.0, 1.0, size=x.shape) >= self.drop_rate
        return x * keep_mask

    def backward(self, *args):
        # pass-through: gradients are returned unchanged
        return args
class Nonlinear(Layer):
    """Base class for element-wise activations (input dim == output dim).

    Subclasses implement activation() and derivative(); derivative() is
    expressed in terms of the activation OUTPUT h, not the pre-activation
    (see Sigmoid/ReLU/Tanh below).
    """

    def __init__(self, dimension, name: str = 'Nonlinear'):
        Layer.__init__(self, dimension, dimension, name)
        return

    def activation(self, a):
        # abstract — must be overridden
        raise ValueError('Calling a virtual function')

    def derivative(self, h):
        # abstract — must be overridden
        raise ValueError('Calling a virtual function')

    def backward(self, d_h_out, h_out, h_in):
        """Chain rule: d_h_in = d_h_out * f'(h_out)."""
        deri = self.derivative(h_out)
        d_h_in = numpy.multiply(d_h_out, deri)
        return d_h_in

    def forward(self, x):
        return self.activation(x)

    def update(self, train_settings: TrainSettings):
        # no trainable parameters
        return
class Sigmoid(Nonlinear):
    """Element-wise logistic activation."""

    def activation(self, x: numpy.ndarray):
        # clip to keep exp() from overflowing on large-magnitude inputs
        z = numpy.clip(x, -500.0, 500.0)
        return numpy.reciprocal(numpy.exp(-z) + 1)

    def derivative(self, h_out: numpy.ndarray):
        # sigmoid'(a) = h * (1 - h), expressed via the activation output h
        return numpy.multiply(1.0 - h_out, h_out)
class ReLU(Nonlinear):
    """Element-wise rectified linear activation."""

    def activation(self, x: numpy.ndarray):
        return numpy.maximum(0.0, x)

    def derivative(self, h_out: numpy.ndarray):
        # 1.0 where the unit was active, 0.0 elsewhere
        return (h_out > 0).astype(numpy.float64)
class Tanh(Nonlinear):
    """Element-wise hyperbolic-tangent activation."""

    def activation(self, x: numpy.ndarray):
        return numpy.tanh(x)

    def derivative(self, h_out: numpy.ndarray):
        # tanh'(a) = 1 - h^2, expressed via the activation output h
        return 1 - numpy.power(h_out, 2)
# modified to conform to the new input layout, (batch_size, dimension)
# modified to conform to the new input layout, (batch_size, dimension)
class Softmax(Layer):
    """Row-wise softmax over (batch_size, dimension) inputs."""

    def __init__(self, dimension, name: str = 'Softmax'):
        Layer.__init__(self, dimension, dimension, name)
        return

    def forward(self, x):
        # clip logits so exp() cannot overflow
        tmp = numpy.clip(x, -100, 100)
        tmp = numpy.exp(tmp)
        tmp2 = numpy.sum(tmp, axis=1)
        # transpose so each row sum broadcasts along axis 0 for the division
        tmp = tmp.T
        tmp /= tmp2
        return tmp.T

    def backward(self, y, h_out, h_in):
        """Gradient of softmax + cross-entropy w.r.t. the logits: for each row
        it subtracts 1 at the true-label column (i.e. soft_prob - one_hot(y)).

        NOTE(review): mutates h_out in place, so the forward output must not
        be reused after this call — confirm that is intended.
        """
        batch_size = h_out.shape[0]
        for i in range(batch_size):
            h_out[i, y[i]] -= 1.0
        return h_out

    def update(self, *args):
        # no trainable parameters
        return
class BN(Layer):
    """Normalisation layer with a scalar learnable scale (gamma) and shift (beta).

    NOTE(review): forward transposes x and takes mean/variance along axis 0
    of x.T — with x of shape (batch, features) that normalises each sample
    over its FEATURES (layer-norm-like), not over the batch; and gamma/beta
    are scalars rather than per-feature vectors.  update() also takes
    (learning_rate, regular, momentum) directly, unlike the TrainSettings
    signature used by the other layers.  Confirm all of this before reuse.
    """

    def __init__(self, in_dim, out_dim):
        Layer.__init__(self, in_dim, out_dim)
        self.eps = 1e-5
        # NOTE(review): `b` is computed but never used
        b = math.sqrt(6.0) / math.sqrt(in_dim + out_dim + 0.0)
        self.gamma = 1.0
        self.beta = 0.0
        # momentum buffers
        self.delta_gamma = 0.0
        self.delta_b = 0.0
        # gradient accumulators
        self.g_g = 0.0
        self.g_b = 0.0
        # cached by forward() for use in backward()
        self.x_hat: numpy.ndarray
        self.ivar = 0.0

    def forward(self, x):
        D, N = x.shape
        tmp_x = x.T
        mu = numpy.mean(tmp_x, axis=0)
        xmu = tmp_x - mu
        sq = xmu ** 2
        var = 1. / N * numpy.sum(sq, axis=0)
        sqrtvar = numpy.sqrt(var + self.eps)
        self.ivar = 1. / sqrtvar
        self.x_hat = xmu * self.ivar
        tmp = self.gamma * self.x_hat
        out = tmp + self.beta
        return out.T

    def backward(self, g_h, h_out, h_in):
        # get the dimensions of the input/output
        D, N = g_h.shape
        dout = g_h.T
        x_hat = self.x_hat
        inv_var = self.ivar  # (unused alias of self.ivar)
        dxhat = dout * self.gamma
        # standard normalisation-backward decomposition along the normalised axis
        tmp1 = (1. / N) * self.ivar
        tmp2 = (N * dxhat - numpy.sum(dxhat, axis=0))
        tmp3 = (x_hat * numpy.sum(dxhat * x_hat, axis=0))
        dx = tmp1 * (tmp2 - tmp3)
        self.g_b = numpy.sum(dout, axis=0)
        self.g_g = numpy.sum(numpy.multiply(x_hat, dout), axis=0)
        return dx.T

    def update(self, learning_rate, regular, momentum):
        """SGD + momentum step for gamma (L2-regularised) and beta."""
        tmp = self.g_g + regular * 2.0 * self.gamma
        self.delta_gamma = -learning_rate * tmp + momentum * self.delta_gamma
        self.gamma += self.delta_gamma
        tmp = self.g_b
        self.delta_b = -learning_rate * tmp + momentum * self.delta_b
        self.beta += self.delta_b
        self.g_g = 0.0
        self.g_b = 0.0
class Embedding(Layer):
    """Trainable lookup table mapping integer ids to dense rows of w.

    Gradients are kept sparse: backward appends the incoming gradient rows
    and their ids; update applies them to the touched rows only.
    """

    def __init__(self, input_dimension, output_dimension, name: str = 'Embedding'):
        Layer.__init__(self, input_dimension, output_dimension, name)
        # pending sparse gradient rows and their matching ids; None when empty
        self.d_w = None
        self.d_w_index = None
        self.w = numpy.random.normal(0.0, 1.0, size=(input_dimension, output_dimension)).astype(numpy.float32)

    def forward(self, x):
        """Row lookup: x holds integer ids indexing into w."""
        return self.w[x]

    def backward(self, d_h, h_out, h_in):
        """Queue the batch-averaged gradient rows for the ids in h_in."""
        batch_size = h_out.shape[0]
        # BUG FIX: the original used `try: numpy.append(...) except:` with a
        # bare except to detect the first call (d_w is None), which silently
        # swallowed every other error as well.  Test the condition explicitly.
        if self.d_w is None:
            self.d_w = d_h / batch_size
            self.d_w_index = h_in
        else:
            self.d_w = numpy.append(self.d_w, d_h / batch_size, axis=0)
            self.d_w_index = numpy.append(self.d_w_index, h_in, axis=0)
        return

    def update(self, train_settings):
        """Apply the queued sparse gradient with plain SGD, then clear it.

        NOTE(review): fancy-indexed `-=` applies only ONE update per
        duplicated id (NumPy buffers the assignment, it does not accumulate);
        confirm that is acceptable, otherwise numpy.subtract.at is needed.
        """
        self.w[self.d_w_index] -= train_settings.learning_rate * self.d_w
        self.d_w_index = None
        self.d_w = None
        return

    def dump(self):
        """Parameter blob consumed by load()."""
        return [self.w]

    def load(self, blob):
        self.w = blob[0]
        return
| {"/script/script_3_5_visualize.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_10_tanh.py": ["/src/load_data.py", "/src/network.py"], "/script/script_3_2_linear_hidden.py": ["/src/nlp.py", "/src/util/status.py"], "/src/callback.py": ["/src/train.py", "/src/plotter.py"], "/script/script_4_1_pbp_n.py": ["/src/util/status.py"], "/src/train.py": ["/src/network.py", "/src/util_functions.py", "/src/util/status.py"], "/script/script_2_3_sample.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_7_hidden_num.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_3_8_rnn_embedding_size.py": ["/src/nlp.py", "/src/util/status.py"], "/script/script_3_4_inference.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_5_two_hidden.py": ["/src/load_data.py", "/src/network.py"], "/src/network.py": ["/src/util_functions.py"], "/script/script_2_2_k.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_1_naive.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_4_2_plot_bp.py": ["/src/plotter.py"], "/script/script_1_4_learning_rate.py": ["/src/load_data.py", "/src/network.py"], "/script/script_1_1_naive.py": ["/src/callback.py", "/src/train.py", "/src/util/status.py"], "/script/script_3_1_preprocessing.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_5_autoencoder.py": ["/src/load_data.py"], "/script/script_1_9_ReLU.py": ["/src/load_data.py", "/src/network.py"], "/script/script_2_4_pretrain.py": ["/src/load_data.py", "/src/network.py"]} |
68,071 | gen-ko/ynn | refs/heads/master | /src/util_functions.py | import numpy
from scipy.stats import entropy
def cross_entropy_loss(predict_prob: numpy.ndarray, y_label: numpy.ndarray, take_average: bool = False) -> float:
    """Negative log-likelihood of the true labels under ``predict_prob``.

    Parameters
    ----------
    predict_prob : (batch_size, num_classes) array of predicted probabilities.
    y_label : (batch_size,) array of integer class labels.
    take_average : if True, divide the summed loss by the batch size.

    Returns
    -------
    Summed (or averaged) cross-entropy loss as a float.

    Raises
    ------
    ValueError if any selected probability is 0, so the log would overflow.
    """
    batch_size = predict_prob.shape[0]
    # Pick the probability assigned to the correct class of every sample
    # (vectorized replacement of the original per-sample Python loop).
    picked = predict_prob[numpy.arange(y_label.size), y_label]
    # numpy.log(0) yields -inf with a RuntimeWarning instead of raising, so
    # the original try/except around it never fired; check explicitly.
    if not numpy.all(picked > 0.0):
        raise ValueError('Cross Entropy Loss Overflowing')
    loss = float(-numpy.sum(numpy.log(picked)))
    if take_average:
        loss /= batch_size
    return loss
def pick_class(softprob: numpy.ndarray) -> numpy.ndarray:
    """Return the index of the most probable class for each row of ``softprob``."""
    return softprob.argmax(axis=1)
def shuffle(x, y) -> (numpy.ndarray, numpy.ndarray):
    """Shuffle ``x`` and ``y`` with the same random permutation so that
    corresponding rows stay paired."""
    order = numpy.arange(y.size)
    numpy.random.shuffle(order)
    return x[order], y[order]
def predict_score(predict_label: numpy.ndarray, y_label: numpy.ndarray, take_average: bool = False) -> float:
    """Count how many predictions match the labels; with ``take_average``
    return the accuracy (fraction of matches) instead of the raw count."""
    score = (predict_label == y_label).sum().astype(float)
    if take_average:
        score /= y_label.size
    return score
# Superseded variant kept for reference: it computed 2**entropy of each
# column of the distribution rather than a label-conditional perplexity.
'''
def perplexity(prob_distribution: numpy.ndarray) -> float:
    tmp = entropy(prob_distribution.T)
    tmp = numpy.power(2, tmp)
    return numpy.sum(tmp)
'''
def perplexity(predict_prob: numpy.ndarray, y_label: numpy.ndarray, take_average: bool = False) -> float:
    """Sum (or mean) of per-sample perplexities of the true labels.

    Per-sample perplexity is 2**(-log2 p) == 1/p where p is the probability
    assigned to the correct class of that sample.

    Parameters mirror ``cross_entropy_loss``; raises ValueError when a
    selected probability is 0.
    """
    batch_size = predict_prob.shape[0]
    picked = predict_prob[numpy.arange(y_label.size), y_label]
    # numpy raises no exception on log(0) (it warns and returns -inf), so
    # the original try/except was dead code; test explicitly instead.
    if not numpy.all(picked > 0.0):
        raise ValueError('Perplexity Overflowing')
    # Bug fix: the original mixed bases — exp2 of the *natural* log gives
    # 2**(-ln p), which is neither 1/p nor e**(-ln p).  Use matching base 2.
    loss = float(numpy.sum(numpy.exp2(-numpy.log2(picked))))
    if take_average:
        loss /= batch_size
    return loss
| {"/script/script_3_5_visualize.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_10_tanh.py": ["/src/load_data.py", "/src/network.py"], "/script/script_3_2_linear_hidden.py": ["/src/nlp.py", "/src/util/status.py"], "/src/callback.py": ["/src/train.py", "/src/plotter.py"], "/script/script_4_1_pbp_n.py": ["/src/util/status.py"], "/src/train.py": ["/src/network.py", "/src/util_functions.py", "/src/util/status.py"], "/script/script_2_3_sample.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_7_hidden_num.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_3_8_rnn_embedding_size.py": ["/src/nlp.py", "/src/util/status.py"], "/script/script_3_4_inference.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_5_two_hidden.py": ["/src/load_data.py", "/src/network.py"], "/src/network.py": ["/src/util_functions.py"], "/script/script_2_2_k.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_1_naive.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_4_2_plot_bp.py": ["/src/plotter.py"], "/script/script_1_4_learning_rate.py": ["/src/load_data.py", "/src/network.py"], "/script/script_1_1_naive.py": ["/src/callback.py", "/src/train.py", "/src/util/status.py"], "/script/script_3_1_preprocessing.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_5_autoencoder.py": ["/src/load_data.py"], "/script/script_1_9_ReLU.py": ["/src/load_data.py", "/src/network.py"], "/script/script_2_4_pretrain.py": ["/src/load_data.py", "/src/network.py"]} |
68,072 | gen-ko/ynn | refs/heads/master | /script/script_2_3_sample.py | # required python version: 3.6+
import os
import sys
import src.load_data as load_data
from src import plot_data
from src import layer
import src.rbm as rbm
import matplotlib.pyplot as plt
import numpy
import os
import pickle
# Load a previously trained RBM's parameters and sample digits from it,
# saving the generated images as one tiled PNG.
# format of data
# disitstrain.txt contains 3000 lines, each line 785 numbers, comma delimited
print('start initializing...')
full_path = os.path.realpath(__file__)
path, _ = os.path.split(full_path)
data_filepath = '../output/dump'
filepath = os.path.join(path, data_filepath, 'script-2-2-k=5-autostop-rbm-whx-2359.dump')
# Shapes stored in the dump:
# w (100, 784)
# h_bias (100, 1)
# x_bias (784, 1)
with open(filepath, 'rb') as f:
    w, h_bias, x_bias = pickle.load(f)
numpy.random.seed(1099)  # reproducible sampling
# Rebuild an RBM of matching size and install the trained parameters.
myRBM = rbm.RBM(28*28, 100)
myRBM.layer.w = w
myRBM.layer.h_bias = h_bias
myRBM.layer.x_bias = x_bias
# 100 samples, each drawn after a 1000-step Gibbs chain; x is (784, 100).
x = myRBM.generate_sample(sample_num=100, sampler_step=1000)
filename_prefix = 'rbm-generated-'
filename_suffix = 'visualize-2359'
filename_extension = '.png'
filename = filename_prefix + filename_suffix + filename_extension
figure_path = os.path.join(path, '../output/generate', filename)
# Tile the generated digits into a 10-column grid with minimal gaps.
ncol = 10
nrow = int(x.shape[1] / ncol)
plt.figure(3)
plt.axis('off')
plt.subplots_adjust(wspace=0.01, hspace=0.01)
for i in range(x.shape[1]):
    ax = plt.subplot(nrow, ncol, i + 1)
    ax.imshow(x[:, i].reshape(28,28))
    ax.axis('off')
# figure_path is already absolute, so the extra join is a no-op.
plt.savefig(os.path.join(path, figure_path))
plt.close(3)
68,073 | gen-ko/ynn | refs/heads/master | /script/script_2_7_hidden_num.py | # required python version: 3.6+
import os
import sys
import src.load_data as load_data
from src import layer
import src.rbm as rbm
import src.autoencoder as autoencoder
import matplotlib.pyplot as plt
import numpy
import os
# Sweep the number of hidden units for three model families (RBM,
# autoencoder, denoising autoencoder) on the digits data.
# format of data
# disitstrain.txt contains 3000 lines, each line 785 numbers, comma delimited
full_path = os.path.realpath(__file__)
path, filename = os.path.split(full_path)
data_filepath = '../data'
data_train_filename = 'digitstrain.txt'
data_valid_filename = 'digitsvalid.txt'
data_test_filename = 'digitstest.txt'
data_train_filepath = os.path.join(path, data_filepath, data_train_filename)
data_valid_filepath = os.path.join(path, data_filepath, data_valid_filename)
data_test_filepath = os.path.join(path, data_filepath, data_test_filename)
print('start initializing...')
numpy.random.seed(1099)  # reproducible runs
x_train, y_train = load_data.load_from_path(data_train_filepath)
x_valid, y_valid = load_data.load_from_path(data_valid_filepath)

# (hidden units, CD-k / k argument) pairs swept by the experiment;
# k grows with the hidden layer size, as in the original stanzas.
configs = [(50, 5), (100, 10), (200, 20), (500, 20)]


def run(model, k, plotfile, **train_kwargs):
    """Train one model with the experiment's shared settings."""
    model.set_visualize(28, 28, stride=20)
    model.set_plot(stride=20)
    model.set_autostop(window=60, stride=40)
    model.train(x_train, x_valid, k=k, epoch=3000, learning_rate=0.05,
                batch_size=32, plotfile=plotfile, **train_kwargs)


# The three families run in the same order as the original copy-pasted
# blocks, so the global RNG sequence (and hence results) is unchanged.
for hidden, k in configs:
    run(rbm.RBM(28*28, hidden_units=hidden), k, f'script-2-7-RBM-H{hidden}')
for hidden, k in configs:
    run(autoencoder.AutoEncoder(28*28, hidden_units=hidden), k, f'script-2-7-AE-H{hidden}')
for hidden, k in configs:
    run(autoencoder.AutoEncoder(28*28, hidden_units=hidden), k, f'script-2-7-DAE-H{hidden}',
        dropout=True, dropout_rate=0.1)
| {"/script/script_3_5_visualize.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_10_tanh.py": ["/src/load_data.py", "/src/network.py"], "/script/script_3_2_linear_hidden.py": ["/src/nlp.py", "/src/util/status.py"], "/src/callback.py": ["/src/train.py", "/src/plotter.py"], "/script/script_4_1_pbp_n.py": ["/src/util/status.py"], "/src/train.py": ["/src/network.py", "/src/util_functions.py", "/src/util/status.py"], "/script/script_2_3_sample.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_7_hidden_num.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_3_8_rnn_embedding_size.py": ["/src/nlp.py", "/src/util/status.py"], "/script/script_3_4_inference.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_5_two_hidden.py": ["/src/load_data.py", "/src/network.py"], "/src/network.py": ["/src/util_functions.py"], "/script/script_2_2_k.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_1_naive.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_4_2_plot_bp.py": ["/src/plotter.py"], "/script/script_1_4_learning_rate.py": ["/src/load_data.py", "/src/network.py"], "/script/script_1_1_naive.py": ["/src/callback.py", "/src/train.py", "/src/util/status.py"], "/script/script_3_1_preprocessing.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_5_autoencoder.py": ["/src/load_data.py"], "/script/script_1_9_ReLU.py": ["/src/load_data.py", "/src/network.py"], "/script/script_2_4_pretrain.py": ["/src/load_data.py", "/src/network.py"]} |
68,074 | gen-ko/ynn | refs/heads/master | /src/rbm.py | import numpy
import pickle
from src import layer
import os
from time import gmtime, strftime
import matplotlib.pyplot as plt
# initialize the RBM model constructor before using
def init_rbm(random_seed=1099):
    """Seed numpy's global RNG so different runs are reproducible.

    Bug fix: the ``random_seed`` argument was previously ignored — the seed
    was hard-coded to 1099 regardless of what the caller passed.
    """
    numpy.random.seed(random_seed)
    return
class RBM(object):
    """Restricted Boltzmann machine with binary (0/1) visible/hidden units.

    Wraps a ``layer.RBM`` parameter container (weights ``w``, hidden bias
    ``h_bias``, visible bias ``x_bias``) and adds CD-k training, Gibbs
    sampling, reconstruction-loss plotting, weight visualization, parameter
    dumping and validation-based early stopping.  Batches are column-major:
    arrays are (dimension, batch_size).
    """
    def __init__(self, input_units, hidden_units):
        # Parameter container providing forward (x -> P(h=1|x)) and
        # backward (h -> P(x=1|h)) passes.
        self.layer = layer.RBM(input_dimension=input_units, output_dimension=hidden_units)
        # Optional features; switched on via the set_* methods below.
        self.is_autostop_enabled = False
        self.is_plot_enabled = False
        self.is_visualize_enabled = False
        self.is_dump_enabled = False
    def gibbs_sampling(self, x_t_sample):
        """Run ``self.k`` steps of block Gibbs sampling starting from
        ``x_t_sample`` (shape (sample_dimension, batch_size)).

        Returns [x_neg_prob, x_neg_sample, h_t_prob, h_neg_prob,
        h_neg_sample]: the negative-phase visible/hidden statistics plus the
        positive-phase hidden probabilities needed by the CD update.
        """
        # x shape (sample_dimension, 1)
        h_t_prob = self.layer.forward(x_t_sample)
        h_t_sample = self.sample_h(h_t_prob)
        # Copies, so the chain does not alias the positive-phase arrays.
        h_neg_prob = numpy.array(h_t_prob)
        h_neg_sample = numpy.array(h_t_sample)
        for ki in range(0, self.k, 1):
            x_neg_prob = self.layer.backward(h_neg_sample)
            x_neg_sample = self.sample_x(x_neg_prob)
            h_neg_prob = self.layer.forward(x_neg_sample)
            h_neg_sample = self.sample_h(h_neg_prob)
        return [x_neg_prob, x_neg_sample, h_t_prob, h_neg_prob, h_neg_sample]
    def reconstruct(self, x_t_sample):
        """One deterministic up-down pass: x -> hidden probs -> visible probs."""
        # x shape (sample_dimension, 1)
        h_t_prob = self.layer.forward(x_t_sample)
        x_reconstruct = self.layer.backward(h_t_prob)
        return x_reconstruct
    @staticmethod
    def sample_h(h_p):
        # Draw binary hidden states from their Bernoulli probabilities.
        h_sample = numpy.random.binomial(n=1, p=h_p, size=h_p.shape)
        return h_sample
    @staticmethod
    def sample_x(x_p):
        # Draw binary visible states from their Bernoulli probabilities.
        x_sample = numpy.random.binomial(n=1, p=x_p, size=x_p.shape)
        return x_sample
    def update(self, x_t, x_neg_prob, h_t_prob, h_neg_prob):
        """Contrastive-divergence parameter update, averaged over the batch.

        NOTE(review): train() passes ``x_neg_sample`` for the ``x_neg_prob``
        argument — confirm whether samples or probabilities are intended for
        the negative phase.
        """
        delta_w = numpy.zeros(shape=self.layer.w.shape, dtype=numpy.float64)
        delta_h_bias = numpy.zeros(shape=self.layer.h_bias.shape)
        delta_x_bias = numpy.zeros(shape=self.layer.x_bias.shape)
        batch_size = x_t.shape[1]
        # Accumulate positive-phase minus negative-phase statistics per sample.
        for i in range(0, batch_size, 1):
            delta_w += numpy.outer(h_t_prob[:, i], x_t[:, i]) - numpy.outer(h_neg_prob[:, i], x_neg_prob[:, i])
            tmp1 = h_t_prob[:, i]
            tmp2 = h_neg_prob[:, i]
            delta_h_bias += (tmp1 - tmp2).reshape(delta_h_bias.shape[0], delta_h_bias.shape[1])
            delta_x_bias += (x_t[:, i] - x_neg_prob[:, i]).reshape(delta_x_bias.shape)
        delta_w /= batch_size
        delta_h_bias /= batch_size
        delta_x_bias /= batch_size
        self.layer.update(delta_w=delta_w,
                          delta_h_bias=delta_h_bias,
                          delta_x_bias=delta_x_bias,
                          learning_rate=self.learning_rate)
    def shuffle(self, x):
        """Return ``x`` with its rows (samples) in a random order."""
        shuffle_idx = numpy.arange(x.shape[0])
        numpy.random.shuffle(shuffle_idx)
        x = x[shuffle_idx]
        return x
    def set_visualize(self, dim1, dim2, stride=20, enabled=False):
        """Configure periodic weight visualization (reshape rows to dim1 x dim2)."""
        self.visualize_x_dim = dim1
        self.visualize_y_dim = dim2
        self.is_visualize_enabled = enabled
        self.visualize_stride = stride
    def set_plot(self, stride=20, enabled=False):
        """Configure periodic loss-curve plotting every ``stride`` epochs."""
        self.is_plot_enabled = enabled
        self.plot_stride = stride
    def set_dump(self, stride=20, enabled=False):
        """Configure periodic parameter dumping every ``stride`` epochs."""
        self.is_dump_enabled = enabled
        self.dump_stride = stride
    def set_autostop(self, window=20, stride=20):
        """Enable early stopping, checked every ``stride`` epochs over a
        ``window``-epoch averaging window."""
        self.is_autostop_enabled = True
        self.stop_stride = stride
        self.stop_window = window
    def convergence_detection(self, current_epoch):
        """Return True when validation loss has started rising.

        Compares the mean validation loss over the latest ``stop_window``
        epochs (``loss_last``) with the mean over the window just before it
        (``loss_recent``).
        """
        if current_epoch > 2 * self.stop_window:
            loss_last = numpy.mean(self.plot_loss_valid[current_epoch-self.stop_window+1:current_epoch+1])
            loss_recent = numpy.mean(self.plot_loss_valid[current_epoch-2*self.stop_window+1:current_epoch-self.stop_window+1])
            if loss_last > loss_recent:
                return True
        return False
    def train(self, x_train, x_valid, k=1, epoch=200, learning_rate=0.01, batch_size=128, dump=False, plotfile='time'):
        """Train with CD-k, logging per-epoch reconstruction cross-entropy.

        x_train / x_valid are (num_samples, sample_dimension); they are
        transposed to column-major batches internally.  ``plotfile`` names
        the output files ('time' selects a timestamped name).
        NOTE(review): the ``dump`` argument is stored in self._dump but never
        read — dumping is actually controlled by set_dump().
        """
        self._dump = dump
        print('------------------ Start Training -----------------')
        print('------------- using cross-entropy loss ------------')
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.plot_loss_train = numpy.zeros((epoch, ), dtype=numpy.float64)
        self.plot_loss_valid = numpy.zeros((epoch, ), dtype=numpy.float64)
        self.k = k
        for j in range(epoch):
            x_train = self.shuffle(x_train)
            loss_train = 0.0
            if j % 20 == 0:
                print('|\tepoch\t|\ttrain loss\t|\tvalid loss\t|')
            for i in range(0, x_train.shape[0], batch_size):
                x_batch = x_train[i: i + batch_size]
                x_batch = x_batch.T
                x_t_sample = x_batch
                x_neg_prob, x_neg_sample, h_t_prob, h_neg_prob, h_neg_sample = self.gibbs_sampling(x_t_sample=x_t_sample)
                x_reconstruct = self.reconstruct(x_t_sample)
                loss_train += self.cross_entropy_loss(x_t_sample, x_reconstruct, False)
                # NOTE(review): passes the negative *samples* where update()
                # names the parameter x_neg_prob — confirm intent.
                self.update(x_t_sample, x_neg_sample, h_t_prob, h_neg_prob)
            x_valid_sample = x_valid.T
            x_valid_neg_prob = self.reconstruct(x_valid_sample)
            loss_train /= x_train.shape[0]
            loss_valid = self.cross_entropy_loss(x_valid_sample, x_valid_neg_prob, False)
            loss_valid /= x_valid.shape[0]
            print('\t', j, '\t', sep='', end=' ')
            print('\t|\t ', "{0:.5f}".format(loss_train),
                  ' \t|\t ', "{0:.5f}".format(loss_valid),
                  '\t',
                  sep='')
            self.plot_loss_train[j] = loss_train
            self.plot_loss_valid[j] = loss_valid
            # Periodic side outputs, each on its own stride.
            if self.is_visualize_enabled:
                if j % self.visualize_stride == self.visualize_stride - 1:
                    self.visualize(current_epoch=j, plotfile=plotfile)
            if self.is_plot_enabled:
                if j % self.plot_stride == self.plot_stride - 1:
                    self.plot(current_epoch=j, plotfile=plotfile)
            if self.is_dump_enabled:
                if j % self.dump_stride == self.dump_stride - 1:
                    self.dump(current_epoch=j, plotfile=plotfile)
            if self.is_autostop_enabled:
                if j % self.stop_stride == self.stop_stride - 1:
                    if self.convergence_detection(j):
                        self.autostop(current_epoch=j, plotfile=plotfile)
                        print('---- Earling stopping now ----')
                        return
        # NOTE(review): hard-codes 3000 rather than the actual ``epoch`` value.
        self.autostop(current_epoch=3000, plotfile=plotfile)
        return
    def autostop(self, current_epoch, plotfile):
        """Emit the final visualization, loss plot and parameter dump, with
        '-autostop' appended to the file stem."""
        plotfile += '-autostop'
        self.visualize(current_epoch=current_epoch, plotfile=plotfile)
        self.plot(current_epoch=current_epoch, plotfile=plotfile)
        self.dump(current_epoch=current_epoch, plotfile=plotfile)
        return
    def generate_sample(self, sample_num=1, sampler_step=1000):
        """Sample ``sample_num`` visible vectors from the model.

        Starts from uniform random binary inputs and runs a Gibbs chain of
        ``sampler_step`` steps; returns the final visible probabilities,
        shape (input_units, sample_num).
        NOTE(review): overwrites self.k as a side effect.
        """
        self.k = sampler_step
        # generate an input
        x_input = numpy.random.binomial(n=1, p=0.5, size=(self.layer.x_bias.shape[0], sample_num))
        output_prob, output, _, _, _ = self.gibbs_sampling(x_input)
        return output_prob
    def dump(self, current_epoch, plotfile):
        """Pickle [w, h_bias, x_bias] into ../output/dump."""
        # fix the absolute path of file
        full_path = os.path.realpath(__file__)
        path, _ = os.path.split(full_path)
        filename_prefix = 'rbm-'
        filename_infix = strftime("%y%m%d%H%M%S", gmtime()) + '-'
        filename_suffix = 'whx-'
        filename_epoch = str(current_epoch)
        filename_extension = '.dump'
        if plotfile == 'time':
            filename = filename_prefix + filename_infix + filename_suffix + filename_epoch + filename_extension
        else:
            filename = plotfile + '-' + filename_prefix + filename_suffix + filename_epoch + filename_extension
        file_path = os.path.join(path, '../output/dump', filename)
        with open(file_path, 'wb') as f:
            pickle.dump([self.layer.w, self.layer.h_bias, self.layer.x_bias], f)
        return
    def cross_entropy_loss(self, x_sample, x_reconstruct, take_average=True):
        """Binary cross-entropy between an input batch and its reconstruction.

        NOTE(review): numpy.log(0) warns and returns -inf rather than
        raising, so the bare except below rarely fires; if it does, the
        error is swallowed and 0.0 is returned.
        """
        loss = 0.0
        # the loss between input x and the reconstruction of x
        # x may be a batch (sample_dimension, batch_size)
        try:
            tmp = -numpy.multiply(x_sample, numpy.log(x_reconstruct)) -numpy.multiply((1 - x_sample),
                                                                                     numpy.log(1 - x_reconstruct))
            if take_average:
                loss = sum(sum(tmp)) / x_sample.shape[1]
            else:
                loss = sum(sum(tmp))
        except:
            print('error, loss not computable')
        return loss
    def visualize(self, current_epoch, plotfile):
        """Save a tiled PNG of the weight rows, each reshaped to
        (visualize_x_dim, visualize_y_dim)."""
        # fix the absolute path of file
        full_path = os.path.realpath(__file__)
        path, _ = os.path.split(full_path)
        filename_prefix = 'rbm-'
        filename_infix = strftime("%y%m%d%H%M%S", gmtime()) + '-'
        filename_suffix = 'visualize-'
        filename_epoch = str(current_epoch)
        filename_extension = '.png'
        if plotfile == 'time':
            filename = filename_prefix + filename_infix + filename_suffix + filename_epoch + filename_extension
        else:
            filename = plotfile + '-' + filename_prefix + filename_suffix + filename_epoch + filename_extension
        figure_path = os.path.join(path, '../output/visualize', filename)
        # One tile per hidden unit, 10 per row.
        ncol = 10
        nrow = int(self.layer.w.shape[0] / ncol)
        plt.figure(2)
        plt.axis('off')
        plt.subplots_adjust(wspace=0.01, hspace=0.01)
        for i in range(self.layer.w.shape[0]):
            ax = plt.subplot(nrow, ncol, i + 1)
            ax.imshow(self.layer.w[i, :].reshape([self.visualize_x_dim, self.visualize_y_dim]))
            ax.axis('off')
        plt.savefig(os.path.join(path, figure_path))
        plt.close(2)
        return
    def plot(self, current_epoch, plotfile):
        """Save the train/valid loss curves up to ``current_epoch`` as a PNG."""
        full_path = os.path.realpath(__file__)
        path, _ = os.path.split(full_path)
        filename_prefix = 'rbm-'
        filename_infix = strftime("%y%m%d%H%M%S", gmtime()) + '-'
        filename_suffix = 'cost'
        filename_extension = '.png'
        if plotfile == 'time':
            filename = filename_prefix + filename_infix + filename_suffix + filename_extension
        else:
            filename = plotfile + '-' + filename_prefix + filename_suffix + filename_extension
        titletext = f'lr={self.learning_rate}, ' \
                    f'k={self.k},hidden units={self.layer._output_dimension},batch={self.batch_size}'
        plt.figure(1)
        line_1, = plt.plot(self.plot_loss_train[0:current_epoch], label='train loss')
        line_2, = plt.plot(self.plot_loss_valid[0:current_epoch], label='valid loss')
        plt.legend(handles=[line_1, line_2])
        plt.xlabel('epoch')
        plt.ylabel('cross-entropy cost')
        plt.title(titletext)
        plt.savefig(os.path.join(path, '../output/plot-loss', filename))
        plt.close(1)
        return
| {"/script/script_3_5_visualize.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_10_tanh.py": ["/src/load_data.py", "/src/network.py"], "/script/script_3_2_linear_hidden.py": ["/src/nlp.py", "/src/util/status.py"], "/src/callback.py": ["/src/train.py", "/src/plotter.py"], "/script/script_4_1_pbp_n.py": ["/src/util/status.py"], "/src/train.py": ["/src/network.py", "/src/util_functions.py", "/src/util/status.py"], "/script/script_2_3_sample.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_7_hidden_num.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_3_8_rnn_embedding_size.py": ["/src/nlp.py", "/src/util/status.py"], "/script/script_3_4_inference.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_5_two_hidden.py": ["/src/load_data.py", "/src/network.py"], "/src/network.py": ["/src/util_functions.py"], "/script/script_2_2_k.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_1_naive.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_4_2_plot_bp.py": ["/src/plotter.py"], "/script/script_1_4_learning_rate.py": ["/src/load_data.py", "/src/network.py"], "/script/script_1_1_naive.py": ["/src/callback.py", "/src/train.py", "/src/util/status.py"], "/script/script_3_1_preprocessing.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_5_autoencoder.py": ["/src/load_data.py"], "/script/script_1_9_ReLU.py": ["/src/load_data.py", "/src/network.py"], "/script/script_2_4_pretrain.py": ["/src/load_data.py", "/src/network.py"]} |
68,075 | gen-ko/ynn | refs/heads/master | /script/script_3_8_rnn_embedding_size.py | # required python version: 3.6+
import src.nlp as nlp
import numpy
import os
import pickle
from src import callback as cb
from src import train as utf
from src.util.status import DataStore
from src.util.status import TrainSettings
# Sweep the embedding size of the NlpL3TypeR model; every other
# hyper-parameter is held fixed.
# resolve file path
full_path = os.path.realpath(__file__)
path, filename = os.path.split(full_path)
dump_filepath = '../output/dump'
data_train_filename = 'train.dump'
data_valid_filename = 'valid.dump'
data_train_filepath = os.path.join(path, dump_filepath, data_train_filename)
data_valid_filepath = os.path.join(path, dump_filepath, data_valid_filename)
# load preprocessed data (rows: first 3 columns are inputs, 4th the target)
with open(data_train_filepath, 'rb+') as f:
    data_train = pickle.load(f)
with open(data_valid_filepath, 'rb+') as f:
    data_valid = pickle.load(f)
x_train = data_train[:, 0:3]
y_train = data_train[:, 3]
x_valid = data_valid[:, 0:3]
y_valid = data_valid[:, 3]
# set the random seed
numpy.random.seed(1099)
data_store_train = DataStore(x_train, y_train)
data_store_valid = DataStore(x_valid, y_valid)
# The four copy-pasted runs are folded into one loop; iteration order
# matches the original, so the RNG sequence is unchanged.
for embedding_size in (16, 32, 64, 128):
    train_settings = TrainSettings(learning_rate=0.01, batch_size=16, momentum=0.0,
                                   plot_callback=cb.plot_callback,
                                   loss_callback=cb.loss_callback,
                                   filename='script-3-8', epoch=100,
                                   prefix=f'e{embedding_size}')
    # build the neural network
    mynlp = nlp.NlpL3TypeR(dict_size=8000, embedding_size=embedding_size, hidden_units=128)
    utf.cross_train(mynlp, data_store_train, data_store_valid, train_settings)
| {"/script/script_3_5_visualize.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_10_tanh.py": ["/src/load_data.py", "/src/network.py"], "/script/script_3_2_linear_hidden.py": ["/src/nlp.py", "/src/util/status.py"], "/src/callback.py": ["/src/train.py", "/src/plotter.py"], "/script/script_4_1_pbp_n.py": ["/src/util/status.py"], "/src/train.py": ["/src/network.py", "/src/util_functions.py", "/src/util/status.py"], "/script/script_2_3_sample.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_7_hidden_num.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_3_8_rnn_embedding_size.py": ["/src/nlp.py", "/src/util/status.py"], "/script/script_3_4_inference.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_5_two_hidden.py": ["/src/load_data.py", "/src/network.py"], "/src/network.py": ["/src/util_functions.py"], "/script/script_2_2_k.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_1_naive.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_4_2_plot_bp.py": ["/src/plotter.py"], "/script/script_1_4_learning_rate.py": ["/src/load_data.py", "/src/network.py"], "/script/script_1_1_naive.py": ["/src/callback.py", "/src/train.py", "/src/util/status.py"], "/script/script_3_1_preprocessing.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_5_autoencoder.py": ["/src/load_data.py"], "/script/script_1_9_ReLU.py": ["/src/load_data.py", "/src/network.py"], "/script/script_2_4_pretrain.py": ["/src/load_data.py", "/src/network.py"]} |
68,076 | gen-ko/ynn | refs/heads/master | /script/script_3_4_inference.py | # required python version: 3.6+
import os
import sys
import src.load_data as load_data
from src import layer
import src.nlp as nlp
import matplotlib.pyplot as plt
import numpy
import os
import pickle
from src import util as uf
from src import callback as cb
from src import train as utf
# Load the dictionary and a trained language model, then complete a set of
# three-word prompts and print the generated sentences.
# resolve file path
path, _ = os.path.split(os.path.realpath(__file__))
dictionary_filename = os.path.join(path, '../output/dump', 'dictionary.dump')
# load vocabulary
with open(dictionary_filename, 'rb') as f:
    d = pickle.load(f)
# load trained network parameters into a model of matching architecture
nn_filename = os.path.join(path, '../output/dump', 'script-3-2-h128-i-s.dump')
nn = nlp.NlpL3TypeA(8000, 16, 128)
with open(nn_filename, 'rb') as f:
    nn.load(pickle.load(f))
# Seven copy-pasted inference calls folded into a loop; prompts and their
# order are unchanged.
prompts = [
    ['government', 'of', 'united'],
    ['city', 'of', 'new'],
    ['life', 'in', 'the'],
    ['he', 'is', 'the'],
    ['there', 'are', 'millions'],
    ['it', 'is', 'not'],
    ['in', 'the', 'next'],
]
for prompt in prompts:
    sentence = utf.nlp_inference_sentence(nn, prompt, d)
    print(sentence)
68,077 | gen-ko/ynn | refs/heads/master | /script/script_1_5_two_hidden.py | # required python version: 3.6+
import os
import sys
import src.load_data as load_data
from src import plot_data
from src import layer
from src.network import NeuralNetwork_Dumpable as NN
import src.network as network
import matplotlib.pyplot as plt
import numpy
import os
# Train a two-hidden-layer network (784 -> 100 -> 100 -> 10, BN + Sigmoid
# before the softmax) on the digits data, sweeping the hyper-parameter
# grids below (each currently holds a single value).
# format of data
# disitstrain.txt contains 3000 lines, each line 785 numbers, comma delimited
full_path = os.path.realpath(__file__)
path, filename = os.path.split(full_path)
data_filepath = '../data'
data_train_filename = 'digitstrain.txt'
data_valid_filename = 'digitsvalid.txt'
data_test_filename = 'digitstest.txt'
data_train_filepath = os.path.join(path, data_filepath, data_train_filename)
data_valid_filepath = os.path.join(path, data_filepath, data_valid_filename)
data_test_filepath = os.path.join(path, data_filepath, data_test_filename)
print('start initializing...')
network.init_nn(random_seed=1099)  # reproducible initialization
learning_rates = [0.02]
momentums = [0.9]
regularizers = [0.00001]
x_train, y_train = load_data.load_from_path(data_train_filepath)
x_valid, y_valid = load_data.load_from_path(data_valid_filepath)
# Grid search over regularizer x momentum x learning rate.
for i2 in range(len(regularizers)):
    for i3 in range(len(momentums)):
        for i4 in range(len(learning_rates)):
            # Fresh layer stack per configuration.
            layers = [layer.Linear(784, 100),
                      layer.BN(100, 100),
                      layer.Sigmoid(100, 100),
                      layer.Linear(100, 100),
                      layer.BN(100, 100),
                      layer.Sigmoid(100, 100),
                      layer.SoftmaxLayer(100, 10)]
            # NOTE(review): `name` is built but never used (not passed to
            # the network or train()).
            name = 'network2' + '-' + str(i2) + '-' + str(i3) + '-' + str(i4) + '.dump'
            myNN = NN(layers, learning_rate=learning_rates[i4], regularizer=regularizers[i2], momentum=momentums[i3])
            myNN.train(x_train, y_train, x_valid, y_valid, epoch=300, batch_size=32)
| {"/script/script_3_5_visualize.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_10_tanh.py": ["/src/load_data.py", "/src/network.py"], "/script/script_3_2_linear_hidden.py": ["/src/nlp.py", "/src/util/status.py"], "/src/callback.py": ["/src/train.py", "/src/plotter.py"], "/script/script_4_1_pbp_n.py": ["/src/util/status.py"], "/src/train.py": ["/src/network.py", "/src/util_functions.py", "/src/util/status.py"], "/script/script_2_3_sample.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_7_hidden_num.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_3_8_rnn_embedding_size.py": ["/src/nlp.py", "/src/util/status.py"], "/script/script_3_4_inference.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_5_two_hidden.py": ["/src/load_data.py", "/src/network.py"], "/src/network.py": ["/src/util_functions.py"], "/script/script_2_2_k.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_1_naive.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_4_2_plot_bp.py": ["/src/plotter.py"], "/script/script_1_4_learning_rate.py": ["/src/load_data.py", "/src/network.py"], "/script/script_1_1_naive.py": ["/src/callback.py", "/src/train.py", "/src/util/status.py"], "/script/script_3_1_preprocessing.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_5_autoencoder.py": ["/src/load_data.py"], "/script/script_1_9_ReLU.py": ["/src/load_data.py", "/src/network.py"], "/script/script_2_4_pretrain.py": ["/src/load_data.py", "/src/network.py"]} |
68,078 | gen-ko/ynn | refs/heads/master | /src/network.py | import src.layer as layer
import numpy as np
import src.util_functions as uf
class NeuralNetwork(object):
    """Abstract base for networks composed of a flat list of layers.

    Subclasses must provide ``fprop``/``bprop``; the callbacks are likewise
    abstract.  ``update``/``dump``/``load`` operate uniformly on
    ``self.layers``, which subclasses are expected to set.
    """

    def fprop(self, *args):
        # Forward pass -- subclasses must override.
        raise ValueError('Calling a virtual function')

    def bprop(self, *args):
        # Backward pass -- subclasses must override.
        raise ValueError('Calling a virtual function')

    def status_callback(self, *args):
        raise ValueError('Calling a virtual function')

    def plot_callback(self, *args):
        raise ValueError('Calling a virtual function')

    def update(self, train_settings):
        """Apply one parameter update to every layer in order."""
        for current_layer in self.layers:
            current_layer.update(train_settings)
        return

    def dump(self):
        """Serialize the network as a list of per-layer dump objects."""
        return [current_layer.dump() for current_layer in self.layers]

    def load(self, dump_obj: list):
        """Restore layers in order; consumes ``dump_obj`` from the front (mutates it)."""
        for current_layer in self.layers:
            current_layer.load(dump_obj.pop(0))
        return
# TODO: refactor the NeuralNetwork class
class MLP(NeuralNetwork):
    """Plain feed-forward multi-layer perceptron."""

    def __init__(self, layers):
        self.layers = layers
        # Activations cached by fprop(keep_state=True); consumed by bprop.
        self.h: list = None
        if __debug__:
            print('DEBUG MODE ENABLED')
        return

    def fprop(self, x, keep_state: bool=False):
        """Run the forward pass; return the last layer's output."""
        activations = [x]
        current = x
        # activations holds h_0 (input) through h_{L+1} (output).
        for current_layer in self.layers:
            current = current_layer.forward(current)
            activations.append(current)
        if keep_state:
            self.h = activations
        return activations[-1]

    # assuming the loss function is cross-entropy
    def bprop(self, y):
        """Back-propagate through the cached activations (returns None)."""
        delta = y
        for index in reversed(range(len(self.layers))):
            delta = self.layers[index].backward(delta, self.h[index + 1], self.h[index])
        return
68,079 | gen-ko/ynn | refs/heads/master | /script/script_2_2_k.py | # required python version: 3.6+
import os
import sys
import src.load_data as load_data
from src import layer
import src.rbm as rbm
import matplotlib.pyplot as plt
import numpy
import os
# format of data
# disitstrain.txt contains 3000 lines, each line 785 numbers, comma delimited
# Resolve the data directory relative to this script's own location.
full_path = os.path.realpath(__file__)
path, filename = os.path.split(full_path)
data_filepath = '../data'
data_train_filename = 'digitstrain.txt'
data_valid_filename = 'digitsvalid.txt'
data_test_filename = 'digitstest.txt'
data_train_filepath = os.path.join(path, data_filepath, data_train_filename)
data_valid_filepath = os.path.join(path, data_filepath, data_valid_filename)
data_test_filepath = os.path.join(path, data_filepath, data_test_filename)
print('start initializing...')
#rbm.init_rbm(random_seed=1099)
#numpy.random.RandomState(seed=1099)
# Seed the global NumPy RNG so all three runs below are reproducible.
numpy.random.seed(5800)
x_train, y_train = load_data.load_from_path(data_train_filepath)
x_valid, y_valid = load_data.load_from_path(data_valid_filepath)
# Experiment: compare CD-k for k = 5, 10, 20; a fresh RBM per run so the
# runs do not share learned weights.
myRBM = rbm.RBM(28*28, hidden_units=100)
myRBM.set_visualize(28, 28, stride=20)
myRBM.set_plot(stride=20)
myRBM.set_autostop(window=80, stride=40)
myRBM.train(x_train, x_valid, k=5, epoch=3000, learning_rate=0.05, batch_size=32, plotfile='script-2-2-k=5')
myRBM = rbm.RBM(28*28, hidden_units=100)
myRBM.set_visualize(28, 28, stride=20)
myRBM.set_plot(stride=20)
myRBM.set_autostop(window=80, stride=40)
myRBM.train(x_train, x_valid, k=10, epoch=3000, learning_rate=0.05, batch_size=32, plotfile='script-2-2-k=10')
myRBM = rbm.RBM(28*28, hidden_units=100)
myRBM.set_visualize(28, 28, stride=20)
myRBM.set_plot(stride=20)
myRBM.set_autostop(window=80, stride=40)
myRBM.train(x_train, x_valid, k=20, epoch=3000, learning_rate=0.05, batch_size=32, plotfile='script-2-2-k=20')
68,080 | gen-ko/ynn | refs/heads/master | /script/script_2_1_naive.py | # required python version: 3.6+
import os
import sys
import src.load_data as load_data
from src import layer
import src.rbm as rbm
import matplotlib.pyplot as plt
import numpy
import os
# format of data
# disitstrain.txt contains 3000 lines, each line 785 numbers, comma delimited
# Resolve the data directory relative to this script's own location.
full_path = os.path.realpath(__file__)
path, filename = os.path.split(full_path)
data_filepath = '../data'
data_train_filename = 'digitstrain.txt'
data_valid_filename = 'digitsvalid.txt'
data_test_filename = 'digitstest.txt'
data_train_filepath = os.path.join(path, data_filepath, data_train_filename)
data_valid_filepath = os.path.join(path, data_filepath, data_valid_filename)
data_test_filepath = os.path.join(path, data_filepath, data_test_filename)
print('start initializing...')
#rbm.init_rbm(random_seed=1099)
#numpy.random.RandomState(seed=1099)
# Seed the global NumPy RNG for a reproducible baseline run.
numpy.random.seed(1099)
x_train, y_train = load_data.load_from_path(data_train_filepath)
x_valid, y_valid = load_data.load_from_path(data_valid_filepath)
# Baseline: CD-1 RBM with 100 hidden units.
myRBM = rbm.RBM(28*28, hidden_units=100)
myRBM.set_visualize(28,28)
myRBM.set_autostop(window=40, stride=20)
myRBM.train(x_train, x_valid, k=1, epoch=3000, learning_rate=0.1, batch_size=128, plotfile='script-2-1-naive')
| {"/script/script_3_5_visualize.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_10_tanh.py": ["/src/load_data.py", "/src/network.py"], "/script/script_3_2_linear_hidden.py": ["/src/nlp.py", "/src/util/status.py"], "/src/callback.py": ["/src/train.py", "/src/plotter.py"], "/script/script_4_1_pbp_n.py": ["/src/util/status.py"], "/src/train.py": ["/src/network.py", "/src/util_functions.py", "/src/util/status.py"], "/script/script_2_3_sample.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_7_hidden_num.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_3_8_rnn_embedding_size.py": ["/src/nlp.py", "/src/util/status.py"], "/script/script_3_4_inference.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_5_two_hidden.py": ["/src/load_data.py", "/src/network.py"], "/src/network.py": ["/src/util_functions.py"], "/script/script_2_2_k.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_1_naive.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_4_2_plot_bp.py": ["/src/plotter.py"], "/script/script_1_4_learning_rate.py": ["/src/load_data.py", "/src/network.py"], "/script/script_1_1_naive.py": ["/src/callback.py", "/src/train.py", "/src/util/status.py"], "/script/script_3_1_preprocessing.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_5_autoencoder.py": ["/src/load_data.py"], "/script/script_1_9_ReLU.py": ["/src/load_data.py", "/src/network.py"], "/script/script_2_4_pretrain.py": ["/src/load_data.py", "/src/network.py"]} |
68,081 | gen-ko/ynn | refs/heads/master | /src/plotter.py | import numpy
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
from src import util as uf
from src import train as utf
def reshape_row_major(x, num_row, num_column):
    # type: (numpy.ndarray, int, int) -> numpy.ndarray
    """Reshape *x* into a (num_row, num_column) array, C (row-major) order."""
    target_shape = (num_row, num_column)
    return numpy.reshape(x, target_shape)
def plot_image(x_reshaped):
    # Render a 2-D array as a grayscale image; returns matplotlib's AxesImage.
    return plt.imshow(x_reshaped, cmap='gray')
class PlotterBase(object):
    """Base class for figure writers.

    Derives the output filename from the (plotfile, prefix, suffix) triple
    and an absolute save path; subclasses implement :meth:`plot`.
    """

    def __init__(self, plotfile, filename_prefix, filename_suffix):
        self.plotfile = plotfile
        filename_extension = '.png'
        self.filename = plotfile + '-' + filename_prefix + filename_suffix + filename_extension
        full_path = os.path.realpath(__file__)
        path, _ = os.path.split(full_path)
        # BUG FIX: was `os.path.join()` with no arguments, which raises
        # TypeError on every instantiation.  Save next to this module.
        # NOTE(review): an output subdirectory (e.g. '../output', as used by
        # plot_loss/plot_perplexity below) may have been intended -- confirm.
        self.savepath = os.path.join(path, self.filename)

    def plot(self, *args):
        # Abstract: subclasses must override.
        raise ValueError('Calling a virtual function')
class PlotterLoss(PlotterBase):
    """Writes train/valid cross-entropy curves to the base class save path."""

    def __init__(self, plotfile, filename_prefix, filename_suffix):
        PlotterBase.__init__(self, plotfile, filename_prefix, filename_suffix)

    def set_title(self, learning_rate, hidden_units, batch_size, k):
        # Figure title summarising the training hyper-parameters.
        self.titletext = (f'lr={learning_rate}, '
                          f'k={k},hidden units={hidden_units},batch={batch_size}')

    def plot(self, current_epoch, loss_train, loss_valid):
        """Plot both losses up to ``current_epoch`` and save the figure."""
        train_line, = plt.plot(loss_train[0:current_epoch], label='train loss')
        valid_line, = plt.plot(loss_valid[0:current_epoch], label='valid loss')
        plt.legend(handles=[train_line, valid_line])
        plt.xlabel('epoch')
        plt.ylabel('cross-entropy cost')
        plt.title(self.titletext)
        plt.savefig(self.savepath)
        plt.close()
        return
def plot_base(v1, v2, label, save_path):
    """Plot a train/valid pair of curves labelled with *label*, then save."""
    train_line, = plt.plot(v1, label='train' + ' ' + label)
    valid_line, = plt.plot(v2, label='valid' + ' ' + label)
    plt.legend(handles=[train_line, valid_line])
    plt.xlabel('epoch')
    plt.ylabel(label)
    plt.savefig(save_path)
    plt.close()
    return
def plot_perplexity(status_train: utf.Status, status_valid: utf.Status):
    """Plot train/valid perplexity curves and save under ../output/perplexity.

    Filename components fall back to defaults when the training settings do
    not carry them.
    """
    try:
        plot_file = status_train.train_settings.filename
    except AttributeError:
        # BUG FIX: was `except ValueError` -- a missing attribute raises
        # AttributeError (cf. plot_loss), so the fallback never fired.
        plot_file = 'loss'
    try:
        filename_prefix = status_train.train_settings.prefix
    except AttributeError:
        filename_prefix = 'prefix'
    try:
        filename_suffix = status_train.train_settings.suffix
    except AttributeError:
        filename_suffix = 'perp'
    filename_extension = '.png'
    filename = plot_file + '-' + filename_prefix + '-' + filename_suffix + '-perp' + filename_extension
    full_path = os.path.realpath(__file__)
    path, _ = os.path.split(full_path)
    savepath = os.path.join(path, '../output/perplexity', filename)
    # Only the epochs actually run so far are plotted.
    v1 = status_train.perplexity[0:status_train.current_epoch+1]
    v2 = status_valid.perplexity[0:status_valid.current_epoch+1]
    label = 'perplexity'
    plot_base(v1, v2, label, savepath)
    return
def plot_loss(status_train: utf.Status, status_valid: utf.Status):
    """Plot train/valid loss curves and save under ../output/plot-loss."""
    # Pull the filename components from the training settings, falling back
    # to defaults when an attribute (or train_settings itself) is missing.
    components = []
    for attr, fallback in (('filename', 'loss'),
                           ('prefix', 'prefix'),
                           ('suffix', 'loss')):
        try:
            components.append(getattr(status_train.train_settings, attr))
        except AttributeError:
            components.append(fallback)
    plot_file, filename_prefix, filename_suffix = components
    out_name = plot_file + '-' + filename_prefix + '-' + filename_suffix + '-loss' + '.png'
    script_dir, _ = os.path.split(os.path.realpath(__file__))
    save_path = os.path.join(script_dir, '../output/plot-loss', out_name)
    # Only the epochs actually run so far are plotted.
    v1 = status_train.loss[0:status_train.current_epoch+1]
    v2 = status_valid.loss[0:status_valid.current_epoch+1]
    plot_base(v1, v2, 'loss', save_path)
    return
def plot_general(vs: list, labels: [str], xlabel: str, ylabel: str, save_path: str, title: str=None):
    """Plot an arbitrary set of curves, one label per curve, and save.

    Raises:
        ValueError: if ``vs`` and ``labels`` differ in length.
    """
    if len(vs) != len(labels):
        # Explicit validation: `assert` is stripped under `python -O`.
        raise ValueError('vs and labels must have the same length')
    plt.figure(1)
    lines = []
    for series, label in zip(vs, labels):
        line, = plt.plot(series, label=label)
        lines.append(line)
    plt.legend(handles=lines)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.title(title)
    plt.savefig(save_path)
    plt.close(1)
    return
# TODO: A plloter for RBM, NN, AutoEncoder needs to be implemented
| {"/script/script_3_5_visualize.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_10_tanh.py": ["/src/load_data.py", "/src/network.py"], "/script/script_3_2_linear_hidden.py": ["/src/nlp.py", "/src/util/status.py"], "/src/callback.py": ["/src/train.py", "/src/plotter.py"], "/script/script_4_1_pbp_n.py": ["/src/util/status.py"], "/src/train.py": ["/src/network.py", "/src/util_functions.py", "/src/util/status.py"], "/script/script_2_3_sample.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_7_hidden_num.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_3_8_rnn_embedding_size.py": ["/src/nlp.py", "/src/util/status.py"], "/script/script_3_4_inference.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_5_two_hidden.py": ["/src/load_data.py", "/src/network.py"], "/src/network.py": ["/src/util_functions.py"], "/script/script_2_2_k.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_1_naive.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_4_2_plot_bp.py": ["/src/plotter.py"], "/script/script_1_4_learning_rate.py": ["/src/load_data.py", "/src/network.py"], "/script/script_1_1_naive.py": ["/src/callback.py", "/src/train.py", "/src/util/status.py"], "/script/script_3_1_preprocessing.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_5_autoencoder.py": ["/src/load_data.py"], "/script/script_1_9_ReLU.py": ["/src/load_data.py", "/src/network.py"], "/script/script_2_4_pretrain.py": ["/src/load_data.py", "/src/network.py"]} |
68,082 | gen-ko/ynn | refs/heads/master | /script/script_4_2_plot_bp.py | import src.plotter as plotter
import pickle
import numpy
import matplotlib.pyplot as plt
import os
from src import train as utf
# Load three previously-dumped (train, valid) Status pairs and overlay all
# six loss curves in a single figure.
f1 = '../output/metric/script-1-1-1-e16-i-s.dump'
f2 = '../output/metric/script-1-1-2-e16-i-s.dump'
f3 = '../output/metric/script-1-1-3-e16-i-s.dump'
# NOTE: pickle.load is only safe because these are locally-produced dumps.
with open(f1, 'rb') as f:
    s1, s2 = pickle.load(f)
with open(f2, 'rb') as f:
    s3, s4 = pickle.load(f)
with open(f3, 'rb') as f:
    s5, s6 = pickle.load(f)
v1 = s1.loss
v2 = s2.loss
v3 = s3.loss
v4 = s4.loss
v5 = s5.loss
v6 = s6.loss
plotter.plot_general([v1, v2, v3, v4, v5, v6], ['v1', 'v2', 'v3', 'v4', 'v5', 'v6'], 'x', 'y', '../output/metric/fig1.png', 'title')
68,083 | gen-ko/ynn | refs/heads/master | /script/script_1_4_learning_rate.py | # required python version: 3.6+
import os
import sys
import src.load_data as load_data
from src import plot_data
from src import layer
from src.network import NeuralNetwork_Dumpable as NN
import src.network as network
import matplotlib.pyplot as plt
import numpy
import os
# format of data
# disitstrain.txt contains 3000 lines, each line 785 numbers, comma delimited
# Resolve the data directory relative to this script's own location.
full_path = os.path.realpath(__file__)
path, filename = os.path.split(full_path)
data_filepath = '../data'
data_train_filename = 'digitstrain.txt'
data_valid_filename = 'digitsvalid.txt'
data_test_filename = 'digitstest.txt'
data_train_filepath = os.path.join(path, data_filepath, data_train_filename)
data_valid_filepath = os.path.join(path, data_filepath, data_valid_filename)
data_test_filepath = os.path.join(path, data_filepath, data_test_filename)
# x range [0, 1]
x_train, y_train = load_data.load_from_path(data_train_filepath)
x_valid, y_valid = load_data.load_from_path(data_valid_filepath)
# warm-up phase
'''
print(x_train.shape)
print(y_train.shape)
print(y_train)
x_train_reshaped = plot_data.reshape_row_major(x_train[2550], 28, 28)
# plt.imshow(x_train_reshaped)
# so data is row majored
plot_data.plot_image(x_train_reshaped)
plt.show()
'''
#l1 = Layer(784, 100, 10)
print("start initiliazing...")
network.init_nn(random_seed=20791)
random_state = numpy.random.RandomState(seed=2056791)
'''
myNN = NN(layers, learning_rate=0.01, debug=False, regularizer=0.001, momentum=0.9)
myNN.train_dump(x_train, y_train, x_valid, y_valid, epoch=200, dump_file=os.path.join(path, '../temp/network-2.dump'))
'''
'''
myNN = NN(layers, learning_rate=0.01, debug=False, regularizer=0.0001, momentum=0.9)
myNN.train_dump(x_train, y_train, x_valid, y_valid, epoch=200, dump_file=os.path.join(path, '../temp/network-3.dump'))
'''
# Hyper-parameter grid search over regularizer / momentum / learning rate.
hidden_units = [20, 100, 200, 500]
regularizers = [0.0, 0.0001, 0.001]
momentums = [0.0, 0.5, 0.9]
learning_rates = [0.1, 0.01, 0.2, 0.5]
for i1 in range(len(hidden_units)):
    for i2 in range(len(regularizers)):
        for i3 in range(len(momentums)):
            for i4 in range(len(learning_rates)):
                # NOTE(review): hidden_units[i1] is never used -- the layer
                # sizes below are hard-coded to 100, so the outer loop only
                # repeats the same configurations.  `name` is also built but
                # never used (no dump is written).  Confirm intent.
                layers = [layer.Linear(784, 100),
                    layer.BN(100, 100),
                    layer.ReLU(100, 100),
                    layer.BN(100, 100),
                    layer.SoftmaxLayer(100, 10)]
                name = 'network-' + str(i1) + '-' + str(i2) + '-' + str(i3) + '-' + str(i4) + '.dump'
                myNN = NN(layers, learning_rate=learning_rates[i4], regularizer=regularizers[i2], momentum=momentums[i3])
                myNN.train(x_train, y_train, x_valid, y_valid, epoch=300, batch_size=32)
print('training finished')
| {"/script/script_3_5_visualize.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_10_tanh.py": ["/src/load_data.py", "/src/network.py"], "/script/script_3_2_linear_hidden.py": ["/src/nlp.py", "/src/util/status.py"], "/src/callback.py": ["/src/train.py", "/src/plotter.py"], "/script/script_4_1_pbp_n.py": ["/src/util/status.py"], "/src/train.py": ["/src/network.py", "/src/util_functions.py", "/src/util/status.py"], "/script/script_2_3_sample.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_7_hidden_num.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_3_8_rnn_embedding_size.py": ["/src/nlp.py", "/src/util/status.py"], "/script/script_3_4_inference.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_5_two_hidden.py": ["/src/load_data.py", "/src/network.py"], "/src/network.py": ["/src/util_functions.py"], "/script/script_2_2_k.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_1_naive.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_4_2_plot_bp.py": ["/src/plotter.py"], "/script/script_1_4_learning_rate.py": ["/src/load_data.py", "/src/network.py"], "/script/script_1_1_naive.py": ["/src/callback.py", "/src/train.py", "/src/util/status.py"], "/script/script_3_1_preprocessing.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_5_autoencoder.py": ["/src/load_data.py"], "/script/script_1_9_ReLU.py": ["/src/load_data.py", "/src/network.py"], "/script/script_2_4_pretrain.py": ["/src/load_data.py", "/src/network.py"]} |
68,084 | gen-ko/ynn | refs/heads/master | /script/script_3_6_rnn.py |
# Tensorflow RNN tutorial imports
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import tensorflow as tf
from tensorflow.python.client import device_lib
# Smoke test that the TensorFlow session works.
hello = tf.constant('Hello, TensorFlow!')
sess = tf.Session()
print(sess.run(hello))
# Embedding-lookup scratch code from the TF tutorial.
vocabulary_size = 8000
embedding_size = 16
word_embeddings = tf.get_variable("word_embeddings",
    [vocabulary_size, embedding_size])
# NOTE(review): `word_ids` is never defined anywhere in this file, so the
# next line raises NameError at import time.  (Also note the variable-name
# typo `emebedded_word_ids`.)  This scratch block should likely be removed.
emebedded_word_ids = tf.nn.embedding_lookup(word_embeddings, word_ids)
# Tensorflow RNN tutorial code
flags = tf.flags
logging = tf.logging
flags.DEFINE_string(
    "model", "small",
    "A type of model. Possible options are: small, medium, large.")
flags.DEFINE_string("data_path", None,
                    "Where the training/test data is stored.")
flags.DEFINE_string("save_path", None,
                    "Model output directory.")
flags.DEFINE_bool("use_fp16", False,
                  "Train using 16-bit floats instead of 32bit floats")
flags.DEFINE_integer("num_gpus", 1,
                     "If larger than 1, Grappler AutoParallel optimizer "
                     "will create multiple training replicas with each GPU "
                     "running one replica.")
flags.DEFINE_string("rnn_mode", None,
                    "The low level implementation of lstm cell: one of CUDNN, "
                    "BASIC, and BLOCK, representing cudnn_lstm, basic_lstm, "
                    "and lstm_block_cell classes.")
FLAGS = flags.FLAGS
# Symbolic names for the three supported LSTM cell implementations.
BASIC = "basic"
CUDNN = "cudnn"
BLOCK = "block"
def data_type():
    """Return the float dtype selected by the --use_fp16 flag."""
    if FLAGS.use_fp16:
        return tf.float16
    return tf.float32
class RnnConfig(object):
    """Most are the same with small config."""
    init_scale = 0.1        # uniform init range for the LSTM parameters
    learning_rate = 0.1
    max_grad_norm = 5       # global gradient-clipping threshold
    num_layers = 1
    num_steps = 3           # BPTT unroll length
    hidden_size = 128
    embedding_size = 16
    max_epoch = 100
    max_max_epoch = 100
    keep_prob = 1.0 # 1.0 means no drop-out
    lr_decay = 0.0
    batch_size = 16
    vocab_size = 8000
    rnn_mode = CUDNN        # see BASIC/CUDNN/BLOCK constants above
class RnnModel(object):
    """Word-level RNN language model (CUDNN LSTM variant).

    Graph: embedding lookup -> optional input dropout -> LSTM ->
    softmax projection -> summed cross-entropy loss.  When ``is_training``
    is true, also builds clipped-gradient SGD ops with an assignable
    learning rate.
    """

    def __init__(self, is_training, config, input_):
        self._is_training = is_training
        self._input = input_
        self._rnn_params = None
        self._cell = None
        self.batch_size = input_.batch_size
        self.num_steps = input_.num_steps
        self.embedding_size = config.embedding_size
        self.hidden_size = config.hidden_size
        self.vocab_size = config.vocab_size
        with tf.device("/cpu:0"):
            embedding_w = tf.get_variable(
                "embedding_w", (self.vocab_size, self.embedding_size), dtype=data_type())
            # input_.input_data shall be the ids of the words
            embedding = tf.nn.embedding_lookup(embedding_w, input_.input_data)
        # drop-out at the input level, usually there is no need to drop-out
        inputs = embedding
        if is_training and config.keep_prob < 1:
            inputs = tf.nn.dropout(embedding, config.keep_prob)
        # BUG FIX: `embedding` was previously passed here, silently discarding
        # the dropout result computed just above.
        output, state = self._build_rnn_graph(inputs, config, is_training)
        softmax_w = tf.get_variable(
            "softmax_w", [self.hidden_size, self.vocab_size], dtype=data_type())
        softmax_b = tf.get_variable(
            "softmax_b", [self.vocab_size], dtype=data_type())
        logits = tf.nn.xw_plus_b(output, softmax_w, softmax_b)
        # BUG FIX: was `softmax_cross_entropy_with_logtis` (typo), which
        # raises AttributeError at graph-construction time.
        loss = tf.nn.softmax_cross_entropy_with_logits(labels=input_.target,
                                                       logits=logits)
        # Update the cost
        self._cost = tf.reduce_sum(loss)
        self._final_state = state
        if not is_training:
            return
        # Training-only graph: clipped-gradient SGD with an assignable LR.
        self._lr = tf.Variable(0.0, trainable=False)
        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(self._cost, tvars),
                                          config.max_grad_norm)
        optimizer = tf.train.GradientDescentOptimizer(self._lr)
        self._train_op = optimizer.apply_gradients(
            zip(grads, tvars),
            global_step=tf.contrib.framework.get_or_create_global_step())
        self._new_lr = tf.placeholder(
            tf.float32, shape=[], name="new_learning_rate")
        self._lr_update = tf.assign(self._lr, self._new_lr)

    @property
    def input(self):
        return self._input

    @property
    def initial_state(self):
        return self._initial_state

    @property
    def cost(self):
        return self._cost

    @property
    def final_state(self):
        return self._final_state

    @property
    def lr(self):
        return self._lr

    @property
    def train_op(self):
        return self._train_op

    def _get_lstm_cell(self, config, is_training):
        # NOTE(review): only relevant for BASIC/BLOCK modes; the CUDNN path
        # in _build_rnn_graph below never calls this helper.
        if config.rnn_mode == BASIC:
            return tf.contrib.rnn.BasicLSTMCell(
                config.hidden_size, forget_bias=0.0, state_is_tuple=True,
                reuse=not is_training)
        if config.rnn_mode == BLOCK:
            return tf.contrib.rnn.LSTMBlockCell(
                config.hidden_size, forget_bias=0.0)
        raise ValueError("rnn_mode %s not supported" % config.rnn_mode)

    def _build_rnn_graph(self, inputs, config, is_training):
        """Build the inference graph using CUDNN cell."""
        # CudnnLSTM expects time-major input.
        inputs = tf.transpose(inputs, [1, 0, 2])
        # NOTE(review): input_size is set to hidden_size although the inputs
        # are embedding_size-wide -- confirm against the CudnnLSTM API.
        self._cell = tf.contrib.cudnn_rnn.CudnnLSTM(
            num_layers=config.num_layers,
            num_units=config.hidden_size,
            input_size=config.hidden_size,
            dropout=1 - config.keep_prob if is_training else 0)
        params_size_t = self._cell.params_size()
        self._rnn_params = tf.get_variable(
            "lstm_params",
            initializer=tf.random_uniform(
                [params_size_t], -config.init_scale, config.init_scale),
            validate_shape=False)
        c = tf.zeros([config.num_layers, self.batch_size, config.hidden_size],
                     tf.float32)
        h = tf.zeros([config.num_layers, self.batch_size, config.hidden_size],
                     tf.float32)
        self._initial_state = (tf.contrib.rnn.LSTMStateTuple(h=h, c=c),)
        outputs, h, c = self._cell(inputs, h, c, self._rnn_params, is_training)
        # Back to batch-major, then flatten time into the batch dimension.
        outputs = tf.transpose(outputs, [1, 0, 2])
        outputs = tf.reshape(outputs, [-1, config.hidden_size])
        # BUG FIX: the closing `,)` was missing here, leaving an unbalanced
        # parenthesis that made the whole file a SyntaxError (the one-tuple
        # form matches self._initial_state above).
        return outputs, (tf.contrib.rnn.LSTMStateTuple(h=h, c=c),)

    def assign_lr(self, session, lr_value):
        """Set the learning-rate variable to ``lr_value`` in ``session``."""
        session.run(self._lr_update, feed_dict={self._new_lr: lr_value})
def run_epoch(session, model, eval_op=None, verbose=False):
    """Runs the model on the given data.

    Feeds the recurrent state forward between steps, accumulates the summed
    cross-entropy cost, and returns the epoch perplexity exp(costs / iters).
    When ``eval_op`` is given it is run alongside the fetches (e.g. the
    train op); ``verbose`` prints progress roughly every 10% of the epoch.
    """
    start_time = time.time()
    costs = 0.0
    iters = 0
    # Initial recurrent state, evaluated once per epoch.
    state = session.run(model.initial_state)
    fetches = {
        "cost": model.cost,
        "final_state": model.final_state,
    }
    if eval_op is not None:
        fetches["eval_op"] = eval_op
    for step in range(model.input.epoch_size):
        # Feed the previous step's final state in as this step's initial state.
        feed_dict = {}
        for i, (c, h) in enumerate(model.initial_state):
            feed_dict[c] = state[i].c
            feed_dict[h] = state[i].h
        vals = session.run(fetches, feed_dict)
        cost = vals["cost"]
        state = vals["final_state"]
        costs += cost
        iters += model.input.num_steps
        if verbose and step % (model.input.epoch_size // 10) == 10:
            # progress, running perplexity, and throughput in words/sec
            print("%.3f perplexity: %.3f speed: %.0f wps" %
                  (step * 1.0 / model.input.epoch_size, np.exp(costs / iters),
                   iters * model.input.batch_size * max(1, FLAGS.num_gpus) /
                   (time.time() - start_time)))
    return np.exp(costs / iters)
68,085 | gen-ko/ynn | refs/heads/master | /script/script_1_2.py | import pickle
import os
import matplotlib.pyplot as plt
# Locate the dump files written by an earlier training run.
full_path = os.path.realpath(__file__)
path, filename = os.path.split(full_path)
data_filepath = '../temp'
network_dump_filename = 'network.dump'
train_error_dump_filename = 'train_error.dump'
valid_error_dump_filename = 'valid_error.dump'
network_dump_filepath = os.path.join(path, data_filepath, network_dump_filename)
train_error_dump_filepath = os.path.join(path, data_filepath, train_error_dump_filename)
valid_error_dump_filepath = os.path.join(path, data_filepath, valid_error_dump_filename)
# NOTE: pickle.load is only safe because these are locally-produced dumps.
with open(network_dump_filepath, 'rb') as f:
    nn = pickle.load(f)
with open(train_error_dump_filepath, 'rb') as f:
    train_error = pickle.load(f)
with open(valid_error_dump_filepath, 'rb') as f:
    valid_error = pickle.load(f)
# Figure 1: train/valid loss curves stored on the pickled network object.
line_1, = plt.plot(nn._loss, label='train loss')
line_2, = plt.plot(nn._loss_valid, label='valid loss')
plt.legend(handles=[line_1, line_2])
plt.ylim(0, 0.2)
plt.xlim(0,200)
plt.xlabel('epoch')
plt.ylabel('cross-entropy loss')
plt.savefig(os.path.join(path, '../output/output-fig1.png'))
# Figure 2: classification error-rate curves.
plt.figure()
line_1, = plt.plot(train_error, label='train error')
line_2, = plt.plot(valid_error, label='valid error')
plt.legend(handles=[line_1, line_2])
plt.xlabel('epoch')
plt.ylabel('error rate')
plt.savefig(os.path.join(path, '../output/output-fig2.png'))
print('hi-2')
| {"/script/script_3_5_visualize.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_10_tanh.py": ["/src/load_data.py", "/src/network.py"], "/script/script_3_2_linear_hidden.py": ["/src/nlp.py", "/src/util/status.py"], "/src/callback.py": ["/src/train.py", "/src/plotter.py"], "/script/script_4_1_pbp_n.py": ["/src/util/status.py"], "/src/train.py": ["/src/network.py", "/src/util_functions.py", "/src/util/status.py"], "/script/script_2_3_sample.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_7_hidden_num.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_3_8_rnn_embedding_size.py": ["/src/nlp.py", "/src/util/status.py"], "/script/script_3_4_inference.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_5_two_hidden.py": ["/src/load_data.py", "/src/network.py"], "/src/network.py": ["/src/util_functions.py"], "/script/script_2_2_k.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_1_naive.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_4_2_plot_bp.py": ["/src/plotter.py"], "/script/script_1_4_learning_rate.py": ["/src/load_data.py", "/src/network.py"], "/script/script_1_1_naive.py": ["/src/callback.py", "/src/train.py", "/src/util/status.py"], "/script/script_3_1_preprocessing.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_5_autoencoder.py": ["/src/load_data.py"], "/script/script_1_9_ReLU.py": ["/src/load_data.py", "/src/network.py"], "/script/script_2_4_pretrain.py": ["/src/load_data.py", "/src/network.py"]} |
68,086 | gen-ko/ynn | refs/heads/master | /src/ytensor.py | # Created by Yuan Liu on Sep/21/17
import numpy
from scipy.special import expit
def fma(x: numpy.ndarray, w: numpy.ndarray, b: numpy.ndarray):
    """Affine transform: return dot(w, x) + b.

    NOTE(review): the original comment claimed x is (sample_num, d1) with
    w (d2, d1); numpy.dot(w, x) actually requires x's leading axis to have
    length d1 -- confirm the intended layout against the callers.
    """
    affine = numpy.dot(w, x)
    return affine + b
def sigmoid(x: numpy.ndarray):
    """Element-wise logistic function via SciPy's numerically stable expit."""
    result = expit(x)
    return result
def gibbs_sampling(x: numpy.ndarray, k, w):
    # NOTE(review): this free function is broken as written and looks like it
    # was lifted out of an RBM method without being adapted:
    #   * `b` (next line) and `self` (throughout) are undefined here, so any
    #     call raises NameError;
    #   * the parameter `k` is never used -- `range(0, 1, 1)` always runs a
    #     single Gibbs step (presumably `range(k)` was intended);
    #   * `w` is only used in the first fma; subsequent steps go through
    #     `self.layer` instead.
    # Left byte-identical pending clarification of the intended home (an RBM
    # method?) for this code.
    # x shape (sample_dimension, 1)
    h_t_prob = sigmoid(fma(x, w, b))
    h_t_sample = self.sample_h(h_t_prob)
    h_neg_prob = numpy.array(h_t_prob)
    h_neg_sample = numpy.array(h_t_sample)
    for ki in range(0, 1, 1):
        x_neg_prob = self.layer.backward(h_neg_sample)
        x_neg_sample = self.sample_x(x_neg_prob)
        h_neg_prob = self.layer.forward(x_neg_sample)
        h_neg_sample = self.sample_h(h_neg_prob)
    return [x_neg_prob, x_neg_sample, h_t_prob, h_neg_prob, h_neg_sample]
| {"/script/script_3_5_visualize.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_10_tanh.py": ["/src/load_data.py", "/src/network.py"], "/script/script_3_2_linear_hidden.py": ["/src/nlp.py", "/src/util/status.py"], "/src/callback.py": ["/src/train.py", "/src/plotter.py"], "/script/script_4_1_pbp_n.py": ["/src/util/status.py"], "/src/train.py": ["/src/network.py", "/src/util_functions.py", "/src/util/status.py"], "/script/script_2_3_sample.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_7_hidden_num.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_3_8_rnn_embedding_size.py": ["/src/nlp.py", "/src/util/status.py"], "/script/script_3_4_inference.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_5_two_hidden.py": ["/src/load_data.py", "/src/network.py"], "/src/network.py": ["/src/util_functions.py"], "/script/script_2_2_k.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_1_naive.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_4_2_plot_bp.py": ["/src/plotter.py"], "/script/script_1_4_learning_rate.py": ["/src/load_data.py", "/src/network.py"], "/script/script_1_1_naive.py": ["/src/callback.py", "/src/train.py", "/src/util/status.py"], "/script/script_3_1_preprocessing.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_5_autoencoder.py": ["/src/load_data.py"], "/script/script_1_9_ReLU.py": ["/src/load_data.py", "/src/network.py"], "/script/script_2_4_pretrain.py": ["/src/load_data.py", "/src/network.py"]} |
68,087 | gen-ko/ynn | refs/heads/master | /src/load_data.py | import numpy
# load 1 dimension / flatten data with the label as the last element of each line
def load_from_path(data_filepath):
    """Load comma-separated samples where the last column is the label.

    Returns a tuple ``(x, y)``: ``x`` holds the feature columns as floats,
    ``y`` the final column cast to int.
    """
    table = numpy.loadtxt(data_filepath, delimiter=',')
    features = table[:, :-1]
    labels = table[:, -1].astype(int)
    return features, labels
class DataStore(object):
    """Simple record tracking whether data has been loaded and its metadata."""

    def __init__(self):
        # Fresh store: nothing loaded, zero samples/dimensions, no source path.
        self.hasData, self.dataNum, self.dataDim = False, 0, 0
        self.dataPath: str = ''
# def feed_data(self, path: str):: TODO | {"/script/script_3_5_visualize.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_10_tanh.py": ["/src/load_data.py", "/src/network.py"], "/script/script_3_2_linear_hidden.py": ["/src/nlp.py", "/src/util/status.py"], "/src/callback.py": ["/src/train.py", "/src/plotter.py"], "/script/script_4_1_pbp_n.py": ["/src/util/status.py"], "/src/train.py": ["/src/network.py", "/src/util_functions.py", "/src/util/status.py"], "/script/script_2_3_sample.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_7_hidden_num.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_3_8_rnn_embedding_size.py": ["/src/nlp.py", "/src/util/status.py"], "/script/script_3_4_inference.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_5_two_hidden.py": ["/src/load_data.py", "/src/network.py"], "/src/network.py": ["/src/util_functions.py"], "/script/script_2_2_k.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_1_naive.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_4_2_plot_bp.py": ["/src/plotter.py"], "/script/script_1_4_learning_rate.py": ["/src/load_data.py", "/src/network.py"], "/script/script_1_1_naive.py": ["/src/callback.py", "/src/train.py", "/src/util/status.py"], "/script/script_3_1_preprocessing.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_5_autoencoder.py": ["/src/load_data.py"], "/script/script_1_9_ReLU.py": ["/src/load_data.py", "/src/network.py"], "/script/script_2_4_pretrain.py": ["/src/load_data.py", "/src/network.py"]} |
68,088 | gen-ko/ynn | refs/heads/master | /script/script_1_1_naive.py | # required python version: 3.6+
if __name__ == "__main__" and __package__ is None:
    # Running as a script: put the repo root on sys.path so `src.*` imports resolve.
    from sys import path
    from os.path import dirname as dir
    path.append(dir(path[0]))
    # __package__ = "src"
import numpy
import src.callback as cb
import src.train as utf
from src.util.status import DataStore
from src.util.status import TrainSettings
from src import network
from src import layer
import tensorflow as tf

# set the random seed for reproducible weight init / shuffling
numpy.random.seed(1099)
# MNIST via the (long-deprecated) tf.contrib loader — requires TF 1.x.
mnist = tf.contrib.learn.datasets.load_dataset("mnist")
x_train = mnist.train.images # 55000 x 784
x_valid = mnist.validation.images # 5000 x 784
y_train = mnist.train.labels
y_valid = mnist.validation.labels
data_store_train = DataStore(x_train, y_train)
data_store_valid = DataStore(x_valid, y_valid)
#############################
# Settings 1 and 2 (lr=0.001 / lr=0.002) are disabled below; only 3 and 4 run.
"""
print('Settings 1')
train_settings = TrainSettings(learning_rate=0.001, batch_size=16, momentum=0.0, plot_callback=cb.plot_callback,
                               loss_callback=cb.loss_callback, filename='script-1-1-1', epoch=200, prefix='e16')
layers = [layer.Linear(784, 100),
          #layer.BN(100, 100),
          layer.Sigmoid(100),
          layer.Linear(100, 10),
          layer.Softmax(10)]
mynn = network.MLP(layers)
utf.cross_train(mynn, data_store_train, data_store_valid, train_settings)
"""
#########################
"""
print('Settings 2')
train_settings = TrainSettings(learning_rate=0.002, batch_size=16, momentum=0.0, plot_callback=cb.plot_callback,
                               loss_callback=cb.loss_callback, filename='script-1-1-2', epoch=200, prefix='e16')
layers = [layer.Linear(784, 100),
          #layer.BN(100, 100),
          layer.Sigmoid(100),
          layer.Linear(100, 10),
          layer.Softmax(10)]
mynn = network.MLP(layers)
utf.cross_train(mynn, data_store_train, data_store_valid, train_settings)
"""
#############################
# Settings 3: lr=0.005, 200 epochs, 784-100-10 sigmoid MLP.
print('Settings 3')
train_settings = TrainSettings(learning_rate=0.005, batch_size=16, momentum=0.0, plot_callback=cb.plot_callback,
                               loss_callback=cb.loss_callback, filename='script-1-1-3', epoch=200, prefix='e16')
layers = [layer.Linear(784, 100),
          #layer.BN(100, 100),
          layer.Sigmoid(100),
          layer.Linear(100, 10),
          layer.Softmax(10)]
mynn = network.MLP(layers)
utf.cross_train(mynn, data_store_train, data_store_valid, train_settings)
# Settings 4: lr=0.01, 300 epochs, same architecture.
print('Settings 4')
train_settings = TrainSettings(learning_rate=0.01, batch_size=16, momentum=0.0, plot_callback=cb.plot_callback,
                               loss_callback=cb.loss_callback, filename='script-1-1-4', epoch=300, prefix='e16')
layers = [layer.Linear(784, 100),
          #layer.BN(100, 100),
          layer.Sigmoid(100),
          layer.Linear(100, 10),
          layer.Softmax(10)]
mynn = network.MLP(layers)
utf.cross_train(mynn, data_store_train, data_store_valid, train_settings)
| {"/script/script_3_5_visualize.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_10_tanh.py": ["/src/load_data.py", "/src/network.py"], "/script/script_3_2_linear_hidden.py": ["/src/nlp.py", "/src/util/status.py"], "/src/callback.py": ["/src/train.py", "/src/plotter.py"], "/script/script_4_1_pbp_n.py": ["/src/util/status.py"], "/src/train.py": ["/src/network.py", "/src/util_functions.py", "/src/util/status.py"], "/script/script_2_3_sample.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_7_hidden_num.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_3_8_rnn_embedding_size.py": ["/src/nlp.py", "/src/util/status.py"], "/script/script_3_4_inference.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_5_two_hidden.py": ["/src/load_data.py", "/src/network.py"], "/src/network.py": ["/src/util_functions.py"], "/script/script_2_2_k.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_1_naive.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_4_2_plot_bp.py": ["/src/plotter.py"], "/script/script_1_4_learning_rate.py": ["/src/load_data.py", "/src/network.py"], "/script/script_1_1_naive.py": ["/src/callback.py", "/src/train.py", "/src/util/status.py"], "/script/script_3_1_preprocessing.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_5_autoencoder.py": ["/src/load_data.py"], "/script/script_1_9_ReLU.py": ["/src/load_data.py", "/src/network.py"], "/script/script_2_4_pretrain.py": ["/src/load_data.py", "/src/network.py"]} |
68,089 | gen-ko/ynn | refs/heads/master | /script/script_3_1_preprocessing.py | # required python version: 3.6+
import os
import sys
import src.load_data as load_data
from src import layer
import src.rbm as rbm
import src.autoencoder as autoencoder
import matplotlib.pyplot as plt
import numpy
import os
import pickle
# Phase 0: Read File
full_path = os.path.realpath(__file__)
path, filename = os.path.split(full_path)
data_filepath = '../data'
save_filepath = '../output/n-gram'
dump_filepath = '../output/dump'
gram_name = '4-gram.png'
dictionary_name = 'dictionary.dump'
inv_dictionary_name = 'inv-dictionary.dump'
data_train_filename = 'train.txt'
data_valid_filename = 'val.txt'
data_train_dump_filename = 'train.dump'
data_valid_dump_filename = 'valid.dump'
data_train_filepath = os.path.join(path, data_filepath, data_train_filename)
data_valid_filepath = os.path.join(path, data_filepath, data_valid_filename)
gram_savepath = os.path.join(path, save_filepath, gram_name)
dictionary_dumppath = os.path.join(path, dump_filepath, dictionary_name)
inv_dictionary_dumppath = os.path.join(path, dump_filepath, inv_dictionary_name)
data_train_dump_filepath = os.path.join(path, dump_filepath, data_train_dump_filename)
data_valid_dump_filepath = os.path.join(path, dump_filepath, data_valid_dump_filename)
# load text data from path
def load_from_path(data_filepath):
    """Read a text corpus as a numpy string array, one element per line.

    Fix: ``numpy.loadtxt(..., delimiter='\\n')`` raises ValueError on
    NumPy >= 1.23 (the rewritten loader rejects newline delimiters), so the
    file is read directly instead, mirroring loadtxt's old behavior of
    stripping fields and skipping blank and '#'-comment lines.

    Accepts either a filesystem path or an open file-like object.
    """
    if hasattr(data_filepath, 'read'):
        raw_lines = data_filepath.read().splitlines()
    else:
        with open(data_filepath) as fh:
            raw_lines = fh.read().splitlines()
    lines = [ln.strip() for ln in raw_lines]
    lines = [ln for ln in lines if ln and not ln.startswith('#')]
    return numpy.array(lines, dtype=str)
data_train = load_from_path(data_train_filepath)
data_valid = load_from_path(data_valid_filepath)
# Phase 1: split
# Create a vocabulary dictionary you are required to create an entry for every word in the training set
# also, make the data lower-cased
def split_lins(data_input):
    """Lower-case each line, tokenize on whitespace, wrap with START/END.

    Returns a list of token lists, one per input line.
    """
    all_lines: list = []
    for raw_line in data_input:
        tokens = raw_line.lower().split()
        all_lines.append(['START'] + tokens + ['END'])
    return all_lines
all_lines_train = split_lins(data_train)
all_lines_valid = split_lins(data_valid)
# build the dictionary
def build_vocabulary(all_lines: list):
    """Count word frequencies over every tokenized line."""
    vocabulary: dict = {}
    for line in all_lines:
        for word in line:
            vocabulary[word] = vocabulary.get(word, 0) + 1
    return vocabulary
vocabulary = build_vocabulary(all_lines_train)
# truncate the dictionary
def truncate_dictionary(dictionary: dict, size: int):
    """Keep the most frequent words and fold the tail's counts into 'UNK'.

    Fixes: operates on the ``dictionary`` argument (the original read the
    module-level ``vocabulary``) and honours ``size`` throughout (the
    original's second pass hard-coded 8000).

    Matching the original's final pass, the result contains ``size - 1``
    entries: after 'UNK' is added the table is re-ranked and the least
    frequent entry is dropped.
    """
    ranked = sorted(dictionary.items(), key=lambda item: item[1], reverse=True)
    truncated: dict = {}
    for word, freq in ranked[:size - 1]:
        truncated[word] = freq
    # Fold the frequency mass of every dropped word into a single UNK token.
    truncated['UNK'] = sum(freq for _, freq in ranked[size - 1:])
    # Re-rank with UNK included and keep the top (size - 1) entries.
    reranked = sorted(truncated.items(), key=lambda item: item[1], reverse=True)
    return dict(reranked[:size - 1])
truncated_vocabulary = truncate_dictionary(vocabulary, 8000)
# generate a dictionary map string to IDs
def gen_word_to_id_dict(vocabulary):
    """Map each word in ``vocabulary`` to a sequential integer id (0, 1, ...).

    Fix: enumerate the ``vocabulary`` argument; the original iterated the
    module-level ``truncated_vocabulary`` and ignored its parameter, so it
    only worked by accident inside the script.
    """
    return {word: idn for idn, word in enumerate(vocabulary)}
dict_word_to_id = gen_word_to_id_dict(truncated_vocabulary)
# replace less frequent words in all_lines with 'UNK'
def replace_with_unk(all_lines, dict_word_to_id):
    """Translate token lists into id lists, mapping out-of-vocabulary words
    to the 'UNK' id.

    Fix: membership is tested against ``dict_word_to_id`` (the mapping that
    is actually used for the lookup); the original consulted the
    module-level ``truncated_vocabulary``, silently coupling the function
    to script state.
    """
    unk_id = dict_word_to_id['UNK']
    return [[dict_word_to_id.get(word, unk_id) for word in line]
            for line in all_lines]
tokenized_lines_train = replace_with_unk(all_lines_train, dict_word_to_id)
tokenized_lines_valid = replace_with_unk(all_lines_valid, dict_word_to_id)
# build a 4-gram
def build_four_gram(tokenized_lines):
    """Count every consecutive 4-token window over the corpus.

    Returns (gram, count) pairs sorted by descending frequency.
    """
    four_gram: dict = {}
    for line in tokenized_lines:
        if len(line) < 4:
            continue  # too short to contain a 4-gram
        for start in range(len(line) - 3):
            window = tuple(line[start:start + 4])
            four_gram[window] = four_gram.get(window, 0) + 1
    # Ascending sort then reverse, preserving the original's tie ordering.
    ranked = sorted(four_gram.items(), key=lambda item: item[1])
    ranked.reverse()
    return ranked
four_gram_train = build_four_gram(tokenized_lines_train)
four_gram_valid = build_four_gram(tokenized_lines_valid)
# plot the 4-gram
def plot_four_gram(four_gram: list):
    """Plot gram frequency versus rank and save the figure to `gram_savepath`."""
    total = len(four_gram)
    x_axis = numpy.arange(total)
    y_axis = numpy.zeros(total)
    for idx, (_, freq) in enumerate(four_gram):
        y_axis[idx] = freq
    plt.figure(1)
    line_1, = plt.plot(y_axis, label='4-gram')
    plt.xlabel('the ids sorted by the frequency')
    plt.ylabel('frequency')
    plt.title('4-gram')
    plt.savefig(gram_savepath)
    plt.close(1)
    return
flag_plot_four_gram = True
if flag_plot_four_gram:
plot_four_gram(four_gram_train)
# invert the key-value pair of dictionary
inv_dictionary = {v: k for k, v in dict_word_to_id.items()}
def print_top_four_gram(four_gram: list, top_num: int):
    """Print the `top_num` most frequent grams as words via `inv_dictionary`."""
    for rank in range(top_num):
        gram = four_gram[rank][0]
        words = (inv_dictionary[token] for token in gram)
        print(*words, sep=' ')
    return
flag_print_most_frequent_grams: bool = True
if flag_print_most_frequent_grams:
print_top_four_gram(four_gram_train, 50)
# dump the dictionary for later use
with open(dictionary_dumppath, 'wb+') as f:
pickle.dump(dict_word_to_id, f)
with open(inv_dictionary_dumppath, 'wb+') as f:
pickle.dump(inv_dictionary, f)
# generate the one-hot representation of inputs
# get the number of the inputs
def process_four_gram(four_gram):
    """Expand (gram, count) pairs into an (N, 4) integer array, one row per
    occurrence of each gram.

    Fixes: the original grew ``X`` with ``numpy.concatenate`` inside the
    loop guarded by ``except NameError`` (quadratic and fragile) and raised
    UnboundLocalError on empty input. Rows are now collected in a list and
    converted once; empty input yields an empty (0, 4) array.
    """
    rows = []
    for gram, count in four_gram:
        w1, w2, w3, w4 = gram[0], gram[1], gram[2], gram[3]
        # One row per observed occurrence.
        rows.extend([w1, w2, w3, w4] for _ in range(count))
    if not rows:
        return numpy.empty((0, 4), dtype=int)
    return numpy.array(rows)
X_train = process_four_gram(four_gram_train)
X_valid = process_four_gram(four_gram_valid)
# dump the one-hot representation of input
with open(data_train_dump_filepath, 'wb+') as f:
pickle.dump(X_train, f)
with open(data_valid_dump_filepath, 'wb+') as f:
pickle.dump(X_valid, f)
print(truncated_vocabulary['UNK'])
# Phase 3: Compute the number of trainable parameters in the network
flag_print_num_trainable: bool = True
if flag_print_num_trainable:
print(8000 * 16 + 128 * 16 * 3 + 128 + 8000 * 128 + 8000) | {"/script/script_3_5_visualize.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_10_tanh.py": ["/src/load_data.py", "/src/network.py"], "/script/script_3_2_linear_hidden.py": ["/src/nlp.py", "/src/util/status.py"], "/src/callback.py": ["/src/train.py", "/src/plotter.py"], "/script/script_4_1_pbp_n.py": ["/src/util/status.py"], "/src/train.py": ["/src/network.py", "/src/util_functions.py", "/src/util/status.py"], "/script/script_2_3_sample.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_7_hidden_num.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_3_8_rnn_embedding_size.py": ["/src/nlp.py", "/src/util/status.py"], "/script/script_3_4_inference.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_5_two_hidden.py": ["/src/load_data.py", "/src/network.py"], "/src/network.py": ["/src/util_functions.py"], "/script/script_2_2_k.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_1_naive.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_4_2_plot_bp.py": ["/src/plotter.py"], "/script/script_1_4_learning_rate.py": ["/src/load_data.py", "/src/network.py"], "/script/script_1_1_naive.py": ["/src/callback.py", "/src/train.py", "/src/util/status.py"], "/script/script_3_1_preprocessing.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_5_autoencoder.py": ["/src/load_data.py"], "/script/script_1_9_ReLU.py": ["/src/load_data.py", "/src/network.py"], "/script/script_2_4_pretrain.py": ["/src/load_data.py", "/src/network.py"]} |
68,090 | gen-ko/ynn | refs/heads/master | /script/script_2_5_autoencoder.py | # required python version: 3.6+
import os
import sys
import src.load_data as load_data
from src import plot_data
from src import layer
import src.autoencoder as autoencoder
import matplotlib.pyplot as plt
import numpy
import os

# Format of data:
# digitstrain.txt contains 3000 lines, each line 785 numbers, comma delimited
# (784 pixel values followed by the label).
full_path = os.path.realpath(__file__)
path, filename = os.path.split(full_path)
data_filepath = '../data'
data_train_filename = 'digitstrain.txt'
data_valid_filename = 'digitsvalid.txt'
data_test_filename = 'digitstest.txt'
data_train_filepath = os.path.join(path, data_filepath, data_train_filename)
data_valid_filepath = os.path.join(path, data_filepath, data_valid_filename)
data_test_filepath = os.path.join(path, data_filepath, data_test_filename)
print('start initializing...')
numpy.random.seed(1099)  # fixed seed for reproducible training
x_train, _ = load_data.load_from_path(data_train_filepath)
x_valid, _ = load_data.load_from_path(data_valid_filepath)
# 784-input autoencoder with 100 hidden units; 28x28 weight visualisation,
# early stopping over a sliding validation window.
myAE = autoencoder.AutoEncoder(28*28, hidden_units=100)
myAE.set_visualize(28,28)
myAE.set_autostop(window=40, stride=20)
myAE.train(x_train, x_valid, k=1, epoch=3000, learning_rate=0.03, batch_size=128, plotfile='script-2-5-AE') | {"/script/script_3_5_visualize.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_10_tanh.py": ["/src/load_data.py", "/src/network.py"], "/script/script_3_2_linear_hidden.py": ["/src/nlp.py", "/src/util/status.py"], "/src/callback.py": ["/src/train.py", "/src/plotter.py"], "/script/script_4_1_pbp_n.py": ["/src/util/status.py"], "/src/train.py": ["/src/network.py", "/src/util_functions.py", "/src/util/status.py"], "/script/script_2_3_sample.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_7_hidden_num.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_3_8_rnn_embedding_size.py": ["/src/nlp.py", "/src/util/status.py"], "/script/script_3_4_inference.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_5_two_hidden.py": ["/src/load_data.py", "/src/network.py"], "/src/network.py": ["/src/util_functions.py"], "/script/script_2_2_k.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_1_naive.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_4_2_plot_bp.py": ["/src/plotter.py"], "/script/script_1_4_learning_rate.py": ["/src/load_data.py", "/src/network.py"], "/script/script_1_1_naive.py": ["/src/callback.py", "/src/train.py", "/src/util/status.py"], "/script/script_3_1_preprocessing.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_5_autoencoder.py": ["/src/load_data.py"], "/script/script_1_9_ReLU.py": ["/src/load_data.py", "/src/network.py"], "/script/script_2_4_pretrain.py": ["/src/load_data.py", "/src/network.py"]} |
68,091 | gen-ko/ynn | refs/heads/master | /script/script_1_9_ReLU.py | # required python version: 3.6+
import os
import sys
import src.load_data as load_data
from src import plot_data
from src import layer
from src.network import NeuralNetwork_Dumpable as NN
import src.network as network
import matplotlib.pyplot as plt
import numpy
import os

# Format of data:
# digitstrain.txt contains 3000 lines, each line 785 numbers, comma delimited.
full_path = os.path.realpath(__file__)
path, filename = os.path.split(full_path)
data_filepath = '../data'
data_train_filename = 'digitstrain.txt'
data_valid_filename = 'digitsvalid.txt'
data_test_filename = 'digitstest.txt'
data_train_filepath = os.path.join(path, data_filepath, data_train_filename)
data_valid_filepath = os.path.join(path, data_filepath, data_valid_filename)
data_test_filepath = os.path.join(path, data_filepath, data_test_filename)
network.init_nn(random_seed=1099)
x_train, y_train = load_data.load_from_path(data_train_filepath)
x_valid, y_valid = load_data.load_from_path(data_valid_filepath)
# Activation selector: flip is_ReLU / is_Tanh to compare activations.
is_ReLU = False
is_Tanh = (not is_ReLU ) and True
# NOTE(review): each branch stacks layer.Linear(100, 100) followed by
# layer.Softmax(10, 10); sibling scripts use Linear(100, 10) before a
# 10-way softmax — this looks like a dimension mismatch. Confirm against
# the layer module before relying on these runs.
if is_ReLU:
    print('start initializing ReLU...')
    layers = [layer.Linear(784, 100),
              layer.ReLU(100, 100),
              layer.Linear(100, 100),
              layer.Softmax(10, 10)]
    myNN = NN(layers, learning_rate=0.1, regularizer=0.0001, momentum=0.9)
    myNN.train(x_train, y_train, x_valid, y_valid, epoch=300, batch_size=32)
elif is_Tanh:
    print('start initializing Tanh...')
    layers = [layer.Linear(784, 100),
              layer.Tanh(100, 100),
              layer.Linear(100, 100),
              layer.Softmax(10, 10)]
    myNN = NN(layers, learning_rate=0.1, regularizer=0.0001, momentum=0.9)
    myNN.train(x_train, y_train, x_valid, y_valid, epoch=300, batch_size=32)
else:
    print('start initializing Sigmoid...')
    layers = [layer.Linear(784, 100),
              layer.Sigmoid(100, 100),
              layer.Linear(100, 100),
              layer.Softmax(10, 10)]
    myNN = NN(layers, learning_rate=0.1, regularizer=0.0001, momentum=0.9)
myNN.train(x_train, y_train, x_valid, y_valid, epoch=300, batch_size=32) | {"/script/script_3_5_visualize.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_10_tanh.py": ["/src/load_data.py", "/src/network.py"], "/script/script_3_2_linear_hidden.py": ["/src/nlp.py", "/src/util/status.py"], "/src/callback.py": ["/src/train.py", "/src/plotter.py"], "/script/script_4_1_pbp_n.py": ["/src/util/status.py"], "/src/train.py": ["/src/network.py", "/src/util_functions.py", "/src/util/status.py"], "/script/script_2_3_sample.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_7_hidden_num.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_3_8_rnn_embedding_size.py": ["/src/nlp.py", "/src/util/status.py"], "/script/script_3_4_inference.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_5_two_hidden.py": ["/src/load_data.py", "/src/network.py"], "/src/network.py": ["/src/util_functions.py"], "/script/script_2_2_k.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_1_naive.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_4_2_plot_bp.py": ["/src/plotter.py"], "/script/script_1_4_learning_rate.py": ["/src/load_data.py", "/src/network.py"], "/script/script_1_1_naive.py": ["/src/callback.py", "/src/train.py", "/src/util/status.py"], "/script/script_3_1_preprocessing.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_5_autoencoder.py": ["/src/load_data.py"], "/script/script_1_9_ReLU.py": ["/src/load_data.py", "/src/network.py"], "/script/script_2_4_pretrain.py": ["/src/load_data.py", "/src/network.py"]} |
68,092 | gen-ko/ynn | refs/heads/master | /script/script_2_4_pretrain.py | # required python version: 3.6+
import os
import sys
import src.load_data as load_data
from src import plot_data
from src import layer
from src.network import NeuralNetwork_Dumpable as NN
import src.network as network
import matplotlib.pyplot as plt
import numpy
import os
import pickle

# Format of data:
# digitstrain.txt contains 3000 lines, each line 785 numbers, comma delimited.
full_path = os.path.realpath(__file__)
path, filename = os.path.split(full_path)
data_filepath = '../data'
data_train_filename = 'digitstrain.txt'
data_valid_filename = 'digitsvalid.txt'
data_test_filename = 'digitstest.txt'
data_train_filepath = os.path.join(path, data_filepath, data_train_filename)
data_valid_filepath = os.path.join(path, data_filepath, data_valid_filename)
data_test_filepath = os.path.join(path, data_filepath, data_test_filename)
# x range [0, 1]
x_train, y_train = load_data.load_from_path(data_train_filepath)
x_valid, y_valid = load_data.load_from_path(data_valid_filepath)
#l1 = Layer(784, 100, 10)
print("start initiliazing...")
# SET UP GLOBAL PARAMETERS shared by all four pretraining comparisons below.
lr = 0.05
momentum = 0.0
regularizer = 0.0
numpy.random.seed(1099)
# Run 1: first layer initialized from RBM-pretrained weights.
layers = [layer.Linear(784, 100),
          layer.Sigmoid(100, 100),
          layer.Linear(100, 10),
          layer.Softmax(10, 10)]
myNN = NN(layers, learning_rate=lr, momentum=momentum, regularizer=regularizer)
full_path = os.path.realpath(__file__)
path, _ = os.path.split(full_path)
data_filepath = '../output/dump'
filepath = os.path.join(path, data_filepath, 'script-2-1-naive-autostop-rbm-whx-2639.dump')
with open(filepath, 'rb') as f:
    w, h_bias, x_bias = pickle.load(f)
# Load pretrained first-layer weights/biases — presumably W is stored as
# (visible, hidden), hence the transpose; confirm against src/rbm.py.
myNN.layers[0].w = w.T
myNN.layers[0].b = h_bias
myNN.train(x_train, y_train, x_valid, y_valid, epoch=200, batch_size=32)
# Run 2: first layer pretrained with a plain autoencoder.
layers = [layer.Linear(784, 100),
          layer.Sigmoid(100, 100),
          layer.Linear(100, 10),
          layer.Softmax(10, 10)]
myNN = NN(layers, learning_rate=lr, momentum=momentum, regularizer=regularizer)
filepath = os.path.join(path, data_filepath, 'script-2-5-AE-autostop-rbm-whx-1579.dump')
with open(filepath, 'rb') as f:
    w, h_bias, x_bias = pickle.load(f)
myNN.layers[0].w = w.T
myNN.layers[0].b = h_bias
myNN.train(x_train, y_train, x_valid, y_valid, epoch=200, batch_size=32)
# Run 3: first layer pretrained with a denoising autoencoder.
layers = [layer.Linear(784, 100),
          layer.Sigmoid(100, 100),
          layer.Linear(100, 10),
          layer.Softmax(10, 10)]
myNN = NN(layers, learning_rate=lr, momentum=momentum, regularizer=regularizer)
filepath = os.path.join(path, data_filepath, 'script-2-6-DAE-autostop-rbm-whx-1299.dump')
with open(filepath, 'rb') as f:
    w, h_bias, x_bias = pickle.load(f)
myNN.layers[0].w = w.T
myNN.layers[0].b = h_bias
myNN.train(x_train, y_train, x_valid, y_valid, epoch=200, batch_size=32)
# Run 4: baseline with random initialization (no pretraining).
layers = [layer.Linear(784, 100),
          layer.Sigmoid(100, 100),
          layer.Linear(100, 10),
          layer.Softmax(10, 10)]
myNN = NN(layers, learning_rate=lr, momentum=momentum, regularizer=regularizer)
myNN.train(x_train, y_train, x_valid, y_valid, epoch=200, batch_size=32) | {"/script/script_3_5_visualize.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_10_tanh.py": ["/src/load_data.py", "/src/network.py"], "/script/script_3_2_linear_hidden.py": ["/src/nlp.py", "/src/util/status.py"], "/src/callback.py": ["/src/train.py", "/src/plotter.py"], "/script/script_4_1_pbp_n.py": ["/src/util/status.py"], "/src/train.py": ["/src/network.py", "/src/util_functions.py", "/src/util/status.py"], "/script/script_2_3_sample.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_7_hidden_num.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_3_8_rnn_embedding_size.py": ["/src/nlp.py", "/src/util/status.py"], "/script/script_3_4_inference.py": ["/src/load_data.py", "/src/nlp.py"], "/script/script_1_5_two_hidden.py": ["/src/load_data.py", "/src/network.py"], "/src/network.py": ["/src/util_functions.py"], "/script/script_2_2_k.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_1_naive.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_4_2_plot_bp.py": ["/src/plotter.py"], "/script/script_1_4_learning_rate.py": ["/src/load_data.py", "/src/network.py"], "/script/script_1_1_naive.py": ["/src/callback.py", "/src/train.py", "/src/util/status.py"], "/script/script_3_1_preprocessing.py": ["/src/load_data.py", "/src/rbm.py"], "/script/script_2_5_autoencoder.py": ["/src/load_data.py"], "/script/script_1_9_ReLU.py": ["/src/load_data.py", "/src/network.py"], "/script/script_2_4_pretrain.py": ["/src/load_data.py", "/src/network.py"]} |
68,093 | WeiGai/SID | refs/heads/main | /data_processing/dataset_augmentation.py | #!/usr/bin/env python
# coding: utf-8
# In[10]:
import cv2
import os
from os import listdir
from os.path import isfile, join
from PIL import Image as Image
import numpy as np
from scipy.optimize import curve_fit
def relit(x, a, b):
    """Affine relighting model: y = (a * x/255 + b) * 255.

    Used as the model function for scipy.optimize.curve_fit below.
    Fix: ``np.float`` was removed in NumPy 1.24; cast with builtin float.
    """
    return (a * x.astype(float) / 255 + b) * 255
from matplotlib import pyplot as plt
def plshow(im, title='MINE'):
    """Display `im` with matplotlib; grayscale colormap for 2-D inputs."""
    is_color = len(im.shape) > 2
    if is_color:
        # (The BGR->RGB conversion was left disabled in the original too.)
        plt.imshow(im)
    else:
        plt.imshow(im, cmap='gray')
    plt.title(title)
    plt.rcParams["figure.figsize"] = (80, 12)
    plt.show()
# ISTD dataset layout: shadow images (train_A), masks (train_B),
# shadow-free targets (train_C_fixed_ours); augmented output under des_path.
sd_path = 'dataset/ISTD/train_A'
mask_path = 'dataset/ISTD/train_B'
sdfree_path = 'dataset/ISTD/train_C_fixed_ours'
out = 'dataset/ISTD/train_param'
des_path = 'dataset/ISTD_augmented/'
for d in ['train_A','train_B','train_C']:
    try:
        os.makedirs(os.path.join(des_path,d))
    except:
        # Most likely the directory already exists; proceed either way.
        print('cant create folder')
im_list = [f for f in listdir(sd_path) if isfile(join(sd_path, f)) and f.endswith('png')]
print(len(im_list),im_list[0])
# 5x5 structuring element used to erode shadow masks below.
kernel = np.ones((5,5),np.uint8)
def im_relit(Rpopt, Gpopt, Bpopt, dump):
    """Relight `dump` per channel with (scale, offset) params fit by `relit`.

    Each channel is mapped as x/255 * scale + offset, then rescaled to the
    [0, 255] range. Returns a float array (not clipped).
    Fix: ``np.float`` was removed in NumPy 1.24; cast with builtin float.
    """
    sdim = dump.copy()
    sdim.setflags(write=1)
    sdim = sdim.astype(float)
    sdim[:, :, 0] = (sdim[:, :, 0] / 255) * Rpopt[0] + Rpopt[1]
    sdim[:, :, 1] = (sdim[:, :, 1] / 255) * Gpopt[0] + Gpopt[1]
    sdim[:, :, 2] = (sdim[:, :, 2] / 255) * Bpopt[0] + Bpopt[1]
    return sdim * 255
def im_darken(Rpopt, Gpopt, Bpopt, dump, f):
    """Invert the relighting model per channel: x = (y/255 - offset) / (scale * f).

    `f` scales the fitted slope to synthesize darker/lighter shadow variants.
    Returns a float array rescaled to the [0, 255] range (not clipped).
    Fix: ``np.float`` was removed in NumPy 1.24; cast with builtin float.
    """
    sdim = dump.copy()
    sdim.setflags(write=1)
    sdim = sdim.astype(float)
    sdim[:, :, 0] = ((sdim[:, :, 0] / 255) - Rpopt[1]) / (Rpopt[0] * f)
    sdim[:, :, 1] = ((sdim[:, :, 1] / 255) - Gpopt[1]) / (Gpopt[0] * f)
    sdim[:, :, 2] = ((sdim[:, :, 2] / 255) - Bpopt[1]) / (Bpopt[0] * f)
    return sdim * 255
errors= []
# For each image: fit a per-channel affine relighting model between shadowed
# and shadow-free pixels, derive a soft shadow matte, then synthesize new
# shadow images at several darkness factors F.
# NOTE(review): only im_list[1:10] is processed — looks like a debugging
# slice; widen before generating the full augmented dataset.
for im in im_list[1:10]:
    sd = np.asarray(Image.open(join(sd_path,im)))
    mean_sdim = np.mean(sd,axis=2)
    mask_ori = np.asarray(Image.open(join(mask_path,im)))
    # Erode the mask so the regression only sees confident shadow pixels.
    mask = cv2.erode(mask_ori ,kernel,iterations = 2)
    sdfree = np.asarray(Image.open(join(sdfree_path,im)))
    mean_sdfreeim = np.mean(sdfree,axis=2)
    # Pixels for the regression function: inside the eroded mask, not too
    # dark (>5) or bright (<230), and visibly changed by the shadow (>10).
    i, j = np.where(np.logical_and(np.logical_and(np.logical_and(mask>=1,mean_sdim>5),mean_sdfreeim<230),np.abs(mean_sdim-mean_sdfreeim)>10))
    source = sd*0
    source[tuple([i,j])] = sd[tuple([i,j])]
    target = sd*0
    target[tuple([i,j])]= sdfree[tuple([i,j])]
    # Selected pixel values per channel: source = shadowed, target = shadow-free.
    R_s = source[:,:,0][tuple([i,j])]
    G_s = source[:,:,1][tuple([i,j])]
    B_s = source[:,:,2][tuple([i,j])]
    R_t = target[:,:,0][tuple([i,j])]
    G_t = target[:,:,1][tuple([i,j])]
    B_t = target[:,:,2][tuple([i,j])]
    # Fit y = (a*x/255 + b)*255 per channel with bounded slope/offset.
    c_bounds = [[1,-0.1],[10,0.5]]
    Rpopt, pcov = curve_fit(relit, R_s, R_t,bounds=c_bounds)
    Gpopt, pcov = curve_fit(relit, G_s, G_t,bounds=c_bounds)
    Bpopt, pcov = curve_fit(relit, B_s, B_t,bounds=c_bounds)
    relitim = im_relit(Rpopt,Gpopt,Bpopt,sd)
    # Soft matte — presumably a per-pixel blending weight between the
    # shadow-free image and the darkened one (+1 guards division by zero).
    matte = (relitim - sdfree +1)/ (relitim - sd +1)
    F = [0.8,0.9,1.1,1.2]
    for f in F:
        # Darken the shadow-free image, then composite through the matte.
        darkenim = im_darken(Rpopt,Gpopt,Bpopt,sdfree,f)
        new_sdim = sdfree * matte + (1-matte) * darkenim
        new_sdim[new_sdim>255] = 255
        new_sdim[new_sdim<0] = 0
        # NOTE(review): astype(np.uint) yields platform-width unsigned ints;
        # uint8 is the conventional image dtype — confirm intended.
        plshow(new_sdim.astype(np.uint))
# In[ ]:
| {"/src/models/SIDPAMI_model.py": ["/src/models/distangle_model.py"]} |
68,094 | WeiGai/SID | refs/heads/main | /src/data/weaklyshadowparam_dataset.py | import os.path
import torchvision.transforms as transforms
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset
from PIL import Image,ImageChops
from PIL import ImageFilter
import torch
from pdb import set_trace as st
import random
import numpy as np
import time
class WeaklyShadowParamDataset(BaseDataset):
    """Shadow-parameter dataset mixing real (opt.dataroot) and synthetic
    (opt.wdataroot) triplets: shadow image A, shadow mask B, shadow-free
    target C, plus a per-image illumination-parameter text file.

    Fixes: the removed NumPy aliases ``np.float`` / ``np.int`` (gone since
    NumPy 1.24) are replaced with the builtins, and the parameter file is
    closed after reading. Behavior is otherwise unchanged.
    """

    def initialize(self, opt):
        """Index both the real and the weakly-supervised (fake) image sets
        and build the tensor transforms."""
        self.opt = opt
        self.root = opt.dataroot
        self.all = []
        self.real_only = []
        self.fake_only = []
        self._collect(opt.dataroot, opt.phase, isreal=True)
        self._collect(opt.wdataroot, opt.phase, isreal=False)
        transform_list = [transforms.ToTensor(),
                          transforms.Normalize(mean=opt.norm_mean,
                                               std=opt.norm_std)]
        self.transformA = transforms.Compose(transform_list)
        self.transformB = transforms.Compose([transforms.ToTensor()])
        self.stats()

    def _collect(self, dataroot, phase, isreal):
        """Register every .png under <dataroot>/<phase>A with its mask, GT
        and parameter-file paths."""
        dir_A = os.path.join(dataroot, phase + 'A')
        dir_B = os.path.join(dataroot, phase + 'B')
        dir_C = os.path.join(dataroot, phase + 'C')
        dir_param = os.path.join(dataroot, phase + 'param')
        for root, _, fnames in sorted(os.walk(dir_A)):
            for fname in fnames:
                if not fname.endswith('.png'):
                    continue
                X = dict()
                X['im_path'] = os.path.join(root, fname)
                X['mask_path'] = os.path.join(dir_B, fname)
                X['GT_path'] = os.path.join(dir_C, fname)
                X['param_path'] = os.path.join(dir_param, fname + '.txt')
                X['isreal'] = isreal
                X['imname'] = fname
                (self.real_only if isreal else self.fake_only).append(X)
                self.all.append(X)

    def stats(self):
        """Print a summary of the indexed dataset."""
        print("Dataset type: %s " % (self.name()))
        print("Total Image: %d " % (len(self.all)))
        print("Real / FAke : %d / %d " % (len(self.real_only), len(self.all) - len(self.real_only)))

    def __getitem__(self, index):
        """Sample one record and return its augmented tensors plus metadata.

        The incoming ``index`` is overridden: a record is drawn at random,
        from the real set with probability ``opt.s_real`` (default 0.5).
        """
        birdy = {}
        if not hasattr(self.opt, 's_real'):
            self.opt.s_real = 0.5
        if random.random() < self.opt.s_real:
            choosen_set = 'real_only'
        else:
            choosen_set = 'fake_only'
        index = random.randint(0, len(getattr(self, choosen_set)) - 1)
        choosen = getattr(self, choosen_set)[index]
        A_path = choosen['im_path']
        imname = choosen['imname']
        B_path = choosen['mask_path']
        A_img = Image.open(A_path).convert('RGB')
        # Illumination parameters: whitespace-separated floats, first six kept.
        with open(choosen['param_path']) as sparam:
            line = sparam.read()
        shadow_param = np.asarray([float(i) for i in line.split(" ") if i.strip()])
        shadow_param = shadow_param[0:6]
        ow = A_img.size[0]
        oh = A_img.size[1]
        w = float(A_img.size[0])
        h = float(A_img.size[1])
        if os.path.isfile(B_path):
            B_img = Image.open(B_path)
        else:
            # Missing mask: substitute an all-zero (no-shadow) mask.
            print('MASK NOT FOUND : %s' % (B_path))
            B_img = Image.fromarray(np.zeros((int(w), int(h)), dtype=float), mode='L')
        birdy['C'] = Image.open(choosen['GT_path']).convert('RGB')
        loadSize = self.opt.loadSize
        if self.opt.randomSize:
            # Random scale jitter in [loadSize+1, loadSize*1.3).
            loadSize = np.random.randint(loadSize + 1, loadSize * 1.3, 1)[0]
        if self.opt.keep_ratio:
            # Resize so the shorter side equals loadSize, preserving aspect.
            if w > h:
                ratio = float(loadSize) / float(h)
                neww = int(w * ratio)
                newh = loadSize
            else:
                ratio = float(loadSize) / float(w)
                neww = loadSize
                newh = int(h * ratio)
        else:
            neww = loadSize
            newh = loadSize
        birdy['A'] = A_img
        birdy['B'] = B_img
        # Random flip/rotate augmentation, applied identically to A, B, C.
        t = [Image.FLIP_LEFT_RIGHT, Image.ROTATE_90]
        for i in range(0, 4):
            c = np.random.randint(0, 3, 1, dtype=int)[0]
            if c == 2:
                continue  # 1-in-3 chance of skipping this transform round
            for i in ['A', 'B', 'C']:
                if i in birdy:
                    birdy[i] = birdy[i].transpose(t[c])
        degree = np.random.randint(-20, 20, 1)[0]
        for i in ['A', 'B', 'C']:
            birdy[i] = birdy[i].rotate(degree)
        for k, im in birdy.items():
            birdy[k] = im.resize((neww, newh), Image.NEAREST)
        w = birdy['A'].size[0]
        h = birdy['A'].size[1]
        if self.opt.lambda_bd > 0:
            # Shadow-boundary map: morphological gradient of the (unresized) mask.
            birdy['bd_img'] = ImageChops.subtract(B_img.filter(ImageFilter.MaxFilter(3)),
                                                  B_img.filter(ImageFilter.MinFilter(3)))
        for k, im in birdy.items():
            birdy[k] = self.transformB(im)
        for i in ['A', 'C', 'B']:
            if i in birdy:
                birdy[i] = (birdy[i] - 0.5) * 2  # rescale [0, 1] -> [-1, 1]
        if not self.opt.no_crop:
            # Random fineSize crop shared by every tensor in the sample.
            w_offset = random.randint(0, max(0, w - self.opt.fineSize - 1))
            h_offset = random.randint(0, max(0, h - self.opt.fineSize - 1))
            for k, im in birdy.items():
                birdy[k] = im[:, h_offset:h_offset + self.opt.fineSize, w_offset:w_offset + self.opt.fineSize]
        if (not self.opt.no_flip) and random.random() < 0.5:
            # Horizontal flip via reversed index_select on the width axis.
            idx = [i for i in range(birdy['A'].size(2) - 1, -1, -1)]
            idx = torch.LongTensor(idx)
            for k, im in birdy.items():
                birdy[k] = im.index_select(2, idx)
        for k, im in birdy.items():
            birdy[k] = im.type(torch.FloatTensor)
        birdy['imname'] = imname
        birdy['w'] = ow
        birdy['h'] = oh
        birdy['A_paths'] = A_path
        # NOTE(review): key is spelled 'B_baths' upstream; kept byte-identical
        # for compatibility with existing consumers.
        birdy['B_baths'] = B_path
        # If the shadow area is too small, fall back to identity parameters.
        if torch.sum(birdy['B'] > 0) < 10:
            shadow_param = [0, 1, 0, 1, 0, 1]
        birdy['isreal'] = 1 if choosen['isreal'] else 0
        birdy['param'] = torch.FloatTensor(np.array(shadow_param))
        return birdy

    def __len__(self):
        # One epoch visits twice the real-image count (real/fake sampled ~50/50).
        return len(self.real_only) * 2

    def name(self):
        return 'ShadowParamDataset'
| {"/src/models/SIDPAMI_model.py": ["/src/models/distangle_model.py"]} |
68,095 | WeiGai/SID | refs/heads/main | /data_processing/compute_params.py | #!/usr/bin/env python
# coding: utf-8
# In[3]:
import cv2
import os
from os import listdir
from os.path import isfile, join
from PIL import Image as Image
import numpy as np
from scipy.optimize import curve_fit
def relit(x, a, b):
    """Linear relighting curve fitted by curve_fit.

    Normalises `x` to [0, 1], applies a*x + b, then rescales back to the
    0-255 range.  `np.float` (removed in NumPy 1.20+) replaced with the
    builtin `float`.
    """
    return (a * x.astype(float)/255 + b)*255
from matplotlib import pyplot as plt
def plshow(im,title='MINE'):
    """Display an image with matplotlib; use a gray colormap for 2-D arrays."""
    is_color = len(im.shape) > 2
    if is_color:
        plt.imshow(im)
    else:
        plt.imshow(im, cmap='gray')
    plt.title(title)
    plt.rcParams["figure.figsize"] = (80, 12)
    plt.show()
# Input/output locations for the ISTD training split.
sd_path = 'dataset/ISTD/train_A'
mask_path = 'dataset/ISTD/train_B'
sdfree_path = 'dataset/ISTD/train_C_fixed_ours'
out = 'dataset/ISTD/train_params/'
if not os.path.exists(out):
    os.makedirs(out)
# every PNG shadow image to process
im_list = [f for f in listdir(sd_path) if isfile(join(sd_path, f)) and f.endswith('png')]
print(len(im_list),im_list[0])
# structuring element used to erode the shadow mask below
kernel = np.ones((5,5),np.uint8)
def im_relit(Rpopt,Gpopt,Bpopt,dump):
    """Apply the fitted per-channel linear relighting model to an image.

    Each popt is (mul, add); channels are normalised to [0, 1], relit, and
    rescaled to the 0-255 range.  Works on a float copy so the input image
    is never mutated.  `np.float` (removed in NumPy 1.20+) replaced with
    the builtin `float`.
    """
    #some weird bugs with python
    sdim = dump.copy()
    sdim.setflags(write=1)
    sdim = sdim.astype(float)
    sdim[:,:,0] = (sdim[:,:,0]/255) * Rpopt[0] + Rpopt[1]
    sdim[:,:,1] = (sdim[:,:,1]/255) * Gpopt[0] + Gpopt[1]
    sdim[:,:,2] = (sdim[:,:,2]/255) * Bpopt[0] + Bpopt[1]
    sdim = sdim*255
    return sdim
errors= []
# NOTE(review): only images 10..19 are processed -- this looks like a
# debugging subset left in place; confirm before running on the full set.
for im in im_list[10:20]:
    sd = np.asarray(Image.open(join(sd_path,im)))
    mean_sdim = np.mean(sd,axis=2)
    mask_ori = np.asarray(Image.open(join(mask_path,im)))
    # erode the mask so only confidently-in-shadow pixels remain
    mask = cv2.erode(mask_ori ,kernel,iterations = 2)
    sdfree = np.asarray(Image.open(join(sdfree_path,im)))
    mean_sdfreeim = np.mean(sdfree,axis=2)
    # regression pixels: inside the eroded mask, not too dark or bright,
    # and with a noticeable shadow vs. shadow-free difference
    i, j = np.where(np.logical_and(np.logical_and(np.logical_and(mask>=1,mean_sdim>5),mean_sdfreeim<230),np.abs(mean_sdim-mean_sdfreeim)>10))
    source = sd*0
    source[tuple([i,j])] = sd[tuple([i,j])]
    target = sd*0
    target[tuple([i,j])]= sdfree[tuple([i,j])]
    R_s = source[:,:,0][tuple([i,j])]
    G_s = source[:,:,1][tuple([i,j])]
    B_s = source[:,:,2][tuple([i,j])]
    R_t = target[:,:,0][tuple([i,j])]
    G_t = target[:,:,1][tuple([i,j])]
    B_t = target[:,:,2][tuple([i,j])]
    # bounds: multiplier in [1, 10], offset in [-0.1, 0.5]
    c_bounds = [[1,-0.1],[10,0.5]]
    Rpopt, pcov = curve_fit(relit, R_s, R_t,bounds=c_bounds)
    Gpopt, pcov = curve_fit(relit, G_s, G_t,bounds=c_bounds)
    Bpopt, pcov = curve_fit(relit, B_s, B_t,bounds=c_bounds)
    relitim = im_relit(Rpopt,Gpopt,Bpopt,sd)
    # splice the relit shadow pixels back into the original and clamp
    final = sd.copy()
    final[tuple([i,j])] = relitim[tuple([i,j])]
    final[final>255] =255
    final[final<0] = 0
    plshow(final)
    # np.float alias removed in NumPy 1.20+: use the builtin float
    error = np.mean(np.abs(relitim[tuple([i,j])].astype(float) - sdfree[tuple([i,j])]).astype(float))
    print(error,Rpopt,Gpopt,Bpopt)
    # params saved as "Radd Rmul Gadd Gmul Badd Bmul"; the context manager
    # replaces the previously-unclosed open()/close() pair
    with open(join(out,im+'.txt'),"a") as f:
        f.write("%f %f %f %f %f %f"%(Rpopt[1],Rpopt[0],Gpopt[1],Gpopt[0],Bpopt[1],Bpopt[0]))
    errors.append(error)
print(np.mean(errors))
# bound-sweep results (y bound -> mean error):
# no bound - 8.55 ; 0.5 -> 8.86 ; 0.1 -> 15.69 ; 0.25 -> 10.83 ; 1 -> 8.86
| {"/src/models/SIDPAMI_model.py": ["/src/models/distangle_model.py"]} |
68,096 | WeiGai/SID | refs/heads/main | /src/models/SIDPAMI_model.py | import torch
from collections import OrderedDict
import time
import numpy as np
import torch.nn.functional as F
from util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks
import util.util as util
from .distangle_model import DistangleModel
from PIL import ImageOps,Image
class SIDPAMIModel(DistangleModel):
    """Shadow Image Decomposition model (PAMI'19).

    netG regresses 6 relighting parameters (additive + multiplicative per
    RGB channel) from the shadow image and its mask; netM predicts a soft
    shadow matte; netD is a GAN discriminator applied to the penumbra
    region of the recomposed image.
    """
    def name(self):
        # Human-readable identifier used by the framework for logging.
        return 'Shadow Image Decomposition model PAMI19'
    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        """Register model-specific command-line options and defaults."""
        parser.set_defaults(pool_size=0, no_lsgan=True, norm='batch')
        parser.set_defaults(dataset_mode='aligned')
        parser.add_argument('--wdataroot',default='None', help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
        parser.add_argument('--use_our_mask', action='store_true')
        parser.add_argument('--mask_train',type=str,default=None)
        parser.add_argument('--mask_test',type=str,default=None)
        return parser
    def initialize(self, opt):
        """Build the G/M (and, when training, D) networks, losses and
        optimizers."""
        BaseModel.initialize(self, opt)
        self.isTrain = opt.isTrain
        self.loss_names = ['G_param','alpha','rescontruction','G_GAN','D']
        # specify the images you want to save/display. The program will call base_model.get_current_visuals
        self.visual_names = ['input_img', 'alpha_pred','out','final','masked_fake']
        # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks
        self.model_names = ['G','M','D']
        # load/define networks
        opt.output_nc= 3
        # netG: image (3ch) + mask (1ch) -> 6 relighting parameters
        if self.opt.netG =='vgg':
            self.netG = networks.define_vgg(4,6, gpu_ids = self.gpu_ids)
        if self.opt.netG =='RESNEXT':
            self.netG = networks.define_G(4, 6, opt.ngf, 'RESNEXT', opt.norm,
                                          not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
        # netM: image (3) + relit image (3) + mask (1) -> 3-channel matte
        self.netM = networks.define_G(7, 3, opt.ngf, 'unet_256', opt.norm,
                                      not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
        if self.isTrain:
            use_sigmoid = opt.no_lsgan
            self.netD = networks.define_D(3, opt.ndf, opt.netD,
                                          opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, opt.init_gain, self.gpu_ids)
        self.netG.to(self.device)
        self.netM.to(self.device)
        print(self.netG)
        print(self.netM)
        if self.isTrain:
            self.fake_AB_pool = ImagePool(opt.pool_size)
            # define loss functions
            self.MSELoss = torch.nn.MSELoss()
            self.criterionL1 = torch.nn.L1Loss()
            self.bce = torch.nn.BCEWithLogitsLoss()
            self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan).to(self.device)
            # initialize optimizers
            self.optimizers = []
            #self.optimizer_G = torch.optim.SGD(self.netG.parameters(),
            #                                   lr=0.002, momentum=0.9)
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(),
                                                lr=opt.lr, betas=(opt.beta1, 0.999),weight_decay=1e-5)
            self.optimizer_M = torch.optim.Adam(self.netM.parameters(),
                                                lr=opt.lr, betas=(opt.beta1, 0.999),weight_decay=1e-5)
            self.optimizer_D = torch.optim.Adam(self.netD.parameters(),
                                                lr=opt.lr, betas=(opt.beta1, 0.999),weight_decay=1e-5)
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_M)
            self.optimizers.append(self.optimizer_D)
    def set_input(self, input):
        """Unpack a batch from the dataloader onto the device.

        Binarises the shadow mask into {-1, 1} and expands the penumbra
        mask to the image shape for elementwise use."""
        self.input_img = input['A'].to(self.device)
        self.shadow_mask = input['B'].to(self.device)
        self.shadow_param = input['param'].to(self.device).type(torch.float)
        self.shadow_mask = (self.shadow_mask>0.9).type(torch.float)*2-1
        self.penumbra = input['penumbra'].to(self.device).type(torch.float)
        self.penumbra = (self.penumbra>0).type(torch.float)
        self.penumbra = self.penumbra.expand(self.input_img.shape)
        self.shadowfree_img = input['C'].to(self.device)
        self.shadow_mask_3d= (self.shadow_mask>0).type(torch.float).expand(self.input_img.shape)
        if 'isreal' in input:
            self.isreal = input['isreal']
    def forward(self):
        """Predict relighting params, recompose a hard composite (phase 1),
        then predict the matte and the final soft composite."""
        inputG = torch.cat([self.input_img,self.shadow_mask],1)
        self.shadow_param_pred = torch.squeeze(self.netG(inputG))
        n = self.shadow_param_pred.shape[0]
        #m = self.shadow_param_pred.shape[1]
        w = inputG.shape[2]
        h = inputG.shape[3]
        #self.shadow_param_pred = torch.mean(self.shadow_param_pred.view([n,m,-1]),dim=2)
        # even indices are additive terms, odd indices multiplicative;
        # the predicted multiplier is remapped from [-1, 1] to [1, 5]
        add = self.shadow_param_pred[:,[0,2,4]]
        mul = (self.shadow_param_pred[:,[1,3,5]]*2) +3
        #mul = (mul +2) * 5/3
        add = add.view(n,3,1,1).expand((n,3,w,h))
        mul = mul.view(n,3,1,1).expand((n,3,w,h))
        addgt = self.shadow_param[:,[0,2,4]]
        mulgt = self.shadow_param[:,[1,3,5]]
        addgt = addgt.view(n,3,1,1).expand((n,3,w,h))
        mulgt = mulgt.view(n,3,1,1).expand((n,3,w,h))
        self.litgt = self.input_img.clone()/2+0.5
        self.lit = self.input_img.clone()/2+0.5
        self.lit = self.lit*mul + add
        self.litgt = (self.litgt*mulgt+addgt)*2-1
        # hard composite using the binary mask (phase-1 output)
        self.out = (self.input_img/2+0.5)*(1-self.shadow_mask_3d) + self.lit*self.shadow_mask_3d
        self.out = self.out*2-1
        #self.outgt = (self.input_img/2+0.5)*(1-self.alpha_3d) + self.lit*(self.alpha_3d)
        #self.outgt = self.outgt*2-1
        #lit.detach if no final loss for paramnet
        inputM = torch.cat([self.input_img,self.lit,self.shadow_mask],1)
        self.alpha_pred = self.netM(inputM)
        # matte remapped from [-1, 1] to [0, 1]
        self.alpha_pred = (self.alpha_pred +1) /2
        # final soft composite using the predicted matte
        self.final = (self.input_img/2+0.5)*(1-self.alpha_pred) + self.lit*(self.alpha_pred)
        self.final = self.final*2-1
        #GAN input: restrict real/fake to the penumbra region
        self.masked_fake = self.final*self.penumbra
        self.masked_real = self.shadowfree_img*self.penumbra
    def backward(self):
        """Generator-side losses: parameter regression + reconstruction +
        GAN term on the penumbra-masked output."""
        criterion = self.criterionL1
        lambda_ = self.opt.lambda_L1
        # NOTE(review): remaps the GT multiplier in place every call; safe
        # only because set_input() reloads shadow_param each batch.
        self.shadow_param[:,[1,3,5]] = (self.shadow_param[:,[1,3,5]])/2 - 1.5
        self.loss_G_param = criterion(self.shadow_param_pred, self.shadow_param) * lambda_
        self.loss_rescontruction = criterion(self.final,self.shadowfree_img) * lambda_
        pred_fake = self.netD(self.masked_fake)
        self.loss_G_GAN = self.criterionGAN(pred_fake,True)*self.opt.lambda_GAN
        self.loss = self.loss_rescontruction + self.loss_G_param + self.loss_G_GAN
        self.loss.backward()
    def backward_D(self):
        """Standard discriminator loss on penumbra-masked real/fake pairs."""
        pred_fake = self.netD(self.masked_fake.detach())
        pred_real = self.netD(self.masked_real)
        self.loss_D_real = self.criterionGAN(pred_real,True)
        self.loss_D_fake = self.criterionGAN(pred_fake,False)
        self.loss_D = (self.loss_D_real + self.loss_D_fake) * 0.5
        self.loss_D.backward()
    def optimize_parameters(self):
        """One training step: update G and M jointly, then D."""
        self.forward()
        self.optimizer_G.zero_grad()
        self.optimizer_M.zero_grad()
        self.backward()
        self.optimizer_G.step()
        self.optimizer_M.step()
        self.optimizer_D.zero_grad()
        self.backward_D()
        self.optimizer_D.step()
    def get_current_visuals(self):
        """Assemble up to 5 batch samples into one image grid, one row per
        sample with the tensors named in visual_names side by side.
        Synthetic (fake) samples are outlined with a green border."""
        t= time.time()
        nim = self.input_img.shape[0]
        visual_ret = OrderedDict()
        all =[]
        # NOTE(review): range(0, min(nim-1, 5)) skips the last sample and
        # yields nothing for a batch of 1 -- confirm min(nim, 5) was not
        # intended.
        for i in range(0,min(nim-1,5)):
            row=[]
            for name in self.visual_names:
                if isinstance(name, str):
                    if hasattr(self,name):
                        im = util.tensor2im(getattr(self, name).data[i:i+1,:,:,:])
                        row.append(im)
            row=tuple(row)
            row = np.hstack(row)
            if hasattr(self,'isreal'):
                if self.isreal[i] == 0:
                    # crop 5px then re-expand with green to draw the border
                    row = ImageOps.crop(Image.fromarray(row),border =5)
                    row = ImageOps.expand(row,border=5,fill=(0,200,0))
                    row = np.asarray(row)
            all.append(row)
        all = tuple(all)
        allim = np.vstack(all)
        return OrderedDict([(self.opt.name,allim)])
    def get_prediction(self,input):
        """Inference path: run G at 256x256, average its spatial output
        into 6 parameters, relight, predict the matte and return the final
        composite as a numpy image in a dict under 'final'."""
        self.input_img = input['A'].to(self.device)
        self.shadow_mask = input['B'].to(self.device)
        inputG = torch.cat([self.input_img,self.shadow_mask],1)
        self.shadow_mask = (self.shadow_mask>0.9).type(torch.float)*2-1
        self.shadow_mask_3d= (self.shadow_mask>0).type(torch.float).expand(self.input_img.shape)
        # NOTE(review): F.upsample is deprecated in modern PyTorch in
        # favour of F.interpolate.
        inputG = F.upsample(inputG,size=(256,256))
        self.shadow_param_pred = self.netG(inputG)
        w = self.input_img.shape[2]
        h = self.input_img.shape[3]
        n = self.input_img.shape[0]
        m = self.input_img.shape[1]
        # spatially average the network output into 6 scalars per sample
        self.shadow_param_pred = self.shadow_param_pred.view([n,6,-1])
        self.shadow_param_pred = torch.mean(self.shadow_param_pred,dim=2)
        # remap the multiplier channels from [-1, 1] to [1, 5]
        self.shadow_param_pred[:,[1,3,5]] = (self.shadow_param_pred[:,[1,3,5]]*2)+3
        self.lit = self.input_img.clone()/2+0.5
        add = self.shadow_param_pred[:,[0,2,4]]
        mul = self.shadow_param_pred[:,[1,3,5]]
        #mul = (mul +2) * 5/3
        n = self.shadow_param_pred.shape[0]
        add = add.view(n,3,1,1).expand((n,3,w,h))
        mul = mul.view(n,3,1,1).expand((n,3,w,h))
        self.lit = self.lit*mul + add
        self.out = (self.input_img/2+0.5)*(1-self.shadow_mask_3d) + self.lit*self.shadow_mask_3d
        self.out = self.out*2-1
        inputM = torch.cat([self.input_img,self.lit,self.shadow_mask],1)
        self.alpha_pred = self.netM(inputM)
        self.alpha_pred = (self.alpha_pred +1) /2
        #self.alpha_pred_3d= self.alpha_pred.repeat(1,3,1,1)
        self.final = (self.input_img/2+0.5)*(1-self.alpha_pred) + self.lit*self.alpha_pred
        self.final = self.final*2-1
        RES = dict()
        RES['final']= util.tensor2im(self.final,scale =0)
        #RES['phase1'] = util.tensor2im(self.out,scale =0)
        #RES['param']= self.shadow_param_pred.detach().cpu()
        #RES['matte'] = util.tensor2im(self.alpha_pred.detach().cpu()/2,scale =0)
        '''
        ###EVAL on original size
        input_img_ori = input['A_ori'].to(self.device)
        input_img_ori = input_img_ori/2+0.5
        lit_ori = input_img_ori
        w = input_img_ori.shape[2]
        h = input_img_ori.shape[3]
        add = self.shadow_param_pred[:,[0,2,4]]
        mul = self.shadow_param_pred[:,[1,3,5]]
        #mul = (mul +2) * 5/3
        n = self.shadow_param_pred.shape[0]
        add = add.view(n,3,1,1).expand((n,3,w,h))
        mul = mul.view(n,3,1,1).expand((n,3,w,h))
        lit_ori = lit_ori*mul + add
        alpha_pred = F.upsample(self.alpha_pred,(w,h),mode='bilinear',align_corners=True)
        final  = input_img_ori * (1-alpha_pred) + lit_ori*(alpha_pred)
        final = final*2 -1
        RES['ori_Size'] = util.tensor2im(final.detach().cpu())
        '''
        return RES
| {"/src/models/SIDPAMI_model.py": ["/src/models/distangle_model.py"]} |
68,097 | WeiGai/SID | refs/heads/main | /src/models/vgg.py | from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
def set_parameter_requires_grad(model, feature_extracting):
    """Freeze every parameter of `model` when feature extraction is
    requested; otherwise leave the model untouched."""
    if not feature_extracting:
        return
    for param in model.parameters():
        param.requires_grad = False
def create_vgg(num_ic,num_classes,use_pretrained=False,feature_extract=False):
    """Build a VGG-16 variant with `num_ic` input channels and
    `num_classes` outputs squashed into [-1, 1] by a final Tanh.

    use_pretrained  -- load ImageNet weights for the unchanged layers
    feature_extract -- freeze the pretrained parameters before the swaps
    (the replacement layers are created afterwards, so they stay trainable)
    """
    model_ft = models.vgg16(pretrained=use_pretrained)
    set_parameter_requires_grad(model_ft, feature_extract)
    # replace the last classifier layer to match the requested class count
    num_ftrs = model_ft.classifier[6].in_features
    model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)
    # swap the first conv so the network accepts num_ic input channels
    features = list(model_ft.features)
    features[0] = nn.Conv2d(num_ic, 64, kernel_size=3, padding=1)
    model_ft.features = nn.Sequential(*features)
    # append a Tanh so classifier outputs lie in [-1, 1]
    classifier = list(model_ft.classifier)
    classifier.append(nn.Tanh())
    model_ft.classifier = nn.Sequential(*classifier)
    # dead local `input_size = 224` removed: it was never used or returned
    return model_ft
if __name__=='__main__':
    # Smoke test: build a 4-channel-input, 6-output VGG and run a dummy
    # 128x128 batch through it.
    a = create_vgg(4,6)
    print(a)
    inp = torch.ones((1,4,128,128))
    print(a(inp).shape)
| {"/src/models/SIDPAMI_model.py": ["/src/models/distangle_model.py"]} |
68,098 | WeiGai/SID | refs/heads/main | /src/models/distangle_model.py | import torch
from util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks
import util.util as util
class DistangleModel(BaseModel):
    """Phase-1 model: netG regresses the 6 relighting parameters
    (add/mul per RGB channel) from a shadow image + mask and the image is
    recomposed by relighting inside the binary mask only."""
    def name(self):
        # Human-readable identifier used by the framework for logging.
        return 'DistangleModel'
    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        """Register model-specific command-line options and defaults."""
        parser.set_defaults(pool_size=0, no_lsgan=True, norm='batch')
        parser.set_defaults(dataset_mode='aligned')
        parser.set_defaults(netG='RESNEXT')
        if is_train:
            parser.add_argument('--lambda_L1', type=float, default=100.0, help='weight for L1 loss')
        return parser
    def initialize(self, opt):
        """Build netG, the loss functions and the optimizer."""
        BaseModel.initialize(self, opt)
        self.isTrain = opt.isTrain
        self.loss_names = ['G']
        # specify the images you want to save/display. The program will call base_model.get_current_visuals
        self.visual_names = ['input_img', 'shadow_mask','out','outgt']
        # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks
        if self.isTrain:
            self.model_names = ['G']
        else:  # during test time, only load Gs
            self.model_names = ['G']
        # load/define networks
        opt.output_nc= 3 if opt.task=='sr' else 1 #3 for shadow removal, 1 for detection
        self.netG = networks.define_G(4, opt.output_nc, opt.ngf, 'RESNEXT', opt.norm,
                                      not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
        self.netG.to(self.device)
        print(self.netG)
        #self.netG.print_networks()
        if self.isTrain:
            self.fake_AB_pool = ImagePool(opt.pool_size)
            # define loss functions
            self.criterionL1 = torch.nn.L1Loss()
            self.bce = torch.nn.BCEWithLogitsLoss()
            # initialize optimizers
            self.optimizers = []
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(),
                                                lr=opt.lr, betas=(opt.beta1, 0.999),weight_decay=1e-5)
            self.optimizers.append(self.optimizer_G)
    def set_input(self, input):
        """Move one batch to the device and binarise the mask to {-1, 1}."""
        self.input_img = input['A'].to(self.device)
        self.shadow_mask = input['B'].to(self.device)
        self.shadow_param = input['param'].to(self.device).type(torch.float)
        self.shadow_mask = (self.shadow_mask>0.9).type(torch.float)*2-1
        self.nim = self.input_img.shape[1]
        self.shadowfree_img = input['C'].to(self.device)
        self.shadow_mask_3d= (self.shadow_mask>0).type(torch.float).expand(self.input_img.shape)
    def forward(self):
        """Regress the parameters and build the predicted (`out`) and
        ground-truth (`outgt`) relit composites."""
        inputG = torch.cat([self.input_img,self.shadow_mask],1)
        self.Gout = self.netG(inputG)
        self.lit = self.input_img.clone()/2+0.5
        # even indices are additive terms, odd indices multiplicative
        add = self.Gout[:,[0,2,4]]
        mul = self.Gout[:,[1,3,5]]
        n = self.Gout.shape[0]
        # NOTE(review): the spatial size is hard-coded to 256x256 here.
        add = add.view(n,3,1,1).expand((n,3,256,256))
        mul = mul.view(n,3,1,1).expand((n,3,256,256))
        addgt = self.shadow_param[:,[0,2,4]]
        mulgt = self.shadow_param[:,[1,3,5]]
        addgt = addgt.view(n,3,1,1).expand((n,3,256,256))
        mulgt = mulgt.view(n,3,1,1).expand((n,3,256,256))
        self.litgt = (self.input_img.clone()+1)/2
        self.lit = self.lit*mul + add
        self.litgt = self.litgt*mulgt+addgt
        # composite the relit image inside the binary mask only
        self.out = (self.input_img/2+0.5)*(1-self.shadow_mask_3d) + self.lit*self.shadow_mask_3d
        self.out = self.out*2-1
        self.outgt = (self.input_img/2+0.5)*(1-self.shadow_mask_3d) + self.litgt*self.shadow_mask_3d
        self.outgt = self.outgt*2-1
        # per-pixel shadow matte implied by the GT shadow-free image
        self.alpha = torch.mean(self.shadowfree_img / self.lit,dim=1,keepdim=True)
    def get_prediction(self,input):
        """Inference: return the relit composite as a numpy image."""
        self.input_img = input['A'].to(self.device)
        self.shadow_mask = input['B'].to(self.device)
        inputG = torch.cat([self.input_img,self.shadow_mask],1)
        self.shadow_mask = (self.shadow_mask>0.9).type(torch.float)*2-1
        self.shadow_mask_3d= (self.shadow_mask>0).type(torch.float).expand(self.input_img.shape)
        self.Gout = self.netG(inputG)
        self.lit = self.input_img.clone()/2+0.5
        add = self.Gout[:,[0,2,4]]
        mul = self.Gout[:,[1,3,5]]
        n = self.Gout.shape[0]
        add = add.view(n,3,1,1).expand((n,3,256,256))
        mul = mul.view(n,3,1,1).expand((n,3,256,256))
        self.lit = self.lit*mul + add
        self.out = (self.input_img/2+0.5)*(1-self.shadow_mask_3d) + self.lit*self.shadow_mask_3d
        self.out = self.out*2-1
        return util.tensor2im(self.out,scale =0)
    def backward_G(self):
        """Loss on the regressed parameters: weighted L1 for shadow
        removal ('sr'), BCE-with-logits otherwise (detection)."""
        criterion = self.criterionL1 if self.opt.task =='sr' else self.bce
        lambda_ = self.opt.lambda_L1 if self.opt.task =='sr' else 1
        self.loss_G = criterion(self.Gout, self.shadow_param) * lambda_
        self.loss_G.backward()
    def optimize_parameters(self):
        """One optimisation step for netG."""
        self.forward()
        self.optimizer_G.zero_grad()
        self.backward_G()
        self.optimizer_G.step()
if __name__=='__main__':
    # Standalone smoke test for DistangleModel with a hand-built option set.
    # `argparse` was used here without ever being imported, which raised a
    # NameError when the module was run directly; import it locally.
    import argparse
    parser = argparse.ArgumentParser()
    opt = parser.parse_args()
    opt.dataroot = '=/nfs/bigbox/hieule/GAN/datasets/ISTD_Dataset/train/train_'
    opt.name = 'test'
    opt.model = 'jointdistangle'
    opt.gpu_ids=[2]
    opt.log_scale = 0
    opt.ndf = 32
    opt.ngf = 64
    opt.norm ='batch'
    opt.checkpoints_dir ='/nfs/bigbox/hieule/GAN/data/test'
    opt.isTrain = False
    opt.resize_or_crop = 'none'
    opt.loadSize = 256
    opt.init_type = 'xavier'
    opt.init_gain = 0.02
    opt.fineSize = 256
    opt.nThreads = 1   # test code only supports nThreads = 1
    opt.batchSize = 1  # test code only supports batchSize = 1
    opt.serial_batches = False  # no shuffle
    opt.no_flip = True  # no flip
    opt.no_dropout = True
    opt.use_our_mask = True
    opt.task ='sr'
    a = DistangleModel()
    a.initialize(opt)
| {"/src/models/SIDPAMI_model.py": ["/src/models/distangle_model.py"]} |
68,099 | WeiGai/SID | refs/heads/main | /src/models/loss_function.py | import torch
from torch import nn
import torch.nn.functional as F
def smooth_loss(pred_map):
    """Second-order smoothness penalty on a (N, C, H, W) map.

    Sums the mean absolute values of the four second-order finite
    differences (dxx, dxy, dyx, dyy); zero for constant or linear maps.
    """
    def finite_diff(t):
        # dy: difference along dim 2 (height), dx: along dim 3 (width)
        d_dy = t[:, :, 1:] - t[:, :, :-1]
        d_dx = t[:, :, :, 1:] - t[:, :, :, :-1]
        return d_dx, d_dy

    weight = 1.
    dx, dy = finite_diff(pred_map)
    dx2, dxdy = finite_diff(dx)
    dydx, dy2 = finite_diff(dy)
    second_order = (dx2, dxdy, dydx, dy2)
    return sum(term.abs().mean() for term in second_order) * weight
| {"/src/models/SIDPAMI_model.py": ["/src/models/distangle_model.py"]} |
68,100 | WeiGai/SID | refs/heads/main | /src/data/single_dataset.py | import os.path
from PIL import ImageFilter
import torchvision.transforms as transforms
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset
from PIL import Image
import numpy as np
import torch
class SingleDataset(BaseDataset):
    """Test-time dataset: an image folder plus a parallel folder of shadow
    masks (opt.mask_test) with matching file names; yields resized tensors
    in [-1, 1] ready for model.get_prediction()."""
    def __init__(self, dataroot,opt):
        self.opt = opt
        self.root = dataroot
        self.dir_A = os.path.join(dataroot)
        # masks live in a separate directory with the same file names
        self.dir_B = opt.mask_test
        print('A path %s'%self.dir_A)
        self.A_paths,self.imname = make_dataset(self.dir_A)
        self.B_paths,tmp = make_dataset(self.dir_B)
        self.A_paths = sorted(self.A_paths)
        self.B_paths = sorted(self.B_paths)
        self.imname = sorted(self.imname)
        self.transformB = transforms.Compose([transforms.ToTensor()])
    def __getitem__(self, index):
        """Return a dict with 'A' (image), 'B' (binarised mask), 'A_ori'
        (full-resolution image), plus path, name and original size."""
        imname = self.imname[index]
        A_path= os.path.join(self.dir_A,imname)
        B_path= os.path.join(self.dir_B,imname)
        A_img = Image.open(A_path).convert('RGB')
        # fall back to a .png mask when the extension differs
        if not os.path.isfile(B_path):
            B_path=B_path[:-4]+'.png'
        B_img = Image.open(B_path).convert('L')
        ow = A_img.size[0]
        oh = A_img.size[1]
        loadsize = self.opt.fineSize if hasattr(self.opt,'fineSize') else 256
        A_img_ori = A_img
        A_img = A_img.resize((loadsize,loadsize))
        B_img = B_img.resize((loadsize,loadsize))
        # HWC uint8 -> CHW float in [0, 1]
        A_img = torch.from_numpy(np.asarray(A_img,np.float32).transpose(2,0,1)).div(255)
        A_img_ori = torch.from_numpy(np.asarray(A_img_ori,np.float32).transpose(2,0,1)).div(255)
        B_img = self.transformB(B_img)
        # rescale everything from [0, 1] to [-1, 1]
        B_img = B_img*2-1
        A_img = A_img*2-1
        A_img_ori = A_img_ori*2-1
        # add a batch dimension so get_prediction() can consume it directly
        A_img = A_img.unsqueeze(0)
        A_img_ori = A_img_ori.unsqueeze(0)
        B_img = B_img.unsqueeze(0)
        # binarise: threshold the [-1, 1] mask at 0.2, map to {-1, 1}
        B_img = (B_img>0.2).type(torch.float)*2-1
        return {'A': A_img,'B':B_img,'A_ori':A_img_ori, 'A_paths': A_path,'imname':imname,'w':ow,'h':oh}
    def __len__(self):
        # number of images in the input folder
        return len(self.A_paths)
    def name(self):
        # Human-readable identifier of this dataset class.
        return 'SingleImageDataset'
| {"/src/models/SIDPAMI_model.py": ["/src/models/distangle_model.py"]} |
68,101 | WeiGai/SID | refs/heads/main | /src/infer.py | import sys
import time
import torch
import os
from options.test_options import TestOptions
from models import create_model
from data.single_dataset import SingleDataset
from util.visualizer import Visualizer
from pdb import set_trace as st
from util import html
import util.util as util
import numpy as np
from PIL import Image
from util.util import sdmkdir
class Predict:
    """Inference wrapper: builds the model from test-time options and runs
    shadow removal over a folder of images, saving results to disk."""
    def __init__(self,opt):
        # hard-coded inference configuration overrides
        opt.gpu_ids=[3]
        opt.checkpoints_dir ='../checkpoints/'
        opt.netG = 'RESNEXT'
        opt.fineSize = 256
        opt.loadSize = 256
        opt.isTrain = False
        model = create_model(opt)
        model.setup(opt)
        model.eval()
        self.model = model
        self.opt = opt
    def ISTD_test(self):
        """Run the model on the ISTD test split with precomputed CRF masks
        and save only the final outputs."""
        opt = self.opt
        opt.mask_test = '/nfs/bigneuron/add_disk0/hieule/data/datasets/ISTD_Dataset/Mean_Teacher_SD/ISTD_crf'
        dataset = SingleDataset('/nfs/bigneuron/add_disk0/hieule/data/datasets/ISTD_Dataset/test/test_A',opt)
        opt.results_dir ='./ISTD_b/'
        self.eval_backend_output_only(dataset,opt)
    def eval_backend_output_only(self,dataset,opt):
        """Save only the 'final' shadow-free image for every sample."""
        opt.fresdir =opt.results_dir
        if not os.path.exists(opt.fresdir):
            os.makedirs(opt.fresdir)
        # NOTE(review): dataset.name is a bound method here, so its repr is
        # printed; dataset.name() is probably what was meant.
        print("%s: ; %d" %(dataset.name,len(dataset)))
        st = time.time()
        for i, data in enumerate(dataset):
            sd = self.model.get_prediction(data)
            resname = (data['imname'])
            if isinstance(sd,dict):
                for k in sd:
                    if k == 'final':
                        im = Image.fromarray(sd[k])
                        im.save(os.path.join(opt.fresdir,resname))
    def eval_backend(self,dataset,opt):
        """Save every output the model returns (final image, params,
        matte, ...), each kind in its own sub-directory."""
        opt.resdir = '//'+opt.name+'/'+opt.epoch +'/'+str(opt.loadSize)+'/'
        opt.fresdir =opt.results_dir+ '/' + opt.resdir
        evaldir = opt.fresdir+'/final/'
        if not os.path.exists(opt.fresdir):
            os.makedirs(opt.fresdir)
        # NOTE(review): same bound-method print as above.
        print("%s: ; %d" %(dataset.name,len(dataset)))
        st = time.time()
        for i, data in enumerate(dataset):
            sd = self.model.get_prediction(data)
            resname = (data['imname'])
            if isinstance(sd,dict):
                evaldir = opt.fresdir+'/final/'
                for k in sd:
                    if k == 'param':
                        # the 6 parameters are written as one text file per image
                        sdmkdir(os.path.join(opt.fresdir,k))
                        np.savetxt(os.path.join(opt.fresdir,k,resname+'.txt'), sd[k], delimiter=' ')
                    else:
                        sdmkdir(os.path.join(opt.fresdir,k))
                        im = Image.fromarray(sd[k])
                        im.save(os.path.join(opt.fresdir,k,resname))
            else:
                # single-image return: save directly into the results dir
                evaldir = opt.fresdir
                sd = Image.fromarray(sd)
                sd.save(os.path.join(opt.fresdir,resname))
if __name__=='__main__':
    # Parse test-time options, build the predictor and run the ISTD split.
    opt = TestOptions().parse() #args.parse()
    a= Predict(opt)
    a.ISTD_test()
| {"/src/models/SIDPAMI_model.py": ["/src/models/distangle_model.py"]} |
68,105 | tmurray19/genesysChallenge | refs/heads/master | /game_unit_tests.py | import unittest
from connect_five_logic import Game
from connect_five_errors import BoardFullErorr, NoNumberInputError, NumberOutOfBoundsError
class GameUnitTests(unittest.TestCase):
    """Unit tests for the connect-five Game logic."""
    def setUp(self):
        # a fresh game before every test
        self.g = Game()
    def test_game_setup(self):
        """The game object is constructed successfully."""
        self.assertIsNotNone(self.g)
    def test_board_setup(self):
        """Testing that the board is set up correctly"""
        # Check board is created
        self.assertIsNotNone(self.g.board)
        # check that there are 9 columns
        self.assertEqual(len(self.g.board), 9)
        # check that a column has 6 rows
        self.assertEqual(len(self.g.board[0]), 6)
        # check all columns start out identical; was range(1, 8), which
        # skipped the last column (index 8) entirely
        for i in range(1, 9):
            self.assertEqual(self.g.board[i], self.g.board[i-1])
    def test_add_to_board(self):
        """A dropped chip lands at the bottom of its column."""
        # Add a chip to the first column of the board
        self.g.insert_chip(1, "RED")
        # Check that chip went to the end of the column (i.e. the bottom left)
        self.assertEqual(self.g.board[0][-1], self.g.chips.get("RED"))
    def test_board_full(self):
        """Testing that the board cannot accept a chip in a column that is full"""
        # Fill the first column completely (6 rows)
        self.g.insert_chip(1, "RED")
        self.g.insert_chip(1, "YELLOW")
        self.g.insert_chip(1, "RED")
        self.g.insert_chip(1, "YELLOW")
        self.g.insert_chip(1, "RED")
        self.g.insert_chip(1, "YELLOW")
        # A seventh chip in the same column must raise the board-full error
        self.assertRaises(BoardFullErorr, self.g.insert_chip, 1, "RED")
    def test_out_of_bounds_inputs(self):
        """Testing that errors are being raised when they should be"""
        # Checking that it will only correctly accept number inputs
        self.assertRaises(NoNumberInputError, self.g.insert_chip, "column 1", "RED")
        self.assertRaises(NoNumberInputError, self.g.insert_chip, "garbage input", "RED")
        self.assertRaises(NoNumberInputError, self.g.insert_chip, "fadsaedasgfa", "RED")
        self.assertRaises(NoNumberInputError, self.g.insert_chip, "Here Please", "RED")
        # Check that numbers outside 1..9 won't be accepted
        self.assertRaises(NumberOutOfBoundsError, self.g.insert_chip, 2000000, "RED")
        self.assertRaises(NumberOutOfBoundsError, self.g.insert_chip, -9999999, "RED")
        self.assertRaises(NumberOutOfBoundsError, self.g.insert_chip, 10, "RED")
        self.assertRaises(NumberOutOfBoundsError, self.g.insert_chip, 0, "RED")
    def test_turn_swaps(self):
        """Checking that turn swapping is being correctly handled"""
        self.assertEqual(self.g.get_current_turn(), "RED")
        self.g.insert_chip(1, "RED")
        self.assertEqual(self.g.get_current_turn(), "YELLOW")
    def tearDown(self):
        # drop the game instance so each test starts clean
        self.g=None
if __name__ == "__main__":
    # Allow running the suite directly: `python game_unit_tests.py`
    unittest.main()
68,106 | tmurray19/genesysChallenge | refs/heads/master | /connect_five_logic.py | # Define constants to print out board to players
from connect_five_errors import BoardFullErorr, NoNumberInputError, NumberOutOfBoundsError, GameWonException
class Game:
    """
    Game class will contain game logic;
    - placing chips into board
    - checking board for winner
    - building and storing board memory
    """
    def __init__(self):
        """Constructor defines amount of rows, columns, and the board as a 2d array"""
        self.columns = 9
        self.rows = 6
        self.has_winner = False
        self.current_turn = 'RED'
        self.chips = {
            "EMPTY" : '[ ]',
            "RED": '[X]',
            "YELLOW": '[O]'
        }
        # Column-major board: board[col][row], index -1 is the bottom slot
        self.board = [[self.chips.get("EMPTY")] * self.rows for _ in range(self.columns)]

    def get_has_winner(self):
        """Return True once a winning line has been found."""
        return self.has_winner

    def get_current_turn(self):
        """Return the colour ('RED'/'YELLOW') whose move is next."""
        return self.current_turn

    def insert_chip(self, column, colour):
        """Drop a chip of `colour` into the 1-indexed `column`.

        Raises:
            NoNumberInputError: `column` is not parseable as an integer.
            NumberOutOfBoundsError: `column` is outside 1..columns.
            BoardFullErorr: the chosen column is already full.
            GameWonException: the move wins the game for `colour`.
        """
        try:
            print(f'{self.get_current_turn()} is making a move')
            col_number = int(column)  # may raise ValueError
            # Explicit range validation. The previous abs(int(column))-1
            # trick let 0 and negative inputs silently wrap around to valid
            # columns via Python's negative list indexing instead of
            # raising NumberOutOfBoundsError.
            if not 1 <= col_number <= self.columns:
                print("Number input was too large for the board")
                raise NumberOutOfBoundsError('Invalid number for board size')
            col = self.board[col_number - 1]
            chip = self.chips.get(colour)
            # If the top of the column is not empty, we can't place a chip here
            if col[0] != self.chips.get("EMPTY"):
                print("Column is full")
                raise BoardFullErorr('Column is full')
            # Move through the column bottom to top until an empty slot is found
            i = -1
            while col[i] != self.chips.get("EMPTY"):
                i -= 1
            # Place chip in empty slot
            col[i] = chip
            # Check to see if this move produced a winner
            if self.check_for_winner(chip):
                raise GameWonException(f"{colour} has won!!")
            self.current_turn = 'RED' if colour == 'YELLOW' else 'YELLOW'
        except ValueError:
            print("Value error has occured, user did not enter number")
            raise NoNumberInputError('Player did not enter number')

    def print_board(self):
        """Builds board and prints in console"""
        rendered_rows = []
        for row in range(self.rows):
            rendered_rows.append(
                ' '.join(str(self.board[col][row]) for col in range(self.columns))
            )
        # trailing newline matches the original row-by-row concatenation
        return '\n'.join(rendered_rows) + '\n'

    def check_for_winner(self, chip):
        """Scan all lines (horizontal, vertical, both diagonals) for five
        consecutive `chip` markers; sets has_winner and returns True when
        found, otherwise returns False."""
        # Horizontal: 5 consecutive columns on the same row
        for col in range(self.columns - 4):
            for row in range(self.rows):
                if all(self.board[col + k][row] == chip for k in range(5)):
                    self.has_winner = True
                    return True
        # Vertical: 5 consecutive rows in the same column
        for col in range(self.columns):
            for row in range(self.rows - 4):
                if all(self.board[col][row + k] == chip for k in range(5)):
                    self.has_winner = True
                    return True
        # Positive diagonal: col and row both increasing
        for col in range(self.columns - 4):
            for row in range(self.rows - 4):
                if all(self.board[col + k][row + k] == chip for k in range(5)):
                    self.has_winner = True
                    return True
        # Negative diagonal: col increasing while row decreases, so only
        # the first columns-4 columns and rows >= 4 can start a line.
        # The previous loops ran over every (col, row), which raised
        # IndexError past column 4 and silently wrapped to the wrong cells
        # (negative row indices) for row < 4, producing false positives.
        for col in range(self.columns - 4):
            for row in range(4, self.rows):
                if all(self.board[col + k][row - k] == chip for k in range(5)):
                    self.has_winner = True
                    return True
        return False
| {"/game_unit_tests.py": ["/connect_five_logic.py", "/connect_five_errors.py"], "/connect_five_logic.py": ["/connect_five_errors.py"], "/game_server.py": ["/connect_five_logic.py", "/connect_five_errors.py"]} |
68,107 | tmurray19/genesysChallenge | refs/heads/master | /connect_five_errors.py | # Some custom errors for invalid game states
class InvalidGameStateError(Exception):
    """Root of the game's exception hierarchy; catch this to handle any game error."""
class BoardFullErorr(InvalidGameStateError):
    """Raised when a chip is dropped into a column that is already full.

    NOTE: the class name misspells "Error"; it is kept so existing callers
    keep working, and a correctly spelled alias is provided below.
    """


# Backward-compatible, correctly spelled alias for new code.
BoardFullError = BoardFullErorr
class NoNumberInputError(InvalidGameStateError):
    """Raised when the player's input cannot be parsed as a number."""
class NumberOutOfBoundsError(InvalidGameStateError):
    """Raised when the chosen column number falls outside the board's bounds."""
class GameWonException(InvalidGameStateError):
    """Raised to signal that a player has completed five in a row.

    NOTE(review): a win is a normal outcome, yet this subclasses
    InvalidGameStateError -- callers catching the base class for *errors*
    will also intercept wins; consider a separate base. Confirm before
    changing, since the server relies on the current hierarchy.
    """
pass | {"/game_unit_tests.py": ["/connect_five_logic.py", "/connect_five_errors.py"], "/connect_five_logic.py": ["/connect_five_errors.py"], "/game_server.py": ["/connect_five_logic.py", "/connect_five_errors.py"]} |
68,108 | tmurray19/genesysChallenge | refs/heads/master | /game_server.py | ## Multithreaded socket server to read inputs from clients for connect five game
import socket
from threading import Thread
import connect_five_logic
from connect_five_errors import GameWonException, InvalidGameStateError
import time
# Server bind address; loopback only -- remote players cannot connect as-is.
host = '127.0.0.1'
port = 5000
# Tracks whether the RED colour has already been handed to a connected player.
red_taken = False
# Number of currently connected clients (shared with the Client threads).
count = 0
# Create the listening socket.
# AF_INET selects IPv4 addressing; SOCK_STREAM selects TCP (a byte stream).
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Try to bind the socket to the host and port.
# NOTE(review): a bind failure is only printed -- the script then continues
# and later calls listen() on an unbound socket instead of exiting.
try:
    server_socket.bind((host,port))
except socket.error as e:
    print(str(e))
class Client(Thread):
    """One connected player.

    Each instance owns an accepted client socket and runs the whole game
    conversation for that player on its own thread.  Shared state -- the
    module-level ``count`` of live clients, the ``g`` game instance and the
    listening ``server_socket`` -- lives at module scope.
    """

    def __init__(self, socket, address, colour):
        # `socket` here is the accepted connection object, not the module.
        Thread.__init__(self)
        self.socket = socket
        self.address = address
        self.player_name = None
        self.player_colour = colour
        # Start the thread immediately; run() drives the whole session.
        self.start()

    def run(self):
        """Drive the request/response loop for this player until the game ends."""
        global count
        try:
            while True:
                server_msg = b''
                # First contact: ask for and record the player's name.
                if self.player_name == None:
                    self.socket.send(f'Hello, [USER]. Please enter your name:'.encode())
                    self.player_name = self.socket.recv(1024).decode()
                # Block until a second client connects.
                while count < 2:
                    self.socket.send(f'Hi {self.player_name}. Please wait, we are looking for an opponent...'.encode())
                    time.sleep(5)
                server_msg += f'Opponent found, beginning game. Your colour is {self.player_colour}({g.chips.get(self.player_colour)}).'.encode()
                # Main game loop: runs until the shared game reports a winner.
                while not g.get_has_winner():
                    # Only the player whose turn it is may submit a move; a
                    # legal move is a column number 1-9 with space remaining.
                    while g.get_current_turn() == self.player_colour:
                        server_msg += f'\n{g.print_board()}'.encode()
                        server_msg += f"\nIt's your turn {self.player_name}. You are {self.player_colour}({g.chips.get(self.player_colour)}) Pick a column to place your chip (1-9):".encode()
                        self.socket.sendall(server_msg)
                        server_msg = b''
                        try:
                            g.insert_chip(self.socket.recv(1024), self.player_colour)
                        except GameWonException as winner:
                            print(winner)
                            break
                        except InvalidGameStateError as e:
                            # Illegal move: report it and let the player retry.
                            print(e)
                            server_msg += f"Error occured: {e}".encode()
                        # Opponent dropped mid-game: tell this player and stop.
                        if count < 2:
                            server_msg += "Error, opponent disconnected. Game has been abruptly ended".encode()
                            self.socket.sendall(server_msg)
                            server_msg = b''
                            break
                    # while/else: it is not our turn -- show the board and poll.
                    else:
                        server_msg += f'\n{g.print_board()}'.encode()
                        server_msg += f"\nHi {self.player_name}. You are {self.player_colour}({g.chips.get(self.player_colour)}). Please wait, your opponent is making their move...".encode()
                        self.socket.sendall(server_msg)
                        server_msg = b''
                        time.sleep(5)
                # while/else: the game-over loop exited normally, i.e. a winner
                # exists; announce it and leave the outer session loop.
                else:
                    self.socket.send(f'{g.print_board()}\n{g.get_current_turn()} has won! Congratulations.'.encode())
                    print("Game is over, closing")
                    break
                # NOTE(review): closes the *shared* listening socket from a
                # client thread; with the current control flow this line is
                # only reachable if the loop above is left via break -- confirm.
                server_socket.close()
        # Each handler decrements the live-client counter so the peer can
        # detect the disconnect via the `count < 2` check above.
        except ConnectionResetError as e:
            print(e)
            print("Client unexpectedly disconnected, breaking connection")
            count -= 1
        except ConnectionAbortedError as e:
            print(e)
            print("Client closed unexpectedly")
            count -= 1
        # BUG FIX: the original wrote `except ValueError:` and then printed
        # `e`, which raised NameError inside the handler; bind the exception.
        except ValueError as e:
            print(e)
            print("Value error occured, closing connection")
            count -= 1
# Accept at most two queued connections -- one per player.
server_socket.listen(2)
# Create a new thread to handle each new connection
print("Server started")
# Single shared game instance used by both client threads.
g = connect_five_logic.Game()
while True:
    # Accept a client; OSError here typically means the listening socket
    # was closed (a Client thread calls server_socket.close() at game end).
    try:
        client_socket, address = server_socket.accept()
    except OSError as e:
        print(f"Error occured: {e}")
        print("Closing server")
        break
    # increment the player counter
    count+=1
    # First connection gets RED, second gets YELLOW.
    # NOTE(review): resetting red_taken to False on the second connection
    # means any third connection would be handed RED again mid-game.
    if not red_taken:
        colour = 'RED'
        red_taken = True
    else:
        colour = 'YELLOW'
        red_taken = False
    # Create a new client instance (the thread starts itself in __init__).
    Client(client_socket, address, colour)
| {"/game_unit_tests.py": ["/connect_five_logic.py", "/connect_five_errors.py"], "/connect_five_logic.py": ["/connect_five_errors.py"], "/game_server.py": ["/connect_five_logic.py", "/connect_five_errors.py"]} |
68,109 | tmurray19/genesysChallenge | refs/heads/master | /game_client.py | import socket
import time
# Create a new socket
# AF_INET is the address field for IPv4
# SOCK_STREAM is to input a strema of data
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def client(host = '127.0.0.1', port = 5000):
    """Connect to the game server and relay its prompts to the terminal.

    The protocol is driven entirely by sniffing substrings of the server's
    text: 'wait' means display-only, 'Congratulations'/'abruptly' mean the
    session is over, anything else is a prompt that requires user input.
    NOTE(review): this substring matching is fragile -- any prompt that
    happens to contain 'wait' would be misclassified as display-only.
    """
    # Connect client to server
    client_socket.connect((host,port))
    while True:
        # Get message from server
        server_msg = client_socket.recv(1024).decode()
        # Print the message if the server asks for a wait
        if ('wait' in server_msg):
            print(server_msg)
        # Close the connection if the game is won or an abrupt message is sent in
        elif (('Congratulations' in server_msg) or ('abruptly' in server_msg)):
            print(server_msg)
            print("Game over, closing connection")
            client_socket.close()
            break
        # Otherwise, ask user for input
        else:
            user_in = ""
            # Re-prompt until the user types at least one character.
            while (len(user_in) < 1):
                user_in = input(server_msg)
            client_socket.send(user_in.encode())
# Run the interactive client against the local server when executed directly.
if __name__ == '__main__':
    client()
| {"/game_unit_tests.py": ["/connect_five_logic.py", "/connect_five_errors.py"], "/connect_five_logic.py": ["/connect_five_errors.py"], "/game_server.py": ["/connect_five_logic.py", "/connect_five_errors.py"]} |
68,119 | sizday/educatiON | refs/heads/master | /testing/pictures.py | import cv2
import os
def calc_image_hash(filename):
    """Compute a 64-character average-hash ('aHash') bitstring for an image file."""
    picture = cv2.imread(filename)
    # Shrink to 8x8 and drop colour so the hash ignores fine detail.
    shrunk = cv2.resize(picture, (8, 8), interpolation=cv2.INTER_AREA)
    grayscale = cv2.cvtColor(shrunk, cv2.COLOR_BGR2GRAY)
    # Threshold against the mean brightness: above-average pixels become 255.
    _, bit_plane = cv2.threshold(grayscale, grayscale.mean(), 255, 0)
    # Serialise the 8x8 grid row-major into a '1'/'0' string.
    return "".join(
        "1" if bit_plane[row, col] == 255 else "0"
        for row in range(8)
        for col in range(8)
    )
def compare_hash(hash_1pic, hash_2pic):
    """Return the rounded percentage (0-100) of positions where the two hash strings agree."""
    mismatches = sum(
        hash_1pic[pos] != hash_2pic[pos] for pos in range(len(hash_1pic))
    )
    return round((1 - mismatches / len(hash_1pic)) * 100)
def compare_picture(original, test):
    """Score the similarity of two images (raw bytes) as an integer mark.

    The bytes are written to temporary files, average-hashed with
    calc_image_hash(), and the hash agreement percentage is mapped to a
    mark via (percent // 20) + 1.
    NOTE(review): a 100% match yields mark 6, not 5 -- confirm whether the
    grading scale is intentionally 1-6 here (test.py grades 0-5).

    Fix over the original: unique temp files via tempfile.mkstemp replace
    the fixed names 'origin.png'/'test.png', so concurrent calls no longer
    clobber each other, and cleanup happens even if hashing raises.
    """
    import tempfile  # local import keeps the module's import surface unchanged

    orig_fd, orig_path = tempfile.mkstemp(suffix=".png")
    test_fd, test_path = tempfile.mkstemp(suffix=".png")
    try:
        with os.fdopen(orig_fd, "wb") as original_file:
            original_file.write(original)
        with os.fdopen(test_fd, "wb") as test_file:
            test_file.write(test)
        # Same hashing order as before: submission first, then reference.
        hash1 = calc_image_hash(test_path)
        hash2 = calc_image_hash(orig_path)
    finally:
        os.remove(test_path)
        os.remove(orig_path)
    percent = compare_hash(hash1, hash2)
    mark = (percent // 20) + 1
    return mark
| {"/database/database.py": ["/preload/config.py", "/database/models.py", "/testing/test.py", "/testing/program.py"], "/main.py": ["/preload/config.py", "/database/database.py", "/database/models.py"]} |
68,120 | sizday/educatiON | refs/heads/master | /testing/test.py | def open_file(answer_file, test_file):
answer_text = str(answer_file, 'utf-8')
test_text = str(test_file, 'utf-8')
answer = file_to_list(answer_text)
test = file_to_list(test_text)
return compare_answer(answer, test)
def file_to_list(text):
    """Split *text* on newlines, dropping a single trailing empty entry (from a final '\\n')."""
    entries = text.split('\n')
    if entries[-1] == '':
        return entries[:-1]
    return entries
def compare_answer(answer_list, test_list):
    """Grade a submission against the answer key, returning an integer mark 0-5.

    The mark is the percentage of answer lines matched, integer-divided
    by 20 (100% -> 5).

    Fixes over the original:
    * a submission with fewer lines than the key no longer raises
      IndexError -- missing lines simply count as wrong;
    * an empty answer key returns 0 instead of raising ZeroDivisionError.
    """
    if not answer_list:
        return 0
    # zip() stops at the shorter sequence, so absent submission lines score 0.
    count = sum(1 for expected, got in zip(answer_list, test_list) if expected == got)
    mark = round(count / len(answer_list) * 100) // 20
    return mark
| {"/database/database.py": ["/preload/config.py", "/database/models.py", "/testing/test.py", "/testing/program.py"], "/main.py": ["/preload/config.py", "/database/database.py", "/database/models.py"]} |
68,121 | sizday/educatiON | refs/heads/master | /testing/program.py | import subprocess
import os
def compare_files(answer_file, program_file):
    """Run a submitted Python program and grade its stdout against the answer.

    *answer_file* and *program_file* are raw bytes (expected output and
    program source).  Returns 5 on an exact stdout match, 2 otherwise.
    Raises subprocess.CalledProcessError if the program exits non-zero.

    SECURITY NOTE: this executes arbitrary submitted code in-process
    privileges -- it must only ever run in a sandboxed environment.

    Fixes over the original:
    * runs [sys.executable, path] without shell=True (no shell parsing,
      works where no bare 'python' binary exists);
    * unique temp file via tempfile.mkstemp instead of the fixed name
      'my_program.py' (concurrent calls no longer collide);
    * the temp file is removed even if the program crashes.
    """
    import sys
    import tempfile

    answer_text = str(answer_file, 'utf-8')
    fd, program_path = tempfile.mkstemp(suffix='.py')
    try:
        with os.fdopen(fd, 'wb') as program_handle:
            program_handle.write(program_file)
        data_text = subprocess.check_output([sys.executable, program_path],
                                            encoding='utf-8')
    finally:
        os.remove(program_path)
    return 5 if answer_text == data_text else 2
| {"/database/database.py": ["/preload/config.py", "/database/models.py", "/testing/test.py", "/testing/program.py"], "/main.py": ["/preload/config.py", "/database/database.py", "/database/models.py"]} |
68,122 | sizday/educatiON | refs/heads/master | /preload/config.py | host = '34.89.76.230'
# Postgres connection settings used by database.database.create_db().
# SECURITY NOTE(review): plaintext credentials are committed to source
# control -- load these from environment variables or a secrets manager
# and rotate the exposed password.
db_user = 'sizday'
db_pass = 'dionis0799'
db_name = 'gino'
| {"/database/database.py": ["/preload/config.py", "/database/models.py", "/testing/test.py", "/testing/program.py"], "/main.py": ["/preload/config.py", "/database/database.py", "/database/models.py"]} |
68,123 | sizday/educatiON | refs/heads/master | /database/database.py | from datetime import datetime
from time import mktime
import asyncio
from gino import Gino
from sqlalchemy import sql, Column, Integer, String, Sequence, Boolean, ForeignKey, Binary
from preload.config import db_pass, db_user, host, db_name
from database.models import SubjectByUser
# from testing.pictures import compare_picture
from testing.test import open_file
from testing.program import compare_files
db = Gino()
class User(db.Model):
    """A platform account; teachers and students share this table (see is_teacher)."""
    __tablename__ = 'user'

    id = Column(Integer, Sequence('user_seq'), primary_key=True)
    name = Column(String(200))
    surname = Column(String(200))
    email = Column(String(200))
    # NOTE(review): compared in plaintext by DBCommands.authorisation() --
    # passwords should be hashed.
    password = Column(String(200))
    # Epoch seconds (the seed code below builds it with mktime()).
    birthday = Column(Integer)
    # Raw image bytes; NOTE(review): sqlalchemy.Binary is deprecated in
    # newer SQLAlchemy in favour of LargeBinary -- confirm target version.
    avatar = Column(Binary, default=None)
    is_teacher = Column(Boolean)
    # Annotation for IDE/type-checkers on the Gino-generated query attribute.
    query: sql.Select
class Lesson(db.Model):
    """A single lesson belonging to a subject, with its grading answer key."""
    __tablename__ = 'lesson'

    id = Column(Integer, Sequence('lesson_seq'), primary_key=True)
    subject = Column(ForeignKey('subject.id'))
    title = Column(String(200))
    description = Column(String(200))
    # Epoch seconds (seed code builds it with mktime()).
    date = Column(Integer)
    # Reference answer bytes passed to the graders in DBCommands.update_mark().
    check_file = Column(Binary)
    query: sql.Select
class Subject(db.Model):
    """A course owned by one teacher (``user``) and followed by students."""
    __tablename__ = 'subject'

    id = Column(Integer, Sequence('subject_seq'), primary_key=True)
    title = Column(String(200))
    # Grading strategy selector used by DBCommands.update_mark():
    # 'py' runs the submission, 'test' diffs answer lines ('img' is
    # currently commented out); anything else yields mark None.
    type_checking = Column(String(200))
    # The owning teacher's user id.
    user = Column(ForeignKey('user.id'))
    query: sql.Select
class Follower(db.Model):
    """Enrolment link: a student (``user``) following a ``subject``."""
    __tablename__ = 'follower'

    id = Column(Integer, Sequence('follower.seq'), primary_key=True)
    user = Column(ForeignKey('user.id'))
    subject = Column(ForeignKey('subject.id'))
    # Added for consistency with the other models' query annotations.
    query: sql.Select
class Evaluation(db.Model):
    """A student's grade slot for one lesson; mark stays None until graded."""
    __tablename__ = 'evaluation'

    id = Column(Integer, Sequence('evaluation.seq'), primary_key=True)
    user = Column(ForeignKey('user.id'))
    # None means "not submitted/graded yet" (see get_subject_by_user()).
    mark = Column(Integer, default=None)
    lesson = Column(ForeignKey('lesson.id'))
    query: sql.Select
class DBCommands:
    """All database operations used by the API layer, grouped by feature."""

    # profile
    async def get_user(self, user_id) -> User:
        """Fetch one user by primary key; None if absent."""
        user = await User.query.where(User.id == user_id).gino.first()
        return user

    async def get_subject_by_user(self, user_id) -> SubjectByUser:
        """Summarise the (single) subject a student follows plus lesson progress.

        NOTE(review): unlike get_lessons_by_user() below, this does not guard
        against the user following nothing -- `follower` would be None and
        `follower.subject` raises AttributeError. Consider the same None check.
        """
        follower = await Follower.query.where(Follower.user == user_id).gino.first()
        subject = await Subject.query.where(Subject.id == follower.subject).gino.first()
        evaluations = await Evaluation.query.where(Evaluation.user == user_id).gino.all()
        count_pass_less = count_less = 0
        # A lesson counts as "passed" once its evaluation carries a mark.
        # (the enumerate index `num` is unused)
        for num, evaluation in enumerate(evaluations):
            if evaluation.mark is not None:
                count_pass_less += 1
            count_less += 1
        subject_by_user = SubjectByUser(subject_title=subject.title,
                                        count_passed_lessons=count_pass_less,
                                        count_lessons=count_less)
        return subject_by_user

    async def update_photo(self, user_id, photo):
        """Replace the user's avatar with the given raw image bytes."""
        current_user = await self.get_user(user_id)
        await current_user.update(avatar=photo).apply()

    # registration
    async def create_user(self, name, surname, email, password, birthday, is_teacher=False):
        """Insert and return a new user row (birthday is epoch seconds)."""
        new_user = User()
        new_user.name = name
        new_user.surname = surname
        new_user.email = email
        new_user.password = password
        new_user.birthday = birthday
        new_user.is_teacher = is_teacher
        await new_user.create()
        return new_user

    # authorisation
    async def authorisation(self, email, password):
        """Return the matching user, or implicitly None on bad credentials.

        NOTE(review): compares the plaintext password straight against the
        stored column -- passwords should be hashed and verified instead.
        """
        user = await User.query.where((User.email == email) & (User.password == password)).gino.first()
        if user:
            return user

    # user
    async def get_lessons_by_user(self, user_id):
        """All lessons of the subject the student follows; None if not enrolled."""
        follower = await Follower.query.where(Follower.user == user_id).gino.first()
        if follower is not None:
            subject = await Subject.query.where(Subject.id == follower.subject).gino.first()
            lessons = await Lesson.query.where(Lesson.subject == subject.id).gino.all()
            return lessons

    async def get_evaluations_by_user(self, user_id):
        """All evaluation rows belonging to the given student."""
        evaluation = await Evaluation.query.where(Evaluation.user == user_id).gino.all()
        return evaluation

    # teacher
    async def is_teacher(self, user_id):
        """True if the user row is flagged as a teacher.

        NOTE(review): raises AttributeError for an unknown user_id (get_user
        returns None) -- confirm callers always pass valid ids.
        """
        user = await self.get_user(user_id)
        return user.is_teacher

    async def get_lesson_by_id(self, lesson_id) -> Lesson:
        """Fetch one lesson by primary key; None if absent."""
        lesson = await Lesson.query.where(Lesson.id == lesson_id).gino.first()
        return lesson

    async def get_subject_by_teacher(self, user_id):
        """The (first) subject owned by the teacher; None for non-teachers."""
        if await self.is_teacher(user_id):
            subject = await Subject.query.where(Subject.user == user_id).gino.first()
            return subject

    async def get_lessons_by_teacher(self, user_id):
        """All lessons of the teacher's subject; None for non-teachers."""
        if await self.is_teacher(user_id):
            subject = await self.get_subject_by_teacher(user_id)
            lessons = await Lesson.query.where(Lesson.subject == subject.id).gino.all()
            return lessons

    async def create_lesson(self, user_id, title, description, date, check_file=None):
        """Create a lesson in the teacher's subject and seed an (ungraded)
        Evaluation row for every current follower of that subject."""
        if await self.is_teacher(user_id):
            subject = await self.get_subject_by_teacher(user_id)
            new_lesson = Lesson()
            new_lesson.subject = subject.id
            new_lesson.title = title
            new_lesson.description = description
            new_lesson.date = date
            new_lesson.check_file = check_file
            await new_lesson.create()
            followers = await Follower.query.where(Follower.subject == subject.id).gino.all()
            for num, follower in enumerate(followers):
                new_evaluation = Evaluation(user=follower.user, lesson=new_lesson.id)
                await new_evaluation.create()

    async def create_subject(self, user_id, title, type_checking):
        """Create a subject owned by the teacher (type_checking: 'py'/'test'/...)."""
        if await self.is_teacher(user_id):
            new_subject = Subject()
            new_subject.user = user_id
            new_subject.title = title
            new_subject.type_checking = type_checking
            await new_subject.create()

    async def get_evaluations_by_teacher(self, user_id):
        """A list of evaluation lists, one inner list per lesson of the teacher."""
        if await self.is_teacher(user_id):
            lessons = await self.get_lessons_by_teacher(user_id)
            evaluation = [await Evaluation.query.where(Evaluation.lesson == lesson.id).gino.all()
                          for num, lesson in enumerate(lessons)]
            return evaluation

    async def make_follower(self, user_id, subject_id):
        """Enrol a student in a subject and create an ungraded Evaluation row
        for every lesson that already exists in it."""
        new_follower = Follower(user=user_id, subject=subject_id)
        await new_follower.create()
        lessons = await Lesson.query.where(Lesson.subject == subject_id).gino.all()
        for num, lesson in enumerate(lessons):
            new_evaluation = Evaluation(user=user_id, lesson=lesson.id)
            await new_evaluation.create()

    # testing
    async def update_mark(self, user_id, lesson_id, file):
        """Auto-grade a submission and persist the mark on the evaluation row.

        Dispatches on the subject's type_checking: 'py' executes the
        submission (testing.program), 'test' diffs answer lines
        (testing.test); any other value stores mark None.
        """
        lesson = await Lesson.query.where(Lesson.id == lesson_id).gino.first()
        subject = await Subject.query.where(Subject.id == lesson.subject).gino.first()
        if subject.type_checking == 'py':
            mark = compare_files(lesson.check_file, file)
        # elif subject.type_checking == 'img':
        #     mark = compare_picture(lesson.check_file, file)
        elif subject.type_checking == 'test':
            mark = open_file(lesson.check_file, file)
        else:
            mark = None
        evaluation = await Evaluation.query.where((Evaluation.user == user_id) & (Evaluation.lesson == lesson_id)).gino.first()
        await evaluation.update(mark=mark).apply()
        return mark
async def create_db():
    """Bind Gino to the Postgres database and create any missing tables."""
    # Credentials come from preload.config (currently hard-coded there).
    await db.set_bind(f'postgresql://{db_user}:{db_pass}@{host}/{db_name}')
    # Uncomment to wipe the whole schema before recreating it (destructive!).
    # await db.gino.drop_all()
    await db.gino.create_all()
dbc = DBCommands()
"""
await dbc.create_user('Denis', 'Sizov', 'dsizov1999@mail.ru', 'dionis0799', mktime(datetime(1999, 7, 19).timetuple()),
is_teacher=True)
await dbc.create_user('Andrey', 'Kim', 'kummu-97@mail.ru', 'andrey', mktime(datetime(1997, 7, 8).timetuple()),
is_teacher=True)
await dbc.create_user('Лаптева', 'Надежда', 'test1@mail.ru', 'qwerty', mktime(datetime(2010, 1, 1).timetuple()))
await dbc.create_user('Дружинин', 'Владимир', 'test2@mail.ru', 'qwerty', mktime(datetime(2010, 1, 1).timetuple()))
await dbc.create_user('Анисимов', 'Сергей', 'test3@mail.ru', 'qwerty', mktime(datetime(2010, 1, 1).timetuple()))
await dbc.create_subject(1, 'Python', 'test')
# await dbc.create_subject(2, 'UI/UX', 'img')
await dbc.make_follower(3, 1)
await dbc.make_follower(4, 1)
await dbc.make_follower(5, 1)
file_text = open('../files/test_original.txt', 'rb').read()
file_py = open('../files/program_original.txt', 'rb').read()
await dbc.create_lesson(1, 'Loop FOR', 'Learning cycle with counter', mktime(datetime(2020, 12, 1).timetuple()), file_text)
await dbc.create_lesson(1, 'Loop WHILE', 'Learning cycle with condition', mktime(datetime(2020, 12, 9).timetuple()), file_text)
await dbc.create_lesson(1, 'IF ELSE', 'Conditional operations', mktime(datetime(2020, 12, 15).timetuple()), file_text)
await dbc.create_lesson(1, 'Lambda-function', 'Learning functions using a variable lambda', mktime(datetime(2020, 12, 15).timetuple()), file_text)
"""
asyncio.get_event_loop().run_until_complete(create_db())
| {"/database/database.py": ["/preload/config.py", "/database/models.py", "/testing/test.py", "/testing/program.py"], "/main.py": ["/preload/config.py", "/database/database.py", "/database/models.py"]} |
68,124 | sizday/educatiON | refs/heads/master | /database/models.py | from pydantic import BaseModel
class SubjectByUser(BaseModel):
    """Response schema: a student's subject plus lesson-progress counters
    (produced by DBCommands.get_subject_by_user)."""
    subject_title: str
    count_passed_lessons: int
    count_lessons: int
class MakeFollower(BaseModel):
    """Request schema: enrol a user in a subject."""
    user_id: int
    subject_id: int
class Registration(BaseModel):
    """Request schema for creating an account."""
    name: str
    surname: str
    email: str
    password: str
    # Epoch seconds (matches User.birthday, built with mktime() in the DB seed).
    birthday: int
    is_teacher: bool
class Lesson(BaseModel):
    """Request schema for creating a lesson (user_id must be a teacher)."""
    user_id: int
    title: str
    description: str
    # Epoch seconds.
    date: int
    # Reference answer bytes used later for auto-grading.
    check_file: bytes
class Subject(BaseModel):
    """Request schema for creating a subject (type_checking selects the grader,
    e.g. 'py' or 'test')."""
    user_id: int
    title: str
    type_checking: str
class Mark(BaseModel):
    """Request schema: a submission to auto-grade (consumed by
    DBCommands.update_mark)."""
    user_id: int
    lesson_id: int
    # The raw submitted file bytes.
    file: bytes
class Photo(BaseModel):
    """Request schema: replace a user's avatar with raw image bytes."""
    user_id: int
    photo: bytes
| {"/database/database.py": ["/preload/config.py", "/database/models.py", "/testing/test.py", "/testing/program.py"], "/main.py": ["/preload/config.py", "/database/database.py", "/database/models.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.